/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/uverbs_ioctl.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include <rdma/ocrdma-abi.h>

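/* RoCE exposes a single partition: the only valid pkey index is 0 and it
 * always maps to the default full-membership pkey 0xffff.
 */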
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
        if (index > 0)
                return -EINVAL;

        *pkey = 0xffff;
        return 0;
}

int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
                        struct ib_udata *uhw)
{
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        memset(attr, 0, sizeof *attr);
        memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
               min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
        ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
        attr->max_mr_size = dev->attr.max_mr_size;
        attr->page_size_cap = 0xffff000;
        attr->vendor_id = dev->nic_info.pdev->vendor;
        attr->vendor_part_id = dev->nic_info.pdev->device;
        attr->hw_ver = dev->asic_id;
        attr->max_qp = dev->attr.max_qp;
        attr->max_ah = OCRDMA_MAX_AH;
        attr->max_qp_wr = dev->attr.max_wqe;

        attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
                                        IB_DEVICE_RC_RNR_NAK_GEN |
                                        IB_DEVICE_SHUTDOWN_PORT |
                                        IB_DEVICE_SYS_IMAGE_GUID |
                                        IB_DEVICE_LOCAL_DMA_LKEY |
                                        IB_DEVICE_MEM_MGT_EXTENSIONS;
        attr->max_send_sge = dev->attr.max_send_sge;
        attr->max_recv_sge = dev->attr.max_recv_sge;
        attr->max_sge_rd = dev->attr.max_rdma_sge;
        attr->max_cq = dev->attr.max_cq;
        attr->max_cqe = dev->attr.max_cqe;
        attr->max_mr = dev->attr.max_mr;
        attr->max_mw = dev->attr.max_mw;
        attr->max_pd = dev->attr.max_pd;
        attr->atomic_cap = 0;
        attr->max_fmr = 0;
        attr->max_map_per_fmr = 0;
        attr->max_qp_rd_atom =
            min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
        attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
        attr->max_srq = dev->attr.max_srq;
        attr->max_srq_sge = dev->attr.max_srq_sge;
        attr->max_srq_wr = dev->attr.max_rqe;
        attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
        attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
        attr->max_pkeys = 1;
        return 0;
}

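/* Map the PHY link speed reported by firmware to the closest IB
 * speed/width pair; unknown or link-down speeds fall back to SDR x1.
 */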
static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
                                            u8 *ib_speed, u8 *ib_width)
{
        int status;
        u8 speed;

        status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
        if (status)
                speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

        switch (speed) {
        case OCRDMA_PHYS_LINK_SPEED_1GBPS:
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_10GBPS:
                *ib_speed = IB_SPEED_QDR;
                *ib_width = IB_WIDTH_1X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_20GBPS:
                *ib_speed = IB_SPEED_DDR;
                *ib_width = IB_WIDTH_4X;
                break;

        case OCRDMA_PHYS_LINK_SPEED_40GBPS:
                *ib_speed = IB_SPEED_QDR;
                *ib_width = IB_WIDTH_4X;
                break;

        default:
                /* Unsupported */
                *ib_speed = IB_SPEED_SDR;
                *ib_width = IB_WIDTH_1X;
        }
}

int ocrdma_query_port(struct ib_device *ibdev,
                      u8 port, struct ib_port_attr *props)
{
        enum ib_port_state port_state;
        struct ocrdma_dev *dev;
        struct net_device *netdev;

        /* props is zeroed by the caller; avoid zeroing it here */
        dev = get_ocrdma_dev(ibdev);
        netdev = dev->nic_info.netdev;
        if (netif_running(netdev) && netif_oper_up(netdev)) {
                port_state = IB_PORT_ACTIVE;
                props->phys_state = 5;
        } else {
                port_state = IB_PORT_DOWN;
                props->phys_state = 3;
        }
        props->max_mtu = IB_MTU_4096;
        props->active_mtu = iboe_get_mtu(netdev->mtu);
        props->lid = 0;
        props->lmc = 0;
        props->sm_lid = 0;
        props->sm_sl = 0;
        props->state = port_state;
        props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
                                IB_PORT_DEVICE_MGMT_SUP |
                                IB_PORT_VENDOR_CLASS_SUP;
        props->ip_gids = true;
        props->gid_tbl_len = OCRDMA_MAX_SGID;
        props->pkey_tbl_len = 1;
        props->bad_pkey_cntr = 0;
        props->qkey_viol_cntr = 0;
        get_link_speed_and_width(dev, &props->active_speed,
                                 &props->active_width);
        props->max_msg_sz = 0x80000000;
        props->max_vl_num = 4;
        return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
                       struct ib_port_modify *props)
{
        return 0;
}

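/* Each ucontext keeps a list of (phys_addr, len) keys describing the
 * regions userspace may mmap(); ocrdma_mmap() validates incoming
 * requests against this list.
 */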
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                           unsigned long len)
{
        struct ocrdma_mm *mm;

        mm = kzalloc(sizeof(*mm), GFP_KERNEL);
        if (mm == NULL)
                return -ENOMEM;
        mm->key.phy_addr = phy_addr;
        mm->key.len = len;
        INIT_LIST_HEAD(&mm->entry);

        mutex_lock(&uctx->mm_list_lock);
        list_add_tail(&mm->entry, &uctx->mm_head);
        mutex_unlock(&uctx->mm_list_lock);
        return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                            unsigned long len)
{
        struct ocrdma_mm *mm, *tmp;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                if (len != mm->key.len && phy_addr != mm->key.phy_addr)
                        continue;

                list_del(&mm->entry);
                kfree(mm);
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
                              unsigned long len)
{
        bool found = false;
        struct ocrdma_mm *mm;

        mutex_lock(&uctx->mm_list_lock);
        list_for_each_entry(mm, &uctx->mm_head, entry) {
                if (len != mm->key.len && phy_addr != mm->key.phy_addr)
                        continue;

                found = true;
                break;
        }
        mutex_unlock(&uctx->mm_list_lock);
        return found;
}

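/* With firmware-preallocated PDs, allocation is a bitmap search: take the
 * first free slot in the DPP or normal pool and track a high-water mark
 * (thrsh) for each pool.
 */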
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
        u16 pd_bitmap_idx = 0;
        const unsigned long *pd_bitmap;

        if (dpp_pool) {
                pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
                pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
                                                    dev->pd_mgr->max_dpp_pd);
                __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
                dev->pd_mgr->pd_dpp_count++;
                if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
                        dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
        } else {
                pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
                pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
                                                    dev->pd_mgr->max_normal_pd);
                __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
                dev->pd_mgr->pd_norm_count++;
                if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
                        dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
        }
        return pd_bitmap_idx;
}

static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
                                        bool dpp_pool)
{
        u16 pd_count;
        u16 pd_bit_index;

        pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
                              dev->pd_mgr->pd_norm_count;
        if (pd_count == 0)
                return -EINVAL;

        if (dpp_pool) {
                pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
                if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
                        return -EINVAL;
                } else {
                        __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
                        dev->pd_mgr->pd_dpp_count--;
                }
        } else {
                pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
                if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
                        return -EINVAL;
                } else {
                        __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
                        dev->pd_mgr->pd_norm_count--;
                }
        }

        return 0;
}

static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
                                   bool dpp_pool)
{
        int status;

        mutex_lock(&dev->dev_lock);
        status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
        mutex_unlock(&dev->dev_lock);
        return status;
}

static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
        u16 pd_idx = 0;
        int status = 0;

        mutex_lock(&dev->dev_lock);
        if (pd->dpp_enabled) {
                /* try allocating a DPP PD; if not available, fall back to a normal PD */
                if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
                        pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
                        pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
                } else if (dev->pd_mgr->pd_norm_count <
                           dev->pd_mgr->max_normal_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
                        pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
                        pd->dpp_enabled = false;
                } else {
                        status = -EINVAL;
                }
        } else {
                if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
                        pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
                        pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
                } else {
                        status = -EINVAL;
                }
        }
        mutex_unlock(&dev->dev_lock);
        return status;
}

/*
 * NOTE:
 *
 * ocrdma_ucontext must be used here because this function is also
 * called from ocrdma_alloc_ucontext, where ib_udata does not carry a
 * valid ib_ucontext pointer. ib_uverbs_get_context does not call the
 * uobj_{alloc|get_xxx} helpers that store the ib_ucontext in the
 * uverbs_attr_bundle wrapping the ib_udata, so ib_udata does NOT
 * imply a valid ib_ucontext here!
 */
static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
                            struct ocrdma_ucontext *uctx,
                            struct ib_udata *udata)
{
        int status;

        if (udata && uctx && dev->attr.max_dpp_pds) {
                pd->dpp_enabled =
                        ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
                pd->num_dpp_qp =
                        pd->dpp_enabled ? (dev->nic_info.db_page_size /
                                           dev->attr.wqe_size) : 0;
        }

        if (dev->pd_mgr->pd_prealloc_valid)
                return ocrdma_get_pd_num(dev, pd);

retry:
        status = ocrdma_mbx_alloc_pd(dev, pd);
        if (status) {
                if (pd->dpp_enabled) {
                        pd->dpp_enabled = false;
                        pd->num_dpp_qp = 0;
                        goto retry;
                }
                return status;
        }

        return 0;
}

static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
                                 struct ocrdma_pd *pd)
{
        return (uctx->cntxt_pd == pd);
}

static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
                              struct ocrdma_pd *pd)
{
        if (dev->pd_mgr->pd_prealloc_valid)
                ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
        else
                ocrdma_mbx_dealloc_pd(dev, pd);
}

static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
                                    struct ocrdma_ucontext *uctx,
                                    struct ib_udata *udata)
{
        struct ib_device *ibdev = &dev->ibdev;
        struct ib_pd *pd;
        int status;

        pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
        if (!pd)
                return -ENOMEM;

        pd->device  = ibdev;
        uctx->cntxt_pd = get_ocrdma_pd(pd);

        status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
        if (status) {
                kfree(uctx->cntxt_pd);
                goto err;
        }

        uctx->cntxt_pd->uctx = uctx;
        uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
        return status;
}

static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        struct ocrdma_pd *pd = uctx->cntxt_pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

        if (uctx->pd_in_use) {
                pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
                       __func__, dev->id, pd->id);
        }
        kfree(uctx->cntxt_pd);
        uctx->cntxt_pd = NULL;
        _ocrdma_dealloc_pd(dev, pd);
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        struct ocrdma_pd *pd = NULL;

        mutex_lock(&uctx->mm_list_lock);
        if (!uctx->pd_in_use) {
                uctx->pd_in_use = true;
                pd = uctx->cntxt_pd;
        }
        mutex_unlock(&uctx->mm_list_lock);

        return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
        mutex_lock(&uctx->mm_list_lock);
        uctx->pd_in_use = false;
        mutex_unlock(&uctx->mm_list_lock);
}

int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
        struct ib_device *ibdev = uctx->device;
        int status;
        struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
        struct ocrdma_alloc_ucontext_resp resp = {};
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

        if (!udata)
                return -EFAULT;
        INIT_LIST_HEAD(&ctx->mm_head);
        mutex_init(&ctx->mm_list_lock);

        ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
                                            &ctx->ah_tbl.pa, GFP_KERNEL);
        if (!ctx->ah_tbl.va)
                return -ENOMEM;

        ctx->ah_tbl.len = map_len;

        resp.ah_tbl_len = ctx->ah_tbl.len;
        resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);

        status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
        if (status)
                goto map_err;

        status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
        if (status)
                goto pd_err;

        resp.dev_id = dev->id;
        resp.max_inline_data = dev->attr.max_inline_data;
        resp.wqe_size = dev->attr.wqe_size;
        resp.rqe_size = dev->attr.rqe_size;
        resp.dpp_wqe_size = dev->attr.wqe_size;

        memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
        status = ib_copy_to_udata(udata, &resp, sizeof(resp));
        if (status)
                goto cpy_err;
        return 0;

cpy_err:
        ocrdma_dealloc_ucontext_pd(ctx);
pd_err:
        ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
        dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
                          ctx->ah_tbl.pa);
        return status;
}

void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
        struct ocrdma_mm *mm, *tmp;
        struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
        struct pci_dev *pdev = dev->nic_info.pdev;

        ocrdma_dealloc_ucontext_pd(uctx);

        ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
        dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
                          uctx->ah_tbl.pa);

        list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
                list_del(&mm->entry);
                kfree(mm);
        }
}

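/* Three kinds of regions are mmap()ed: doorbell pages (non-cached), DPP
 * pages (write-combined) and regular memory such as CQ/QP queues; the
 * request must match a region registered via ocrdma_add_mmap().
 */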
int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
        struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
        unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
        u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
        unsigned long len = (vma->vm_end - vma->vm_start);
        int status;
        bool found;

        if (vma->vm_start & (PAGE_SIZE - 1))
                return -EINVAL;
        found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
        if (!found)
                return -EINVAL;

        if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
                dev->nic_info.db_total_size)) &&
                (len <= dev->nic_info.db_page_size)) {
                if (vma->vm_flags & VM_READ)
                        return -EPERM;

                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
                status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                            len, vma->vm_page_prot);
        } else if (dev->nic_info.dpp_unmapped_len &&
                (vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
                (vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
                        dev->nic_info.dpp_unmapped_len)) &&
                (len <= dev->nic_info.dpp_unmapped_len)) {
                if (vma->vm_flags & VM_READ)
                        return -EPERM;

                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
                status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                            len, vma->vm_page_prot);
        } else {
                status = remap_pfn_range(vma, vma->vm_start,
                                         vma->vm_pgoff, len, vma->vm_page_prot);
        }
        return status;
}

static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
                                struct ib_udata *udata)
{
        int status;
        u64 db_page_addr;
        u64 dpp_page_addr = 0;
        u32 db_page_size;
        struct ocrdma_alloc_pd_uresp rsp;
        struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
                udata, struct ocrdma_ucontext, ibucontext);

        memset(&rsp, 0, sizeof(rsp));
        rsp.id = pd->id;
        rsp.dpp_enabled = pd->dpp_enabled;
        db_page_addr = ocrdma_get_db_addr(dev, pd->id);
        db_page_size = dev->nic_info.db_page_size;

        status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
        if (status)
                return status;

        if (pd->dpp_enabled) {
                dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
                                (pd->id * PAGE_SIZE);
                status = ocrdma_add_mmap(uctx, dpp_page_addr,
                                 PAGE_SIZE);
                if (status)
                        goto dpp_map_err;
                rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
                rsp.dpp_page_addr_lo = dpp_page_addr;
        }

        status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
        if (status)
                goto ucopy_err;

        pd->uctx = uctx;
        return 0;

ucopy_err:
        if (pd->dpp_enabled)
                ocrdma_del_mmap(pd->uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
        ocrdma_del_mmap(pd->uctx, db_page_addr, db_page_size);
        return status;
}

int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct ib_device *ibdev = ibpd->device;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct ocrdma_pd *pd;
        int status;
        u8 is_uctx_pd = false;
        struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
                udata, struct ocrdma_ucontext, ibucontext);

        if (udata) {
                pd = ocrdma_get_ucontext_pd(uctx);
                if (pd) {
                        is_uctx_pd = true;
                        goto pd_mapping;
                }
        }

        pd = get_ocrdma_pd(ibpd);
        status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
        if (status)
                goto exit;

pd_mapping:
        if (udata) {
                status = ocrdma_copy_pd_uresp(dev, pd, udata);
                if (status)
                        goto err;
        }
        return 0;

err:
        if (is_uctx_pd)
                ocrdma_release_ucontext_pd(uctx);
        else
                _ocrdma_dealloc_pd(dev, pd);
exit:
        return status;
}

void ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_ucontext *uctx = NULL;
        u64 usr_db;

        uctx = pd->uctx;
        if (uctx) {
                u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
                        (pd->id * PAGE_SIZE);
                if (pd->dpp_enabled)
                        ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
                usr_db = ocrdma_get_db_addr(dev, pd->id);
                ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

                if (is_ucontext_pd(uctx, pd)) {
                        ocrdma_release_ucontext_pd(uctx);
                        return;
                }
        }
        _ocrdma_dealloc_pd(dev, pd);
}

static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                            u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
        int status;

        mr->hwmr.fr_mr = 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        mr->hwmr.num_pbls = num_pbls;

        status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
        if (status)
                return status;

        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;
        return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
        int status;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
                pr_err("%s err, invalid access rights\n", __func__);
                return ERR_PTR(-EINVAL);
        }

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
                                   OCRDMA_ADDR_CHECK_DISABLE);
        if (status) {
                kfree(mr);
                return ERR_PTR(status);
        }

        return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
                                   struct ocrdma_hw_mr *mr)
{
        struct pci_dev *pdev = dev->nic_info.pdev;
        int i = 0;

        if (mr->pbl_table) {
                for (i = 0; i < mr->num_pbls; i++) {
                        if (!mr->pbl_table[i].va)
                                continue;
                        dma_free_coherent(&pdev->dev, mr->pbl_size,
                                          mr->pbl_table[i].va,
                                          mr->pbl_table[i].pa);
                }
                kfree(mr->pbl_table);
                mr->pbl_table = NULL;
        }
}

static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                              u32 num_pbes)
{
        u32 num_pbls = 0;
        u32 idx = 0;
        int status = 0;
        u32 pbl_size;

        do {
                pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
                if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
                        status = -EFAULT;
                        break;
                }
                num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
                num_pbls = num_pbls / (pbl_size / sizeof(u64));
                idx++;
        } while (num_pbls >= dev->attr.max_num_mr_pbl);

        mr->hwmr.num_pbes = num_pbes;
        mr->hwmr.num_pbls = num_pbls;
        mr->hwmr.pbl_size = pbl_size;
        return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
        int status = 0;
        int i;
        u32 dma_len = mr->pbl_size;
        struct pci_dev *pdev = dev->nic_info.pdev;
        void *va;
        dma_addr_t pa;

        mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
                                GFP_KERNEL);

        if (!mr->pbl_table)
                return -ENOMEM;

        for (i = 0; i < mr->num_pbls; i++) {
                va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
                if (!va) {
                        ocrdma_free_mr_pbl_tbl(dev, mr);
                        status = -ENOMEM;
                        break;
                }
                mr->pbl_table[i].va = va;
                mr->pbl_table[i].pa = pa;
        }
        return status;
}

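/* Walk the umem scatter-gather list and write each DMA page address into
 * the PBEs, spilling over to the next PBL whenever the current one fills.
 */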
static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
                            u32 num_pbes)
{
        struct ocrdma_pbe *pbe;
        struct sg_dma_page_iter sg_iter;
        struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
        struct ib_umem *umem = mr->umem;
        int pbe_cnt, total_num_pbes = 0;
        u64 pg_addr;

        if (!mr->hwmr.num_pbes)
                return;

        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
        pbe_cnt = 0;

        for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
                /* store the page address in pbe */
                pg_addr = sg_page_iter_dma_address(&sg_iter);
                pbe->pa_lo = cpu_to_le32(pg_addr);
                pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
                pbe_cnt += 1;
                total_num_pbes += 1;
                pbe++;

                /* done building pbes; the caller issues the mbx cmd. */
                if (total_num_pbes == num_pbes)
                        return;

                /* if the given pbl is full of pbes, move to the next pbl. */
                if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
                        pbl_tbl++;
                        pbe = (struct ocrdma_pbe *)pbl_tbl->va;
                        pbe_cnt = 0;
                }
        }
}

struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
                                 u64 usr_addr, int acc, struct ib_udata *udata)
{
        int status = -ENOMEM;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd;
        u32 num_pbes;

        pd = get_ocrdma_pd(ibpd);

        if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(status);
        mr->umem = ib_umem_get(udata, start, len, acc, 0);
        if (IS_ERR(mr->umem)) {
                status = -EFAULT;
                goto umem_err;
        }
        num_pbes = ib_umem_page_count(mr->umem);
        status = ocrdma_get_pbl_info(dev, mr, num_pbes);
        if (status)
                goto umem_err;

        mr->hwmr.pbe_size = PAGE_SIZE;
        mr->hwmr.fbo = ib_umem_offset(mr->umem);
        mr->hwmr.va = usr_addr;
        mr->hwmr.len = len;
        mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
        mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
        mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
        mr->hwmr.local_rd = 1;
        mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto umem_err;
        build_user_pbes(dev, mr, num_pbes);
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
        if (status)
                goto mbx_err;
        mr->ibmr.lkey = mr->hwmr.lkey;
        if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
                mr->ibmr.rkey = mr->hwmr.lkey;

        return &mr->ibmr;

mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
        kfree(mr);
        return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
        struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);

        (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

        kfree(mr->pages);
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

        /* it could be user registered memory. */
        ib_umem_release(mr->umem);
        kfree(mr);

        /* Don't stop cleanup, in case FW is unresponsive */
        if (dev->mqe_ctx.fw_error_state) {
                pr_err("%s(%d) fw not responding.\n",
                       __func__, dev->id);
        }
        return 0;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
                                struct ib_udata *udata)
{
        int status;
        struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
                udata, struct ocrdma_ucontext, ibucontext);
        struct ocrdma_create_cq_uresp uresp;

        /* this must be user flow! */
        if (!udata)
                return -EINVAL;

        memset(&uresp, 0, sizeof(uresp));
        uresp.cq_id = cq->id;
        uresp.page_size = PAGE_ALIGN(cq->len);
        uresp.num_pages = 1;
        uresp.max_hw_cqe = cq->max_hw_cqe;
        uresp.page_addr[0] = virt_to_phys(cq->va);
        uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
        uresp.db_page_size = dev->nic_info.db_page_size;
        uresp.phase_change = cq->phase_change ? 1 : 0;
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                pr_err("%s(%d) copy error cqid=0x%x.\n",
                       __func__, dev->id, cq->id);
                goto err;
        }
        status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
        if (status)
                goto err;
        status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
        if (status) {
                ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
                goto err;
        }
        cq->ucontext = uctx;
err:
        return status;
}

int ocrdma_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
                     struct ib_udata *udata)
{
        struct ib_device *ibdev = ibcq->device;
        int entries = attr->cqe;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
        struct ocrdma_ucontext *uctx = rdma_udata_to_drv_context(
                udata, struct ocrdma_ucontext, ibucontext);
        u16 pd_id = 0;
        int status;
        struct ocrdma_create_cq_ureq ureq;

        if (attr->flags)
                return -EINVAL;

        if (udata) {
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                        return -EFAULT;
        } else
                ureq.dpp_cq = 0;

        spin_lock_init(&cq->cq_lock);
        spin_lock_init(&cq->comp_handler_lock);
        INIT_LIST_HEAD(&cq->sq_head);
        INIT_LIST_HEAD(&cq->rq_head);

        if (udata)
                pd_id = uctx->cntxt_pd->id;

        status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
        if (status)
                return status;

        if (udata) {
                status = ocrdma_copy_cq_uresp(dev, cq, udata);
                if (status)
                        goto ctx_err;
        }
        cq->phase = OCRDMA_CQE_VALID;
        dev->cq_tbl[cq->id] = cq;
        return 0;

ctx_err:
        ocrdma_mbx_destroy_cq(dev, cq);
        return status;
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
                     struct ib_udata *udata)
{
        int status = 0;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

        if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
                status = -EINVAL;
                return status;
        }
        ibcq->cqe = new_cnt;
        return status;
}

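/* Count the CQEs still valid in the queue and ring the CQ doorbell for
 * them (without re-arming) so the hardware's credit accounting stays
 * consistent before the CQ is destroyed.
 */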
static void ocrdma_flush_cq(struct ocrdma_cq *cq)
{
        int cqe_cnt;
        int valid_count = 0;
        unsigned long flags;

        struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
        struct ocrdma_cqe *cqe = NULL;

        cqe = cq->va;
        cqe_cnt = cq->cqe_cnt;

        /* The last irq might have scheduled a polling thread;
         * sync up with it before hard flushing.
         */
        spin_lock_irqsave(&cq->cq_lock, flags);
        while (cqe_cnt) {
                if (is_cqe_valid(cq, cqe))
                        valid_count++;
                cqe++;
                cqe_cnt--;
        }
        ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
        spin_unlock_irqrestore(&cq->cq_lock, flags);
}

void ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_eq *eq = NULL;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        int pdid = 0;
        u32 irq, indx;

        dev->cq_tbl[cq->id] = NULL;
        indx = ocrdma_get_eq_table_index(dev, cq->eqn);

        eq = &dev->eq_tbl[indx];
        irq = ocrdma_get_irq(dev, eq);
        synchronize_irq(irq);
        ocrdma_flush_cq(cq);

        ocrdma_mbx_destroy_cq(dev, cq);
        if (cq->ucontext) {
                pdid = cq->ucontext->cntxt_pd->id;
                ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
                                PAGE_ALIGN(cq->len));
                ocrdma_del_mmap(cq->ucontext,
                                ocrdma_get_db_addr(dev, pdid),
                                dev->nic_info.db_page_size);
        }
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
        int status = -EINVAL;

        if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
                dev->qp_tbl[qp->id] = qp;
                status = 0;
        }
        return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
        dev->qp_tbl[qp->id] = NULL;
}

static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
                                  struct ib_qp_init_attr *attrs,
                                  struct ib_udata *udata)
{
        if ((attrs->qp_type != IB_QPT_GSI) &&
            (attrs->qp_type != IB_QPT_RC) &&
            (attrs->qp_type != IB_QPT_UC) &&
            (attrs->qp_type != IB_QPT_UD)) {
                pr_err("%s(%d) unsupported qp type=0x%x requested\n",
                       __func__, dev->id, attrs->qp_type);
                return -EINVAL;
        }
        /* Skip the check for QP1 to support CM size of 128 */
        if ((attrs->qp_type != IB_QPT_GSI) &&
            (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
                pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_send_wr);
                pr_err("%s(%d) supported send_wr=0x%x\n",
                       __func__, dev->id, dev->attr.max_wqe);
                return -EINVAL;
        }
        if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
                pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_recv_wr);
                pr_err("%s(%d) supported recv_wr=0x%x\n",
                       __func__, dev->id, dev->attr.max_rqe);
                return -EINVAL;
        }
        if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
                pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_inline_data);
                pr_err("%s(%d) supported inline data size=0x%x\n",
                       __func__, dev->id, dev->attr.max_inline_data);
                return -EINVAL;
        }
        if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
                pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_send_sge);
                pr_err("%s(%d) supported send_sge=0x%x\n",
                       __func__, dev->id, dev->attr.max_send_sge);
                return -EINVAL;
        }
        if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
                pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
                       __func__, dev->id, attrs->cap.max_recv_sge);
                pr_err("%s(%d) supported recv_sge=0x%x\n",
                       __func__, dev->id, dev->attr.max_recv_sge);
                return -EINVAL;
        }
        /* unprivileged user space cannot create special QP */
        if (udata && attrs->qp_type == IB_QPT_GSI) {
                pr_err("%s(%d) Userspace can't create special QPs of type=0x%x\n",
                       __func__, dev->id, attrs->qp_type);
                return -EINVAL;
        }
        /* allow creating only one GSI type of QP */
        if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
                pr_err("%s(%d) GSI special QPs already created.\n",
                       __func__, dev->id);
                return -EINVAL;
        }
        /* verify consumer QPs are not trying to use GSI QP's CQ */
        if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
                if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
                        (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
                        pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
                                __func__, dev->id);
                        return -EINVAL;
                }
        }
        return 0;
}

static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
                                struct ib_udata *udata, int dpp_offset,
                                int dpp_credit_lmt, int srq)
{
        int status;
        u64 usr_db;
        struct ocrdma_create_qp_uresp uresp;
        struct ocrdma_pd *pd = qp->pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

        memset(&uresp, 0, sizeof(uresp));
        usr_db = dev->nic_info.unmapped_db +
                        (pd->id * dev->nic_info.db_page_size);
        uresp.qp_id = qp->id;
        uresp.sq_dbid = qp->sq.dbid;
        uresp.num_sq_pages = 1;
        uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
        uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
        uresp.num_wqe_allocated = qp->sq.max_cnt;
        if (!srq) {
                uresp.rq_dbid = qp->rq.dbid;
                uresp.num_rq_pages = 1;
                uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
                uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
                uresp.num_rqe_allocated = qp->rq.max_cnt;
        }
        uresp.db_page_addr = usr_db;
        uresp.db_page_size = dev->nic_info.db_page_size;
        uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
        uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
        uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

        if (qp->dpp_enabled) {
                uresp.dpp_credit = dpp_credit_lmt;
                uresp.dpp_offset = dpp_offset;
        }
        status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
        if (status) {
                pr_err("%s(%d) user copy error.\n", __func__, dev->id);
                goto err;
        }
        status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
                                 uresp.sq_page_size);
        if (status)
                goto err;

        if (!srq) {
                status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
                                         uresp.rq_page_size);
                if (status)
                        goto rq_map_err;
        }
        return status;
rq_map_err:
        ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
        return status;
}

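/* The doorbell layout depends on the ASIC generation: SKH-R uses the GEN2
 * SQ/RQ offsets within the per-PD doorbell page; older chips use the
 * legacy offsets.
 */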
static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
                             struct ocrdma_pd *pd)
{
        if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
                qp->sq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_GEN2_SQ_OFFSET;
                qp->rq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_GEN2_RQ_OFFSET;
        } else {
                qp->sq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_SQ_OFFSET;
                qp->rq_db = dev->nic_info.db +
                        (pd->id * dev->nic_info.db_page_size) +
                        OCRDMA_DB_RQ_OFFSET;
        }
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
        qp->wqe_wr_id_tbl =
            kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
                    GFP_KERNEL);
        if (qp->wqe_wr_id_tbl == NULL)
                return -ENOMEM;
        qp->rqe_wr_id_tbl =
            kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
        if (qp->rqe_wr_id_tbl == NULL)
                return -ENOMEM;

        return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
                                      struct ocrdma_pd *pd,
                                      struct ib_qp_init_attr *attrs)
{
        qp->pd = pd;
        spin_lock_init(&qp->q_lock);
        INIT_LIST_HEAD(&qp->sq_entry);
        INIT_LIST_HEAD(&qp->rq_entry);

        qp->qp_type = attrs->qp_type;
        qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
        qp->max_inline_data = attrs->cap.max_inline_data;
        qp->sq.max_sges = attrs->cap.max_send_sge;
        qp->rq.max_sges = attrs->cap.max_recv_sge;
        qp->state = OCRDMA_QPS_RST;
        qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
                                   struct ib_qp_init_attr *attrs)
{
        if (attrs->qp_type == IB_QPT_GSI) {
                dev->gsi_qp_created = 1;
                dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
                dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
        }
}

struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
                               struct ib_qp_init_attr *attrs,
                               struct ib_udata *udata)
{
        int status;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
        struct ocrdma_create_qp_ureq ureq;
        u16 dpp_credit_lmt, dpp_offset;

        status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
        if (status)
                goto gen_err;

        memset(&ureq, 0, sizeof(ureq));
        if (udata) {
                if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
                        return ERR_PTR(-EFAULT);
        }
        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp) {
                status = -ENOMEM;
                goto gen_err;
        }
        ocrdma_set_qp_init_params(qp, pd, attrs);
        if (udata == NULL)
                qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
                                        OCRDMA_QP_FAST_REG);

        mutex_lock(&dev->dev_lock);
        status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
                                        ureq.dpp_cq_id,
                                        &dpp_offset, &dpp_credit_lmt);
        if (status)
                goto mbx_err;

        /* user-space QPs' wr_id tables are managed in the library */
        if (udata == NULL) {
                status = ocrdma_alloc_wr_id_tbl(qp);
                if (status)
                        goto map_err;
        }

        status = ocrdma_add_qpn_map(dev, qp);
        if (status)
                goto map_err;
        ocrdma_set_qp_db(dev, qp, pd);
        if (udata) {
                status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
                                              dpp_credit_lmt,
                                              (attrs->srq != NULL));
                if (status)
                        goto cpy_err;
        }
        ocrdma_store_gsi_qp_cq(dev, attrs);
        qp->ibqp.qp_num = qp->id;
        mutex_unlock(&dev->dev_lock);
        return &qp->ibqp;

cpy_err:
        ocrdma_del_qpn_map(dev, qp);
map_err:
        ocrdma_mbx_destroy_qp(dev, qp);
mbx_err:
        mutex_unlock(&dev->dev_lock);
        kfree(qp->wqe_wr_id_tbl);
        kfree(qp->rqe_wr_id_tbl);
        kfree(qp);
        pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
gen_err:
        return ERR_PTR(status);
}

int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                      int attr_mask)
{
        int status = 0;
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
        enum ib_qp_state old_qps;

        qp = get_ocrdma_qp(ibqp);
        dev = get_ocrdma_dev(ibqp->device);
        if (attr_mask & IB_QP_STATE)
                status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
        /* if the new and previous states are the same, hw doesn't need to
         * know about it.
         */
        if (status < 0)
                return status;
        return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
}

int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                     int attr_mask, struct ib_udata *udata)
{
        unsigned long flags;
        int status = -EINVAL;
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
        enum ib_qp_state old_qps, new_qps;

        qp = get_ocrdma_qp(ibqp);
        dev = get_ocrdma_dev(ibqp->device);

        /* synchronize with multiple contexts trying to change or retrieve qps */
        mutex_lock(&dev->dev_lock);
        /* synchronize with wqe, rqe posting and cqe processing contexts */
        spin_lock_irqsave(&qp->q_lock, flags);
        old_qps = get_ibqp_state(qp->state);
        if (attr_mask & IB_QP_STATE)
                new_qps = attr->qp_state;
        else
                new_qps = old_qps;
        spin_unlock_irqrestore(&qp->q_lock, flags);

        if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
                pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
                       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
                       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
                       old_qps, new_qps);
                goto param_err;
        }

        status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
        if (status > 0)
                status = 0;
param_err:
        mutex_unlock(&dev->dev_lock);
        return status;
}

static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
{
        switch (mtu) {
        case 256:
                return IB_MTU_256;
        case 512:
                return IB_MTU_512;
        case 1024:
                return IB_MTU_1024;
        case 2048:
                return IB_MTU_2048;
        case 4096:
                return IB_MTU_4096;
        default:
                return IB_MTU_1024;
        }
}

static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
{
        int ib_qp_acc_flags = 0;

        if (qp_cap_flags & OCRDMA_QP_INB_WR)
                ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
        if (qp_cap_flags & OCRDMA_QP_INB_RD)
                ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
        return ib_qp_acc_flags;
}

1468 int ocrdma_query_qp(struct ib_qp *ibqp,
1469                     struct ib_qp_attr *qp_attr,
1470                     int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1471 {
1472         int status;
1473         u32 qp_state;
1474         struct ocrdma_qp_params params;
1475         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
1476         struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
1477
1478         memset(&params, 0, sizeof(params));
1479         mutex_lock(&dev->dev_lock);
1480         status = ocrdma_mbx_query_qp(dev, qp, &params);
1481         mutex_unlock(&dev->dev_lock);
1482         if (status)
1483                 goto mbx_err;
1484         if (qp->qp_type == IB_QPT_UD)
1485                 qp_attr->qkey = params.qkey;
1486         qp_attr->path_mtu =
1487                 ocrdma_mtu_int_to_enum((params.path_mtu_pkey_indx &
1488                                         OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1489                                         OCRDMA_QP_PARAMS_PATH_MTU_SHIFT);
1490         qp_attr->path_mig_state = IB_MIG_MIGRATED;
1491         qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1492         qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1493         qp_attr->dest_qp_num =
1494             params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1495
1496         qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1497         qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1498         qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1499         qp_attr->cap.max_send_sge = qp->sq.max_sges;
1500         qp_attr->cap.max_recv_sge = qp->rq.max_sges;
1501         qp_attr->cap.max_inline_data = qp->max_inline_data;
1502         qp_init_attr->cap = qp_attr->cap;
1503         qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
1504
1505         rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
1506                         params.rnt_rc_sl_fl &
1507                           OCRDMA_QP_PARAMS_FLOW_LABEL_MASK,
1508                         qp->sgid_idx,
1509                         (params.hop_lmt_rq_psn &
1510                          OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1511                          OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
1512                         (params.tclass_sq_psn &
1513                          OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1514                          OCRDMA_QP_PARAMS_TCLASS_SHIFT);
1515         rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid[0]);
1516
1517         rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
1518         rdma_ah_set_sl(&qp_attr->ah_attr, (params.rnt_rc_sl_fl &
1519                                            OCRDMA_QP_PARAMS_SL_MASK) >>
1520                                            OCRDMA_QP_PARAMS_SL_SHIFT);
1521         qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1522                             OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1523                                 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1524         qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1525                               OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1526                                 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1527         qp_attr->retry_cnt =
1528             (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1529                 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1530         qp_attr->min_rnr_timer = 0;
1531         qp_attr->pkey_index = 0;
1532         qp_attr->port_num = 1;
1533         rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
1534         rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
1535         qp_attr->alt_pkey_index = 0;
1536         qp_attr->alt_port_num = 0;
1537         qp_attr->alt_timeout = 0;
1538         memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1539         qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1540                     OCRDMA_QP_PARAMS_STATE_SHIFT;
1541         qp_attr->qp_state = get_ibqp_state(qp_state);
1542         qp_attr->cur_qp_state = qp_attr->qp_state;
1543         qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1544         qp_attr->max_dest_rd_atomic =
1545             params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1546         qp_attr->max_rd_atomic =
1547             params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1548         qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1549                                 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
1550         /* Sync driver QP state with FW */
1551         ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
1552 mbx_err:
1553         return status;
1554 }
1555
1556 static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
1557 {
1558         unsigned int i = idx / 32;
1559         u32 mask = (1U << (idx % 32));
1560
1561         srq->idx_bit_fields[i] ^= mask;
1562 }
1563
1564 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1565 {
1566         return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
1567 }
1568
1569 static int is_hw_sq_empty(struct ocrdma_qp *qp)
1570 {
1571         return (qp->sq.tail == qp->sq.head);
1572 }
1573
1574 static int is_hw_rq_empty(struct ocrdma_qp *qp)
1575 {
1576         return (qp->rq.tail == qp->rq.head);
1577 }
1578
1579 static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1580 {
1581         return q->va + (q->head * q->entry_size);
1582 }
1583
1584 static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1585                                       u32 idx)
1586 {
1587         return q->va + (idx * q->entry_size);
1588 }
1589
1590 static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1591 {
1592         q->head = (q->head + 1) & q->max_wqe_idx;
1593 }
1594
1595 static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1596 {
1597         q->tail = (q->tail + 1) & q->max_wqe_idx;
1598 }
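
/* Editor's note: an illustrative sketch (not used by the driver) of the
 * ring accounting behind the helpers above. Since the increments mask
 * with max_wqe_idx, max_cnt is effectively a power of two with
 * max_wqe_idx == max_cnt - 1, so ocrdma_hwq_free_cnt() reduces to
 * (tail - head - 1) mod max_cnt: one slot always stays unused so that
 * head == tail can unambiguously mean "empty" rather than "full".
 */
static __maybe_unused u32 ocrdma_hwq_used_cnt_demo(struct ocrdma_qp_hwq_info *q)
{
        /* e.g. max_cnt = 256, head = 10, tail = 250:
         * used = (10 - 250) mod 256 = 16, and ocrdma_hwq_free_cnt()
         * returns 256 - 16 - 1 = 239. (The unsigned wrap here relies
         * on max_cnt being a power of two.)
         */
        return (q->head - q->tail) % q->max_cnt;
}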
1599
1600 /* discard the cqe for a given QP */
1601 static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1602 {
1603         unsigned long cq_flags;
1604         unsigned long flags;
1605         int discard_cnt = 0;
1606         u32 cur_getp, stop_getp;
1607         struct ocrdma_cqe *cqe;
1608         u32 qpn = 0, wqe_idx = 0;
1609
1610         spin_lock_irqsave(&cq->cq_lock, cq_flags);
1611
1612         /* traverse the CQEs in the hw CQ,
1613          * find the CQEs that match the given qp
1614          * and mark them discarded by clearing the qpn.
1615          * the doorbell is rung in poll_cq() since
1616          * we don't complete cqes out of order.
1617          */
1618
1619         cur_getp = cq->getp;
1620         /* find up to where we reap the cq */
1621         stop_getp = cur_getp;
1622         do {
1623                 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1624                         break;
1625
1626                 cqe = cq->va + cur_getp;
1627                 /* exit if (a) we are done reaping the whole hw cq, or
1628                  *         (b) the qp's sq/rq becomes empty.
1629                  */
1631                 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1632                 /* if previously discarded cqe found, skip that too. */
1633                 /* check for matching qp */
1634                 if (qpn == 0 || qpn != qp->id)
1635                         goto skip_cqe;
1636
1637                 if (is_cqe_for_sq(cqe)) {
1638                         ocrdma_hwq_inc_tail(&qp->sq);
1639                 } else {
1640                         if (qp->srq) {
1641                                 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1642                                         OCRDMA_CQE_BUFTAG_SHIFT) &
1643                                         qp->srq->rq.max_wqe_idx;
1644                                 BUG_ON(wqe_idx < 1);
1645                                 spin_lock_irqsave(&qp->srq->q_lock, flags);
1646                                 ocrdma_hwq_inc_tail(&qp->srq->rq);
1647                                 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
1648                                 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1649
1650                         } else {
1651                                 ocrdma_hwq_inc_tail(&qp->rq);
1652                         }
1653                 }
1654                 /* mark cqe discarded so that it is not picked up later
1655                  * in the poll_cq().
1656                  */
1657                 discard_cnt += 1;
1658                 cqe->cmn.qpn = 0;
1659 skip_cqe:
1660                 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1661         } while (cur_getp != stop_getp);
1662         spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1663 }
1664
1665 void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
1666 {
1667         bool found = false;
1668         unsigned long flags;
1669         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
1670
1671         /* sync with any active CQ poll */
1672         spin_lock_irqsave(&dev->flush_q_lock, flags);
1673         found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1674         if (found)
1675                 list_del(&qp->sq_entry);
1676         if (!qp->srq) {
1677                 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1678                 if (found)
1679                         list_del(&qp->rq_entry);
1680         }
1681         spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1682 }
1683
1684 int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
1685 {
1686         struct ocrdma_pd *pd;
1687         struct ocrdma_qp *qp;
1688         struct ocrdma_dev *dev;
1689         struct ib_qp_attr attrs;
1690         int attr_mask;
1691         unsigned long flags;
1692
1693         qp = get_ocrdma_qp(ibqp);
1694         dev = get_ocrdma_dev(ibqp->device);
1695
1696         pd = qp->pd;
1697
1698         /* change the QP state to ERROR */
1699         if (qp->state != OCRDMA_QPS_RST) {
1700                 attrs.qp_state = IB_QPS_ERR;
1701                 attr_mask = IB_QP_STATE;
1702                 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1703         }
1704         /* ensure that CQEs for a newly created QP (whose id may be the
1705          * same as that of the QP just being destroyed) don't get
1706          * discarded until the old CQEs are discarded.
1707          */
1708         mutex_lock(&dev->dev_lock);
1709         (void) ocrdma_mbx_destroy_qp(dev, qp);
1710
1711         /*
1712          * acquire CQ lock while destroy is in progress, in order to
1713          * protect against processing in-flight CQEs for this QP.
1714          */
1715         spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
1716         if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
1717                 spin_lock(&qp->rq_cq->cq_lock);
1718                 ocrdma_del_qpn_map(dev, qp);
1719                 spin_unlock(&qp->rq_cq->cq_lock);
1720         } else {
1721                 ocrdma_del_qpn_map(dev, qp);
1722         }
1723         spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
1724
1725         if (!pd->uctx) {
1726                 ocrdma_discard_cqes(qp, qp->sq_cq);
1727                 ocrdma_discard_cqes(qp, qp->rq_cq);
1728         }
1729         mutex_unlock(&dev->dev_lock);
1730
1731         if (pd->uctx) {
1732                 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1733                                 PAGE_ALIGN(qp->sq.len));
1734                 if (!qp->srq)
1735                         ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1736                                         PAGE_ALIGN(qp->rq.len));
1737         }
1738
1739         ocrdma_del_flush_qp(qp);
1740
1741         kfree(qp->wqe_wr_id_tbl);
1742         kfree(qp->rqe_wr_id_tbl);
1743         kfree(qp);
1744         return 0;
1745 }
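
/* Editor's note: the teardown above nests the two CQ locks: the outer is
 * taken with spin_lock_irqsave() and the inner with plain spin_lock(),
 * because local interrupts are already disabled by the outer acquisition.
 * A minimal sketch of that pattern (demo only, not driver code):
 */
static __maybe_unused void ocrdma_demo_nested_cq_locks(spinlock_t *outer,
                                                       spinlock_t *inner)
{
        unsigned long flags;

        spin_lock_irqsave(outer, flags);        /* disables local irqs */
        spin_lock(inner);                       /* irqs already off here */
        /* ... work that must exclude both CQs' pollers ... */
        spin_unlock(inner);
        spin_unlock_irqrestore(outer, flags);
}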
1746
1747 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1748                                 struct ib_udata *udata)
1749 {
1750         int status;
1751         struct ocrdma_create_srq_uresp uresp;
1752
1753         memset(&uresp, 0, sizeof(uresp));
1754         uresp.rq_dbid = srq->rq.dbid;
1755         uresp.num_rq_pages = 1;
1756         uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
1757         uresp.rq_page_size = srq->rq.len;
1758         uresp.db_page_addr = dev->nic_info.unmapped_db +
1759             (srq->pd->id * dev->nic_info.db_page_size);
1760         uresp.db_page_size = dev->nic_info.db_page_size;
1761         uresp.num_rqe_allocated = srq->rq.max_cnt;
1762         if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
1763                 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
1764                 uresp.db_shift = 24;
1765         } else {
1766                 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1767                 uresp.db_shift = 16;
1768         }
1769
1770         status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1771         if (status)
1772                 return status;
1773
1774         return ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1775                                uresp.rq_page_size);
1778 }
1779
1780 int ocrdma_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init_attr,
1781                       struct ib_udata *udata)
1782 {
1783         int status;
1784         struct ocrdma_pd *pd = get_ocrdma_pd(ibsrq->pd);
1785         struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1786         struct ocrdma_srq *srq = get_ocrdma_srq(ibsrq);
1787
1788         if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1789                 return -EINVAL;
1790         if (init_attr->attr.max_wr > dev->attr.max_rqe)
1791                 return -EINVAL;
1792
1793         spin_lock_init(&srq->q_lock);
1794         srq->pd = pd;
1795         srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
1796         status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
1797         if (status)
1798                 return status;
1799
1800         if (!udata) {
1801                 srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
1802                                              GFP_KERNEL);
1803                 if (!srq->rqe_wr_id_tbl) {
1804                         status = -ENOMEM;
1805                         goto arm_err;
1806                 }
1807
1808                 srq->bit_fields_len = DIV_ROUND_UP(srq->rq.max_cnt, 32);
1810                 srq->idx_bit_fields =
1811                     kmalloc_array(srq->bit_fields_len, sizeof(u32),
1812                                   GFP_KERNEL);
1813                 if (!srq->idx_bit_fields) {
1814                         status = -ENOMEM;
1815                         goto arm_err;
1816                 }
1817                 memset(srq->idx_bit_fields, 0xff,
1818                        srq->bit_fields_len * sizeof(u32));
1819         }
1820
1821         if (init_attr->attr.srq_limit) {
1822                 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1823                 if (status)
1824                         goto arm_err;
1825         }
1826
1827         if (udata) {
1828                 status = ocrdma_copy_srq_uresp(dev, srq, udata);
1829                 if (status)
1830                         goto arm_err;
1831         }
1832
1833         return 0;
1834
1835 arm_err:
1836         ocrdma_mbx_destroy_srq(dev, srq);
1837         kfree(srq->rqe_wr_id_tbl);
1838         kfree(srq->idx_bit_fields);
1839         return status;
1840 }
1841
1842 int ocrdma_modify_srq(struct ib_srq *ibsrq,
1843                       struct ib_srq_attr *srq_attr,
1844                       enum ib_srq_attr_mask srq_attr_mask,
1845                       struct ib_udata *udata)
1846 {
1847         int status;
1848         struct ocrdma_srq *srq;
1849
1850         srq = get_ocrdma_srq(ibsrq);
1851         if (srq_attr_mask & IB_SRQ_MAX_WR)
1852                 status = -EINVAL;
1853         else
1854                 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1855         return status;
1856 }
1857
1858 int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1859 {
1860         int status;
1861         struct ocrdma_srq *srq;
1862
1863         srq = get_ocrdma_srq(ibsrq);
1864         status = ocrdma_mbx_query_srq(srq, srq_attr);
1865         return status;
1866 }
1867
1868 void ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
1869 {
1870         struct ocrdma_srq *srq;
1871         struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
1872
1873         srq = get_ocrdma_srq(ibsrq);
1874
1875         ocrdma_mbx_destroy_srq(dev, srq);
1876
1877         if (srq->pd->uctx)
1878                 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1879                                 PAGE_ALIGN(srq->rq.len));
1880
1881         kfree(srq->idx_bit_fields);
1882         kfree(srq->rqe_wr_id_tbl);
1883 }
1884
1885 /* unprivileged verbs and their support functions. */
1886 static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1887                                 struct ocrdma_hdr_wqe *hdr,
1888                                 const struct ib_send_wr *wr)
1889 {
1890         struct ocrdma_ewqe_ud_hdr *ud_hdr =
1891                 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
1892         struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);
1893
1894         ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
1895         if (qp->qp_type == IB_QPT_GSI)
1896                 ud_hdr->qkey = qp->qkey;
1897         else
1898                 ud_hdr->qkey = ud_wr(wr)->remote_qkey;
1899         ud_hdr->rsvd_ahid = ah->id;
1900         ud_hdr->hdr_type = ah->hdr_type;
1901         if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
1902                 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
1903 }
1904
1905 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1906                               struct ocrdma_sge *sge, int num_sge,
1907                               struct ib_sge *sg_list)
1908 {
1909         int i;
1910
1911         for (i = 0; i < num_sge; i++) {
1912                 sge[i].lrkey = sg_list[i].lkey;
1913                 sge[i].addr_lo = sg_list[i].addr;
1914                 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1915                 sge[i].len = sg_list[i].length;
1916                 hdr->total_len += sg_list[i].length;
1917         }
1918         if (num_sge == 0)
1919                 memset(sge, 0, sizeof(*sge));
1920 }
1921
1922 static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
1923 {
1924         uint32_t total_len = 0, i;
1925
1926         for (i = 0; i < num_sge; i++)
1927                 total_len += sg_list[i].length;
1928         return total_len;
1929 }
1930
1932 static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1933                                     struct ocrdma_hdr_wqe *hdr,
1934                                     struct ocrdma_sge *sge,
1935                                     const struct ib_send_wr *wr, u32 wqe_size)
1936 {
1937         int i;
1938         char *dpp_addr;
1939
1940         if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
1941                 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
1942                 if (unlikely(hdr->total_len > qp->max_inline_data)) {
1943                         pr_err("%s() supported_len=0x%x, unsupported len req=0x%x\n",
1944                                __func__, qp->max_inline_data, hdr->total_len);
1946                         return -EINVAL;
1947                 }
1948                 dpp_addr = (char *)sge;
1949                 for (i = 0; i < wr->num_sge; i++) {
1950                         memcpy(dpp_addr,
1951                                (void *)(unsigned long)wr->sg_list[i].addr,
1952                                wr->sg_list[i].length);
1953                         dpp_addr += wr->sg_list[i].length;
1954                 }
1955
1956                 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
1957                 if (hdr->total_len == 0)
1958                         wqe_size += sizeof(struct ocrdma_sge);
1959                 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
1960         } else {
1961                 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1962                 if (wr->num_sge)
1963                         wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
1964                 else
1965                         wqe_size += sizeof(struct ocrdma_sge);
1966                 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1967         }
1968         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1969         return 0;
1970 }
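
/* Editor's note: a small sketch (not driver code) of the size-field
 * round trip assumed above: ocrdma_build_inline_sges() stores the WQE
 * length in hdr->cw in OCRDMA_WQE_STRIDE-sized units, and the post path
 * below recovers the byte count the same way before the le32 conversion.
 */
static __maybe_unused u32 ocrdma_wqe_size_from_cw_demo(u32 cw)
{
        /* inverse of: cw |= (wqe_size / OCRDMA_WQE_STRIDE) << SIZE_SHIFT */
        return ((cw >> OCRDMA_WQE_SIZE_SHIFT) & OCRDMA_WQE_SIZE_MASK) *
                OCRDMA_WQE_STRIDE;
}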
1971
1972 static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1973                              const struct ib_send_wr *wr)
1974 {
1975         int status;
1976         struct ocrdma_sge *sge;
1977         u32 wqe_size = sizeof(*hdr);
1978
1979         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
1980                 ocrdma_build_ud_hdr(qp, hdr, wr);
1981                 sge = (struct ocrdma_sge *)(hdr + 2);
1982                 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
1983         } else {
1984                 sge = (struct ocrdma_sge *)(hdr + 1);
1985         }
1986
1987         status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
1988         return status;
1989 }
1990
1991 static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
1992                               const struct ib_send_wr *wr)
1993 {
1994         int status;
1995         struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
1996         struct ocrdma_sge *sge = ext_rw + 1;
1997         u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
1998
1999         status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2000         if (status)
2001                 return status;
2002         ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2003         ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2004         ext_rw->lrkey = rdma_wr(wr)->rkey;
2005         ext_rw->len = hdr->total_len;
2006         return 0;
2007 }
2008
2009 static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
2010                               const struct ib_send_wr *wr)
2011 {
2012         struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2013         struct ocrdma_sge *sge = ext_rw + 1;
2014         u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2015             sizeof(struct ocrdma_hdr_wqe);
2016
2017         ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2018         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2019         hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2020         hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2021
2022         ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2023         ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2024         ext_rw->lrkey = rdma_wr(wr)->rkey;
2025         ext_rw->len = hdr->total_len;
2026 }
2027
2028 static int get_encoded_page_size(int pg_sz)
2029 {
2030         /* Max size is 256M (4096 << 16) */
2031         int i = 0;
2032         for (; i < 17; i++)
2033                 if (pg_sz == (4096 << i))
2034                         break;
2035         return i;
2036 }
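
/* Editor's note: the loop above encodes pg_sz as log2(pg_sz / 4096).
 * An equivalent one-line sketch (assuming pg_sz is a power of two in
 * the 4K..256M range, and that <linux/log2.h> is available):
 */
static __maybe_unused int get_encoded_page_size_demo(int pg_sz)
{
        return ilog2(pg_sz) - 12;       /* 4096 -> 0, 8192 -> 1, ... 256M -> 16 */
}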
2037
2038 static int ocrdma_build_reg(struct ocrdma_qp *qp,
2039                             struct ocrdma_hdr_wqe *hdr,
2040                             const struct ib_reg_wr *wr)
2041 {
2042         u64 fbo;
2043         struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2044         struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
2045         struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
2046         struct ocrdma_pbe *pbe;
2047         u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2048         int num_pbes = 0, i;
2049
2050         wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2051
2052         hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2053         hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2054
2055         if (wr->access & IB_ACCESS_LOCAL_WRITE)
2056                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2057         if (wr->access & IB_ACCESS_REMOTE_WRITE)
2058                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2059         if (wr->access & IB_ACCESS_REMOTE_READ)
2060                 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2061         hdr->lkey = wr->key;
2062         hdr->total_len = mr->ibmr.length;
2063
2064         fbo = mr->ibmr.iova - mr->pages[0];
2065
2066         fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
2067         fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
2068         fast_reg->fbo_hi = upper_32_bits(fbo);
2069         fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2070         fast_reg->num_sges = mr->npages;
2071         fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);
2072
2073         pbe = pbl_tbl->va;
2074         for (i = 0; i < mr->npages; i++) {
2075                 u64 buf_addr = mr->pages[i];
2076
2077                 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2078                 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2079                 num_pbes += 1;
2080                 pbe++;
2081
2082                 /* if the current pbl is full of pbes,
2083                  * move to the next pbl.
2084                  */
2085                 if (num_pbes == (mr->hwmr.pbl_size/sizeof(u64))) {
2086                         pbl_tbl++;
2087                         pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2088                 }
2089         }
2090
2091         return 0;
2092 }
2093
2094 static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2095 {
2096         u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
2097
2098         iowrite32(val, qp->sq_db);
2099 }
2100
2101 int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2102                      const struct ib_send_wr **bad_wr)
2103 {
2104         int status = 0;
2105         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2106         struct ocrdma_hdr_wqe *hdr;
2107         unsigned long flags;
2108
2109         spin_lock_irqsave(&qp->q_lock, flags);
2110         if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2111                 spin_unlock_irqrestore(&qp->q_lock, flags);
2112                 *bad_wr = wr;
2113                 return -EINVAL;
2114         }
2115
2116         while (wr) {
2117                 if (qp->qp_type == IB_QPT_UD &&
2118                     (wr->opcode != IB_WR_SEND &&
2119                      wr->opcode != IB_WR_SEND_WITH_IMM)) {
2120                         *bad_wr = wr;
2121                         status = -EINVAL;
2122                         break;
2123                 }
2124                 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2125                     wr->num_sge > qp->sq.max_sges) {
2126                         *bad_wr = wr;
2127                         status = -ENOMEM;
2128                         break;
2129                 }
2130                 hdr = ocrdma_hwq_head(&qp->sq);
2131                 hdr->cw = 0;
2132                 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2133                         hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2134                 if (wr->send_flags & IB_SEND_FENCE)
2135                         hdr->cw |=
2136                             (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2137                 if (wr->send_flags & IB_SEND_SOLICITED)
2138                         hdr->cw |=
2139                             (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2140                 hdr->total_len = 0;
2141                 switch (wr->opcode) {
2142                 case IB_WR_SEND_WITH_IMM:
2143                         hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2144                         hdr->immdt = ntohl(wr->ex.imm_data);
2145                         /* fall through */
2146                 case IB_WR_SEND:
2147                         hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2148                         status = ocrdma_build_send(qp, hdr, wr);
2149                         break;
2150                 case IB_WR_SEND_WITH_INV:
2151                         hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2152                         hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2153                         hdr->lkey = wr->ex.invalidate_rkey;
2154                         status = ocrdma_build_send(qp, hdr, wr);
2155                         break;
2156                 case IB_WR_RDMA_WRITE_WITH_IMM:
2157                         hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2158                         hdr->immdt = ntohl(wr->ex.imm_data);
2159                         /* fall through */
2160                 case IB_WR_RDMA_WRITE:
2161                         hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2162                         status = ocrdma_build_write(qp, hdr, wr);
2163                         break;
2164                 case IB_WR_RDMA_READ:
2165                         ocrdma_build_read(qp, hdr, wr);
2166                         break;
2167                 case IB_WR_LOCAL_INV:
2168                         hdr->cw |=
2169                             (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
2170                         hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2171                                         sizeof(struct ocrdma_sge)) /
2172                                 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2173                         hdr->lkey = wr->ex.invalidate_rkey;
2174                         break;
2175                 case IB_WR_REG_MR:
2176                         status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
2177                         break;
2178                 default:
2179                         status = -EINVAL;
2180                         break;
2181                 }
2182                 if (status) {
2183                         *bad_wr = wr;
2184                         break;
2185                 }
2186                 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
2187                         qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2188                 else
2189                         qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2190                 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2191                 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2192                                    OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2193                 /* make sure wqe is written before adapter can access it */
2194                 wmb();
2195                 /* inform hw to start processing it */
2196                 ocrdma_ring_sq_db(qp);
2197
2198                 /* update pointer, counter for next wr */
2199                 ocrdma_hwq_inc_head(&qp->sq);
2200                 wr = wr->next;
2201         }
2202         spin_unlock_irqrestore(&qp->q_lock, flags);
2203         return status;
2204 }
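
/* Editor's note: a stripped-down sketch (not driver code) of the
 * producer ordering used in ocrdma_post_send() above. The wmb() must
 * separate the WQE stores from the doorbell write, so the adapter can
 * never DMA-read a half-written WQE:
 */
static __maybe_unused void ocrdma_demo_post_one_wqe(struct ocrdma_qp *qp,
                                                    const void *wqe, size_t len)
{
        memcpy(ocrdma_hwq_head(&qp->sq), wqe, len);     /* 1. write the WQE */
        wmb();                                          /* 2. order it before... */
        ocrdma_ring_sq_db(qp);                          /* 3. ...the doorbell */
        ocrdma_hwq_inc_head(&qp->sq);                   /* 4. advance producer */
}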
2205
2206 static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2207 {
2208         u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
2209
2210         iowrite32(val, qp->rq_db);
2211 }
2212
2213 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
2214                              const struct ib_recv_wr *wr, u16 tag)
2215 {
2216         u32 wqe_size = 0;
2217         struct ocrdma_sge *sge;
2218         if (wr->num_sge)
2219                 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2220         else
2221                 wqe_size = sizeof(*sge) + sizeof(*rqe);
2222
2223         rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2224                                 OCRDMA_WQE_SIZE_SHIFT);
2225         rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2226         rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2227         rqe->total_len = 0;
2228         rqe->rsvd_tag = tag;
2229         sge = (struct ocrdma_sge *)(rqe + 1);
2230         ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2231         ocrdma_cpu_to_le32(rqe, wqe_size);
2232 }
2233
2234 int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
2235                      const struct ib_recv_wr **bad_wr)
2236 {
2237         int status = 0;
2238         unsigned long flags;
2239         struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2240         struct ocrdma_hdr_wqe *rqe;
2241
2242         spin_lock_irqsave(&qp->q_lock, flags);
2243         if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2244                 spin_unlock_irqrestore(&qp->q_lock, flags);
2245                 *bad_wr = wr;
2246                 return -EINVAL;
2247         }
2248         while (wr) {
2249                 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2250                     wr->num_sge > qp->rq.max_sges) {
2251                         *bad_wr = wr;
2252                         status = -ENOMEM;
2253                         break;
2254                 }
2255                 rqe = ocrdma_hwq_head(&qp->rq);
2256                 ocrdma_build_rqe(rqe, wr, 0);
2257
2258                 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2259                 /* make sure rqe is written before adapter can access it */
2260                 wmb();
2261
2262                 /* inform hw to start processing it */
2263                 ocrdma_ring_rq_db(qp);
2264
2265                 /* update pointer, counter for next wr */
2266                 ocrdma_hwq_inc_head(&qp->rq);
2267                 wr = wr->next;
2268         }
2269         spin_unlock_irqrestore(&qp->q_lock, flags);
2270         return status;
2271 }
2272
2273 /* cqes for an srq's rqes can potentially arrive out of order.
2274  * the index gives the entry in the shadow table where the wr_id
2275  * is stored; the tag/index is returned in the cqe to reference
2276  * back to a given rqe.
2277  */
2278 static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2279 {
2280         int row = 0;
2281         int indx = 0;
2282
2283         for (row = 0; row < srq->bit_fields_len; row++) {
2284                 if (srq->idx_bit_fields[row]) {
2285                         indx = ffs(srq->idx_bit_fields[row]);
2286                         indx = (row * 32) + (indx - 1);
2287                         BUG_ON(indx >= srq->rq.max_cnt);
2288                         ocrdma_srq_toggle_bit(srq, indx);
2289                         break;
2290                 }
2291         }
2292
2293         BUG_ON(row == srq->bit_fields_len);
2294         return indx + 1; /* Use from index 1 */
2295 }
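
/* Editor's note: a condensed sketch (not driver code) of the allocator
 * formed by ocrdma_srq_toggle_bit() and ocrdma_srq_get_idx() above.
 * Bits start out set (free) via the 0xff memset in ocrdma_create_srq();
 * allocation clears the first set bit, and the completion paths toggle
 * it back on to free the slot:
 */
static __maybe_unused int ocrdma_demo_bitmap_alloc(u32 *bits, int words)
{
        int row, bit;

        for (row = 0; row < words; row++) {
                if (!bits[row])
                        continue;               /* no free slot in this word */
                bit = ffs(bits[row]) - 1;       /* first free slot */
                bits[row] ^= 1U << bit;         /* mark it in use */
                return row * 32 + bit;
        }
        return -1;                              /* table exhausted */
}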
2296
2297 static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2298 {
2299         u32 val = srq->rq.dbid | (1 << 16);
2300
2301         iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2302 }
2303
2304 int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2305                          const struct ib_recv_wr **bad_wr)
2306 {
2307         int status = 0;
2308         unsigned long flags;
2309         struct ocrdma_srq *srq;
2310         struct ocrdma_hdr_wqe *rqe;
2311         u16 tag;
2312
2313         srq = get_ocrdma_srq(ibsrq);
2314
2315         spin_lock_irqsave(&srq->q_lock, flags);
2316         while (wr) {
2317                 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2318                     wr->num_sge > srq->rq.max_sges) {
2319                         status = -ENOMEM;
2320                         *bad_wr = wr;
2321                         break;
2322                 }
2323                 tag = ocrdma_srq_get_idx(srq);
2324                 rqe = ocrdma_hwq_head(&srq->rq);
2325                 ocrdma_build_rqe(rqe, wr, tag);
2326
2327                 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2328                 /* make sure rqe is written before adapter can perform DMA */
2329                 wmb();
2330                 /* inform hw to start processing it */
2331                 ocrdma_ring_srq_db(srq);
2332                 /* update pointer, counter for next wr */
2333                 ocrdma_hwq_inc_head(&srq->rq);
2334                 wr = wr->next;
2335         }
2336         spin_unlock_irqrestore(&srq->q_lock, flags);
2337         return status;
2338 }
2339
2340 static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2341 {
2342         enum ib_wc_status ibwc_status;
2343
2344         switch (status) {
2345         case OCRDMA_CQE_GENERAL_ERR:
2346                 ibwc_status = IB_WC_GENERAL_ERR;
2347                 break;
2348         case OCRDMA_CQE_LOC_LEN_ERR:
2349                 ibwc_status = IB_WC_LOC_LEN_ERR;
2350                 break;
2351         case OCRDMA_CQE_LOC_QP_OP_ERR:
2352                 ibwc_status = IB_WC_LOC_QP_OP_ERR;
2353                 break;
2354         case OCRDMA_CQE_LOC_EEC_OP_ERR:
2355                 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2356                 break;
2357         case OCRDMA_CQE_LOC_PROT_ERR:
2358                 ibwc_status = IB_WC_LOC_PROT_ERR;
2359                 break;
2360         case OCRDMA_CQE_WR_FLUSH_ERR:
2361                 ibwc_status = IB_WC_WR_FLUSH_ERR;
2362                 break;
2363         case OCRDMA_CQE_MW_BIND_ERR:
2364                 ibwc_status = IB_WC_MW_BIND_ERR;
2365                 break;
2366         case OCRDMA_CQE_BAD_RESP_ERR:
2367                 ibwc_status = IB_WC_BAD_RESP_ERR;
2368                 break;
2369         case OCRDMA_CQE_LOC_ACCESS_ERR:
2370                 ibwc_status = IB_WC_LOC_ACCESS_ERR;
2371                 break;
2372         case OCRDMA_CQE_REM_INV_REQ_ERR:
2373                 ibwc_status = IB_WC_REM_INV_REQ_ERR;
2374                 break;
2375         case OCRDMA_CQE_REM_ACCESS_ERR:
2376                 ibwc_status = IB_WC_REM_ACCESS_ERR;
2377                 break;
2378         case OCRDMA_CQE_REM_OP_ERR:
2379                 ibwc_status = IB_WC_REM_OP_ERR;
2380                 break;
2381         case OCRDMA_CQE_RETRY_EXC_ERR:
2382                 ibwc_status = IB_WC_RETRY_EXC_ERR;
2383                 break;
2384         case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2385                 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2386                 break;
2387         case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2388                 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2389                 break;
2390         case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2391                 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2392                 break;
2393         case OCRDMA_CQE_REM_ABORT_ERR:
2394                 ibwc_status = IB_WC_REM_ABORT_ERR;
2395                 break;
2396         case OCRDMA_CQE_INV_EECN_ERR:
2397                 ibwc_status = IB_WC_INV_EECN_ERR;
2398                 break;
2399         case OCRDMA_CQE_INV_EEC_STATE_ERR:
2400                 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2401                 break;
2402         case OCRDMA_CQE_FATAL_ERR:
2403                 ibwc_status = IB_WC_FATAL_ERR;
2404                 break;
2405         case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2406                 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2407                 break;
2408         default:
2409                 ibwc_status = IB_WC_GENERAL_ERR;
2410                 break;
2411         }
2412         return ibwc_status;
2413 }
2414
2415 static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2416                       u32 wqe_idx)
2417 {
2418         struct ocrdma_hdr_wqe *hdr;
2419         struct ocrdma_sge *rw;
2420         int opcode;
2421
2422         hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2423
2424         ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2425         /* Undo the hdr->cw swap */
2426         opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2427         switch (opcode) {
2428         case OCRDMA_WRITE:
2429                 ibwc->opcode = IB_WC_RDMA_WRITE;
2430                 break;
2431         case OCRDMA_READ:
2432                 rw = (struct ocrdma_sge *)(hdr + 1);
2433                 ibwc->opcode = IB_WC_RDMA_READ;
2434                 ibwc->byte_len = rw->len;
2435                 break;
2436         case OCRDMA_SEND:
2437                 ibwc->opcode = IB_WC_SEND;
2438                 break;
2439         case OCRDMA_FR_MR:
2440                 ibwc->opcode = IB_WC_REG_MR;
2441                 break;
2442         case OCRDMA_LKEY_INV:
2443                 ibwc->opcode = IB_WC_LOCAL_INV;
2444                 break;
2445         default:
2446                 ibwc->status = IB_WC_GENERAL_ERR;
2447                 pr_err("%s() invalid opcode received = 0x%x\n",
2448                        __func__, opcode);
2449                 break;
2450         }
2451 }
2452
2453 static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2454                                                 struct ocrdma_cqe *cqe)
2455 {
2456         if (is_cqe_for_sq(cqe)) {
2457                 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2458                                 cqe->flags_status_srcqpn) &
2459                                         ~OCRDMA_CQE_STATUS_MASK);
2460                 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2461                                 cqe->flags_status_srcqpn) |
2462                                 (OCRDMA_CQE_WR_FLUSH_ERR <<
2463                                         OCRDMA_CQE_STATUS_SHIFT));
2464         } else {
2465                 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2466                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2467                                         cqe->flags_status_srcqpn) &
2468                                                 ~OCRDMA_CQE_UD_STATUS_MASK);
2469                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2470                                         cqe->flags_status_srcqpn) |
2471                                         (OCRDMA_CQE_WR_FLUSH_ERR <<
2472                                                 OCRDMA_CQE_UD_STATUS_SHIFT));
2473                 } else {
2474                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2475                                         cqe->flags_status_srcqpn) &
2476                                                 ~OCRDMA_CQE_STATUS_MASK);
2477                         cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2478                                         cqe->flags_status_srcqpn) |
2479                                         (OCRDMA_CQE_WR_FLUSH_ERR <<
2480                                                 OCRDMA_CQE_STATUS_SHIFT));
2481                 }
2482         }
2483 }
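
/* Editor's note: each cpu_to_le32(le32_to_cpu(...)) pair above is one
 * read-modify-write of the little-endian status word; a hypothetical
 * helper (sketch only, not driver code) could fold the clear and the
 * set into a single pass:
 */
static __maybe_unused void ocrdma_demo_set_le32_field(__le32 *word, u32 mask,
                                                      u32 shift, u32 val)
{
        u32 v = le32_to_cpu(*word);

        v = (v & ~mask) | (val << shift);
        *word = cpu_to_le32(v);
}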
2484
2485 static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2486                                   struct ocrdma_qp *qp, int status)
2487 {
2488         bool expand = false;
2489
2490         ibwc->byte_len = 0;
2491         ibwc->qp = &qp->ibqp;
2492         ibwc->status = ocrdma_to_ibwc_err(status);
2493
2494         ocrdma_flush_qp(qp);
2495         ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
2496
2497         /* if wqes/rqes are pending for which cqes need to be
2498          * returned, trigger expanding them.
2499          */
2500         if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2501                 expand = true;
2502                 ocrdma_set_cqe_status_flushed(qp, cqe);
2503         }
2504         return expand;
2505 }
2506
2507 static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2508                                   struct ocrdma_qp *qp, int status)
2509 {
2510         ibwc->opcode = IB_WC_RECV;
2511         ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2512         ocrdma_hwq_inc_tail(&qp->rq);
2513
2514         return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2515 }
2516
2517 static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2518                                   struct ocrdma_qp *qp, int status)
2519 {
2520         ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2521         ocrdma_hwq_inc_tail(&qp->sq);
2522
2523         return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2524 }
2525
2526
2527 static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2528                                  struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2529                                  bool *polled, bool *stop)
2530 {
2531         bool expand;
2532         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2533         int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2534                 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2535         if (status < OCRDMA_MAX_CQE_ERR)
2536                 atomic_inc(&dev->cqe_err_stats[status]);
2537
2538         /* when the hw sq is empty but the rq is not, keep the cqe
2539          * so that the cq event is raised again.
2540          */
2541         if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2542                 /* when the rq and sq share the same cq, it is safe to
2543                  * return flush cqes for RQEs.
2544                  */
2545                 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2546                         *polled = true;
2547                         status = OCRDMA_CQE_WR_FLUSH_ERR;
2548                         expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2549                 } else {
2550                         /* stop processing further cqes, as this cqe is
2551                          * used for triggering the cq event on the RQ's
2552                          * buddy cq. when the QP is destroyed, this cqe
2553                          * is removed from the cq's hardware q.
2554                          */
2555                         *polled = false;
2556                         *stop = true;
2557                         expand = false;
2558                 }
2559         } else if (is_hw_sq_empty(qp)) {
2560                 /* Do nothing */
2561                 expand = false;
2562                 *polled = false;
2563                 *stop = false;
2564         } else {
2565                 *polled = true;
2566                 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2567         }
2568         return expand;
2569 }
2570
2571 static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2572                                      struct ocrdma_cqe *cqe,
2573                                      struct ib_wc *ibwc, bool *polled)
2574 {
2575         bool expand = false;
2576         int tail = qp->sq.tail;
2577         u32 wqe_idx;
2578
2579         if (!qp->wqe_wr_id_tbl[tail].signaled) {
2580                 *polled = false;    /* WC cannot be consumed yet */
2581         } else {
2582                 ibwc->status = IB_WC_SUCCESS;
2583                 ibwc->wc_flags = 0;
2584                 ibwc->qp = &qp->ibqp;
2585                 ocrdma_update_wc(qp, ibwc, tail);
2586                 *polled = true;
2587         }
2588         wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2589                         OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
2590         if (tail != wqe_idx)
2591                 expand = true; /* Coalesced CQE can't be consumed yet */
2592
2593         ocrdma_hwq_inc_tail(&qp->sq);
2594         return expand;
2595 }
2596
2597 static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2598                              struct ib_wc *ibwc, bool *polled, bool *stop)
2599 {
2600         int status;
2601         bool expand;
2602
2603         status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2604                 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2605
2606         if (status == OCRDMA_CQE_SUCCESS)
2607                 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2608         else
2609                 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2610         return expand;
2611 }
2612
2613 static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
2614                                  struct ocrdma_cqe *cqe)
2615 {
2616         int status;
2617         u16 hdr_type = 0;
2618
2619         status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2620                 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2621         ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2622                                                 OCRDMA_CQE_SRCQP_MASK;
2623         ibwc->pkey_index = 0;
2624         ibwc->wc_flags = IB_WC_GRH;
2625         ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2626                           OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
2627                           OCRDMA_CQE_UD_XFER_LEN_MASK;
2628
2629         if (ocrdma_is_udp_encap_supported(dev)) {
2630                 hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2631                             OCRDMA_CQE_UD_L3TYPE_SHIFT) &
2632                             OCRDMA_CQE_UD_L3TYPE_MASK;
2633                 ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2634                 ibwc->network_hdr_type = hdr_type;
2635         }
2636
2637         return status;
2638 }
2639
2640 static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2641                                        struct ocrdma_cqe *cqe,
2642                                        struct ocrdma_qp *qp)
2643 {
2644         unsigned long flags;
2645         struct ocrdma_srq *srq;
2646         u32 wqe_idx;
2647
2648         srq = get_ocrdma_srq(qp->ibqp.srq);
2649         wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
2650                 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
2651         BUG_ON(wqe_idx < 1);
2652
2653         ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2654         spin_lock_irqsave(&srq->q_lock, flags);
2655         ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
2656         spin_unlock_irqrestore(&srq->q_lock, flags);
2657         ocrdma_hwq_inc_tail(&srq->rq);
2658 }
2659
2660 static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2661                                 struct ib_wc *ibwc, bool *polled, bool *stop,
2662                                 int status)
2663 {
2664         bool expand;
2665         struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2666
2667         if (status < OCRDMA_MAX_CQE_ERR)
2668                 atomic_inc(&dev->cqe_err_stats[status]);
2669
2670         /* when the hw rq is empty but the sq is not, keep the cqe
2671          * so that the cq event is raised again.
2672          */
2673         if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2674                 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2675                         *polled = true;
2676                         status = OCRDMA_CQE_WR_FLUSH_ERR;
2677                         expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2678                 } else {
2679                         *polled = false;
2680                         *stop = true;
2681                         expand = false;
2682                 }
2683         } else if (is_hw_rq_empty(qp)) {
2684                 /* Do nothing */
2685                 expand = false;
2686                 *polled = false;
2687                 *stop = false;
2688         } else {
2689                 *polled = true;
2690                 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2691         }
2692         return expand;
2693 }
2694
2695 static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2696                                      struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2697 {
2698         struct ocrdma_dev *dev;
2699
2700         dev = get_ocrdma_dev(qp->ibqp.device);
2701         ibwc->opcode = IB_WC_RECV;
2702         ibwc->qp = &qp->ibqp;
2703         ibwc->status = IB_WC_SUCCESS;
2704
2705         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
2706                 ocrdma_update_ud_rcqe(dev, ibwc, cqe);
2707         else
2708                 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2709
2710         if (is_cqe_imm(cqe)) {
2711                 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2712                 ibwc->wc_flags |= IB_WC_WITH_IMM;
2713         } else if (is_cqe_wr_imm(cqe)) {
2714                 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2715                 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2716                 ibwc->wc_flags |= IB_WC_WITH_IMM;
2717         } else if (is_cqe_invalidated(cqe)) {
2718                 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2719                 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2720         }
2721         if (qp->ibqp.srq) {
2722                 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
2723         } else {
2724                 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2725                 ocrdma_hwq_inc_tail(&qp->rq);
2726         }
2727 }
2728
2729 static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2730                              struct ib_wc *ibwc, bool *polled, bool *stop)
2731 {
2732         int status;
2733         bool expand = false;
2734
2735         ibwc->wc_flags = 0;
2736         if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2737                 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2738                                         OCRDMA_CQE_UD_STATUS_MASK) >>
2739                                         OCRDMA_CQE_UD_STATUS_SHIFT;
2740         } else {
2741                 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2742                              OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2743         }
2744
2745         if (status == OCRDMA_CQE_SUCCESS) {
2746                 *polled = true;
2747                 ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2748         } else {
2749                 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2750                                               status);
2751         }
2752         return expand;
2753 }
2754
2755 static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2756                                    u16 cur_getp)
2757 {
2758         if (cq->phase_change) {
2759                 if (cur_getp == 0)
2760                         cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
2761         } else {
2762                 /* clear valid bit */
2763                 cqe->flags_status_srcqpn = 0;
2764         }
2765 }
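
/* Editor's note: a minimal sketch of the phase-bit ownership test this
 * pairs with (assuming is_cqe_valid() compares the CQE's valid bit
 * against cq->phase). With phase_change set, the expected polarity
 * flips on every wrap, so entries left over from the previous lap test
 * invalid without software having to clear them:
 */
static __maybe_unused bool ocrdma_demo_cqe_owned(u32 flags_status, u32 phase)
{
        return (flags_status & OCRDMA_CQE_VALID) == phase;
}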
2766
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
                            struct ib_wc *ibwc)
{
        u16 qpn = 0;
        int i = 0;
        bool expand = false;
        int polled_hw_cqes = 0;
        struct ocrdma_qp *qp = NULL;
        struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
        struct ocrdma_cqe *cqe;
        u16 cur_getp;
        bool polled = false;
        bool stop = false;

        cur_getp = cq->getp;
        while (num_entries) {
                cqe = cq->va + cur_getp;
                /* stop at the first cqe not yet marked valid by hardware */
                if (!is_cqe_valid(cq, cqe))
                        break;
                qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
                /* ignore discarded cqe */
                if (qpn == 0)
                        goto skip_cqe;
                qp = dev->qp_tbl[qpn];
                BUG_ON(qp == NULL);

                if (is_cqe_for_sq(cqe)) {
                        expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
                                                  &stop);
                } else {
                        expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
                                                  &stop);
                }
                if (expand)
                        goto expand_cqe;
                if (stop)
                        goto stop_cqe;
                /* clear qpn to avoid duplicate processing by discard_cqe() */
                cqe->cmn.qpn = 0;
skip_cqe:
                polled_hw_cqes += 1;
                cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
                ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
                if (polled) {
                        num_entries -= 1;
                        i += 1;
                        ibwc = ibwc + 1;
                        polled = false;
                }
        }
stop_cqe:
        cq->getp = cur_getp;

        if (polled_hw_cqes)
                ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);

        return i;
}

/* Synthesize a flush-error cqe for each pending WQE or RQE of a QP whose
 * SQ or RQ is bound to the CQ being polled.
 */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
                              struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
        int err_cqes = 0;

        while (num_entries) {
                if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
                        break;
                if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
                        ocrdma_update_wc(qp, ibwc, qp->sq.tail);
                        ocrdma_hwq_inc_tail(&qp->sq);
                } else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
                        ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
                        ocrdma_hwq_inc_tail(&qp->rq);
                } else {
                        return err_cqes;
                }
                ibwc->byte_len = 0;
                ibwc->status = IB_WC_WR_FLUSH_ERR;
                ibwc = ibwc + 1;
                err_cqes += 1;
                num_entries -= 1;
        }
        return err_cqes;
}

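/* Poll entry point for the CQ: harvest completed CQEs from the hardware
 * CQ first, then, if the caller still has room, walk the QPs queued on
 * this CQ's flush list and synthesize flush completions for them.
 * Returns the total number of work completions written to wc.
 */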
int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        int cqes_to_poll = num_entries;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        int num_os_cqe = 0, err_cqes = 0;
        struct ocrdma_qp *qp;
        unsigned long flags;

        /* poll cqes from adapter CQ */
        spin_lock_irqsave(&cq->cq_lock, flags);
        num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
        spin_unlock_irqrestore(&cq->cq_lock, flags);
        cqes_to_poll -= num_os_cqe;

        if (cqes_to_poll) {
                wc = wc + num_os_cqe;
                /* The adapter returns only a single error cqe when a QP
                 * moves to the error state, so synthesize cqes with
                 * IB_WC_WR_FLUSH_ERR status for the WQEs and RQEs still
                 * pending on the SQs and RQs that use this CQ.
                 */
                spin_lock_irqsave(&dev->flush_q_lock, flags);
                list_for_each_entry(qp, &cq->sq_head, sq_entry) {
                        if (cqes_to_poll == 0)
                                break;
                        err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
                        cqes_to_poll -= err_cqes;
                        num_os_cqe += err_cqes;
                        wc = wc + err_cqes;
                }
                spin_unlock_irqrestore(&dev->flush_q_lock, flags);
        }
        return num_os_cqe;
}

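/* Notification request handler, wired up as the device's req_notify_cq
 * verb: ring the CQ doorbell with the arm bit set, and with the
 * solicited bit as well when only solicited completions should raise
 * the completion event.
 */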
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
        u16 cq_id;
        unsigned long flags;
        bool arm_needed = false, sol_needed = false;

        cq_id = cq->id;

        spin_lock_irqsave(&cq->cq_lock, flags);
        if (cq_flags & (IB_CQ_NEXT_COMP | IB_CQ_SOLICITED))
                arm_needed = true;
        if (cq_flags & IB_CQ_SOLICITED)
                sol_needed = true;

        ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
        spin_unlock_irqrestore(&cq->cq_lock, flags);

        return 0;
}

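/* Allocate a fast-register MR. Only IB_MR_TYPE_MEM_REG is supported,
 * bounded by the device's max_pages_per_frmr. Reserve a page list and
 * PBL table sized for max_num_sg pages, register the MR with the
 * device, and record it in stag_arr for later lookup by key.
 */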
struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
                              u32 max_num_sg, struct ib_udata *udata)
{
        int status;
        struct ocrdma_mr *mr;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
        struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

        if (mr_type != IB_MR_TYPE_MEM_REG)
                return ERR_PTR(-EINVAL);

        if (max_num_sg > dev->attr.max_pages_per_frmr)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
        if (!mr->pages) {
                status = -ENOMEM;
                goto pl_err;
        }

        status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
        if (status)
                goto pbl_err;
        mr->hwmr.fr_mr = 1;
        mr->hwmr.remote_rd = 0;
        mr->hwmr.remote_wr = 0;
        mr->hwmr.local_rd = 0;
        mr->hwmr.local_wr = 0;
        mr->hwmr.mw_bind = 0;
        status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
        if (status)
                goto pbl_err;
        status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
        if (status)
                goto mbx_err;
        mr->ibmr.rkey = mr->hwmr.lkey;
        mr->ibmr.lkey = mr->hwmr.lkey;
        dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
                (unsigned long) mr;
        return &mr->ibmr;
mbx_err:
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
        kfree(mr->pages);
pl_err:
        kfree(mr);
        return ERR_PTR(status);
}

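/* Page-list callback for ib_sg_to_pages(): record one page address in
 * the MR's page list, or fail with -ENOMEM once the list is full.
 */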
static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

        if (unlikely(mr->npages == mr->hwmr.num_pbes))
                return -ENOMEM;

        mr->pages[mr->npages++] = addr;

        return 0;
}

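/* Populate the MR's page list from a scatterlist using the core
 * ib_sg_to_pages() helper, which invokes ocrdma_set_page() once per
 * page. An upper-layer caller typically reaches this through the
 * generic verb, e.g. (illustrative sketch only):
 *
 *      int n = ib_map_mr_sg(ibmr, sg, sg_nents, NULL, PAGE_SIZE);
 *      if (n < sg_nents)
 *              return n < 0 ? n : -EINVAL;
 *
 * Returns the number of scatterlist entries mapped.
 */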
int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                     unsigned int *sg_offset)
{
        struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

        mr->npages = 0;

        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
}