drivers/infiniband/hw/qedr/verbs.c
1 /* QLogic qedr NIC Driver
2  * Copyright (c) 2015-2016  QLogic Corporation
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/dma-mapping.h>
33 #include <linux/crc32.h>
34 #include <net/ip.h>
35 #include <net/ipv6.h>
36 #include <net/udp.h>
37 #include <linux/iommu.h>
38
39 #include <rdma/ib_verbs.h>
40 #include <rdma/ib_user_verbs.h>
41 #include <rdma/iw_cm.h>
42 #include <rdma/ib_umem.h>
43 #include <rdma/ib_addr.h>
44 #include <rdma/ib_cache.h>
45
46 #include "qedr_hsi.h"
47 #include <linux/qed/qed_if.h>
48 #include "qedr.h"
49 #include "verbs.h"
50 #include <rdma/qedr-abi.h>
51 #include "qedr_cm.h"
52
53 #define DB_ADDR_SHIFT(addr)             ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
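
/*
 * Illustrative expansion (the offset constants come from the qed HSI
 * headers): the CQ consumer doorbell used later in this file is formed
 * roughly as
 *
 *	db_addr = dev->db_addr +
 *		  DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
 *
 * i.e. the PWM offset is shifted into its byte position within the
 * doorbell BAR.
 */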
54
55 int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
56 {
57         if (index >= QEDR_ROCE_PKEY_TABLE_LEN)
58                 return -EINVAL;
59
60         *pkey = QEDR_ROCE_PKEY_DEFAULT;
61         return 0;
62 }
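
/*
 * RoCE exposes a single default pkey, so every valid index reports
 * QEDR_ROCE_PKEY_DEFAULT. A minimal caller sketch (through the core
 * verbs layer rather than this driver entry point directly):
 *
 *	u16 pkey;
 *	int rc = ib_query_pkey(ibdev, 1, 0, &pkey);
 *
 *	if (!rc)
 *		pr_info("pkey[0] = 0x%x\n", pkey);
 */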
63
64 int qedr_query_gid(struct ib_device *ibdev, u8 port, int index,
65                    union ib_gid *sgid)
66 {
67         struct qedr_dev *dev = get_qedr_dev(ibdev);
68         int rc = 0;
69
70         if (!rdma_cap_roce_gid_table(ibdev, port))
71                 return -ENODEV;
72
73         rc = ib_get_cached_gid(ibdev, port, index, sgid, NULL);
74         if (rc == -EAGAIN) {
75                 memcpy(sgid, &zgid, sizeof(*sgid));
76                 return 0;
77         }
78
79         DP_DEBUG(dev, QEDR_MSG_INIT, "query gid: index=%d %llx:%llx\n", index,
80                  sgid->global.interface_id, sgid->global.subnet_prefix);
81
82         return rc;
83 }
84
85 int qedr_add_gid(struct ib_device *device, u8 port_num,
86                  unsigned int index, const union ib_gid *gid,
87                  const struct ib_gid_attr *attr, void **context)
88 {
89         if (!rdma_cap_roce_gid_table(device, port_num))
90                 return -EINVAL;
91
92         if (port_num > QEDR_MAX_PORT)
93                 return -EINVAL;
94
95         if (!context)
96                 return -EINVAL;
97
98         return 0;
99 }
100
101 int qedr_del_gid(struct ib_device *device, u8 port_num,
102                  unsigned int index, void **context)
103 {
104         if (!rdma_cap_roce_gid_table(device, port_num))
105                 return -EINVAL;
106
107         if (port_num > QEDR_MAX_PORT)
108                 return -EINVAL;
109
110         if (!context)
111                 return -EINVAL;
112
113         return 0;
114 }
115
116 int qedr_query_device(struct ib_device *ibdev,
117                       struct ib_device_attr *attr, struct ib_udata *udata)
118 {
119         struct qedr_dev *dev = get_qedr_dev(ibdev);
120         struct qedr_device_attr *qattr = &dev->attr;
121
122         if (!dev->rdma_ctx) {
123                 DP_ERR(dev,
124                        "qedr_query_device called with invalid params rdma_ctx=%p\n",
125                        dev->rdma_ctx);
126                 return -EINVAL;
127         }
128
129         memset(attr, 0, sizeof(*attr));
130
131         attr->fw_ver = qattr->fw_ver;
132         attr->sys_image_guid = qattr->sys_image_guid;
133         attr->max_mr_size = qattr->max_mr_size;
134         attr->page_size_cap = qattr->page_size_caps;
135         attr->vendor_id = qattr->vendor_id;
136         attr->vendor_part_id = qattr->vendor_part_id;
137         attr->hw_ver = qattr->hw_ver;
138         attr->max_qp = qattr->max_qp;
139         attr->max_qp_wr = max_t(u32, qattr->max_sqe, qattr->max_rqe);
140         attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
141             IB_DEVICE_RC_RNR_NAK_GEN |
142             IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_MGT_EXTENSIONS;
143
144         attr->max_sge = qattr->max_sge;
145         attr->max_sge_rd = qattr->max_sge;
146         attr->max_cq = qattr->max_cq;
147         attr->max_cqe = qattr->max_cqe;
148         attr->max_mr = qattr->max_mr;
149         attr->max_mw = qattr->max_mw;
150         attr->max_pd = qattr->max_pd;
151         attr->atomic_cap = dev->atomic_cap;
152         attr->max_fmr = qattr->max_fmr;
153         attr->max_map_per_fmr = 16;
154         attr->max_qp_init_rd_atom =
155             1 << (fls(qattr->max_qp_req_rd_atomic_resc) - 1);
156         attr->max_qp_rd_atom =
157             min(1 << (fls(qattr->max_qp_resp_rd_atomic_resc) - 1),
158                 attr->max_qp_init_rd_atom);
159
160         attr->max_srq = qattr->max_srq;
161         attr->max_srq_sge = qattr->max_srq_sge;
162         attr->max_srq_wr = qattr->max_srq_wr;
163
164         attr->local_ca_ack_delay = qattr->dev_ack_delay;
165         attr->max_fast_reg_page_list_len = qattr->max_mr / 8;
166         attr->max_pkeys = QEDR_ROCE_PKEY_MAX;
167         attr->max_ah = qattr->max_ah;
168
169         return 0;
170 }
171
172 #define QEDR_SPEED_SDR          (1)
173 #define QEDR_SPEED_DDR          (2)
174 #define QEDR_SPEED_QDR          (4)
175 #define QEDR_SPEED_FDR10        (8)
176 #define QEDR_SPEED_FDR          (16)
177 #define QEDR_SPEED_EDR          (32)
178
179 static inline void get_link_speed_and_width(int speed, u8 *ib_speed,
180                                             u8 *ib_width)
181 {
182         switch (speed) {
183         case 1000:
184                 *ib_speed = QEDR_SPEED_SDR;
185                 *ib_width = IB_WIDTH_1X;
186                 break;
187         case 10000:
188                 *ib_speed = QEDR_SPEED_QDR;
189                 *ib_width = IB_WIDTH_1X;
190                 break;
191
192         case 20000:
193                 *ib_speed = QEDR_SPEED_DDR;
194                 *ib_width = IB_WIDTH_4X;
195                 break;
196
197         case 25000:
198                 *ib_speed = QEDR_SPEED_EDR;
199                 *ib_width = IB_WIDTH_1X;
200                 break;
201
202         case 40000:
203                 *ib_speed = QEDR_SPEED_QDR;
204                 *ib_width = IB_WIDTH_4X;
205                 break;
206
207         case 50000:
208                 *ib_speed = QEDR_SPEED_QDR;
209                 *ib_width = IB_WIDTH_4X;
210                 break;
211
212         case 100000:
213                 *ib_speed = QEDR_SPEED_EDR;
214                 *ib_width = IB_WIDTH_4X;
215                 break;
216
217         default:
218                 /* Unsupported */
219                 *ib_speed = QEDR_SPEED_SDR;
220                 *ib_width = IB_WIDTH_1X;
221         }
222 }
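
/*
 * Worked example of the table above: a 25000 Mb/s link is reported as
 * EDR over a single lane (25 Gb/s x 1) and 100000 Mb/s as EDR x4
 * (4 x 25 Gb/s), while speeds with no exact IB equivalent, such as
 * 50000, are approximated by the nearest speed/width pair (QDR x4).
 */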
223
224 int qedr_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *attr)
225 {
226         struct qedr_dev *dev;
227         struct qed_rdma_port *rdma_port;
228
229         dev = get_qedr_dev(ibdev);
230         if (port > 1) {
231                 DP_ERR(dev, "invalid_port=0x%x\n", port);
232                 return -EINVAL;
233         }
234
235         if (!dev->rdma_ctx) {
236                 DP_ERR(dev, "rdma_ctx is NULL\n");
237                 return -EINVAL;
238         }
239
240         rdma_port = dev->ops->rdma_query_port(dev->rdma_ctx);
241         memset(attr, 0, sizeof(*attr));
242
243         if (rdma_port->port_state == QED_RDMA_PORT_UP) {
244                 attr->state = IB_PORT_ACTIVE;
245                 attr->phys_state = 5;
246         } else {
247                 attr->state = IB_PORT_DOWN;
248                 attr->phys_state = 3;
249         }
250         attr->max_mtu = IB_MTU_4096;
251         attr->active_mtu = iboe_get_mtu(dev->ndev->mtu);
252         attr->lid = 0;
253         attr->lmc = 0;
254         attr->sm_lid = 0;
255         attr->sm_sl = 0;
256         attr->port_cap_flags = IB_PORT_IP_BASED_GIDS;
257         attr->gid_tbl_len = QEDR_MAX_SGID;
258         attr->pkey_tbl_len = QEDR_ROCE_PKEY_TABLE_LEN;
259         attr->bad_pkey_cntr = rdma_port->pkey_bad_counter;
260         attr->qkey_viol_cntr = 0;
261         get_link_speed_and_width(rdma_port->link_speed,
262                                  &attr->active_speed, &attr->active_width);
263         attr->max_msg_sz = rdma_port->max_msg_size;
264         attr->max_vl_num = 4;
265
266         return 0;
267 }
268
269 int qedr_modify_port(struct ib_device *ibdev, u8 port, int mask,
270                      struct ib_port_modify *props)
271 {
272         struct qedr_dev *dev;
273
274         dev = get_qedr_dev(ibdev);
275         if (port > 1) {
276                 DP_ERR(dev, "invalid_port=0x%x\n", port);
277                 return -EINVAL;
278         }
279
280         return 0;
281 }
282
283 static int qedr_add_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
284                          unsigned long len)
285 {
286         struct qedr_mm *mm;
287
288         mm = kzalloc(sizeof(*mm), GFP_KERNEL);
289         if (!mm)
290                 return -ENOMEM;
291
292         mm->key.phy_addr = phy_addr;
293         /* This function might be called with a length which is not a multiple
294          * of PAGE_SIZE, while the mapping is PAGE_SIZE grained and the kernel
295          * forces this granularity by increasing the requested size if needed.
296          * When qedr_mmap is called, it will search the list with the updated
297          * length as a key. To prevent search failures, the length is rounded up
298          * in advance to PAGE_SIZE.
299          */
300         mm->key.len = roundup(len, PAGE_SIZE);
301         INIT_LIST_HEAD(&mm->entry);
302
303         mutex_lock(&uctx->mm_list_lock);
304         list_add(&mm->entry, &uctx->mm_head);
305         mutex_unlock(&uctx->mm_list_lock);
306
307         DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
308                  "added (addr=0x%llx,len=0x%lx) for ctx=%p\n",
309                  (unsigned long long)mm->key.phy_addr,
310                  (unsigned long)mm->key.len, uctx);
311
312         return 0;
313 }
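
/*
 * Example of the rounding above, assuming a 4096-byte PAGE_SIZE: adding
 * a doorbell area with len = 100 stores key.len = 4096, so a later
 * qedr_mmap() lookup with the page-granular vma length (4096) still
 * matches the entry.
 */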
314
315 static bool qedr_search_mmap(struct qedr_ucontext *uctx, u64 phy_addr,
316                              unsigned long len)
317 {
318         bool found = false;
319         struct qedr_mm *mm;
320
321         mutex_lock(&uctx->mm_list_lock);
322         list_for_each_entry(mm, &uctx->mm_head, entry) {
323                 if (len != mm->key.len || phy_addr != mm->key.phy_addr)
324                         continue;
325
326                 found = true;
327                 break;
328         }
329         mutex_unlock(&uctx->mm_list_lock);
330         DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
331                  "searched for (addr=0x%llx,len=0x%lx) for ctx=%p, result=%d\n",
332                  phy_addr, len, uctx, found);
333
334         return found;
335 }
336
337 struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev,
338                                         struct ib_udata *udata)
339 {
340         int rc;
341         struct qedr_ucontext *ctx;
342         struct qedr_alloc_ucontext_resp uresp;
343         struct qedr_dev *dev = get_qedr_dev(ibdev);
344         struct qed_rdma_add_user_out_params oparams;
345
346         if (!udata)
347                 return ERR_PTR(-EFAULT);
348
349         ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
350         if (!ctx)
351                 return ERR_PTR(-ENOMEM);
352
353         rc = dev->ops->rdma_add_user(dev->rdma_ctx, &oparams);
354         if (rc) {
355                 DP_ERR(dev,
356                        "failed to allocate a DPI for a new RoCE application, rc=%d. To overcome this consider to increase the number of DPIs, increase the doorbell BAR size or just close unnecessary RoCE applications. In order to increase the number of DPIs consult the qedr readme\n",
357                        rc);
358                 goto err;
359         }
360
361         ctx->dpi = oparams.dpi;
362         ctx->dpi_addr = oparams.dpi_addr;
363         ctx->dpi_phys_addr = oparams.dpi_phys_addr;
364         ctx->dpi_size = oparams.dpi_size;
365         INIT_LIST_HEAD(&ctx->mm_head);
366         mutex_init(&ctx->mm_list_lock);
367
368         memset(&uresp, 0, sizeof(uresp));
369
370         uresp.db_pa = ctx->dpi_phys_addr;
371         uresp.db_size = ctx->dpi_size;
372         uresp.max_send_wr = dev->attr.max_sqe;
373         uresp.max_recv_wr = dev->attr.max_rqe;
374         uresp.max_srq_wr = dev->attr.max_srq_wr;
375         uresp.sges_per_send_wr = QEDR_MAX_SQE_ELEMENTS_PER_SQE;
376         uresp.sges_per_recv_wr = QEDR_MAX_RQE_ELEMENTS_PER_RQE;
377         uresp.sges_per_srq_wr = dev->attr.max_srq_sge;
378         uresp.max_cqes = QEDR_MAX_CQES;
379
380         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
381         if (rc)
382                 goto err;
383
384         ctx->dev = dev;
385
386         rc = qedr_add_mmap(ctx, ctx->dpi_phys_addr, ctx->dpi_size);
387         if (rc)
388                 goto err;
389
390         DP_DEBUG(dev, QEDR_MSG_INIT, "Allocating user context %p\n",
391                  &ctx->ibucontext);
392         return &ctx->ibucontext;
393
394 err:
395         kfree(ctx);
396         return ERR_PTR(rc);
397 }
398
399 int qedr_dealloc_ucontext(struct ib_ucontext *ibctx)
400 {
401         struct qedr_ucontext *uctx = get_qedr_ucontext(ibctx);
402         struct qedr_mm *mm, *tmp;
403         int status = 0;
404
405         DP_DEBUG(uctx->dev, QEDR_MSG_INIT, "Deallocating user context %p\n",
406                  uctx);
407         uctx->dev->ops->rdma_remove_user(uctx->dev->rdma_ctx, uctx->dpi);
408
409         list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
410                 DP_DEBUG(uctx->dev, QEDR_MSG_MISC,
411                          "deleted (addr=0x%llx,len=0x%lx) for ctx=%p\n",
412                          mm->key.phy_addr, mm->key.len, uctx);
413                 list_del(&mm->entry);
414                 kfree(mm);
415         }
416
417         kfree(uctx);
418         return status;
419 }
420
421 int qedr_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
422 {
423         struct qedr_ucontext *ucontext = get_qedr_ucontext(context);
424         struct qedr_dev *dev = get_qedr_dev(context->device);
425         unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
426         u64 unmapped_db = dev->db_phys_addr;
427         unsigned long len = (vma->vm_end - vma->vm_start);
428         int rc = 0;
429         bool found;
430
431         DP_DEBUG(dev, QEDR_MSG_INIT,
432                  "qedr_mmap called vm_page=0x%lx vm_pgoff=0x%lx unmapped_db=0x%llx db_size=%x, len=%lx\n",
433                  vm_page, vma->vm_pgoff, unmapped_db, dev->db_size, len);
434         if (vma->vm_start & (PAGE_SIZE - 1)) {
435                 DP_ERR(dev, "Vma_start not page aligned = %ld\n",
436                        vma->vm_start);
437                 return -EINVAL;
438         }
439
440         found = qedr_search_mmap(ucontext, vm_page, len);
441         if (!found) {
442                 DP_ERR(dev, "Vma_pgoff not found in mapped array = %ld\n",
443                        vma->vm_pgoff);
444                 return -EINVAL;
445         }
446
449         if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
450                                                      dev->db_size))) {
451                 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping doorbell bar\n");
452                 if (vma->vm_flags & VM_READ) {
453                         DP_ERR(dev, "Trying to map doorbell bar for read\n");
454                         return -EPERM;
455                 }
456
457                 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
458
459                 rc = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
460                                         PAGE_SIZE, vma->vm_page_prot);
461         } else {
462                 DP_DEBUG(dev, QEDR_MSG_INIT, "Mapping chains\n");
463                 rc = remap_pfn_range(vma, vma->vm_start,
464                                      vma->vm_pgoff, len, vma->vm_page_prot);
465         }
466         DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_mmap return code: %d\n", rc);
467         return rc;
468 }
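
/*
 * Userspace counterpart (illustrative sketch only, not part of this
 * driver): after qedr_alloc_ucontext() has returned db_pa and db_size
 * in the response, a user library would map the doorbell area with
 * something like
 *
 *	void *db = mmap(NULL, db_size, PROT_WRITE, MAP_SHARED,
 *			cmd_fd, db_pa);
 *
 * Note PROT_WRITE only: mapping the doorbell bar for read is rejected
 * above with -EPERM.
 */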
469
470 struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
471                             struct ib_ucontext *context, struct ib_udata *udata)
472 {
473         struct qedr_dev *dev = get_qedr_dev(ibdev);
474         struct qedr_ucontext *uctx = NULL;
475         struct qedr_alloc_pd_uresp uresp;
476         struct qedr_pd *pd;
477         u16 pd_id;
478         int rc;
479
480         DP_DEBUG(dev, QEDR_MSG_INIT, "Function called from: %s\n",
481                  (udata && context) ? "User Lib" : "Kernel");
482
483         if (!dev->rdma_ctx) {
484                 DP_ERR(dev, "invlaid RDMA context\n");
485                 return ERR_PTR(-EINVAL);
486         }
487
488         pd = kzalloc(sizeof(*pd), GFP_KERNEL);
489         if (!pd)
490                 return ERR_PTR(-ENOMEM);
491
492         dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
493
494         uresp.pd_id = pd_id;
495         pd->pd_id = pd_id;
496
497         if (udata && context) {
498                 rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
499                 if (rc) {
500                         DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
                            dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
                            kfree(pd);
                            return ERR_PTR(rc);
                    }
501                 uctx = get_qedr_ucontext(context);
502                 uctx->pd = pd;
503                 pd->uctx = uctx;
504         }
505
506         return &pd->ibpd;
507 }
508
509 int qedr_dealloc_pd(struct ib_pd *ibpd)
510 {
511         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
512         struct qedr_pd *pd = get_qedr_pd(ibpd);
513
514         if (!pd) {
515                 pr_err("Invalid PD received in dealloc_pd\n");
                    return -EINVAL;
            }
516
517         DP_DEBUG(dev, QEDR_MSG_INIT, "Deallocating PD %d\n", pd->pd_id);
518         dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd->pd_id);
519
520         kfree(pd);
521
522         return 0;
523 }
524
525 static void qedr_free_pbl(struct qedr_dev *dev,
526                           struct qedr_pbl_info *pbl_info, struct qedr_pbl *pbl)
527 {
528         struct pci_dev *pdev = dev->pdev;
529         int i;
530
531         for (i = 0; i < pbl_info->num_pbls; i++) {
532                 if (!pbl[i].va)
533                         continue;
534                 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
535                                   pbl[i].va, pbl[i].pa);
536         }
537
538         kfree(pbl);
539 }
540
541 #define MIN_FW_PBL_PAGE_SIZE (4 * 1024)
542 #define MAX_FW_PBL_PAGE_SIZE (64 * 1024)
543
544 #define NUM_PBES_ON_PAGE(_page_size) ((_page_size) / sizeof(u64))
545 #define MAX_PBES_ON_PAGE NUM_PBES_ON_PAGE(MAX_FW_PBL_PAGE_SIZE)
546 #define MAX_PBES_TWO_LAYER (MAX_PBES_ON_PAGE * MAX_PBES_ON_PAGE)
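
/*
 * Worked numbers: with the 64KB maximum PBL page, NUM_PBES_ON_PAGE is
 * 65536 / 8 = 8192, so MAX_PBES_TWO_LAYER is 8192 * 8192 = 67,108,864
 * page entries (256GB of registerable memory at a 4KB page size).
 */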
547
548 static struct qedr_pbl *qedr_alloc_pbl_tbl(struct qedr_dev *dev,
549                                            struct qedr_pbl_info *pbl_info,
550                                            gfp_t flags)
551 {
552         struct pci_dev *pdev = dev->pdev;
553         struct qedr_pbl *pbl_table;
554         dma_addr_t *pbl_main_tbl;
555         dma_addr_t pa;
556         void *va;
557         int i;
558
559         pbl_table = kcalloc(pbl_info->num_pbls, sizeof(*pbl_table), flags);
560         if (!pbl_table)
561                 return ERR_PTR(-ENOMEM);
562
563         for (i = 0; i < pbl_info->num_pbls; i++) {
564                 va = dma_alloc_coherent(&pdev->dev, pbl_info->pbl_size,
565                                         &pa, flags);
566                 if (!va)
567                         goto err;
568
569                 memset(va, 0, pbl_info->pbl_size);
570                 pbl_table[i].va = va;
571                 pbl_table[i].pa = pa;
572         }
573
574         /* Two-layer PBLs: if we have more than one PBL, initialize the
575          * first one with physical pointers to all of the rest.
576          */
577         pbl_main_tbl = (dma_addr_t *)pbl_table[0].va;
578         for (i = 0; i < pbl_info->num_pbls - 1; i++)
579                 pbl_main_tbl[i] = pbl_table[i + 1].pa;
580
581         return pbl_table;
582
583 err:
584         for (i--; i >= 0; i--)
585                 dma_free_coherent(&pdev->dev, pbl_info->pbl_size,
586                                   pbl_table[i].va, pbl_table[i].pa);
587
588         qedr_free_pbl(dev, pbl_info, pbl_table);
589
590         return ERR_PTR(-ENOMEM);
591 }
592
593 static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
594                                 struct qedr_pbl_info *pbl_info,
595                                 u32 num_pbes, int two_layer_capable)
596 {
597         u32 pbl_capacity;
598         u32 pbl_size;
599         u32 num_pbls;
600
601         if ((num_pbes > MAX_PBES_ON_PAGE) && two_layer_capable) {
602                 if (num_pbes > MAX_PBES_TWO_LAYER) {
603                         DP_ERR(dev, "prepare pbl table: too many pages %d\n",
604                                num_pbes);
605                         return -EINVAL;
606                 }
607
608                 /* calculate required pbl page size */
609                 pbl_size = MIN_FW_PBL_PAGE_SIZE;
610                 pbl_capacity = NUM_PBES_ON_PAGE(pbl_size) *
611                                NUM_PBES_ON_PAGE(pbl_size);
612
613                 while (pbl_capacity < num_pbes) {
614                         pbl_size *= 2;
615                         pbl_capacity = pbl_size / sizeof(u64);
616                         pbl_capacity = pbl_capacity * pbl_capacity;
617                 }
618
619                 num_pbls = DIV_ROUND_UP(num_pbes, NUM_PBES_ON_PAGE(pbl_size));
620                 num_pbls++;     /* One for the layer0 ( points to the pbls) */
621                 pbl_info->two_layered = true;
622         } else {
623                 /* One layered PBL */
624                 num_pbls = 1;
625                 pbl_size = max_t(u32, MIN_FW_PBL_PAGE_SIZE,
626                                  roundup_pow_of_two((num_pbes * sizeof(u64))));
627                 pbl_info->two_layered = false;
628         }
629
630         pbl_info->num_pbls = num_pbls;
631         pbl_info->pbl_size = pbl_size;
632         pbl_info->num_pbes = num_pbes;
633
634         DP_DEBUG(dev, QEDR_MSG_MR,
635                  "prepare pbl table: num_pbes=%d, num_pbls=%d, pbl_size=%d\n",
636                  pbl_info->num_pbes, pbl_info->num_pbls, pbl_info->pbl_size);
637
638         return 0;
639 }
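
/*
 * Worked example: num_pbes = 20000 exceeds MAX_PBES_ON_PAGE (8192), so a
 * two-layer table is built. The minimum 4KB PBL page already gives a
 * capacity of 512 * 512 = 262,144 PBEs >= 20000, so pbl_size stays 4096
 * and num_pbls = DIV_ROUND_UP(20000, 512) + 1 = 41 (40 leaf pages plus
 * the layer-0 page that points at them).
 */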
640
641 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
642                                struct qedr_pbl *pbl,
643                                struct qedr_pbl_info *pbl_info)
644 {
645         int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
646         struct qedr_pbl *pbl_tbl;
647         struct scatterlist *sg;
648         struct regpair *pbe;
649         int entry;
650         u32 addr;
651
652         if (!pbl_info->num_pbes)
653                 return;
654
655         /* If we have a two-layered pbl, the first pbl points to the rest
656          * of the pbls and the first entry lies in the second pbl of the table.
657          */
658         if (pbl_info->two_layered)
659                 pbl_tbl = &pbl[1];
660         else
661                 pbl_tbl = pbl;
662
663         pbe = (struct regpair *)pbl_tbl->va;
664         if (!pbe) {
665                 DP_ERR(dev, "cannot populate PBL due to a NULL PBE\n");
666                 return;
667         }
668
669         pbe_cnt = 0;
670
671         shift = ilog2(umem->page_size);
672
673         for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
674                 pages = sg_dma_len(sg) >> shift;
675                 for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
676                         /* store the page address in pbe */
677                         pbe->lo = cpu_to_le32(sg_dma_address(sg) +
678                                               umem->page_size * pg_cnt);
679                         addr = upper_32_bits(sg_dma_address(sg) +
680                                              umem->page_size * pg_cnt);
681                         pbe->hi = cpu_to_le32(addr);
682                         pbe_cnt++;
683                         total_num_pbes++;
684                         pbe++;
685
686                         if (total_num_pbes == pbl_info->num_pbes)
687                                 return;
688
689                         /* If the given pbl is full storing the pbes,
690                          * move to next pbl.
691                          */
692                         if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
693                                 pbl_tbl++;
694                                 pbe = (struct regpair *)pbl_tbl->va;
695                                 pbe_cnt = 0;
696                         }
697                 }
698         }
699 }
700
701 static int qedr_copy_cq_uresp(struct qedr_dev *dev,
702                               struct qedr_cq *cq, struct ib_udata *udata)
703 {
704         struct qedr_create_cq_uresp uresp;
705         int rc;
706
707         memset(&uresp, 0, sizeof(uresp));
708
709         uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
710         uresp.icid = cq->icid;
711
712         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
713         if (rc)
714                 DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid);
715
716         return rc;
717 }
718
719 static void consume_cqe(struct qedr_cq *cq)
720 {
721         if (cq->latest_cqe == cq->toggle_cqe)
722                 cq->pbl_toggle ^= RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
723
724         cq->latest_cqe = qed_chain_consume(&cq->pbl);
725 }
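
/*
 * The toggle bit tracks ring wrap-around: each time the consumer passes
 * cq->toggle_cqe (the last element of the ring) the expected polarity in
 * cq->pbl_toggle is flipped, so the polling code can tell freshly
 * written CQEs apart from stale ones left over from the previous pass.
 */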
726
727 static inline int qedr_align_cq_entries(int entries)
728 {
729         u64 size, aligned_size;
730
731         /* We allocate an extra entry that we don't report to the FW. */
732         size = (entries + 1) * QEDR_CQE_SIZE;
733         aligned_size = ALIGN(size, PAGE_SIZE);
734
735         return aligned_size / QEDR_CQE_SIZE;
736 }
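
/*
 * Example, assuming QEDR_CQE_SIZE is 32 and PAGE_SIZE is 4096: a request
 * for 100 entries becomes size = 101 * 32 = 3232 bytes, which is aligned
 * up to 4096, i.e. room for 128 CQEs.
 */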
737
738 static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
739                                        struct qedr_dev *dev,
740                                        struct qedr_userq *q,
741                                        u64 buf_addr, size_t buf_len,
742                                        int access, int dmasync)
743 {
744         int page_cnt;
745         int rc;
746
747         q->buf_addr = buf_addr;
748         q->buf_len = buf_len;
749         q->umem = ib_umem_get(ib_ctx, q->buf_addr, q->buf_len, access, dmasync);
750         if (IS_ERR(q->umem)) {
751                 DP_ERR(dev, "create user queue: failed ib_umem_get, got %ld\n",
752                        PTR_ERR(q->umem));
753                 return PTR_ERR(q->umem);
754         }
755
756         page_cnt = ib_umem_page_count(q->umem);
757         rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
758         if (rc)
759                 goto err0;
760
761         q->pbl_tbl = qedr_alloc_pbl_tbl(dev, &q->pbl_info, GFP_KERNEL);
762         if (IS_ERR(q->pbl_tbl)) {
763                 rc = PTR_ERR(q->pbl_tbl);
                    goto err0;
            }
764
765         qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
766
767         return 0;
768
769 err0:
770         ib_umem_release(q->umem);
771
772         return rc;
773 }
774
775 static inline void qedr_init_cq_params(struct qedr_cq *cq,
776                                        struct qedr_ucontext *ctx,
777                                        struct qedr_dev *dev, int vector,
778                                        int chain_entries, int page_cnt,
779                                        u64 pbl_ptr,
780                                        struct qed_rdma_create_cq_in_params
781                                        *params)
782 {
783         memset(params, 0, sizeof(*params));
784         params->cq_handle_hi = upper_32_bits((uintptr_t)cq);
785         params->cq_handle_lo = lower_32_bits((uintptr_t)cq);
786         params->cnq_id = vector;
787         params->cq_size = chain_entries - 1;
788         params->dpi = (ctx) ? ctx->dpi : dev->dpi;
789         params->pbl_num_pages = page_cnt;
790         params->pbl_ptr = pbl_ptr;
791         params->pbl_two_level = 0;
792 }
793
794 static void doorbell_cq(struct qedr_cq *cq, u32 cons, u8 flags)
795 {
796         /* Flush data before signalling doorbell */
797         wmb();
798         cq->db.data.agg_flags = flags;
799         cq->db.data.value = cpu_to_le32(cons);
800         writeq(cq->db.raw, cq->db_addr);
801
802         /* Make sure write would stick */
803         mmiowb();
804 }
805
806 int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
807 {
808         struct qedr_cq *cq = get_qedr_cq(ibcq);
809         unsigned long sflags;
810
811         if (cq->cq_type == QEDR_CQ_TYPE_GSI)
812                 return 0;
813
814         spin_lock_irqsave(&cq->cq_lock, sflags);
815
816         cq->arm_flags = 0;
817
818         if (flags & IB_CQ_SOLICITED)
819                 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD;
820
821         if (flags & IB_CQ_NEXT_COMP)
822                 cq->arm_flags |= DQ_UCM_ROCE_CQ_ARM_CF_CMD;
823
824         doorbell_cq(cq, cq->cq_cons - 1, cq->arm_flags);
825
826         spin_unlock_irqrestore(&cq->cq_lock, sflags);
827
828         return 0;
829 }
830
831 struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
832                              const struct ib_cq_init_attr *attr,
833                              struct ib_ucontext *ib_ctx, struct ib_udata *udata)
834 {
835         struct qedr_ucontext *ctx = get_qedr_ucontext(ib_ctx);
836         struct qed_rdma_destroy_cq_out_params destroy_oparams;
837         struct qed_rdma_destroy_cq_in_params destroy_iparams;
838         struct qedr_dev *dev = get_qedr_dev(ibdev);
839         struct qed_rdma_create_cq_in_params params;
840         struct qedr_create_cq_ureq ureq;
841         int vector = attr->comp_vector;
842         int entries = attr->cqe;
843         struct qedr_cq *cq;
844         int chain_entries;
845         int page_cnt;
846         u64 pbl_ptr;
847         u16 icid;
848         int rc;
849
850         DP_DEBUG(dev, QEDR_MSG_INIT,
851                  "create_cq: called from %s. entries=%d, vector=%d\n",
852                  udata ? "User Lib" : "Kernel", entries, vector);
853
854         if (entries > QEDR_MAX_CQES) {
855                 DP_ERR(dev,
856                        "create cq: the number of entries %d is too high. Must be equal or below %d.\n",
857                        entries, QEDR_MAX_CQES);
858                 return ERR_PTR(-EINVAL);
859         }
860
861         chain_entries = qedr_align_cq_entries(entries);
862         chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
863
864         cq = kzalloc(sizeof(*cq), GFP_KERNEL);
865         if (!cq)
866                 return ERR_PTR(-ENOMEM);
867
868         if (udata) {
869                 memset(&ureq, 0, sizeof(ureq));
870                 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
871                         DP_ERR(dev,
872                                "create cq: problem copying data from user space\n");
873                         goto err0;
874                 }
875
876                 if (!ureq.len) {
877                         DP_ERR(dev,
878                                "create cq: cannot create a cq with 0 entries\n");
879                         goto err0;
880                 }
881
882                 cq->cq_type = QEDR_CQ_TYPE_USER;
883
884                 rc = qedr_init_user_queue(ib_ctx, dev, &cq->q, ureq.addr,
885                                           ureq.len, IB_ACCESS_LOCAL_WRITE, 1);
886                 if (rc)
887                         goto err0;
888
889                 pbl_ptr = cq->q.pbl_tbl->pa;
890                 page_cnt = cq->q.pbl_info.num_pbes;
891         } else {
892                 cq->cq_type = QEDR_CQ_TYPE_KERNEL;
893
894                 rc = dev->ops->common->chain_alloc(dev->cdev,
895                                                    QED_CHAIN_USE_TO_CONSUME,
896                                                    QED_CHAIN_MODE_PBL,
897                                                    QED_CHAIN_CNT_TYPE_U32,
898                                                    chain_entries,
899                                                    sizeof(union rdma_cqe),
900                                                    &cq->pbl);
901                 if (rc)
902                         goto err1;
903
904                 page_cnt = qed_chain_get_page_cnt(&cq->pbl);
905                 pbl_ptr = qed_chain_get_pbl_phys(&cq->pbl);
906         }
907
908         qedr_init_cq_params(cq, ctx, dev, vector, chain_entries, page_cnt,
909                             pbl_ptr, &params);
910
911         rc = dev->ops->rdma_create_cq(dev->rdma_ctx, &params, &icid);
912         if (rc)
913                 goto err2;
914
915         cq->icid = icid;
916         cq->sig = QEDR_CQ_MAGIC_NUMBER;
917         spin_lock_init(&cq->cq_lock);
918
919         if (ib_ctx) {
920                 rc = qedr_copy_cq_uresp(dev, cq, udata);
921                 if (rc)
922                         goto err3;
923         } else {
924                 /* Generate doorbell address. */
925                 cq->db_addr = dev->db_addr +
926                     DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
927                 cq->db.data.icid = cq->icid;
928                 cq->db.data.params = DB_AGG_CMD_SET <<
929                     RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT;
930
931                 /* point to the very last element, passing it we will toggle */
932                 cq->toggle_cqe = qed_chain_get_last_elem(&cq->pbl);
933                 cq->pbl_toggle = RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK;
934                 cq->latest_cqe = NULL;
935                 consume_cqe(cq);
936                 cq->cq_cons = qed_chain_get_cons_idx_u32(&cq->pbl);
937         }
938
939         DP_DEBUG(dev, QEDR_MSG_CQ,
940                  "create cq: icid=0x%0x, addr=%p, size(entries)=0x%0x\n",
941                  cq->icid, cq, params.cq_size);
942
943         return &cq->ibcq;
944
945 err3:
946         destroy_iparams.icid = cq->icid;
947         dev->ops->rdma_destroy_cq(dev->rdma_ctx, &destroy_iparams,
948                                   &destroy_oparams);
949 err2:
950         if (udata)
951                 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
952         else
953                 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
954 err1:
955         if (udata)
956                 ib_umem_release(cq->q.umem);
957 err0:
958         kfree(cq);
959         return ERR_PTR(-EINVAL);
960 }
961
962 int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
963 {
964         struct qedr_dev *dev = get_qedr_dev(ibcq->device);
965         struct qedr_cq *cq = get_qedr_cq(ibcq);
966
967         DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
968
969         return -ENOSYS;
970 }
971
972 int qedr_destroy_cq(struct ib_cq *ibcq)
973 {
974         struct qedr_dev *dev = get_qedr_dev(ibcq->device);
975         struct qed_rdma_destroy_cq_out_params oparams;
976         struct qed_rdma_destroy_cq_in_params iparams;
977         struct qedr_cq *cq = get_qedr_cq(ibcq);
978
979         DP_DEBUG(dev, QEDR_MSG_CQ, "destroy cq: cq_id %d\n", cq->icid);
980
981         /* GSI CQs are handled by the driver, so they don't exist in the FW */
982         if (cq->cq_type != QEDR_CQ_TYPE_GSI) {
983                 iparams.icid = cq->icid;
984                 dev->ops->rdma_destroy_cq(dev->rdma_ctx, &iparams, &oparams);
985                 dev->ops->common->chain_free(dev->cdev, &cq->pbl);
986         }
987
988         if (ibcq->uobject && ibcq->uobject->context) {
989                 qedr_free_pbl(dev, &cq->q.pbl_info, cq->q.pbl_tbl);
990                 ib_umem_release(cq->q.umem);
991         }
992
993         kfree(cq);
994
995         return 0;
996 }
997
998 static inline int get_gid_info_from_table(struct ib_qp *ibqp,
999                                           struct ib_qp_attr *attr,
1000                                           int attr_mask,
1001                                           struct qed_rdma_modify_qp_in_params
1002                                           *qp_params)
1003 {
1004         enum rdma_network_type nw_type;
1005         struct ib_gid_attr gid_attr;
1006         union ib_gid gid;
1007         u32 ipv4_addr;
1008         int rc = 0;
1009         int i;
1010
1011         rc = ib_get_cached_gid(ibqp->device, attr->ah_attr.port_num,
1012                                attr->ah_attr.grh.sgid_index, &gid, &gid_attr);
1013         if (rc)
1014                 return rc;
1015
1016         if (!memcmp(&gid, &zgid, sizeof(gid)))
1017                 return -ENOENT;
1018
1019         if (gid_attr.ndev) {
1020                 qp_params->vlan_id = rdma_vlan_dev_vlan_id(gid_attr.ndev);
1021
1022                 dev_put(gid_attr.ndev);
1023                 nw_type = ib_gid_to_network_type(gid_attr.gid_type, &gid);
1024                 switch (nw_type) {
1025                 case RDMA_NETWORK_IPV6:
1026                         memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1027                                sizeof(qp_params->sgid));
1028                         memcpy(&qp_params->dgid.bytes[0],
1029                                &attr->ah_attr.grh.dgid,
1030                                sizeof(qp_params->dgid));
1031                         qp_params->roce_mode = ROCE_V2_IPV6;
1032                         SET_FIELD(qp_params->modify_flags,
1033                                   QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1034                         break;
1035                 case RDMA_NETWORK_IB:
1036                         memcpy(&qp_params->sgid.bytes[0], &gid.raw[0],
1037                                sizeof(qp_params->sgid));
1038                         memcpy(&qp_params->dgid.bytes[0],
1039                                &attr->ah_attr.grh.dgid,
1040                                sizeof(qp_params->dgid));
1041                         qp_params->roce_mode = ROCE_V1;
1042                         break;
1043                 case RDMA_NETWORK_IPV4:
1044                         memset(&qp_params->sgid, 0, sizeof(qp_params->sgid));
1045                         memset(&qp_params->dgid, 0, sizeof(qp_params->dgid));
1046                         ipv4_addr = qedr_get_ipv4_from_gid(gid.raw);
1047                         qp_params->sgid.ipv4_addr = ipv4_addr;
1048                         ipv4_addr =
1049                             qedr_get_ipv4_from_gid(attr->ah_attr.grh.dgid.raw);
1050                         qp_params->dgid.ipv4_addr = ipv4_addr;
1051                         SET_FIELD(qp_params->modify_flags,
1052                                   QED_ROCE_MODIFY_QP_VALID_ROCE_MODE, 1);
1053                         qp_params->roce_mode = ROCE_V2_IPV4;
1054                         break;
1055                 }
1056         }
1057
1058         for (i = 0; i < 4; i++) {
1059                 qp_params->sgid.dwords[i] = ntohl(qp_params->sgid.dwords[i]);
1060                 qp_params->dgid.dwords[i] = ntohl(qp_params->dgid.dwords[i]);
1061         }
1062
1063         if (qp_params->vlan_id >= VLAN_CFI_MASK)
1064                 qp_params->vlan_id = 0;
1065
1066         return 0;
1067 }
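
/*
 * Summary of the mapping above: an IB-typed GID selects ROCE_V1 and the
 * GIDs are copied verbatim, while RoCE v2 GIDs are split by network type
 * into ROCE_V2_IPV6 (full 128-bit GIDs) and ROCE_V2_IPV4 (only the
 * embedded 32-bit address is programmed). The final dword loop converts
 * the GIDs from network to host byte order for the qed layer.
 */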
1068
1069 static void qedr_cleanup_user_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1070 {
1071         qedr_free_pbl(dev, &qp->usq.pbl_info, qp->usq.pbl_tbl);
1072         ib_umem_release(qp->usq.umem);
1073 }
1074
1075 static void qedr_cleanup_user_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1076 {
1077         qedr_free_pbl(dev, &qp->urq.pbl_info, qp->urq.pbl_tbl);
1078         ib_umem_release(qp->urq.umem);
1079 }
1080
1081 static void qedr_cleanup_kernel_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1082 {
1083         dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1084         kfree(qp->wqe_wr_id);
1085 }
1086
1087 static void qedr_cleanup_kernel_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1088 {
1089         dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1090         kfree(qp->rqe_wr_id);
1091 }
1092
1093 static int qedr_check_qp_attrs(struct ib_pd *ibpd, struct qedr_dev *dev,
1094                                struct ib_qp_init_attr *attrs)
1095 {
1096         struct qedr_device_attr *qattr = &dev->attr;
1097
1098         /* QP0... attrs->qp_type == IB_QPT_GSI */
1099         if (attrs->qp_type != IB_QPT_RC && attrs->qp_type != IB_QPT_GSI) {
1100                 DP_DEBUG(dev, QEDR_MSG_QP,
1101                          "create qp: unsupported qp type=0x%x requested\n",
1102                          attrs->qp_type);
1103                 return -EINVAL;
1104         }
1105
1106         if (attrs->cap.max_send_wr > qattr->max_sqe) {
1107                 DP_ERR(dev,
1108                        "create qp: cannot create a SQ with %d elements (max_send_wr=0x%x)\n",
1109                        attrs->cap.max_send_wr, qattr->max_sqe);
1110                 return -EINVAL;
1111         }
1112
1113         if (attrs->cap.max_inline_data > qattr->max_inline) {
1114                 DP_ERR(dev,
1115                        "create qp: unsupported inline data size=0x%x requested (max_inline=0x%x)\n",
1116                        attrs->cap.max_inline_data, qattr->max_inline);
1117                 return -EINVAL;
1118         }
1119
1120         if (attrs->cap.max_send_sge > qattr->max_sge) {
1121                 DP_ERR(dev,
1122                        "create qp: unsupported send_sge=0x%x requested (max_send_sge=0x%x)\n",
1123                        attrs->cap.max_send_sge, qattr->max_sge);
1124                 return -EINVAL;
1125         }
1126
1127         if (attrs->cap.max_recv_sge > qattr->max_sge) {
1128                 DP_ERR(dev,
1129                        "create qp: unsupported recv_sge=0x%x requested (max_recv_sge=0x%x)\n",
1130                        attrs->cap.max_recv_sge, qattr->max_sge);
1131                 return -EINVAL;
1132         }
1133
1134         /* Unprivileged user space cannot create special QP */
1135         if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
1136                 DP_ERR(dev,
1137                        "create qp: userspace can't create special QPs of type=0x%x\n",
1138                        attrs->qp_type);
1139                 return -EINVAL;
1140         }
1141
1142         return 0;
1143 }
1144
1145 static void qedr_copy_rq_uresp(struct qedr_create_qp_uresp *uresp,
1146                                struct qedr_qp *qp)
1147 {
1148         uresp->rq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1149         uresp->rq_icid = qp->icid;
1150 }
1151
1152 static void qedr_copy_sq_uresp(struct qedr_create_qp_uresp *uresp,
1153                                struct qedr_qp *qp)
1154 {
1155         uresp->sq_db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1156         uresp->sq_icid = qp->icid + 1;
1157 }
1158
1159 static int qedr_copy_qp_uresp(struct qedr_dev *dev,
1160                               struct qedr_qp *qp, struct ib_udata *udata)
1161 {
1162         struct qedr_create_qp_uresp uresp;
1163         int rc;
1164
1165         memset(&uresp, 0, sizeof(uresp));
1166         qedr_copy_sq_uresp(&uresp, qp);
1167         qedr_copy_rq_uresp(&uresp, qp);
1168
1169         uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE;
1170         uresp.qp_id = qp->qp_id;
1171
1172         rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1173         if (rc)
1174                 DP_ERR(dev,
1175                        "create qp: failed a copy to user space with qp icid=0x%x.\n",
1176                        qp->icid);
1177
1178         return rc;
1179 }
1180
1181 static void qedr_set_qp_init_params(struct qedr_dev *dev,
1182                                     struct qedr_qp *qp,
1183                                     struct qedr_pd *pd,
1184                                     struct ib_qp_init_attr *attrs)
1185 {
1186         qp->pd = pd;
1187
1188         spin_lock_init(&qp->q_lock);
1189
1190         qp->qp_type = attrs->qp_type;
1191         qp->max_inline_data = attrs->cap.max_inline_data;
1192         qp->sq.max_sges = attrs->cap.max_send_sge;
1193         qp->state = QED_ROCE_QP_STATE_RESET;
1194         qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
1195         qp->sq_cq = get_qedr_cq(attrs->send_cq);
1196         qp->rq_cq = get_qedr_cq(attrs->recv_cq);
1197         qp->dev = dev;
1198
1199         DP_DEBUG(dev, QEDR_MSG_QP,
1200                  "QP params:\tpd = %d, qp_type = %d, max_inline_data = %d, state = %d, signaled = %d, use_srq=%d\n",
1201                  pd->pd_id, qp->qp_type, qp->max_inline_data,
1202                  qp->state, qp->signaled, (attrs->srq) ? 1 : 0);
1203         DP_DEBUG(dev, QEDR_MSG_QP,
1204                  "SQ params:\tsq_max_sges = %d, sq_cq_id = %d\n",
1205                  qp->sq.max_sges, qp->sq_cq->icid);
1206         qp->rq.max_sges = attrs->cap.max_recv_sge;
1207         DP_DEBUG(dev, QEDR_MSG_QP,
1208                  "RQ params:\trq_max_sges = %d, rq_cq_id = %d\n",
1209                  qp->rq.max_sges, qp->rq_cq->icid);
1210 }
1211
1212 static inline void
1213 qedr_init_qp_user_params(struct qed_rdma_create_qp_in_params *params,
1214                          struct qedr_create_qp_ureq *ureq)
1215 {
1216         /* QP handle to be written in CQE */
1217         params->qp_handle_lo = ureq->qp_handle_lo;
1218         params->qp_handle_hi = ureq->qp_handle_hi;
1219 }
1220
1221 static inline void
1222 qedr_init_qp_kernel_doorbell_sq(struct qedr_dev *dev, struct qedr_qp *qp)
1223 {
1224         qp->sq.db = dev->db_addr +
1225                     DB_ADDR_SHIFT(DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD);
1226         qp->sq.db_data.data.icid = qp->icid + 1;
1227 }
1228
1229 static inline void
1230 qedr_init_qp_kernel_doorbell_rq(struct qedr_dev *dev, struct qedr_qp *qp)
1231 {
1232         qp->rq.db = dev->db_addr +
1233                     DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD);
1234         qp->rq.db_data.data.icid = qp->icid;
1235 }
1236
1237 static inline int
1238 qedr_init_qp_kernel_params_rq(struct qedr_dev *dev,
1239                               struct qedr_qp *qp, struct ib_qp_init_attr *attrs)
1240 {
1241         /* Allocate driver internal RQ array */
1242         qp->rqe_wr_id = kcalloc(qp->rq.max_wr, sizeof(*qp->rqe_wr_id),
1243                                 GFP_KERNEL);
1244         if (!qp->rqe_wr_id)
1245                 return -ENOMEM;
1246
1247         DP_DEBUG(dev, QEDR_MSG_QP, "RQ max_wr set to %d.\n", qp->rq.max_wr);
1248
1249         return 0;
1250 }
1251
1252 static inline int
1253 qedr_init_qp_kernel_params_sq(struct qedr_dev *dev,
1254                               struct qedr_qp *qp,
1255                               struct ib_qp_init_attr *attrs,
1256                               struct qed_rdma_create_qp_in_params *params)
1257 {
1258         u32 temp_max_wr;
1259
1260         /* Allocate driver internal SQ array */
1261         temp_max_wr = attrs->cap.max_send_wr * dev->wq_multiplier;
1262         temp_max_wr = min_t(u32, temp_max_wr, dev->attr.max_sqe);
1263
1264         /* temp_max_wr is bounded by attr->max_sqe, which fits in a u16, so the cast is safe */
1265         qp->sq.max_wr = (u16)temp_max_wr;
1266         qp->wqe_wr_id = kcalloc(qp->sq.max_wr, sizeof(*qp->wqe_wr_id),
1267                                 GFP_KERNEL);
1268         if (!qp->wqe_wr_id)
1269                 return -ENOMEM;
1270
1271         DP_DEBUG(dev, QEDR_MSG_QP, "SQ max_wr set to %d.\n", qp->sq.max_wr);
1272
1273         /* QP handle to be written in CQE */
1274         params->qp_handle_lo = lower_32_bits((uintptr_t)qp);
1275         params->qp_handle_hi = upper_32_bits((uintptr_t)qp);
1276
1277         return 0;
1278 }
1279
1280 static inline int qedr_init_qp_kernel_sq(struct qedr_dev *dev,
1281                                          struct qedr_qp *qp,
1282                                          struct ib_qp_init_attr *attrs)
1283 {
1284         u32 n_sq_elems, n_sq_entries;
1285         int rc;
1286
1287         /* A single work request may take up to QEDR_MAX_SQ_WQE_SIZE elements in
1288          * the ring. The ring should allow at least a single WR, even if the
1289          * user requested none, due to allocation issues.
1290          */
1291         n_sq_entries = attrs->cap.max_send_wr;
1292         n_sq_entries = min_t(u32, n_sq_entries, dev->attr.max_sqe);
1293         n_sq_entries = max_t(u32, n_sq_entries, 1);
1294         n_sq_elems = n_sq_entries * QEDR_MAX_SQE_ELEMENTS_PER_SQE;
1295         rc = dev->ops->common->chain_alloc(dev->cdev,
1296                                            QED_CHAIN_USE_TO_PRODUCE,
1297                                            QED_CHAIN_MODE_PBL,
1298                                            QED_CHAIN_CNT_TYPE_U32,
1299                                            n_sq_elems,
1300                                            QEDR_SQE_ELEMENT_SIZE,
1301                                            &qp->sq.pbl);
1302         if (rc) {
1303                 DP_ERR(dev, "failed to allocate QP %p SQ\n", qp);
1304                 return rc;
1305         }
1306
1307         DP_DEBUG(dev, QEDR_MSG_SQ,
1308                  "SQ Pbl base addr = %llx max_send_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1309                  qed_chain_get_pbl_phys(&qp->sq.pbl), attrs->cap.max_send_wr,
1310                  n_sq_entries, qed_chain_get_capacity(&qp->sq.pbl), rc);
1311         return 0;
1312 }
1313
1314 static inline int qedr_init_qp_kernel_rq(struct qedr_dev *dev,
1315                                          struct qedr_qp *qp,
1316                                          struct ib_qp_init_attr *attrs)
1317 {
1318         u32 n_rq_elems, n_rq_entries;
1319         int rc;
1320
1321         /* A single work request may take up to QEDR_MAX_RQ_WQE_SIZE elements in
1322          * the ring. The ring should allow at least a single WR, even if the
1323          * user requested none, due to allocation issues.
1324          */
1325         n_rq_entries = max_t(u32, attrs->cap.max_recv_wr, 1);
1326         n_rq_elems = n_rq_entries * QEDR_MAX_RQE_ELEMENTS_PER_RQE;
1327         rc = dev->ops->common->chain_alloc(dev->cdev,
1328                                            QED_CHAIN_USE_TO_CONSUME_PRODUCE,
1329                                            QED_CHAIN_MODE_PBL,
1330                                            QED_CHAIN_CNT_TYPE_U32,
1331                                            n_rq_elems,
1332                                            QEDR_RQE_ELEMENT_SIZE,
1333                                            &qp->rq.pbl);
1334
1335         if (rc) {
1336                 DP_ERR(dev, "failed to allocate memory for QP %p RQ\n", qp);
1337                 return rc;
1338         }
1339
1340         DP_DEBUG(dev, QEDR_MSG_RQ,
1341                  "RQ Pbl base addr = %llx max_recv_wr=%d max_wr=%d capacity=%d, rc=%d\n",
1342                  qed_chain_get_pbl_phys(&qp->rq.pbl), attrs->cap.max_recv_wr,
1343                  n_rq_entries, qed_chain_get_capacity(&qp->rq.pbl), rc);
1344
1345         /* n_rq_entries fits in a u16, so the cast is safe */
1346         qp->rq.max_wr = (u16)n_rq_entries;
1347
1348         return 0;
1349 }
1350
1351 static inline void
1352 qedr_init_qp_in_params_sq(struct qedr_dev *dev,
1353                           struct qedr_pd *pd,
1354                           struct qedr_qp *qp,
1355                           struct ib_qp_init_attr *attrs,
1356                           struct ib_udata *udata,
1357                           struct qed_rdma_create_qp_in_params *params)
1358 {
1359         /* QP handle to be written in an async event */
1360         params->qp_handle_async_lo = lower_32_bits((uintptr_t)qp);
1361         params->qp_handle_async_hi = upper_32_bits((uintptr_t)qp);
1362
1363         params->signal_all = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR);
1364         params->fmr_and_reserved_lkey = !udata;
1365         params->pd = pd->pd_id;
1366         params->dpi = pd->uctx ? pd->uctx->dpi : dev->dpi;
1367         params->sq_cq_id = get_qedr_cq(attrs->send_cq)->icid;
1368         params->max_sq_sges = 0;
1369         params->stats_queue = 0;
1370
1371         if (udata) {
1372                 params->sq_num_pages = qp->usq.pbl_info.num_pbes;
1373                 params->sq_pbl_ptr = qp->usq.pbl_tbl->pa;
1374         } else {
1375                 params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
1376                 params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
1377         }
1378 }
1379
1380 static inline void
1381 qedr_init_qp_in_params_rq(struct qedr_qp *qp,
1382                           struct ib_qp_init_attr *attrs,
1383                           struct ib_udata *udata,
1384                           struct qed_rdma_create_qp_in_params *params)
1385 {
1386         params->rq_cq_id = get_qedr_cq(attrs->recv_cq)->icid;
1387         params->srq_id = 0;
1388         params->use_srq = false;
1389
1390         if (udata) {
1391                 params->rq_num_pages = qp->urq.pbl_info.num_pbes;
1392                 params->rq_pbl_ptr = qp->urq.pbl_tbl->pa;
1393         } else {
1394                 params->rq_num_pages = qed_chain_get_page_cnt(&qp->rq.pbl);
1395                 params->rq_pbl_ptr = qed_chain_get_pbl_phys(&qp->rq.pbl);
1396         }
1397 }
1398
1399 static inline void qedr_qp_user_print(struct qedr_dev *dev, struct qedr_qp *qp)
1400 {
1401         DP_DEBUG(dev, QEDR_MSG_QP,
1402                  "create qp: successfully created user QP. qp=%p, sq_addr=0x%llx, sq_len=%zd, rq_addr=0x%llx, rq_len=%zd\n",
1403                  qp, qp->usq.buf_addr, qp->usq.buf_len, qp->urq.buf_addr,
1404                  qp->urq.buf_len);
1405 }
1406
1407 static inline int qedr_init_user_qp(struct ib_ucontext *ib_ctx,
1408                                     struct qedr_dev *dev,
1409                                     struct qedr_qp *qp,
1410                                     struct qedr_create_qp_ureq *ureq)
1411 {
1412         int rc;
1413
1414         /* SQ - read access only (0), dma sync not required (0) */
1415         rc = qedr_init_user_queue(ib_ctx, dev, &qp->usq, ureq->sq_addr,
1416                                   ureq->sq_len, 0, 0);
1417         if (rc)
1418                 return rc;
1419
1420         /* RQ - read access only (0), dma sync not required (0) */
1421         rc = qedr_init_user_queue(ib_ctx, dev, &qp->urq, ureq->rq_addr,
1422                                   ureq->rq_len, 0, 0);
1423
1424         if (rc)
1425                 qedr_cleanup_user_sq(dev, qp);
1426         return rc;
1427 }
1428
1429 static inline int
1430 qedr_init_kernel_qp(struct qedr_dev *dev,
1431                     struct qedr_qp *qp,
1432                     struct ib_qp_init_attr *attrs,
1433                     struct qed_rdma_create_qp_in_params *params)
1434 {
1435         int rc;
1436
1437         rc = qedr_init_qp_kernel_sq(dev, qp, attrs);
1438         if (rc) {
1439                 DP_ERR(dev, "failed to init kernel QP %p SQ\n", qp);
1440                 return rc;
1441         }
1442
1443         rc = qedr_init_qp_kernel_params_sq(dev, qp, attrs, params);
1444         if (rc) {
1445                 dev->ops->common->chain_free(dev->cdev, &qp->sq.pbl);
1446                 DP_ERR(dev, "failed to init kernel QP %p SQ params\n", qp);
1447                 return rc;
1448         }
1449
1450         rc = qedr_init_qp_kernel_rq(dev, qp, attrs);
1451         if (rc) {
1452                 qedr_cleanup_kernel_sq(dev, qp);
1453                 DP_ERR(dev, "failed to init kernel QP %p RQ\n", qp);
1454                 return rc;
1455         }
1456
1457         rc = qedr_init_qp_kernel_params_rq(dev, qp, attrs);
1458         if (rc) {
1459                 DP_ERR(dev, "failed to init kernel QP %p RQ params\n", qp);
1460                 qedr_cleanup_kernel_sq(dev, qp);
1461                 dev->ops->common->chain_free(dev->cdev, &qp->rq.pbl);
1462                 return rc;
1463         }
1464
1465         return rc;
1466 }
1467
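/* Verbs entry point for QP creation. GSI QPs take a dedicated path; all
 * other QPs are created through the qed RDMA layer, with their queues
 * coming either from user memory (udata) or from kernel qed chains.
 */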
1468 struct ib_qp *qedr_create_qp(struct ib_pd *ibpd,
1469                              struct ib_qp_init_attr *attrs,
1470                              struct ib_udata *udata)
1471 {
1472         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
1473         struct qed_rdma_create_qp_out_params out_params;
1474         struct qed_rdma_create_qp_in_params in_params;
1475         struct qedr_pd *pd = get_qedr_pd(ibpd);
1476         struct ib_ucontext *ib_ctx = NULL;
1477         struct qedr_ucontext *ctx = NULL;
1478         struct qedr_create_qp_ureq ureq;
1479         struct qedr_qp *qp;
1480         int rc = 0;
1481
1482         DP_DEBUG(dev, QEDR_MSG_QP, "create qp: called from %s, pd=%p\n",
1483                  udata ? "user library" : "kernel", pd);
1484
1485         rc = qedr_check_qp_attrs(ibpd, dev, attrs);
1486         if (rc)
1487                 return ERR_PTR(rc);
1488
        /* SRQs are not supported; reject before allocating the QP so
         * nothing leaks on this path.
         */
        if (attrs->srq)
                return ERR_PTR(-EINVAL);

        qp = kzalloc(sizeof(*qp), GFP_KERNEL);
        if (!qp)
                return ERR_PTR(-ENOMEM);
1495
1496         DP_DEBUG(dev, QEDR_MSG_QP,
1497                  "create qp: sq_cq=%p, sq_icid=%d, rq_cq=%p, rq_icid=%d\n",
1498                  get_qedr_cq(attrs->send_cq),
1499                  get_qedr_cq(attrs->send_cq)->icid,
1500                  get_qedr_cq(attrs->recv_cq),
1501                  get_qedr_cq(attrs->recv_cq)->icid);
1502
1503         qedr_set_qp_init_params(dev, qp, pd, attrs);
1504
1505         if (attrs->qp_type == IB_QPT_GSI) {
1506                 if (udata) {
1507                         DP_ERR(dev,
1508                                "create qp: unexpected udata when creating GSI QP\n");
1509                         goto err0;
1510                 }
1511                 return qedr_create_gsi_qp(dev, attrs, qp);
1512         }
1513
1514         memset(&in_params, 0, sizeof(in_params));
1515
1516         if (udata) {
                if (!(ibpd->uobject && ibpd->uobject->context))
1518                         goto err0;
1519
1520                 ib_ctx = ibpd->uobject->context;
1521                 ctx = get_qedr_ucontext(ib_ctx);
1522
1523                 memset(&ureq, 0, sizeof(ureq));
1524                 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq))) {
1525                         DP_ERR(dev,
1526                                "create qp: problem copying data from user space\n");
1527                         goto err0;
1528                 }
1529
1530                 rc = qedr_init_user_qp(ib_ctx, dev, qp, &ureq);
1531                 if (rc)
1532                         goto err0;
1533
1534                 qedr_init_qp_user_params(&in_params, &ureq);
1535         } else {
1536                 rc = qedr_init_kernel_qp(dev, qp, attrs, &in_params);
1537                 if (rc)
1538                         goto err0;
1539         }
1540
1541         qedr_init_qp_in_params_sq(dev, pd, qp, attrs, udata, &in_params);
1542         qedr_init_qp_in_params_rq(qp, attrs, udata, &in_params);
1543
1544         qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
1545                                               &in_params, &out_params);
1546
1547         if (!qp->qed_qp)
1548                 goto err1;
1549
1550         qp->qp_id = out_params.qp_id;
1551         qp->icid = out_params.icid;
1552         qp->ibqp.qp_num = qp->qp_id;
1553
1554         if (udata) {
1555                 rc = qedr_copy_qp_uresp(dev, qp, udata);
1556                 if (rc)
1557                         goto err2;
1558
1559                 qedr_qp_user_print(dev, qp);
1560         } else {
1561                 qedr_init_qp_kernel_doorbell_sq(dev, qp);
1562                 qedr_init_qp_kernel_doorbell_rq(dev, qp);
1563         }
1564
1565         DP_DEBUG(dev, QEDR_MSG_QP, "created %s space QP %p\n",
1566                  udata ? "user" : "kernel", qp);
1567
1568         return &qp->ibqp;
1569
1570 err2:
1571         rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
1572         if (rc)
                DP_ERR(dev, "create qp: fatal fault. rc=%d\n", rc);
1574 err1:
1575         if (udata) {
1576                 qedr_cleanup_user_sq(dev, qp);
1577                 qedr_cleanup_user_rq(dev, qp);
1578         } else {
1579                 qedr_cleanup_kernel_sq(dev, qp);
1580                 qedr_cleanup_kernel_rq(dev, qp);
1581         }
1582
1583 err0:
1584         kfree(qp);
1585
1586         return ERR_PTR(-EFAULT);
1587 }
1588
1589 enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
1590 {
1591         switch (qp_state) {
1592         case QED_ROCE_QP_STATE_RESET:
1593                 return IB_QPS_RESET;
1594         case QED_ROCE_QP_STATE_INIT:
1595                 return IB_QPS_INIT;
1596         case QED_ROCE_QP_STATE_RTR:
1597                 return IB_QPS_RTR;
1598         case QED_ROCE_QP_STATE_RTS:
1599                 return IB_QPS_RTS;
1600         case QED_ROCE_QP_STATE_SQD:
1601                 return IB_QPS_SQD;
1602         case QED_ROCE_QP_STATE_ERR:
1603                 return IB_QPS_ERR;
1604         case QED_ROCE_QP_STATE_SQE:
1605                 return IB_QPS_SQE;
1606         }
1607         return IB_QPS_ERR;
1608 }
1609
1610 enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
1611 {
1612         switch (qp_state) {
1613         case IB_QPS_RESET:
1614                 return QED_ROCE_QP_STATE_RESET;
1615         case IB_QPS_INIT:
1616                 return QED_ROCE_QP_STATE_INIT;
1617         case IB_QPS_RTR:
1618                 return QED_ROCE_QP_STATE_RTR;
1619         case IB_QPS_RTS:
1620                 return QED_ROCE_QP_STATE_RTS;
1621         case IB_QPS_SQD:
1622                 return QED_ROCE_QP_STATE_SQD;
1623         case IB_QPS_ERR:
1624                 return QED_ROCE_QP_STATE_ERR;
1625         default:
1626                 return QED_ROCE_QP_STATE_ERR;
1627         }
1628 }
1629
1630 static void qedr_reset_qp_hwq_info(struct qedr_qp_hwq_info *qph)
1631 {
1632         qed_chain_reset(&qph->pbl);
1633         qph->prod = 0;
1634         qph->cons = 0;
1635         qph->wqe_cons = 0;
1636         qph->db_data.data.value = cpu_to_le16(0);
1637 }
1638
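/* Validate and apply a RoCE QP state transition. The INIT->RTR move
 * also rings the RQ doorbell, in case receives were posted while the
 * QP was still in INIT.
 */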
1639 static int qedr_update_qp_state(struct qedr_dev *dev,
1640                                 struct qedr_qp *qp,
1641                                 enum qed_roce_qp_state new_state)
1642 {
1643         int status = 0;
1644
        /* No transition requested */
        if (new_state == qp->state)
                return 0;
1647
1648         switch (qp->state) {
1649         case QED_ROCE_QP_STATE_RESET:
1650                 switch (new_state) {
1651                 case QED_ROCE_QP_STATE_INIT:
1652                         qp->prev_wqe_size = 0;
1653                         qedr_reset_qp_hwq_info(&qp->sq);
1654                         qedr_reset_qp_hwq_info(&qp->rq);
1655                         break;
1656                 default:
1657                         status = -EINVAL;
1658                         break;
                }
1660                 break;
1661         case QED_ROCE_QP_STATE_INIT:
1662                 switch (new_state) {
1663                 case QED_ROCE_QP_STATE_RTR:
1664                         /* Update doorbell (in case post_recv was
1665                          * done before move to RTR)
1666                          */
1667                         wmb();
1668                         writel(qp->rq.db_data.raw, qp->rq.db);
1669                         /* Make sure write takes effect */
1670                         mmiowb();
1671                         break;
1672                 case QED_ROCE_QP_STATE_ERR:
1673                         break;
1674                 default:
1675                         /* Invalid state change. */
1676                         status = -EINVAL;
1677                         break;
                }
1679                 break;
1680         case QED_ROCE_QP_STATE_RTR:
1681                 /* RTR->XXX */
1682                 switch (new_state) {
1683                 case QED_ROCE_QP_STATE_RTS:
1684                         break;
1685                 case QED_ROCE_QP_STATE_ERR:
1686                         break;
1687                 default:
1688                         /* Invalid state change. */
1689                         status = -EINVAL;
1690                         break;
                }
1692                 break;
1693         case QED_ROCE_QP_STATE_RTS:
1694                 /* RTS->XXX */
1695                 switch (new_state) {
1696                 case QED_ROCE_QP_STATE_SQD:
1697                         break;
1698                 case QED_ROCE_QP_STATE_ERR:
1699                         break;
1700                 default:
1701                         /* Invalid state change. */
1702                         status = -EINVAL;
1703                         break;
                }
1705                 break;
1706         case QED_ROCE_QP_STATE_SQD:
1707                 /* SQD->XXX */
1708                 switch (new_state) {
1709                 case QED_ROCE_QP_STATE_RTS:
1710                 case QED_ROCE_QP_STATE_ERR:
1711                         break;
1712                 default:
1713                         /* Invalid state change. */
1714                         status = -EINVAL;
1715                         break;
                }
1717                 break;
1718         case QED_ROCE_QP_STATE_ERR:
1719                 /* ERR->XXX */
1720                 switch (new_state) {
1721                 case QED_ROCE_QP_STATE_RESET:
1722                         break;
1723                 default:
1724                         status = -EINVAL;
1725                         break;
                }
1727                 break;
1728         default:
1729                 status = -EINVAL;
1730                 break;
        }
1732
1733         return status;
1734 }
1735
1736 int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1737                    int attr_mask, struct ib_udata *udata)
1738 {
1739         struct qedr_qp *qp = get_qedr_qp(ibqp);
1740         struct qed_rdma_modify_qp_in_params qp_params = { 0 };
1741         struct qedr_dev *dev = get_qedr_dev(&qp->dev->ibdev);
1742         enum ib_qp_state old_qp_state, new_qp_state;
1743         int rc = 0;
1744
1745         DP_DEBUG(dev, QEDR_MSG_QP,
                 "modify qp: qp %p attr_mask=0x%x, state=%d\n", qp, attr_mask,
1747                  attr->qp_state);
1748
1749         old_qp_state = qedr_get_ibqp_state(qp->state);
1750         if (attr_mask & IB_QP_STATE)
1751                 new_qp_state = attr->qp_state;
1752         else
1753                 new_qp_state = old_qp_state;
1754
        if (!ib_modify_qp_is_ok(old_qp_state, new_qp_state, ibqp->qp_type,
                                attr_mask, IB_LINK_LAYER_ETHERNET)) {
                DP_ERR(dev,
                       "modify qp: invalid attribute mask=0x%x specified for qpn=0x%x of type=0x%x old_qp_state=0x%x, new_qp_state=0x%x\n",
                       attr_mask, qp->qp_id, ibqp->qp_type, old_qp_state,
                       new_qp_state);
1763                 rc = -EINVAL;
1764                 goto err;
1765         }
1766
1767         /* Translate the masks... */
1768         if (attr_mask & IB_QP_STATE) {
1769                 SET_FIELD(qp_params.modify_flags,
1770                           QED_RDMA_MODIFY_QP_VALID_NEW_STATE, 1);
1771                 qp_params.new_state = qedr_get_state_from_ibqp(attr->qp_state);
1772         }
1773
1774         if (attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
1775                 qp_params.sqd_async = true;
1776
1777         if (attr_mask & IB_QP_PKEY_INDEX) {
1778                 SET_FIELD(qp_params.modify_flags,
1779                           QED_ROCE_MODIFY_QP_VALID_PKEY, 1);
1780                 if (attr->pkey_index >= QEDR_ROCE_PKEY_TABLE_LEN) {
1781                         rc = -EINVAL;
1782                         goto err;
1783                 }
1784
1785                 qp_params.pkey = QEDR_ROCE_PKEY_DEFAULT;
1786         }
1787
1788         if (attr_mask & IB_QP_QKEY)
1789                 qp->qkey = attr->qkey;
1790
1791         if (attr_mask & IB_QP_ACCESS_FLAGS) {
1792                 SET_FIELD(qp_params.modify_flags,
1793                           QED_RDMA_MODIFY_QP_VALID_RDMA_OPS_EN, 1);
1794                 qp_params.incoming_rdma_read_en = attr->qp_access_flags &
1795                                                   IB_ACCESS_REMOTE_READ;
1796                 qp_params.incoming_rdma_write_en = attr->qp_access_flags &
1797                                                    IB_ACCESS_REMOTE_WRITE;
1798                 qp_params.incoming_atomic_en = attr->qp_access_flags &
1799                                                IB_ACCESS_REMOTE_ATOMIC;
1800         }
1801
1802         if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
1803                 if (attr_mask & IB_QP_PATH_MTU) {
1804                         if (attr->path_mtu < IB_MTU_256 ||
1805                             attr->path_mtu > IB_MTU_4096) {
1806                                 pr_err("error: Only MTU sizes of 256, 512, 1024, 2048 and 4096 are supported by RoCE\n");
1807                                 rc = -EINVAL;
1808                                 goto err;
1809                         }
1810                         qp->mtu = min(ib_mtu_enum_to_int(attr->path_mtu),
1811                                       ib_mtu_enum_to_int(iboe_get_mtu
1812                                                          (dev->ndev->mtu)));
1813                 }
1814
1815                 if (!qp->mtu) {
1816                         qp->mtu =
1817                         ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1818                         pr_err("Fixing zeroed MTU to qp->mtu = %d\n", qp->mtu);
1819                 }
1820
1821                 SET_FIELD(qp_params.modify_flags,
1822                           QED_ROCE_MODIFY_QP_VALID_ADDRESS_VECTOR, 1);
1823
1824                 qp_params.traffic_class_tos = attr->ah_attr.grh.traffic_class;
1825                 qp_params.flow_label = attr->ah_attr.grh.flow_label;
1826                 qp_params.hop_limit_ttl = attr->ah_attr.grh.hop_limit;
1827
1828                 qp->sgid_idx = attr->ah_attr.grh.sgid_index;
1829
1830                 rc = get_gid_info_from_table(ibqp, attr, attr_mask, &qp_params);
1831                 if (rc) {
1832                         DP_ERR(dev,
1833                                "modify qp: problems with GID index %d (rc=%d)\n",
1834                                attr->ah_attr.grh.sgid_index, rc);
1835                         return rc;
1836                 }
1837
1838                 rc = qedr_get_dmac(dev, &attr->ah_attr,
1839                                    qp_params.remote_mac_addr);
1840                 if (rc)
1841                         return rc;
1842
1843                 qp_params.use_local_mac = true;
1844                 ether_addr_copy(qp_params.local_mac_addr, dev->ndev->dev_addr);
1845
1846                 DP_DEBUG(dev, QEDR_MSG_QP, "dgid=%x:%x:%x:%x\n",
1847                          qp_params.dgid.dwords[0], qp_params.dgid.dwords[1],
1848                          qp_params.dgid.dwords[2], qp_params.dgid.dwords[3]);
1849                 DP_DEBUG(dev, QEDR_MSG_QP, "sgid=%x:%x:%x:%x\n",
1850                          qp_params.sgid.dwords[0], qp_params.sgid.dwords[1],
1851                          qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
1852                 DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
1853                          qp_params.remote_mac_addr);
1855
1856                 qp_params.mtu = qp->mtu;
1857                 qp_params.lb_indication = false;
1858         }
1859
1860         if (!qp_params.mtu) {
1861                 /* Stay with current MTU */
1862                 if (qp->mtu)
1863                         qp_params.mtu = qp->mtu;
1864                 else
1865                         qp_params.mtu =
1866                             ib_mtu_enum_to_int(iboe_get_mtu(dev->ndev->mtu));
1867         }
1868
1869         if (attr_mask & IB_QP_TIMEOUT) {
1870                 SET_FIELD(qp_params.modify_flags,
1871                           QED_ROCE_MODIFY_QP_VALID_ACK_TIMEOUT, 1);
1872
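                /* IBTA: ack timeout = 4.096 usec * 2^attr->timeout.
                 * 4096 * 2^t / 1000 / 1000 converts that to the
                 * millisecond units the FW expects.
                 */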
1873                 qp_params.ack_timeout = attr->timeout;
1874                 if (attr->timeout) {
1875                         u32 temp;
1876
1877                         temp = 4096 * (1UL << attr->timeout) / 1000 / 1000;
1878                         /* FW requires [msec] */
1879                         qp_params.ack_timeout = temp;
1880                 } else {
1881                         /* Infinite */
1882                         qp_params.ack_timeout = 0;
1883                 }
1884         }
1885         if (attr_mask & IB_QP_RETRY_CNT) {
1886                 SET_FIELD(qp_params.modify_flags,
1887                           QED_ROCE_MODIFY_QP_VALID_RETRY_CNT, 1);
1888                 qp_params.retry_cnt = attr->retry_cnt;
1889         }
1890
1891         if (attr_mask & IB_QP_RNR_RETRY) {
1892                 SET_FIELD(qp_params.modify_flags,
1893                           QED_ROCE_MODIFY_QP_VALID_RNR_RETRY_CNT, 1);
1894                 qp_params.rnr_retry_cnt = attr->rnr_retry;
1895         }
1896
1897         if (attr_mask & IB_QP_RQ_PSN) {
1898                 SET_FIELD(qp_params.modify_flags,
1899                           QED_ROCE_MODIFY_QP_VALID_RQ_PSN, 1);
1900                 qp_params.rq_psn = attr->rq_psn;
1901                 qp->rq_psn = attr->rq_psn;
1902         }
1903
1904         if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1905                 if (attr->max_rd_atomic > dev->attr.max_qp_req_rd_atomic_resc) {
1906                         rc = -EINVAL;
1907                         DP_ERR(dev,
1908                                "unsupported max_rd_atomic=%d, supported=%d\n",
1909                                attr->max_rd_atomic,
1910                                dev->attr.max_qp_req_rd_atomic_resc);
1911                         goto err;
1912                 }
1913
1914                 SET_FIELD(qp_params.modify_flags,
1915                           QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_REQ, 1);
1916                 qp_params.max_rd_atomic_req = attr->max_rd_atomic;
1917         }
1918
1919         if (attr_mask & IB_QP_MIN_RNR_TIMER) {
1920                 SET_FIELD(qp_params.modify_flags,
1921                           QED_ROCE_MODIFY_QP_VALID_MIN_RNR_NAK_TIMER, 1);
1922                 qp_params.min_rnr_nak_timer = attr->min_rnr_timer;
1923         }
1924
1925         if (attr_mask & IB_QP_SQ_PSN) {
1926                 SET_FIELD(qp_params.modify_flags,
1927                           QED_ROCE_MODIFY_QP_VALID_SQ_PSN, 1);
1928                 qp_params.sq_psn = attr->sq_psn;
1929                 qp->sq_psn = attr->sq_psn;
1930         }
1931
1932         if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1933                 if (attr->max_dest_rd_atomic >
1934                     dev->attr.max_qp_resp_rd_atomic_resc) {
1935                         DP_ERR(dev,
1936                                "unsupported max_dest_rd_atomic=%d, supported=%d\n",
1937                                attr->max_dest_rd_atomic,
1938                                dev->attr.max_qp_resp_rd_atomic_resc);
1939
1940                         rc = -EINVAL;
1941                         goto err;
1942                 }
1943
1944                 SET_FIELD(qp_params.modify_flags,
1945                           QED_RDMA_MODIFY_QP_VALID_MAX_RD_ATOMIC_RESP, 1);
1946                 qp_params.max_rd_atomic_resp = attr->max_dest_rd_atomic;
1947         }
1948
1949         if (attr_mask & IB_QP_DEST_QPN) {
1950                 SET_FIELD(qp_params.modify_flags,
1951                           QED_ROCE_MODIFY_QP_VALID_DEST_QP, 1);
1952
1953                 qp_params.dest_qp = attr->dest_qp_num;
1954                 qp->dest_qp_num = attr->dest_qp_num;
1955         }
1956
1957         if (qp->qp_type != IB_QPT_GSI)
1958                 rc = dev->ops->rdma_modify_qp(dev->rdma_ctx,
1959                                               qp->qed_qp, &qp_params);
1960
1961         if (attr_mask & IB_QP_STATE) {
1962                 if ((qp->qp_type != IB_QPT_GSI) && (!udata))
1963                         qedr_update_qp_state(dev, qp, qp_params.new_state);
1964                 qp->state = qp_params.new_state;
1965         }
1966
1967 err:
1968         return rc;
1969 }
1970
1971 static int qedr_to_ib_qp_acc_flags(struct qed_rdma_query_qp_out_params *params)
1972 {
1973         int ib_qp_acc_flags = 0;
1974
1975         if (params->incoming_rdma_write_en)
1976                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1977         if (params->incoming_rdma_read_en)
1978                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_READ;
1979         if (params->incoming_atomic_en)
1980                 ib_qp_acc_flags |= IB_ACCESS_REMOTE_ATOMIC;
1981         ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1982         return ib_qp_acc_flags;
1983 }
1984
1985 int qedr_query_qp(struct ib_qp *ibqp,
1986                   struct ib_qp_attr *qp_attr,
1987                   int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1988 {
1989         struct qed_rdma_query_qp_out_params params;
1990         struct qedr_qp *qp = get_qedr_qp(ibqp);
1991         struct qedr_dev *dev = qp->dev;
1992         int rc = 0;
1993
1994         memset(&params, 0, sizeof(params));
1995
1996         rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
1997         if (rc)
1998                 goto err;
1999
2000         memset(qp_attr, 0, sizeof(*qp_attr));
2001         memset(qp_init_attr, 0, sizeof(*qp_init_attr));
2002
2003         qp_attr->qp_state = qedr_get_ibqp_state(params.state);
2004         qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
        /* params.mtu is in bytes; assuming ib_mtu_int_to_enum() is
         * available in this tree, map it directly to the IB enum rather
         * than going through the netdev-oriented iboe_get_mtu().
         */
        qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
2006         qp_attr->path_mig_state = IB_MIG_MIGRATED;
2007         qp_attr->rq_psn = params.rq_psn;
2008         qp_attr->sq_psn = params.sq_psn;
2009         qp_attr->dest_qp_num = params.dest_qp;
2010
2011         qp_attr->qp_access_flags = qedr_to_ib_qp_acc_flags(&params);
2012
2013         qp_attr->cap.max_send_wr = qp->sq.max_wr;
2014         qp_attr->cap.max_recv_wr = qp->rq.max_wr;
2015         qp_attr->cap.max_send_sge = qp->sq.max_sges;
2016         qp_attr->cap.max_recv_sge = qp->rq.max_sges;
2017         qp_attr->cap.max_inline_data = qp->max_inline_data;
2018         qp_init_attr->cap = qp_attr->cap;
2019
2020         memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
2021                sizeof(qp_attr->ah_attr.grh.dgid.raw));
2022
2023         qp_attr->ah_attr.grh.flow_label = params.flow_label;
2024         qp_attr->ah_attr.grh.sgid_index = qp->sgid_idx;
2025         qp_attr->ah_attr.grh.hop_limit = params.hop_limit_ttl;
2026         qp_attr->ah_attr.grh.traffic_class = params.traffic_class_tos;
2027
2028         qp_attr->ah_attr.ah_flags = IB_AH_GRH;
2029         qp_attr->ah_attr.port_num = 1;
2030         qp_attr->ah_attr.sl = 0;
2031         qp_attr->timeout = params.timeout;
2032         qp_attr->rnr_retry = params.rnr_retry;
2033         qp_attr->retry_cnt = params.retry_cnt;
2034         qp_attr->min_rnr_timer = params.min_rnr_nak_timer;
2035         qp_attr->pkey_index = params.pkey_index;
2036         qp_attr->port_num = 1;
2037         qp_attr->ah_attr.src_path_bits = 0;
2038         qp_attr->ah_attr.static_rate = 0;
2039         qp_attr->alt_pkey_index = 0;
2040         qp_attr->alt_port_num = 0;
2041         qp_attr->alt_timeout = 0;
2042         memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
2043
2044         qp_attr->sq_draining = (params.state == QED_ROCE_QP_STATE_SQD) ? 1 : 0;
2045         qp_attr->max_dest_rd_atomic = params.max_dest_rd_atomic;
2046         qp_attr->max_rd_atomic = params.max_rd_atomic;
2047         qp_attr->en_sqd_async_notify = (params.sqd_async) ? 1 : 0;
2048
2049         DP_DEBUG(dev, QEDR_MSG_QP, "QEDR_QUERY_QP: max_inline_data=%d\n",
2050                  qp_attr->cap.max_inline_data);
2051
2052 err:
2053         return rc;
2054 }
2055
2056 int qedr_destroy_qp(struct ib_qp *ibqp)
2057 {
2058         struct qedr_qp *qp = get_qedr_qp(ibqp);
2059         struct qedr_dev *dev = qp->dev;
2060         struct ib_qp_attr attr;
2061         int attr_mask = 0;
2062         int rc = 0;
2063
2064         DP_DEBUG(dev, QEDR_MSG_QP, "destroy qp: destroying %p, qp type=%d\n",
2065                  qp, qp->qp_type);
2066
        /* qp->state holds a single qed state value, not a bitmask, so
         * each state must be compared individually.
         */
        if (qp->state != QED_ROCE_QP_STATE_RESET &&
            qp->state != QED_ROCE_QP_STATE_ERR &&
            qp->state != QED_ROCE_QP_STATE_INIT) {
2069                 attr.qp_state = IB_QPS_ERR;
2070                 attr_mask |= IB_QP_STATE;
2071
2072                 /* Change the QP state to ERROR */
2073                 qedr_modify_qp(ibqp, &attr, attr_mask, NULL);
2074         }
2075
2076         if (qp->qp_type != IB_QPT_GSI) {
2077                 rc = dev->ops->rdma_destroy_qp(dev->rdma_ctx, qp->qed_qp);
2078                 if (rc)
2079                         return rc;
2080         } else {
2081                 qedr_destroy_gsi_qp(dev);
2082         }
2083
2084         if (ibqp->uobject && ibqp->uobject->context) {
2085                 qedr_cleanup_user_sq(dev, qp);
2086                 qedr_cleanup_user_rq(dev, qp);
2087         } else {
2088                 qedr_cleanup_kernel_sq(dev, qp);
2089                 qedr_cleanup_kernel_rq(dev, qp);
2090         }
2091
2092         kfree(qp);
2093
2094         return rc;
2095 }
2096
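/* No device object backs an AH in qedr; only cache the attributes so
 * the UD/GSI transmit path can build headers from them later.
 */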
2097 struct ib_ah *qedr_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
2098 {
2099         struct qedr_ah *ah;
2100
2101         ah = kzalloc(sizeof(*ah), GFP_ATOMIC);
2102         if (!ah)
2103                 return ERR_PTR(-ENOMEM);
2104
2105         ah->attr = *attr;
2106
2107         return &ah->ibah;
2108 }
2109
2110 int qedr_destroy_ah(struct ib_ah *ibah)
2111 {
2112         struct qedr_ah *ah = get_qedr_ah(ibah);
2113
2114         kfree(ah);
2115         return 0;
2116 }
2117
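/* Release every PBL owned by this MR: the primary table plus anything
 * on the free and in-use lists.
 */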
2118 static void free_mr_info(struct qedr_dev *dev, struct mr_info *info)
2119 {
2120         struct qedr_pbl *pbl, *tmp;
2121
2122         if (info->pbl_table)
2123                 list_add_tail(&info->pbl_table->list_entry,
2124                               &info->free_pbl_list);
2125
2126         if (!list_empty(&info->inuse_pbl_list))
2127                 list_splice(&info->inuse_pbl_list, &info->free_pbl_list);
2128
2129         list_for_each_entry_safe(pbl, tmp, &info->free_pbl_list, list_entry) {
2130                 list_del(&pbl->list_entry);
2131                 qedr_free_pbl(dev, &info->pbl_info, pbl);
2132         }
2133 }
2134
2135 static int init_mr_info(struct qedr_dev *dev, struct mr_info *info,
2136                         size_t page_list_len, bool two_layered)
2137 {
2138         struct qedr_pbl *tmp;
2139         int rc;
2140
2141         INIT_LIST_HEAD(&info->free_pbl_list);
2142         INIT_LIST_HEAD(&info->inuse_pbl_list);
2143
2144         rc = qedr_prepare_pbl_tbl(dev, &info->pbl_info,
2145                                   page_list_len, two_layered);
2146         if (rc)
2147                 goto done;
2148
2149         info->pbl_table = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2150         if (!info->pbl_table) {
2151                 rc = -ENOMEM;
2152                 goto done;
2153         }
2154
2155         DP_DEBUG(dev, QEDR_MSG_MR, "pbl_table_pa = %pa\n",
2156                  &info->pbl_table->pa);
2157
        /* In the usual case two PBLs are used, so allocate a second
         * one up front and park it on the free list.
         */
2161         tmp = qedr_alloc_pbl_tbl(dev, &info->pbl_info, GFP_KERNEL);
2162         if (!tmp) {
2163                 DP_DEBUG(dev, QEDR_MSG_MR, "Extra PBL is not allocated\n");
2164                 goto done;
2165         }
2166
2167         list_add_tail(&tmp->list_entry, &info->free_pbl_list);
2168
2169         DP_DEBUG(dev, QEDR_MSG_MR, "extra pbl_table_pa = %pa\n", &tmp->pa);
2170
2171 done:
2172         if (rc)
2173                 free_mr_info(dev, info);
2174
2175         return rc;
2176 }
2177
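/* Register a user memory region: pin the pages, build the PBL(s)
 * describing them and register the resulting TID with the device.
 */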
2178 struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
2179                                u64 usr_addr, int acc, struct ib_udata *udata)
2180 {
2181         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2182         struct qedr_mr *mr;
2183         struct qedr_pd *pd;
2184         int rc = -ENOMEM;
2185
2186         pd = get_qedr_pd(ibpd);
2187         DP_DEBUG(dev, QEDR_MSG_MR,
2188                  "qedr_register user mr pd = %d start = %lld, len = %lld, usr_addr = %lld, acc = %d\n",
2189                  pd->pd_id, start, len, usr_addr, acc);
2190
2191         if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
2192                 return ERR_PTR(-EINVAL);
2193
2194         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2195         if (!mr)
2196                 return ERR_PTR(rc);
2197
2198         mr->type = QEDR_MR_USER;
2199
2200         mr->umem = ib_umem_get(ibpd->uobject->context, start, len, acc, 0);
2201         if (IS_ERR(mr->umem)) {
2202                 rc = -EFAULT;
2203                 goto err0;
2204         }
2205
2206         rc = init_mr_info(dev, &mr->info, ib_umem_page_count(mr->umem), 1);
2207         if (rc)
2208                 goto err1;
2209
2210         qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
2211                            &mr->info.pbl_info);
2212
2213         rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2214         if (rc) {
2215                 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2216                 goto err1;
2217         }
2218
2219         /* Index only, 18 bit long, lkey = itid << 8 | key */
2220         mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2221         mr->hw_mr.key = 0;
2222         mr->hw_mr.pd = pd->pd_id;
2223         mr->hw_mr.local_read = 1;
2224         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2225         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2226         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2227         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2228         mr->hw_mr.mw_bind = false;
2229         mr->hw_mr.pbl_ptr = mr->info.pbl_table[0].pa;
2230         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2231         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2232         mr->hw_mr.page_size_log = ilog2(mr->umem->page_size);
2233         mr->hw_mr.fbo = ib_umem_offset(mr->umem);
2234         mr->hw_mr.length = len;
2235         mr->hw_mr.vaddr = usr_addr;
2236         mr->hw_mr.zbva = false;
2237         mr->hw_mr.phy_mr = false;
2238         mr->hw_mr.dma_mr = false;
2239
2240         rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2241         if (rc) {
2242                 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2243                 goto err2;
2244         }
2245
2246         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2247         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2248             mr->hw_mr.remote_atomic)
2249                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2250
2251         DP_DEBUG(dev, QEDR_MSG_MR, "register user mr lkey: %x\n",
2252                  mr->ibmr.lkey);
2253         return &mr->ibmr;
2254
2255 err2:
2256         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
err1:
        /* Release all PBLs (not just the primary table) and the pinned
         * user pages; free_mr_info() is a no-op if init_mr_info() failed.
         */
        free_mr_info(dev, &mr->info);
        ib_umem_release(mr->umem);
2259 err0:
2260         kfree(mr);
2261         return ERR_PTR(rc);
2262 }
2263
2264 int qedr_dereg_mr(struct ib_mr *ib_mr)
2265 {
2266         struct qedr_mr *mr = get_qedr_mr(ib_mr);
2267         struct qedr_dev *dev = get_qedr_dev(ib_mr->device);
2268         int rc = 0;
2269
2270         rc = dev->ops->rdma_deregister_tid(dev->rdma_ctx, mr->hw_mr.itid);
2271         if (rc)
2272                 return rc;
2273
2274         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2275
2276         if ((mr->type != QEDR_MR_DMA) && (mr->type != QEDR_MR_FRMR))
2277                 qedr_free_pbl(dev, &mr->info.pbl_info, mr->info.pbl_table);
2278
2279         /* it could be user registered memory. */
2280         if (mr->umem)
2281                 ib_umem_release(mr->umem);
2282
2283         kfree(mr);
2284
2285         return rc;
2286 }
2287
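/* Common allocation path for fast-registration MRs: reserve the PBL
 * bookkeeping and a TID now; the actual pages are supplied later via
 * qedr_map_mr_sg()/IB_WR_REG_MR.
 */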
2288 struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
2289 {
2290         struct qedr_pd *pd = get_qedr_pd(ibpd);
2291         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2292         struct qedr_mr *mr;
2293         int rc = -ENOMEM;
2294
2295         DP_DEBUG(dev, QEDR_MSG_MR,
2296                  "qedr_alloc_frmr pd = %d max_page_list_len= %d\n", pd->pd_id,
2297                  max_page_list_len);
2298
2299         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2300         if (!mr)
2301                 return ERR_PTR(rc);
2302
2303         mr->dev = dev;
2304         mr->type = QEDR_MR_FRMR;
2305
2306         rc = init_mr_info(dev, &mr->info, max_page_list_len, 1);
2307         if (rc)
2308                 goto err0;
2309
2310         rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2311         if (rc) {
2312                 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
                goto err1;
2314         }
2315
2316         /* Index only, 18 bit long, lkey = itid << 8 | key */
2317         mr->hw_mr.tid_type = QED_RDMA_TID_FMR;
2318         mr->hw_mr.key = 0;
2319         mr->hw_mr.pd = pd->pd_id;
2320         mr->hw_mr.local_read = 1;
2321         mr->hw_mr.local_write = 0;
2322         mr->hw_mr.remote_read = 0;
2323         mr->hw_mr.remote_write = 0;
2324         mr->hw_mr.remote_atomic = 0;
2325         mr->hw_mr.mw_bind = false;
2326         mr->hw_mr.pbl_ptr = 0;
2327         mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered;
2328         mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size);
2329         mr->hw_mr.fbo = 0;
2330         mr->hw_mr.length = 0;
2331         mr->hw_mr.vaddr = 0;
2332         mr->hw_mr.zbva = false;
2333         mr->hw_mr.phy_mr = true;
2334         mr->hw_mr.dma_mr = false;
2335
2336         rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2337         if (rc) {
2338                 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
                goto err2;
2340         }
2341
2342         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2343         mr->ibmr.rkey = mr->ibmr.lkey;
2344
2345         DP_DEBUG(dev, QEDR_MSG_MR, "alloc frmr: %x\n", mr->ibmr.lkey);
2346         return mr;
2347
err2:
        dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
err1:
        free_mr_info(dev, &mr->info);
err0:
2351         kfree(mr);
2352         return ERR_PTR(rc);
2353 }
2354
2355 struct ib_mr *qedr_alloc_mr(struct ib_pd *ibpd,
2356                             enum ib_mr_type mr_type, u32 max_num_sg)
2357 {
2359         struct qedr_mr *mr;
2360
2361         if (mr_type != IB_MR_TYPE_MEM_REG)
2362                 return ERR_PTR(-EINVAL);
2363
        mr = __qedr_alloc_mr(ibpd, max_num_sg);
        if (IS_ERR(mr))
                return ERR_CAST(mr);
2370
2371         return &mr->ibmr;
2372 }
2373
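/* ib_sg_to_pages() callback: write one page address into the next free
 * PBE, walking across PBL pages as needed.
 */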
2374 static int qedr_set_page(struct ib_mr *ibmr, u64 addr)
2375 {
2376         struct qedr_mr *mr = get_qedr_mr(ibmr);
2377         struct qedr_pbl *pbl_table;
2378         struct regpair *pbe;
2379         u32 pbes_in_page;
2380
2381         if (unlikely(mr->npages == mr->info.pbl_info.num_pbes)) {
                DP_ERR(mr->dev, "qedr_set_page fails, PBE table is full (npages=%d)\n",
                       mr->npages);
2383                 return -ENOMEM;
2384         }
2385
2386         DP_DEBUG(mr->dev, QEDR_MSG_MR, "qedr_set_page pages[%d] = 0x%llx\n",
2387                  mr->npages, addr);
2388
2389         pbes_in_page = mr->info.pbl_info.pbl_size / sizeof(u64);
2390         pbl_table = mr->info.pbl_table + (mr->npages / pbes_in_page);
2391         pbe = (struct regpair *)pbl_table->va;
2392         pbe +=  mr->npages % pbes_in_page;
2393         pbe->lo = cpu_to_le32((u32)addr);
2394         pbe->hi = cpu_to_le32((u32)upper_32_bits(addr));
2395
2396         mr->npages++;
2397
2398         return 0;
2399 }
2400
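/* Recycle PBLs whose fast-register work requests have completed, moving
 * them from the in-use list back to the free list.
 */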
2401 static void handle_completed_mrs(struct qedr_dev *dev, struct mr_info *info)
2402 {
2403         int work = info->completed - info->completed_handled - 1;
2404
2405         DP_DEBUG(dev, QEDR_MSG_MR, "Special FMR work = %d\n", work);
2406         while (work-- > 0 && !list_empty(&info->inuse_pbl_list)) {
2407                 struct qedr_pbl *pbl;
2408
                /* Free all page lists that can be freed (all those that
                 * were invalidated), on the assumption that an FMR
                 * completing successfully implies any earlier invalidate
                 * operation on it has completed as well.
                 */
2414                 pbl = list_first_entry(&info->inuse_pbl_list,
2415                                        struct qedr_pbl, list_entry);
2416                 list_del(&pbl->list_entry);
2417                 list_add_tail(&pbl->list_entry, &info->free_pbl_list);
2418                 info->completed_handled++;
2419         }
2420 }
2421
2422 int qedr_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
2423                    int sg_nents, unsigned int *sg_offset)
2424 {
2425         struct qedr_mr *mr = get_qedr_mr(ibmr);
2426
2427         mr->npages = 0;
2428
2429         handle_completed_mrs(mr->dev, &mr->info);
2430         return ib_sg_to_pages(ibmr, sg, sg_nents, NULL, qedr_set_page);
2431 }
2432
2433 struct ib_mr *qedr_get_dma_mr(struct ib_pd *ibpd, int acc)
2434 {
2435         struct qedr_dev *dev = get_qedr_dev(ibpd->device);
2436         struct qedr_pd *pd = get_qedr_pd(ibpd);
2437         struct qedr_mr *mr;
2438         int rc;
2439
2440         mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2441         if (!mr)
2442                 return ERR_PTR(-ENOMEM);
2443
2444         mr->type = QEDR_MR_DMA;
2445
2446         rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
2447         if (rc) {
2448                 DP_ERR(dev, "roce alloc tid returned an error %d\n", rc);
2449                 goto err1;
2450         }
2451
2452         /* index only, 18 bit long, lkey = itid << 8 | key */
2453         mr->hw_mr.tid_type = QED_RDMA_TID_REGISTERED_MR;
2454         mr->hw_mr.pd = pd->pd_id;
2455         mr->hw_mr.local_read = 1;
2456         mr->hw_mr.local_write = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
2457         mr->hw_mr.remote_read = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
2458         mr->hw_mr.remote_write = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2459         mr->hw_mr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
2460         mr->hw_mr.dma_mr = true;
2461
2462         rc = dev->ops->rdma_register_tid(dev->rdma_ctx, &mr->hw_mr);
2463         if (rc) {
2464                 DP_ERR(dev, "roce register tid returned an error %d\n", rc);
2465                 goto err2;
2466         }
2467
2468         mr->ibmr.lkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2469         if (mr->hw_mr.remote_write || mr->hw_mr.remote_read ||
2470             mr->hw_mr.remote_atomic)
2471                 mr->ibmr.rkey = mr->hw_mr.itid << 8 | mr->hw_mr.key;
2472
2473         DP_DEBUG(dev, QEDR_MSG_MR, "get dma mr: lkey = %x\n", mr->ibmr.lkey);
2474         return &mr->ibmr;
2475
2476 err2:
2477         dev->ops->rdma_free_tid(dev->rdma_ctx, mr->hw_mr.itid);
2478 err1:
2479         kfree(mr);
2480         return ERR_PTR(rc);
2481 }
2482
2483 static inline int qedr_wq_is_full(struct qedr_qp_hwq_info *wq)
2484 {
2485         return (((wq->prod + 1) % wq->max_wr) == wq->cons);
2486 }
2487
2488 static int sge_data_len(struct ib_sge *sg_list, int num_sge)
2489 {
2490         int i, len = 0;
2491
2492         for (i = 0; i < num_sge; i++)
2493                 len += sg_list[i].length;
2494
2495         return len;
2496 }
2497
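/* Byte-swap a WQE segment 64 bits at a time; the inline data path
 * expects the payload as big-endian 64-bit words.
 */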
2498 static void swap_wqe_data64(u64 *p)
2499 {
2500         int i;
2501
2502         for (i = 0; i < QEDR_SQE_ELEMENT_SIZE / sizeof(u64); i++, p++)
2503                 *p = cpu_to_be64(cpu_to_le64(*p));
2504 }
2505
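/* Copy the WR payload directly into SQ WQE segments, producing new
 * chain elements as each segment fills up, and set the caller-supplied
 * inline flag. Returns the total inline data size.
 */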
2506 static u32 qedr_prepare_sq_inline_data(struct qedr_dev *dev,
2507                                        struct qedr_qp *qp, u8 *wqe_size,
2508                                        struct ib_send_wr *wr,
2509                                        struct ib_send_wr **bad_wr, u8 *bits,
2510                                        u8 bit)
2511 {
2512         u32 data_size = sge_data_len(wr->sg_list, wr->num_sge);
2513         char *seg_prt, *wqe;
2514         int i, seg_siz;
2515
2516         if (data_size > ROCE_REQ_MAX_INLINE_DATA_SIZE) {
2517                 DP_ERR(dev, "Too much inline data in WR: %d\n", data_size);
2518                 *bad_wr = wr;
2519                 return 0;
2520         }
2521
2522         if (!data_size)
2523                 return data_size;
2524
2525         *bits |= bit;
2526
2527         seg_prt = NULL;
2528         wqe = NULL;
2529         seg_siz = 0;
2530
2531         /* Copy data inline */
2532         for (i = 0; i < wr->num_sge; i++) {
2533                 u32 len = wr->sg_list[i].length;
2534                 void *src = (void *)(uintptr_t)wr->sg_list[i].addr;
2535
2536                 while (len > 0) {
2537                         u32 cur;
2538
2539                         /* New segment required */
2540                         if (!seg_siz) {
2541                                 wqe = (char *)qed_chain_produce(&qp->sq.pbl);
2542                                 seg_prt = wqe;
2543                                 seg_siz = sizeof(struct rdma_sq_common_wqe);
2544                                 (*wqe_size)++;
2545                         }
2546
2547                         /* Calculate currently allowed length */
2548                         cur = min_t(u32, len, seg_siz);
2549                         memcpy(seg_prt, src, cur);
2550
2551                         /* Update segment variables */
2552                         seg_prt += cur;
2553                         seg_siz -= cur;
2554
2555                         /* Update sge variables */
2556                         src += cur;
2557                         len -= cur;
2558
2559                         /* Swap fully-completed segments */
2560                         if (!seg_siz)
2561                                 swap_wqe_data64((u64 *)wqe);
2562                 }
2563         }
2564
2565         /* swap last not completed segment */
2566         if (seg_siz)
2567                 swap_wqe_data64((u64 *)wqe);
2568
2569         return data_size;
2570 }
2571
2572 #define RQ_SGE_SET(sge, vaddr, vlength, vflags)                 \
2573         do {                                                    \
2574                 DMA_REGPAIR_LE(sge->addr, vaddr);               \
2575                 (sge)->length = cpu_to_le32(vlength);           \
2576                 (sge)->flags = cpu_to_le32(vflags);             \
2577         } while (0)
2578
2579 #define SRQ_HDR_SET(hdr, vwr_id, num_sge)                       \
2580         do {                                                    \
2581                 DMA_REGPAIR_LE(hdr->wr_id, vwr_id);             \
2582                 (hdr)->num_sges = num_sge;                      \
2583         } while (0)
2584
2585 #define SRQ_SGE_SET(sge, vaddr, vlength, vlkey)                 \
2586         do {                                                    \
2587                 DMA_REGPAIR_LE(sge->addr, vaddr);               \
2588                 (sge)->length = cpu_to_le32(vlength);           \
2589                 (sge)->l_key = cpu_to_le32(vlkey);              \
2590         } while (0)
2591
2592 static u32 qedr_prepare_sq_sges(struct qedr_qp *qp, u8 *wqe_size,
2593                                 struct ib_send_wr *wr)
2594 {
2595         u32 data_size = 0;
2596         int i;
2597
2598         for (i = 0; i < wr->num_sge; i++) {
2599                 struct rdma_sq_sge *sge = qed_chain_produce(&qp->sq.pbl);
2600
2601                 DMA_REGPAIR_LE(sge->addr, wr->sg_list[i].addr);
2602                 sge->l_key = cpu_to_le32(wr->sg_list[i].lkey);
2603                 sge->length = cpu_to_le32(wr->sg_list[i].length);
2604                 data_size += wr->sg_list[i].length;
2605         }
2606
2607         if (wqe_size)
2608                 *wqe_size += wr->num_sge;
2609
2610         return data_size;
2611 }
2612
2613 static u32 qedr_prepare_sq_rdma_data(struct qedr_dev *dev,
2614                                      struct qedr_qp *qp,
2615                                      struct rdma_sq_rdma_wqe_1st *rwqe,
2616                                      struct rdma_sq_rdma_wqe_2nd *rwqe2,
2617                                      struct ib_send_wr *wr,
2618                                      struct ib_send_wr **bad_wr)
2619 {
2620         rwqe2->r_key = cpu_to_le32(rdma_wr(wr)->rkey);
2621         DMA_REGPAIR_LE(rwqe2->remote_va, rdma_wr(wr)->remote_addr);
2622
2623         if (wr->send_flags & IB_SEND_INLINE) {
2624                 u8 flags = 0;
2625
2626                 SET_FIELD2(flags, RDMA_SQ_RDMA_WQE_1ST_INLINE_FLG, 1);
2627                 return qedr_prepare_sq_inline_data(dev, qp, &rwqe->wqe_size, wr,
2628                                                    bad_wr, &rwqe->flags, flags);
2629         }
2630
2631         return qedr_prepare_sq_sges(qp, &rwqe->wqe_size, wr);
2632 }
2633
2634 static u32 qedr_prepare_sq_send_data(struct qedr_dev *dev,
2635                                      struct qedr_qp *qp,
2636                                      struct rdma_sq_send_wqe_1st *swqe,
2637                                      struct rdma_sq_send_wqe_2st *swqe2,
2638                                      struct ib_send_wr *wr,
2639                                      struct ib_send_wr **bad_wr)
2640 {
2641         memset(swqe2, 0, sizeof(*swqe2));
2642         if (wr->send_flags & IB_SEND_INLINE) {
2643                 u8 flags = 0;
2644
2645                 SET_FIELD2(flags, RDMA_SQ_SEND_WQE_INLINE_FLG, 1);
2646                 return qedr_prepare_sq_inline_data(dev, qp, &swqe->wqe_size, wr,
2647                                                    bad_wr, &swqe->flags, flags);
2648         }
2649
2650         return qedr_prepare_sq_sges(qp, &swqe->wqe_size, wr);
2651 }
2652
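/* Build the two-element fast-registration WQE for IB_WR_REG_MR from
 * the MR's iova, length, access flags and PBL address.
 */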
2653 static int qedr_prepare_reg(struct qedr_qp *qp,
2654                             struct rdma_sq_fmr_wqe_1st *fwqe1,
2655                             struct ib_reg_wr *wr)
2656 {
2657         struct qedr_mr *mr = get_qedr_mr(wr->mr);
2658         struct rdma_sq_fmr_wqe_2nd *fwqe2;
2659
2660         fwqe2 = (struct rdma_sq_fmr_wqe_2nd *)qed_chain_produce(&qp->sq.pbl);
2661         fwqe1->addr.hi = upper_32_bits(mr->ibmr.iova);
2662         fwqe1->addr.lo = lower_32_bits(mr->ibmr.iova);
2663         fwqe1->l_key = wr->key;
2664
2665         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_READ,
2666                    !!(wr->access & IB_ACCESS_REMOTE_READ));
2667         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_REMOTE_WRITE,
2668                    !!(wr->access & IB_ACCESS_REMOTE_WRITE));
2669         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_ENABLE_ATOMIC,
2670                    !!(wr->access & IB_ACCESS_REMOTE_ATOMIC));
2671         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_READ, 1);
2672         SET_FIELD2(fwqe2->access_ctrl, RDMA_SQ_FMR_WQE_2ND_LOCAL_WRITE,
2673                    !!(wr->access & IB_ACCESS_LOCAL_WRITE));
2674         fwqe2->fmr_ctrl = 0;
2675
2676         SET_FIELD2(fwqe2->fmr_ctrl, RDMA_SQ_FMR_WQE_2ND_PAGE_SIZE_LOG,
2677                    ilog2(mr->ibmr.page_size) - 12);
2678
2679         fwqe2->length_hi = 0;
2680         fwqe2->length_lo = mr->ibmr.length;
2681         fwqe2->pbl_addr.hi = upper_32_bits(mr->info.pbl_table->pa);
2682         fwqe2->pbl_addr.lo = lower_32_bits(mr->info.pbl_table->pa);
2683
2684         qp->wqe_wr_id[qp->sq.prod].mr = mr;
2685
2686         return 0;
2687 }
2688
2689 enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
2690 {
2691         switch (opcode) {
2692         case IB_WR_RDMA_WRITE:
2693         case IB_WR_RDMA_WRITE_WITH_IMM:
2694                 return IB_WC_RDMA_WRITE;
2695         case IB_WR_SEND_WITH_IMM:
2696         case IB_WR_SEND:
2697         case IB_WR_SEND_WITH_INV:
2698                 return IB_WC_SEND;
2699         case IB_WR_RDMA_READ:
2700                 return IB_WC_RDMA_READ;
2701         case IB_WR_ATOMIC_CMP_AND_SWP:
2702                 return IB_WC_COMP_SWAP;
2703         case IB_WR_ATOMIC_FETCH_AND_ADD:
2704                 return IB_WC_FETCH_ADD;
2705         case IB_WR_REG_MR:
2706                 return IB_WC_REG_MR;
2707         case IB_WR_LOCAL_INV:
2708                 return IB_WC_LOCAL_INV;
2709         default:
2710                 return IB_WC_SEND;
2711         }
2712 }
2713
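/* Check that the SQ can absorb this WR. Each failure reason is reported
 * only once per QP, tracked via qp->err_bitmap, to avoid log flooding.
 */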
static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
2715 {
2716         int wq_is_full, err_wr, pbl_is_full;
2717         struct qedr_dev *dev = qp->dev;
2718
2719         /* prevent SQ overflow and/or processing of a bad WR */
2720         err_wr = wr->num_sge > qp->sq.max_sges;
2721         wq_is_full = qedr_wq_is_full(&qp->sq);
2722         pbl_is_full = qed_chain_get_elem_left_u32(&qp->sq.pbl) <
2723                       QEDR_MAX_SQE_ELEMENTS_PER_SQE;
2724         if (wq_is_full || err_wr || pbl_is_full) {
2725                 if (wq_is_full && !(qp->err_bitmap & QEDR_QP_ERR_SQ_FULL)) {
2726                         DP_ERR(dev,
2727                                "error: WQ is full. Post send on QP %p failed (this error appears only once)\n",
2728                                qp);
2729                         qp->err_bitmap |= QEDR_QP_ERR_SQ_FULL;
2730                 }
2731
2732                 if (err_wr && !(qp->err_bitmap & QEDR_QP_ERR_BAD_SR)) {
2733                         DP_ERR(dev,
2734                                "error: WR is bad. Post send on QP %p failed (this error appears only once)\n",
2735                                qp);
2736                         qp->err_bitmap |= QEDR_QP_ERR_BAD_SR;
2737                 }
2738
2739                 if (pbl_is_full &&
2740                     !(qp->err_bitmap & QEDR_QP_ERR_SQ_PBL_FULL)) {
2741                         DP_ERR(dev,
2742                                "error: WQ PBL is full. Post send on QP %p failed (this error appears only once)\n",
2743                                qp);
2744                         qp->err_bitmap |= QEDR_QP_ERR_SQ_PBL_FULL;
2745                 }
2746                 return false;
2747         }
2748         return true;
2749 }
2750
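/* Build and enqueue a single send WR onto the SQ qed chain. The WQE
 * layout (first element plus opcode-specific second/third elements)
 * depends on wr->opcode; bookkeeping for completion handling is kept
 * in qp->wqe_wr_id[].
 */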
2751 int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2752                      struct ib_send_wr **bad_wr)
2753 {
2754         struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2755         struct qedr_qp *qp = get_qedr_qp(ibqp);
2756         struct rdma_sq_atomic_wqe_1st *awqe1;
2757         struct rdma_sq_atomic_wqe_2nd *awqe2;
2758         struct rdma_sq_atomic_wqe_3rd *awqe3;
2759         struct rdma_sq_send_wqe_2st *swqe2;
2760         struct rdma_sq_local_inv_wqe *iwqe;
2761         struct rdma_sq_rdma_wqe_2nd *rwqe2;
2762         struct rdma_sq_send_wqe_1st *swqe;
2763         struct rdma_sq_rdma_wqe_1st *rwqe;
2764         struct rdma_sq_fmr_wqe_1st *fwqe1;
2765         struct rdma_sq_common_wqe *wqe;
2766         u32 length;
2767         int rc = 0;
2768         bool comp;
2769
2770         if (!qedr_can_post_send(qp, wr)) {
2771                 *bad_wr = wr;
2772                 return -ENOMEM;
2773         }
2774
2775         wqe = qed_chain_produce(&qp->sq.pbl);
2776         qp->wqe_wr_id[qp->sq.prod].signaled =
2777                 !!(wr->send_flags & IB_SEND_SIGNALED) || qp->signaled;
2778
2779         wqe->flags = 0;
2780         SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_SE_FLG,
2781                    !!(wr->send_flags & IB_SEND_SOLICITED));
2782         comp = (!!(wr->send_flags & IB_SEND_SIGNALED)) || qp->signaled;
2783         SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_COMP_FLG, comp);
2784         SET_FIELD2(wqe->flags, RDMA_SQ_SEND_WQE_RD_FENCE_FLG,
2785                    !!(wr->send_flags & IB_SEND_FENCE));
2786         wqe->prev_wqe_size = qp->prev_wqe_size;
2787
2788         qp->wqe_wr_id[qp->sq.prod].opcode = qedr_ib_to_wc_opcode(wr->opcode);
2789
2790         switch (wr->opcode) {
2791         case IB_WR_SEND_WITH_IMM:
2792                 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_IMM;
2793                 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2794                 swqe->wqe_size = 2;
2795                 swqe2 = qed_chain_produce(&qp->sq.pbl);
2796
2797                 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.imm_data);
2798                 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2799                                                    wr, bad_wr);
2800                 swqe->length = cpu_to_le32(length);
2801                 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2802                 qp->prev_wqe_size = swqe->wqe_size;
2803                 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2804                 break;
2805         case IB_WR_SEND:
2806                 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND;
2807                 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2808
2809                 swqe->wqe_size = 2;
2810                 swqe2 = qed_chain_produce(&qp->sq.pbl);
2811                 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2812                                                    wr, bad_wr);
2813                 swqe->length = cpu_to_le32(length);
2814                 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2815                 qp->prev_wqe_size = swqe->wqe_size;
2816                 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2817                 break;
2818         case IB_WR_SEND_WITH_INV:
2819                 wqe->req_type = RDMA_SQ_REQ_TYPE_SEND_WITH_INVALIDATE;
2820                 swqe = (struct rdma_sq_send_wqe_1st *)wqe;
2821                 swqe2 = qed_chain_produce(&qp->sq.pbl);
2822                 swqe->wqe_size = 2;
2823                 swqe->inv_key_or_imm_data = cpu_to_le32(wr->ex.invalidate_rkey);
2824                 length = qedr_prepare_sq_send_data(dev, qp, swqe, swqe2,
2825                                                    wr, bad_wr);
2826                 swqe->length = cpu_to_le32(length);
2827                 qp->wqe_wr_id[qp->sq.prod].wqe_size = swqe->wqe_size;
2828                 qp->prev_wqe_size = swqe->wqe_size;
2829                 qp->wqe_wr_id[qp->sq.prod].bytes_len = swqe->length;
2830                 break;
2831
2832         case IB_WR_RDMA_WRITE_WITH_IMM:
2833                 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR_WITH_IMM;
2834                 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2835
2836                 rwqe->wqe_size = 2;
2837                 rwqe->imm_data = htonl(cpu_to_le32(wr->ex.imm_data));
2838                 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2839                 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2840                                                    wr, bad_wr);
2841                 rwqe->length = cpu_to_le32(length);
2842                 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2843                 qp->prev_wqe_size = rwqe->wqe_size;
2844                 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2845                 break;
2846         case IB_WR_RDMA_WRITE:
2847                 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_WR;
2848                 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2849
2850                 rwqe->wqe_size = 2;
2851                 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2852                 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2853                                                    wr, bad_wr);
2854                 rwqe->length = cpu_to_le32(length);
2855                 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2856                 qp->prev_wqe_size = rwqe->wqe_size;
2857                 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2858                 break;
2859         case IB_WR_RDMA_READ_WITH_INV:
2860                 DP_ERR(dev,
2861                        "RDMA READ WITH INVALIDATE not supported\n");
2862                 *bad_wr = wr;
2863                 rc = -EINVAL;
2864                 break;
2865
2866         case IB_WR_RDMA_READ:
2867                 wqe->req_type = RDMA_SQ_REQ_TYPE_RDMA_RD;
2868                 rwqe = (struct rdma_sq_rdma_wqe_1st *)wqe;
2869
2870                 rwqe->wqe_size = 2;
2871                 rwqe2 = qed_chain_produce(&qp->sq.pbl);
2872                 length = qedr_prepare_sq_rdma_data(dev, qp, rwqe, rwqe2,
2873                                                    wr, bad_wr);
2874                 rwqe->length = cpu_to_le32(length);
2875                 qp->wqe_wr_id[qp->sq.prod].wqe_size = rwqe->wqe_size;
2876                 qp->prev_wqe_size = rwqe->wqe_size;
2877                 qp->wqe_wr_id[qp->sq.prod].bytes_len = rwqe->length;
2878                 break;
2879
2880         case IB_WR_ATOMIC_CMP_AND_SWP:
2881         case IB_WR_ATOMIC_FETCH_AND_ADD:
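                     /* Atomic WQEs span four elements: header, remote
                      * address plus R-key, swap/compare operands, and one
                      * SGE for the local result buffer.
                      */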
2882                 awqe1 = (struct rdma_sq_atomic_wqe_1st *)wqe;
2883                 awqe1->wqe_size = 4;
2884
2885                 awqe2 = qed_chain_produce(&qp->sq.pbl);
2886                 DMA_REGPAIR_LE(awqe2->remote_va, atomic_wr(wr)->remote_addr);
2887                 awqe2->r_key = cpu_to_le32(atomic_wr(wr)->rkey);
2888
2889                 awqe3 = qed_chain_produce(&qp->sq.pbl);
2890
2891                 if (wr->opcode == IB_WR_ATOMIC_FETCH_AND_ADD) {
2892                         wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_ADD;
2893                         DMA_REGPAIR_LE(awqe3->swap_data,
2894                                        atomic_wr(wr)->compare_add);
2895                 } else {
2896                         wqe->req_type = RDMA_SQ_REQ_TYPE_ATOMIC_CMP_AND_SWAP;
2897                         DMA_REGPAIR_LE(awqe3->swap_data,
2898                                        atomic_wr(wr)->swap);
2899                         DMA_REGPAIR_LE(awqe3->cmp_data,
2900                                        atomic_wr(wr)->compare_add);
2901                 }
2902
2903                 qedr_prepare_sq_sges(qp, NULL, wr);
2904
2905                 qp->wqe_wr_id[qp->sq.prod].wqe_size = awqe1->wqe_size;
2906                 qp->prev_wqe_size = awqe1->wqe_size;
2907                 break;
2908
2909         case IB_WR_LOCAL_INV:
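                     /* Single-element WQE; only the L-key to invalidate is
                      * carried.
                      */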
2910                 iwqe = (struct rdma_sq_local_inv_wqe *)wqe;
2911                 iwqe->wqe_size = 1;
2912
2913                 iwqe->req_type = RDMA_SQ_REQ_TYPE_LOCAL_INVALIDATE;
2914                 iwqe->inv_l_key = wr->ex.invalidate_rkey;
2915                 qp->wqe_wr_id[qp->sq.prod].wqe_size = iwqe->wqe_size;
2916                 qp->prev_wqe_size = iwqe->wqe_size;
2917                 break;
2918         case IB_WR_REG_MR:
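                     /* Fast memory registration: qedr_prepare_reg() fills
                      * the two-element FMR WQE from the reg_wr() payload.
                      */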
2919                 DP_DEBUG(dev, QEDR_MSG_CQ, "REG_MR\n");
2920                 wqe->req_type = RDMA_SQ_REQ_TYPE_FAST_MR;
2921                 fwqe1 = (struct rdma_sq_fmr_wqe_1st *)wqe;
2922                 fwqe1->wqe_size = 2;
2923
2924                 rc = qedr_prepare_reg(qp, fwqe1, reg_wr(wr));
2925                 if (rc) {
2926                         DP_ERR(dev, "IB_REG_MR failed rc=%d\n", rc);
2927                         *bad_wr = wr;
2928                         break;
2929                 }
2930
2931                 qp->wqe_wr_id[qp->sq.prod].wqe_size = fwqe1->wqe_size;
2932                 qp->prev_wqe_size = fwqe1->wqe_size;
2933                 break;
2934         default:
2935                 DP_ERR(dev, "invalid opcode 0x%x!\n", wr->opcode);
2936                 rc = -EINVAL;
2937                 *bad_wr = wr;
2938                 break;
2939         }
2940
2941         if (*bad_wr) {
2942                 u16 value;
2943
2944                 /* Restore prod to its position before
2945                  * this WR was processed
2946                  */
2947                 value = le16_to_cpu(qp->sq.db_data.data.value);
2948                 qed_chain_set_prod(&qp->sq.pbl, value, wqe);
2949
2950                 /* Restore prev_wqe_size */
2951                 qp->prev_wqe_size = wqe->prev_wqe_size;
2952                 rc = -EINVAL;
2953                 DP_ERR(dev, "POST SEND FAILED\n");
2954         }
2955
2956         return rc;
2957 }
2958
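/* Illustrative sketch only, not driver code: a kernel ULP reaches
 * qedr_post_send() through the ib_post_send() verb. A minimal example,
 * assuming an RC QP in RTS and a DMA-mapped buffer (all names below are
 * hypothetical):
 *
 *	struct ib_sge sge = {
 *		.addr   = buf_dma_addr,
 *		.length = buf_len,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = MY_SEND_COOKIE,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	struct ib_send_wr *bad_wr;
 *	int rc = ib_post_send(qp, &wr, &bad_wr);
 *
 * On failure, bad_wr points at the first WR that was not posted; any
 * earlier WRs in the chain have already been accepted.
 */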
2959 int qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2960                    struct ib_send_wr **bad_wr)
2961 {
2962         struct qedr_dev *dev = get_qedr_dev(ibqp->device);
2963         struct qedr_qp *qp = get_qedr_qp(ibqp);
2964         unsigned long flags;
2965         int rc = 0;
2966
2967         *bad_wr = NULL;
2968
2969         if (qp->qp_type == IB_QPT_GSI)
2970                 return qedr_gsi_post_send(ibqp, wr, bad_wr);
2971
2972         if (!wr) {
2973                 DP_ERR(dev, "Got an empty post send.\n");
2974                 return -EINVAL;
2975         }
2976
2977         spin_lock_irqsave(&qp->q_lock, flags);
2978
2979         if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
2980             (qp->state == QED_ROCE_QP_STATE_ERR)) {
2981                 spin_unlock_irqrestore(&qp->q_lock, flags);
2982                 *bad_wr = wr;
2983                 DP_DEBUG(dev, QEDR_MSG_CQ,
2984                          "QP in wrong state! QP icid=0x%x state %d\n",
2985                          qp->icid, qp->state);
2986                 return -EINVAL;
2987         }
2988
2989         while (wr) {
2990                 rc = __qedr_post_send(ibqp, wr, bad_wr);
2991                 if (rc)
2992                         break;
2993
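                     /* Stamp the slot with the caller's wr_id, then advance
                      * the software producer and the doorbell copy of it;
                      * the doorbell itself is rung once, below, after the
                      * whole list is posted.
                      */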
2994                 qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;
2995
2996                 qedr_inc_sw_prod(&qp->sq);
2997
2998                 qp->sq.db_data.data.value++;
2999
3000                 wr = wr->next;
3001         }
3002
3003         /* Trigger doorbell.
3004          * If the very first WR failed, the doorbell is rung in vain:
3005          * as long as the producer value is unchanged this is harmless.
3006          * For performance reasons we avoid checking for this redundant
3007          * doorbell.
3008          */
3009         wmb();
3010         writel(qp->sq.db_data.raw, qp->sq.db);
3011
3012         /* Make sure write sticks */
3013         mmiowb();
3014
3015         spin_unlock_irqrestore(&qp->q_lock, flags);
3016
3017         return rc;
3018 }
3019
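/* Illustrative sketch only, not driver code: receive buffers reach
 * qedr_post_recv() through the ib_post_recv() verb (names hypothetical):
 *
 *	struct ib_sge sge = {
 *		.addr   = rx_dma_addr,
 *		.length = rx_len,
 *		.lkey   = pd->local_dma_lkey,
 *	};
 *	struct ib_recv_wr wr = {
 *		.wr_id   = MY_RECV_COOKIE,
 *		.sg_list = &sge,
 *		.num_sge = 1,
 *	};
 *	struct ib_recv_wr *bad_wr;
 *	int rc = ib_post_recv(qp, &wr, &bad_wr);
 */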
3020 int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
3021                    struct ib_recv_wr **bad_wr)
3022 {
3023         struct qedr_qp *qp = get_qedr_qp(ibqp);
3024         struct qedr_dev *dev = qp->dev;
3025         unsigned long flags;
3026         int status = 0;
3027
3028         if (qp->qp_type == IB_QPT_GSI)
3029                 return qedr_gsi_post_recv(ibqp, wr, bad_wr);
3030
3031         spin_lock_irqsave(&qp->q_lock, flags);
3032
3033         if ((qp->state == QED_ROCE_QP_STATE_RESET) ||
3034             (qp->state == QED_ROCE_QP_STATE_ERR)) {
3035                 spin_unlock_irqrestore(&qp->q_lock, flags);
3036                 *bad_wr = wr;
3037                 return -EINVAL;
3038         }
3039
3040         while (wr) {
3041                 int i;
3042
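                     /* A single RQE can span several chain elements, so
                      * bail out unless a full RQE's worth of elements is
                      * still free.
                      */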
3043                 if (qed_chain_get_elem_left_u32(&qp->rq.pbl) <
3044                     QEDR_MAX_RQE_ELEMENTS_PER_RQE ||
3045                     wr->num_sge > qp->rq.max_sges) {
3046                         DP_ERR(dev, "Can't post WR (%d < %d) || (%d > %d)\n",
3047                                qed_chain_get_elem_left_u32(&qp->rq.pbl),
3048                                QEDR_MAX_RQE_ELEMENTS_PER_RQE, wr->num_sge,
3049                                qp->rq.max_sges);
3050                         status = -ENOMEM;
3051                         *bad_wr = wr;
3052                         break;
3053                 }
3054                 for (i = 0; i < wr->num_sge; i++) {
3055                         u32 flags = 0;
3056                         struct rdma_rq_sge *rqe =
3057                             qed_chain_produce(&qp->rq.pbl);
3058
3059                         /* The first SGE must carry the total
3060                          * number of SGEs in the list
3061                          */
3062                         if (!i)
3063                                 SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
3064                                           wr->num_sge);
3065
3066                         SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
3067                                   wr->sg_list[i].lkey);
3068
3069                         RQ_SGE_SET(rqe, wr->sg_list[i].addr,
3070                                    wr->sg_list[i].length, flags);
3071                 }
3072
3073         /* Special case of no sges. FW requires between 1-4 sges...
3074          * in this case we need to post 1 sge with length zero. this is
3075          * because rdma write with immediate consumes an RQ.
3076          */