/* QLogic qedr NIC Driver
 * Copyright (c) 2015-2016  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_user_verbs.h>
#include <linux/netdevice.h>
#include <linux/iommu.h>
#include <linux/pci.h>
#include <net/addrconf.h>

#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qedr.h"
#include "verbs.h"
#include <rdma/qedr-abi.h>

MODULE_DESCRIPTION("QLogic 40G/100G RoCE Driver");
MODULE_AUTHOR("QLogic Corporation");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QEDR_MODULE_VERSION);

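/* Default scaling factor applied to user-requested work queue sizes when
 * QPs are created (an over-provision, presumably because one verbs work
 * request can expand into several HW WQEs).
 */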
#define QEDR_WQ_MULTIPLIER_DFT  (3)

void qedr_ib_dispatch_event(struct qedr_dev *dev, u8 port_num,
                            enum ib_event_type type)
{
        struct ib_event ibev;

        ibev.device = &dev->ibdev;
        ibev.element.port_num = port_num;
        ibev.event = type;

        ib_dispatch_event(&ibev);
}

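/* qedr exposes RoCE only, so the link layer is always Ethernet. */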
static enum rdma_link_layer qedr_link_layer(struct ib_device *device,
                                            u8 port_num)
{
        return IB_LINK_LAYER_ETHERNET;
}

static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str,
                                size_t str_len)
{
        struct qedr_dev *qedr = get_qedr_dev(ibdev);
        u32 fw_ver = (u32)qedr->attr.fw_ver;

        snprintf(str, str_len, "%d.%d.%d.%d",
                 (fw_ver >> 24) & 0xFF, (fw_ver >> 16) & 0xFF,
                 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
}

static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
{
        struct qedr_dev *qdev;

        qdev = get_qedr_dev(dev);
        dev_hold(qdev->ndev);

        /* The HW vendor's device driver must guarantee
         * that this function returns NULL before the net device reaches
         * NETDEV_UNREGISTER_FINAL state.
         */
        return qdev->ndev;
}

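/* Populate the ib_device with the verbs entry points and the uverbs
 * command mask, then register it with the RDMA core. From this point on
 * the device is visible to ULPs and userspace.
 */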
static int qedr_register_device(struct qedr_dev *dev)
{
        strlcpy(dev->ibdev.name, "qedr%d", IB_DEVICE_NAME_MAX);

        dev->ibdev.node_guid = dev->attr.node_guid;
        memcpy(dev->ibdev.node_desc, QEDR_NODE_DESC, sizeof(QEDR_NODE_DESC));
        dev->ibdev.owner = THIS_MODULE;
        dev->ibdev.uverbs_abi_ver = QEDR_ABI_VERSION;

        dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
                                     QEDR_UVERBS(QUERY_DEVICE) |
                                     QEDR_UVERBS(QUERY_PORT) |
                                     QEDR_UVERBS(ALLOC_PD) |
                                     QEDR_UVERBS(DEALLOC_PD) |
                                     QEDR_UVERBS(CREATE_COMP_CHANNEL) |
                                     QEDR_UVERBS(CREATE_CQ) |
                                     QEDR_UVERBS(RESIZE_CQ) |
                                     QEDR_UVERBS(DESTROY_CQ) |
                                     QEDR_UVERBS(REQ_NOTIFY_CQ) |
                                     QEDR_UVERBS(CREATE_QP) |
                                     QEDR_UVERBS(MODIFY_QP) |
                                     QEDR_UVERBS(QUERY_QP) |
                                     QEDR_UVERBS(DESTROY_QP) |
                                     QEDR_UVERBS(REG_MR) |
                                     QEDR_UVERBS(DEREG_MR) |
                                     QEDR_UVERBS(POLL_CQ) |
                                     QEDR_UVERBS(POST_SEND) |
                                     QEDR_UVERBS(POST_RECV);

        dev->ibdev.phys_port_cnt = 1;
        dev->ibdev.num_comp_vectors = dev->num_cnq;
        dev->ibdev.node_type = RDMA_NODE_IB_CA;

        dev->ibdev.query_device = qedr_query_device;
        dev->ibdev.query_port = qedr_query_port;
        dev->ibdev.modify_port = qedr_modify_port;

        dev->ibdev.query_gid = qedr_query_gid;
        dev->ibdev.add_gid = qedr_add_gid;
        dev->ibdev.del_gid = qedr_del_gid;

        dev->ibdev.alloc_ucontext = qedr_alloc_ucontext;
        dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
        dev->ibdev.mmap = qedr_mmap;

        dev->ibdev.alloc_pd = qedr_alloc_pd;
        dev->ibdev.dealloc_pd = qedr_dealloc_pd;

        dev->ibdev.create_cq = qedr_create_cq;
        dev->ibdev.destroy_cq = qedr_destroy_cq;
        dev->ibdev.resize_cq = qedr_resize_cq;
        dev->ibdev.req_notify_cq = qedr_arm_cq;

        dev->ibdev.create_qp = qedr_create_qp;
        dev->ibdev.modify_qp = qedr_modify_qp;
        dev->ibdev.query_qp = qedr_query_qp;
        dev->ibdev.destroy_qp = qedr_destroy_qp;

        dev->ibdev.query_pkey = qedr_query_pkey;

        dev->ibdev.create_ah = qedr_create_ah;
        dev->ibdev.destroy_ah = qedr_destroy_ah;

        dev->ibdev.get_dma_mr = qedr_get_dma_mr;
        dev->ibdev.dereg_mr = qedr_dereg_mr;
        dev->ibdev.reg_user_mr = qedr_reg_user_mr;
        dev->ibdev.alloc_mr = qedr_alloc_mr;
        dev->ibdev.map_mr_sg = qedr_map_mr_sg;

        dev->ibdev.poll_cq = qedr_poll_cq;
        dev->ibdev.post_send = qedr_post_send;
        dev->ibdev.post_recv = qedr_post_recv;

        dev->ibdev.process_mad = qedr_process_mad;
        dev->ibdev.get_port_immutable = qedr_port_immutable;
        dev->ibdev.get_netdev = qedr_get_netdev;

        dev->ibdev.dev.parent = &dev->pdev->dev;

        dev->ibdev.get_link_layer = qedr_link_layer;
        dev->ibdev.get_dev_fw_str = qedr_get_dev_fw_str;

        return ib_register_device(&dev->ibdev, NULL);
}

/* This function allocates fast-path status block memory */
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
                             struct qed_sb_info *sb_info, u16 sb_id)
{
        struct status_block *sb_virt;
        dma_addr_t sb_phys;
        int rc;

        sb_virt = dma_alloc_coherent(&dev->pdev->dev,
                                     sizeof(*sb_virt), &sb_phys, GFP_KERNEL);
        if (!sb_virt)
                return -ENOMEM;

        rc = dev->ops->common->sb_init(dev->cdev, sb_info,
                                       sb_virt, sb_phys, sb_id,
                                       QED_SB_TYPE_CNQ);
        if (rc) {
                pr_err("Status block initialization failed\n");
                dma_free_coherent(&dev->pdev->dev, sizeof(*sb_virt),
                                  sb_virt, sb_phys);
                return rc;
        }

        return 0;
}

static void qedr_free_mem_sb(struct qedr_dev *dev,
                             struct qed_sb_info *sb_info, int sb_id)
{
        if (sb_info->sb_virt) {
                dev->ops->common->sb_release(dev->cdev, sb_info, sb_id);
                dma_free_coherent(&dev->pdev->dev, sizeof(*sb_info->sb_virt),
                                  (void *)sb_info->sb_virt, sb_info->sb_phys);
        }
}

static void qedr_free_resources(struct qedr_dev *dev)
{
        int i;

        for (i = 0; i < dev->num_cnq; i++) {
                qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
                dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
        }

        kfree(dev->cnq_array);
        kfree(dev->sb_array);
        kfree(dev->sgid_tbl);
}

static int qedr_alloc_resources(struct qedr_dev *dev)
{
        struct qedr_cnq *cnq;
        __le16 *cons_pi;
        u16 n_entries;
        int i, rc;

        dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
                                GFP_KERNEL);
        if (!dev->sgid_tbl)
                return -ENOMEM;

        spin_lock_init(&dev->sgid_lock);

        /* Allocate Status blocks for CNQ */
        dev->sb_array = kcalloc(dev->num_cnq, sizeof(*dev->sb_array),
                                GFP_KERNEL);
        if (!dev->sb_array) {
                rc = -ENOMEM;
                goto err1;
        }

        dev->cnq_array = kcalloc(dev->num_cnq,
                                 sizeof(*dev->cnq_array), GFP_KERNEL);
        if (!dev->cnq_array) {
                rc = -ENOMEM;
                goto err2;
        }

        dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);

        /* Allocate CNQ PBLs */
        n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
        for (i = 0; i < dev->num_cnq; i++) {
                cnq = &dev->cnq_array[i];

                rc = qedr_alloc_mem_sb(dev, &dev->sb_array[i],
                                       dev->sb_start + i);
                if (rc)
                        goto err3;

                rc = dev->ops->common->chain_alloc(dev->cdev,
                                                   QED_CHAIN_USE_TO_CONSUME,
                                                   QED_CHAIN_MODE_PBL,
                                                   QED_CHAIN_CNT_TYPE_U16,
                                                   n_entries,
                                                   sizeof(struct regpair *),
                                                   &cnq->pbl, NULL);
                if (rc)
                        goto err4;

                cnq->dev = dev;
                cnq->sb = &dev->sb_array[i];
                cons_pi = dev->sb_array[i].sb_virt->pi_array;
                cnq->hw_cons_ptr = &cons_pi[QED_ROCE_PROTOCOL_INDEX];
                cnq->index = i;
                sprintf(cnq->name, "qedr%d@pci:%s", i, pci_name(dev->pdev));

                DP_DEBUG(dev, QEDR_MSG_INIT, "cnq[%d].cons=%d\n",
                         i, qed_chain_get_cons_idx(&cnq->pbl));
        }

        return 0;
err4:
        qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
err3:
        for (--i; i >= 0; i--) {
                dev->ops->common->chain_free(dev->cdev, &dev->cnq_array[i].pbl);
                qedr_free_mem_sb(dev, &dev->sb_array[i], dev->sb_start + i);
        }
        kfree(dev->cnq_array);
err2:
        kfree(dev->sb_array);
err1:
        kfree(dev->sgid_tbl);
        return rc;
}

/* QEDR sysfs interface */
static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct qedr_dev *dev = dev_get_drvdata(device);

        return scnprintf(buf, PAGE_SIZE, "0x%x\n", dev->pdev->vendor);
}

static ssize_t show_hca_type(struct device *device,
                             struct device_attribute *attr, char *buf)
{
        return scnprintf(buf, PAGE_SIZE, "%s\n", "HCA_TYPE_TO_SET");
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca_type, NULL);

static struct device_attribute *qedr_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_hca_type
};

static void qedr_remove_sysfiles(struct qedr_dev *dev)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
                device_remove_file(&dev->ibdev.dev, qedr_attributes[i]);
}

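/* Enable PCIe atomic operations for the device only if every switch on
 * the path to the root complex routes atomics without blocking egress,
 * and the root port can complete 64-bit atomic operations. Otherwise
 * advertise IB_ATOMIC_NONE.
 */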
static void qedr_pci_set_atomic(struct qedr_dev *dev, struct pci_dev *pdev)
{
        struct pci_dev *bridge;
        u32 ctl2, cap2;
        u16 flags;
        int rc;

        bridge = pdev->bus->self;
        if (!bridge)
                goto disable;

        /* Check atomic routing support all the way to root complex */
        while (bridge->bus->parent) {
                rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
                if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
                        goto disable;

                rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
                if (rc)
                        goto disable;

                rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl2);
                if (rc)
                        goto disable;

                if (!(cap2 & PCI_EXP_DEVCAP2_ATOMIC_ROUTE) ||
                    (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK))
                        goto disable;
                bridge = bridge->bus->parent->self;
        }

        rc = pcie_capability_read_word(bridge, PCI_EXP_FLAGS, &flags);
        if (rc || ((flags & PCI_EXP_FLAGS_VERS) < 2))
                goto disable;

        rc = pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap2);
        if (rc || !(cap2 & PCI_EXP_DEVCAP2_ATOMIC_COMP64))
                goto disable;

        /* Set atomic operations */
        pcie_capability_set_word(pdev, PCI_EXP_DEVCTL2,
                                 PCI_EXP_DEVCTL2_ATOMIC_REQ);
        dev->atomic_cap = IB_ATOMIC_GLOB;

        DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability enabled\n");

        return;

disable:
        pcie_capability_clear_word(pdev, PCI_EXP_DEVCTL2,
                                   PCI_EXP_DEVCTL2_ATOMIC_REQ);
        dev->atomic_cap = IB_ATOMIC_NONE;

        DP_DEBUG(dev, QEDR_MSG_INIT, "Atomic capability disabled\n");
}

static const struct qed_rdma_ops *qed_ops;

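/* Reassemble a 64-bit value from the hi/lo halves of a firmware regpair. */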
#define HILO_U64(hi, lo)                ((((u64)(hi)) << 32) + (lo))

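/* CNQ interrupt handler: walk the completion-notification queue, invoking
 * the completion handler of each CQ it references, then report the new
 * consumer index back to the device and re-enable the status block
 * interrupt.
 */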
static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
        u16 hw_comp_cons, sw_comp_cons;
        struct qedr_cnq *cnq = handle;
        struct regpair *cq_handle;
        struct qedr_cq *cq;

        qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);

        qed_sb_update_sb_idx(cnq->sb);

        hw_comp_cons = le16_to_cpu(*cnq->hw_cons_ptr);
        sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

        /* Align protocol-index and chain reads */
        rmb();

        while (sw_comp_cons != hw_comp_cons) {
                cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
                cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
                                cq_handle->lo);

                if (!cq) {
                        DP_ERR(cnq->dev,
                               "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
                               cq_handle->hi, cq_handle->lo, sw_comp_cons,
                               hw_comp_cons);

                        break;
                }

                if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
                        DP_ERR(cnq->dev,
                               "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
                               cq_handle->hi, cq_handle->lo, cq);
                        break;
                }

                cq->arm_flags = 0;

                if (!cq->destroyed && cq->ibcq.comp_handler)
                        (*cq->ibcq.comp_handler)
                                (&cq->ibcq, cq->ibcq.cq_context);

                /* The CQ's CNQ notification counter is checked before
                 * destroying the CQ in a busy-wait loop that waits for all of
                 * the CQ's CNQ interrupts to be processed. It is incremented
                 * here, only after the completion handler, to ensure that
                 * the handler is not running when the CQ is destroyed.
                 */
                cq->cnq_notif++;

                sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);

                cnq->n_comp++;
        }

        qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
                                      sw_comp_cons);

        qed_sb_ack(cnq->sb, IGU_INT_ENABLE, 1);

        return IRQ_HANDLED;
}

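/* Make sure no CNQ interrupt handler is still running before the vectors
 * are released.
 */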
static void qedr_sync_free_irqs(struct qedr_dev *dev)
{
        u32 vector;
        int i;

        for (i = 0; i < dev->int_info.used_cnt; i++) {
                if (dev->int_info.msix_cnt) {
                        vector = dev->int_info.msix[i * dev->num_hwfns].vector;
                        synchronize_irq(vector);
                        free_irq(vector, &dev->cnq_array[i]);
                }
        }

        dev->int_info.used_cnt = 0;
}

static int qedr_req_msix_irqs(struct qedr_dev *dev)
{
        int i, rc = 0;

        if (dev->num_cnq > dev->int_info.msix_cnt) {
                DP_ERR(dev,
                       "Interrupt mismatch: %d CNQ queues > %d MSI-X vectors\n",
                       dev->num_cnq, dev->int_info.msix_cnt);
                return -EINVAL;
        }

        for (i = 0; i < dev->num_cnq; i++) {
                rc = request_irq(dev->int_info.msix[i * dev->num_hwfns].vector,
                                 qedr_irq_handler, 0, dev->cnq_array[i].name,
                                 &dev->cnq_array[i]);
                if (rc) {
                        DP_ERR(dev, "Request cnq %d irq failed\n", i);
                        qedr_sync_free_irqs(dev);
                        return rc;
                }

                DP_DEBUG(dev, QEDR_MSG_INIT,
                         "Requested cnq irq for %s [entry %d]. Cookie is at %p\n",
                         dev->cnq_array[i].name, i,
                         &dev->cnq_array[i]);
                dev->int_info.used_cnt++;
        }

        return rc;
}

static int qedr_setup_irqs(struct qedr_dev *dev)
{
        int rc;

        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs\n");

        /* Learn Interrupt configuration */
        rc = dev->ops->rdma_set_rdma_int(dev->cdev, dev->num_cnq);
        if (rc < 0)
                return rc;

        rc = dev->ops->rdma_get_rdma_int(dev->cdev, &dev->int_info);
        if (rc) {
                DP_DEBUG(dev, QEDR_MSG_INIT, "get_rdma_int failed\n");
                return rc;
        }

        if (dev->int_info.msix_cnt) {
                DP_DEBUG(dev, QEDR_MSG_INIT, "rdma msix_cnt = %d\n",
                         dev->int_info.msix_cnt);
                rc = qedr_req_msix_irqs(dev);
                if (rc)
                        return rc;
        }

        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr_setup_irqs succeeded\n");

        return 0;
}

static int qedr_set_device_attr(struct qedr_dev *dev)
{
        struct qed_rdma_device *qed_attr;
        struct qedr_device_attr *attr;
        u32 page_size;

        /* Part 1 - query core capabilities */
        qed_attr = dev->ops->rdma_query_device(dev->rdma_ctx);

        /* Part 2 - check capabilities */
        page_size = ~qed_attr->page_size_caps + 1;
        if (page_size > PAGE_SIZE) {
                DP_ERR(dev,
                       "Kernel PAGE_SIZE is %ld, which is smaller than the minimum page size (%d) required by qedr\n",
                       PAGE_SIZE, page_size);
                return -ENODEV;
        }

        /* Part 3 - copy and update capabilities */
        attr = &dev->attr;
        attr->vendor_id = qed_attr->vendor_id;
        attr->vendor_part_id = qed_attr->vendor_part_id;
        attr->hw_ver = qed_attr->hw_ver;
        attr->fw_ver = qed_attr->fw_ver;
        attr->node_guid = qed_attr->node_guid;
        attr->sys_image_guid = qed_attr->sys_image_guid;
        attr->max_cnq = qed_attr->max_cnq;
        attr->max_sge = qed_attr->max_sge;
        attr->max_inline = qed_attr->max_inline;
        attr->max_sqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_SQE);
        attr->max_rqe = min_t(u32, qed_attr->max_wqe, QEDR_MAX_RQE);
        attr->max_qp_resp_rd_atomic_resc = qed_attr->max_qp_resp_rd_atomic_resc;
        attr->max_qp_req_rd_atomic_resc = qed_attr->max_qp_req_rd_atomic_resc;
        attr->max_dev_resp_rd_atomic_resc =
            qed_attr->max_dev_resp_rd_atomic_resc;
        attr->max_cq = qed_attr->max_cq;
        attr->max_qp = qed_attr->max_qp;
        attr->max_mr = qed_attr->max_mr;
        attr->max_mr_size = qed_attr->max_mr_size;
        attr->max_cqe = min_t(u64, qed_attr->max_cqe, QEDR_MAX_CQES);
        attr->max_mw = qed_attr->max_mw;
        attr->max_fmr = qed_attr->max_fmr;
        attr->max_mr_mw_fmr_pbl = qed_attr->max_mr_mw_fmr_pbl;
        attr->max_mr_mw_fmr_size = qed_attr->max_mr_mw_fmr_size;
        attr->max_pd = qed_attr->max_pd;
        attr->max_ah = qed_attr->max_ah;
        attr->max_pkey = qed_attr->max_pkey;
        attr->max_srq = qed_attr->max_srq;
        attr->max_srq_wr = qed_attr->max_srq_wr;
        attr->dev_caps = qed_attr->dev_caps;
        attr->page_size_caps = qed_attr->page_size_caps;
        attr->dev_ack_delay = qed_attr->dev_ack_delay;
        attr->reserved_lkey = qed_attr->reserved_lkey;
        attr->bad_pkey_counter = qed_attr->bad_pkey_counter;
        attr->max_stats_queues = qed_attr->max_stats_queues;

        return 0;
}

void qedr_unaffiliated_event(void *context, u8 event_code)
{
        pr_err("unaffiliated event not implemented yet\n");
}

void qedr_affiliated_event(void *context, u8 e_code, void *fw_handle)
{
#define EVENT_TYPE_NOT_DEFINED  0
#define EVENT_TYPE_CQ           1
#define EVENT_TYPE_QP           2
        struct qedr_dev *dev = (struct qedr_dev *)context;
        struct regpair *async_handle = (struct regpair *)fw_handle;
        u64 roce_handle64 = ((u64) async_handle->hi << 32) + async_handle->lo;
        u8 event_type = EVENT_TYPE_NOT_DEFINED;
        struct ib_event event;
        struct ib_cq *ibcq;
        struct ib_qp *ibqp;
        struct qedr_cq *cq;
        struct qedr_qp *qp;

        switch (e_code) {
        case ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR:
                event.event = IB_EVENT_CQ_ERR;
                event_type = EVENT_TYPE_CQ;
                break;
        case ROCE_ASYNC_EVENT_SQ_DRAINED:
                event.event = IB_EVENT_SQ_DRAINED;
                event_type = EVENT_TYPE_QP;
                break;
        case ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR:
                event.event = IB_EVENT_QP_FATAL;
                event_type = EVENT_TYPE_QP;
                break;
        case ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR:
                event.event = IB_EVENT_QP_REQ_ERR;
                event_type = EVENT_TYPE_QP;
                break;
        case ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR:
                event.event = IB_EVENT_QP_ACCESS_ERR;
                event_type = EVENT_TYPE_QP;
                break;
        default:
                DP_ERR(dev, "unsupported event %d on handle=%llx\n", e_code,
                       roce_handle64);
        }

        switch (event_type) {
        case EVENT_TYPE_CQ:
                cq = (struct qedr_cq *)(uintptr_t)roce_handle64;
                if (cq) {
                        ibcq = &cq->ibcq;
                        if (ibcq->event_handler) {
                                event.device = ibcq->device;
                                event.element.cq = ibcq;
                                ibcq->event_handler(&event, ibcq->cq_context);
                        }
                } else {
                        WARN(1,
                             "Error: CQ event with NULL pointer ibcq. Handle=%llx\n",
                             roce_handle64);
                }
                DP_ERR(dev, "CQ event %d on handle %p\n", e_code, cq);
                break;
        case EVENT_TYPE_QP:
                qp = (struct qedr_qp *)(uintptr_t)roce_handle64;
                if (qp) {
                        ibqp = &qp->ibqp;
                        if (ibqp->event_handler) {
                                event.device = ibqp->device;
                                event.element.qp = ibqp;
                                ibqp->event_handler(&event, ibqp->qp_context);
                        }
                } else {
                        WARN(1,
                             "Error: QP event with NULL pointer ibqp. Handle=%llx\n",
                             roce_handle64);
                }
                DP_ERR(dev, "QP event %d on handle %p\n", e_code, qp);
                break;
        default:
                break;
        }
}

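/* Bring up the qed RDMA engine: hand it the CNQ PBL addresses and the
 * async event callbacks, reserve a doorbell (DPI) window for this
 * interface, and cache the device capabilities it reports.
 */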
static int qedr_init_hw(struct qedr_dev *dev)
{
        struct qed_rdma_add_user_out_params out_params;
        struct qed_rdma_start_in_params *in_params;
        struct qed_rdma_cnq_params *cur_pbl;
        struct qed_rdma_events events;
        dma_addr_t p_phys_table;
        u32 page_cnt;
        int rc = 0;
        int i;

        in_params = kzalloc(sizeof(*in_params), GFP_KERNEL);
        if (!in_params) {
                rc = -ENOMEM;
                goto out;
        }

        in_params->desired_cnq = dev->num_cnq;
        for (i = 0; i < dev->num_cnq; i++) {
                cur_pbl = &in_params->cnq_pbl_list[i];

                page_cnt = qed_chain_get_page_cnt(&dev->cnq_array[i].pbl);
                cur_pbl->num_pbl_pages = page_cnt;

                p_phys_table = qed_chain_get_pbl_phys(&dev->cnq_array[i].pbl);
                cur_pbl->pbl_ptr = (u64)p_phys_table;
        }

        events.affiliated_event = qedr_affiliated_event;
        events.unaffiliated_event = qedr_unaffiliated_event;
        events.context = dev;

        in_params->events = &events;
        in_params->cq_mode = QED_RDMA_CQ_MODE_32_BITS;
        in_params->max_mtu = dev->ndev->mtu;
        ether_addr_copy(&in_params->mac_addr[0], dev->ndev->dev_addr);

        rc = dev->ops->rdma_init(dev->cdev, in_params);
        if (rc)
                goto out;

        rc = dev->ops->rdma_add_user(dev->rdma_ctx, &out_params);
        if (rc)
                goto out;

        dev->db_addr = (void *)(uintptr_t)out_params.dpi_addr;
        dev->db_phys_addr = out_params.dpi_phys_addr;
        dev->db_size = out_params.dpi_size;
        dev->dpi = out_params.dpi;

        rc = qedr_set_device_attr(dev);
out:
        kfree(in_params);
        if (rc)
                DP_ERR(dev, "Init HW Failed rc = %d\n", rc);

        return rc;
}

void qedr_stop_hw(struct qedr_dev *dev)
{
        dev->ops->rdma_remove_user(dev->rdma_ctx, dev->dpi);
        dev->ops->rdma_stop(dev->rdma_ctx);
}

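/* Probe path, invoked through the .add hook registered with qede below:
 * allocate the ib_device, discover qed resources, allocate status blocks
 * and CNQs, initialize the HW, request IRQs and register with the RDMA
 * core.
 */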
static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
                                 struct net_device *ndev)
{
        struct qed_dev_rdma_info dev_info;
        struct qedr_dev *dev;
        int rc = 0, i;

        dev = (struct qedr_dev *)ib_alloc_device(sizeof(*dev));
        if (!dev) {
                pr_err("Unable to allocate ib device\n");
                return NULL;
        }

        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr add device called\n");

        dev->pdev = pdev;
        dev->ndev = ndev;
        dev->cdev = cdev;

        qed_ops = qed_get_rdma_ops();
        if (!qed_ops) {
                DP_ERR(dev, "Failed to get qed RoCE operations\n");
                goto init_err;
        }

        dev->ops = qed_ops;
        rc = qed_ops->fill_dev_info(cdev, &dev_info);
        if (rc)
                goto init_err;

        dev->num_hwfns = dev_info.common.num_hwfns;
        dev->rdma_ctx = dev->ops->rdma_get_rdma_ctx(cdev);

        dev->num_cnq = dev->ops->rdma_get_min_cnq_msix(cdev);
        if (!dev->num_cnq) {
                DP_ERR(dev, "not enough CNQ resources.\n");
                goto init_err;
        }

        dev->wq_multiplier = QEDR_WQ_MULTIPLIER_DFT;

        qedr_pci_set_atomic(dev, pdev);

        rc = qedr_alloc_resources(dev);
        if (rc)
                goto init_err;

        rc = qedr_init_hw(dev);
        if (rc)
                goto alloc_err;

        rc = qedr_setup_irqs(dev);
        if (rc)
                goto irq_err;

        rc = qedr_register_device(dev);
        if (rc) {
                DP_ERR(dev, "Unable to register device\n");
                goto reg_err;
        }

        for (i = 0; i < ARRAY_SIZE(qedr_attributes); i++)
                if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
                        goto sysfs_err;

        if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
                qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);

        DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
        return dev;

sysfs_err:
        ib_unregister_device(&dev->ibdev);
reg_err:
        qedr_sync_free_irqs(dev);
irq_err:
        qedr_stop_hw(dev);
alloc_err:
        qedr_free_resources(dev);
init_err:
        DP_ERR(dev, "qedr driver load failed rc=%d\n", rc);
        ib_dealloc_device(&dev->ibdev);

        return NULL;
}

static void qedr_remove(struct qedr_dev *dev)
{
        /* First unregister with stack to stop all the active traffic
         * of the registered clients.
         */
        qedr_remove_sysfiles(dev);
        ib_unregister_device(&dev->ibdev);

        qedr_stop_hw(dev);
        qedr_sync_free_irqs(dev);
        qedr_free_resources(dev);
        ib_dealloc_device(&dev->ibdev);
}

static void qedr_close(struct qedr_dev *dev)
{
        if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
                qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}

static void qedr_shutdown(struct qedr_dev *dev)
{
        qedr_close(dev);
        qedr_remove(dev);
}

static void qedr_open(struct qedr_dev *dev)
{
        if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
                qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
}

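/* On a MAC change, rebuild SGID[0] as a link-local address whose interface
 * ID is the EUI-64 derived from the new MAC (locally-administered bit
 * flipped, FF:FE inserted in the middle), update the LL2 MAC filter used
 * for GSI traffic, and notify consumers with IB_EVENT_GID_CHANGE.
 */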
static void qedr_mac_address_change(struct qedr_dev *dev)
{
        union ib_gid *sgid = &dev->sgid_tbl[0];
        u8 guid[8], mac_addr[6];
        int rc;

        /* Update SGID */
        ether_addr_copy(&mac_addr[0], dev->ndev->dev_addr);
        guid[0] = mac_addr[0] ^ 2;
        guid[1] = mac_addr[1];
        guid[2] = mac_addr[2];
        guid[3] = 0xff;
        guid[4] = 0xfe;
        guid[5] = mac_addr[3];
        guid[6] = mac_addr[4];
        guid[7] = mac_addr[5];
        sgid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        memcpy(&sgid->raw[8], guid, sizeof(guid));

        /* Update LL2 */
        rc = dev->ops->ll2_set_mac_filter(dev->cdev,
                                          dev->gsi_ll2_mac_address,
                                          dev->ndev->dev_addr);

        ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);

        qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);

        if (rc)
                DP_ERR(dev, "Error updating mac filter\n");
}

/* Event handling via the NIC driver ensures that all NIC-specific
 * initialization is complete before the RoCE driver notifies the stack
 * of the event.
 */
static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
{
        switch (event) {
        case QEDE_UP:
                qedr_open(dev);
                break;
        case QEDE_DOWN:
                qedr_close(dev);
                break;
        case QEDE_CLOSE:
                qedr_shutdown(dev);
                break;
        case QEDE_CHANGE_ADDR:
                qedr_mac_address_change(dev);
                break;
        default:
                pr_err("Event not supported\n");
        }
}

static struct qedr_driver qedr_drv = {
        .name = "qedr_driver",
        .add = qedr_add,
        .remove = qedr_remove,
        .notify = qedr_notify,
};

static int __init qedr_init_module(void)
{
        return qede_rdma_register_driver(&qedr_drv);
}

static void __exit qedr_exit_module(void)
{
        qede_rdma_unregister_driver(&qedr_drv);
}

module_init(qedr_init_module);
module_exit(qedr_exit_module);