/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/errno.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <net/addrconf.h>

#include "pvrdma.h"

#define DRV_NAME	"vmw_pvrdma"
#define DRV_VERSION	"1.0.1.0-k"

static DEFINE_MUTEX(pvrdma_device_list_lock);
static LIST_HEAD(pvrdma_device_list);
static struct workqueue_struct *event_wq;

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context);
static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context);

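/*
 * sysfs attributes exposed under the IB device: the HCA type (driver name
 * and version), the hardware revision and the board ID. They are grouped
 * into pvrdma_attr_group and registered with the IB core via
 * rdma_set_device_sysfs_group() in pvrdma_register_device().
 */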
static ssize_t hca_type_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "VMW_PVRDMA-%s\n", DRV_VERSION);
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t hw_rev_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_REV_ID);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t board_id_show(struct device *device,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", PVRDMA_BOARD_ID);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *pvrdma_class_attributes[] = {
	&dev_attr_hw_rev.attr,
	&dev_attr_hca_type.attr,
	&dev_attr_board_id.attr,
	NULL,
};

static const struct attribute_group pvrdma_attr_group = {
	.attrs = pvrdma_class_attributes,
};

static void pvrdma_get_fw_ver_str(struct ib_device *device, char *str)
{
	struct pvrdma_dev *dev =
		container_of(device, struct pvrdma_dev, ib_dev);

	snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d\n",
		 (int) (dev->dsr->caps.fw_ver >> 32),
		 (int) (dev->dsr->caps.fw_ver >> 16) & 0xffff,
		 (int) dev->dsr->caps.fw_ver & 0xffff);
}

static int pvrdma_init_device(struct pvrdma_dev *dev)
{
	/* Initialize some device related stuff */
	spin_lock_init(&dev->cmd_lock);
	sema_init(&dev->cmd_sema, 1);
	atomic_set(&dev->num_qps, 0);
	atomic_set(&dev->num_srqs, 0);
	atomic_set(&dev->num_cqs, 0);
	atomic_set(&dev->num_pds, 0);
	atomic_set(&dev->num_ahs, 0);

	return 0;
}

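/*
 * Report the immutable attributes of the port: the RoCE version advertised
 * by the device (v1 or v2), the P_Key and GID table sizes obtained from
 * ib_query_port(), and the maximum MAD size.
 */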
static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
				 struct ib_port_immutable *immutable)
{
	struct pvrdma_dev *dev = to_vdev(ibdev);
	struct ib_port_attr attr;
	int err;

	if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V1)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE;
	else if (dev->dsr->caps.gid_types == PVRDMA_GID_TYPE_FLAG_ROCE_V2)
		immutable->core_cap_flags |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
	return 0;
}

static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
					    u8 port_num)
{
	struct net_device *netdev;
	struct pvrdma_dev *dev = to_vdev(ibdev);

	netdev = dev->netdev;
	if (netdev)
		dev_hold(netdev);

	return netdev;
}

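/*
 * Verbs entry points handed to the IB core through ib_set_device_ops().
 * The SRQ operations live in a separate table and are only registered when
 * the device reports SRQ support (dsr->caps.max_srq != 0).
 */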
static const struct ib_device_ops pvrdma_dev_ops = {
	.add_gid = pvrdma_add_gid,
	.alloc_mr = pvrdma_alloc_mr,
	.alloc_pd = pvrdma_alloc_pd,
	.alloc_ucontext = pvrdma_alloc_ucontext,
	.create_ah = pvrdma_create_ah,
	.create_cq = pvrdma_create_cq,
	.create_qp = pvrdma_create_qp,
	.dealloc_pd = pvrdma_dealloc_pd,
	.dealloc_ucontext = pvrdma_dealloc_ucontext,
	.del_gid = pvrdma_del_gid,
	.dereg_mr = pvrdma_dereg_mr,
	.destroy_ah = pvrdma_destroy_ah,
	.destroy_cq = pvrdma_destroy_cq,
	.destroy_qp = pvrdma_destroy_qp,
	.get_dev_fw_str = pvrdma_get_fw_ver_str,
	.get_dma_mr = pvrdma_get_dma_mr,
	.get_link_layer = pvrdma_port_link_layer,
	.get_netdev = pvrdma_get_netdev,
	.get_port_immutable = pvrdma_port_immutable,
	.map_mr_sg = pvrdma_map_mr_sg,
	.mmap = pvrdma_mmap,
	.modify_port = pvrdma_modify_port,
	.modify_qp = pvrdma_modify_qp,
	.poll_cq = pvrdma_poll_cq,
	.post_recv = pvrdma_post_recv,
	.post_send = pvrdma_post_send,
	.query_device = pvrdma_query_device,
	.query_gid = pvrdma_query_gid,
	.query_pkey = pvrdma_query_pkey,
	.query_port = pvrdma_query_port,
	.query_qp = pvrdma_query_qp,
	.reg_user_mr = pvrdma_reg_user_mr,
	.req_notify_cq = pvrdma_req_notify_cq,
};

static const struct ib_device_ops pvrdma_dev_srq_ops = {
	.create_srq = pvrdma_create_srq,
	.destroy_srq = pvrdma_destroy_srq,
	.modify_srq = pvrdma_modify_srq,
	.query_srq = pvrdma_query_srq,
};

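/*
 * Populate the ib_device from the capabilities in the device shared
 * region, allocate the QP/CQ/SRQ lookup tables used by the interrupt
 * handlers, and register the device with the IB core as "vmw_pvrdma%d".
 */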
static int pvrdma_register_device(struct pvrdma_dev *dev)
{
	int ret = -1;

	dev->ib_dev.node_guid = dev->dsr->caps.node_guid;
	dev->sys_image_guid = dev->dsr->caps.sys_image_guid;
	dev->ib_dev.owner = THIS_MODULE;
	dev->ib_dev.num_comp_vectors = 1;
	dev->ib_dev.dev.parent = &dev->pdev->dev;
	dev->ib_dev.uverbs_abi_ver = PVRDMA_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_POLL_CQ) |
		(1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_POST_SEND) |
		(1ull << IB_USER_VERBS_CMD_POST_RECV) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH);

	dev->ib_dev.node_type = RDMA_NODE_IB_CA;
	dev->ib_dev.phys_port_cnt = dev->dsr->caps.phys_port_cnt;

	ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_ops);

	mutex_init(&dev->port_mutex);
	spin_lock_init(&dev->desc_lock);

	dev->cq_tbl = kcalloc(dev->dsr->caps.max_cq, sizeof(struct pvrdma_cq *),
			      GFP_KERNEL);
	if (!dev->cq_tbl)
		return ret;
	spin_lock_init(&dev->cq_tbl_lock);

	dev->qp_tbl = kcalloc(dev->dsr->caps.max_qp, sizeof(struct pvrdma_qp *),
			      GFP_KERNEL);
	if (!dev->qp_tbl)
		goto err_cq_free;
	spin_lock_init(&dev->qp_tbl_lock);

	/* Check if SRQ is supported by backend */
	if (dev->dsr->caps.max_srq) {
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) |
			(1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV);

		ib_set_device_ops(&dev->ib_dev, &pvrdma_dev_srq_ops);

		dev->srq_tbl = kcalloc(dev->dsr->caps.max_srq,
				       sizeof(struct pvrdma_srq *),
				       GFP_KERNEL);
		if (!dev->srq_tbl)
			goto err_qp_free;
	}
	dev->ib_dev.driver_id = RDMA_DRIVER_VMW_PVRDMA;
	spin_lock_init(&dev->srq_tbl_lock);
	rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);

	ret = ib_register_device(&dev->ib_dev, "vmw_pvrdma%d", NULL);
	if (ret)
		goto err_srq_free;

	dev->ib_active = true;

	return 0;

err_srq_free:
	kfree(dev->srq_tbl);
err_qp_free:
	kfree(dev->qp_tbl);
err_cq_free:
	kfree(dev->cq_tbl);

	return ret;
}

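/*
 * Interrupt vector 0 signals completion of a command posted to the device,
 * vector 1 carries asynchronous events (QP/CQ/SRQ/port), and the remaining
 * vectors signal completion-queue notifications.
 */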
static irqreturn_t pvrdma_intr0_handler(int irq, void *dev_id)
{
	u32 icr = PVRDMA_INTR_CAUSE_RESPONSE;
	struct pvrdma_dev *dev = dev_id;

	dev_dbg(&dev->pdev->dev, "interrupt 0 (response) handler\n");

	if (!dev->pdev->msix_enabled) {
		/* Legacy intr */
		icr = pvrdma_read_reg(dev, PVRDMA_REG_ICR);
		if (icr == 0)
			return IRQ_NONE;
	}

	if (icr == PVRDMA_INTR_CAUSE_RESPONSE)
		complete(&dev->cmd_done);

	return IRQ_HANDLED;
}

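/*
 * Event helpers: each looks up the object in its table under the table
 * lock, takes a reference so the object cannot be freed while its event
 * handler runs, and drops the reference (completing ->free) afterwards.
 */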
static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
{
	struct pvrdma_qp *qp;
	unsigned long flags;

	spin_lock_irqsave(&dev->qp_tbl_lock, flags);
	qp = dev->qp_tbl[qpn % dev->dsr->caps.max_qp];
	if (qp)
		refcount_inc(&qp->refcnt);
	spin_unlock_irqrestore(&dev->qp_tbl_lock, flags);

	if (qp && qp->ibqp.event_handler) {
		struct ib_qp *ibqp = &qp->ibqp;
		struct ib_event e;

		e.device = ibqp->device;
		e.element.qp = ibqp;
		e.event = type; /* 1:1 mapping for now. */
		ibqp->event_handler(&e, ibqp->qp_context);
	}
	if (qp) {
		if (refcount_dec_and_test(&qp->refcnt))
			complete(&qp->free);
	}
}

static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
{
	struct pvrdma_cq *cq;
	unsigned long flags;

	spin_lock_irqsave(&dev->cq_tbl_lock, flags);
	cq = dev->cq_tbl[cqn % dev->dsr->caps.max_cq];
	if (cq)
		refcount_inc(&cq->refcnt);
	spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

	if (cq && cq->ibcq.event_handler) {
		struct ib_cq *ibcq = &cq->ibcq;
		struct ib_event e;

		e.device = ibcq->device;
		e.element.cq = ibcq;
		e.event = type; /* 1:1 mapping for now. */
		ibcq->event_handler(&e, ibcq->cq_context);
	}
	if (cq) {
		if (refcount_dec_and_test(&cq->refcnt))
			complete(&cq->free);
	}
}

static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
{
	struct pvrdma_srq *srq;
	unsigned long flags;

	spin_lock_irqsave(&dev->srq_tbl_lock, flags);
	if (dev->srq_tbl)
		srq = dev->srq_tbl[srqn % dev->dsr->caps.max_srq];
	else
		srq = NULL;
	if (srq)
		refcount_inc(&srq->refcnt);
	spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);

	if (srq && srq->ibsrq.event_handler) {
		struct ib_srq *ibsrq = &srq->ibsrq;
		struct ib_event e;

		e.device = ibsrq->device;
		e.element.srq = ibsrq;
		e.event = type; /* 1:1 mapping for now. */
		ibsrq->event_handler(&e, ibsrq->srq_context);
	}
	if (srq) {
		if (refcount_dec_and_test(&srq->refcnt))
			complete(&srq->free);
	}
}

static void pvrdma_dispatch_event(struct pvrdma_dev *dev, int port,
				  enum ib_event_type event)
{
	struct ib_event ib_event;

	memset(&ib_event, 0, sizeof(ib_event));
	ib_event.device = &dev->ib_dev;
	ib_event.element.port_num = port;
	ib_event.event = event;
	ib_dispatch_event(&ib_event);
}

static void pvrdma_dev_event(struct pvrdma_dev *dev, u8 port, int type)
{
	if (port < 1 || port > dev->dsr->caps.phys_port_cnt) {
		dev_warn(&dev->pdev->dev, "event on port %d\n", port);
		return;
	}

	pvrdma_dispatch_event(dev, port, type);
}

static inline struct pvrdma_eqe *get_eqe(struct pvrdma_dev *dev, unsigned int i)
{
	return (struct pvrdma_eqe *)pvrdma_page_dir_get_ptr(
					&dev->async_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_eqe) * i);
}

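/*
 * Vector 1: drain the asynchronous event ring and fan each entry out to
 * the QP/CQ/SRQ/port event helpers above. Events are ignored until the IB
 * device has been registered (dev->ib_active).
 */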
static irqreturn_t pvrdma_intr1_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->async_ring_state->rx;
	int ring_slots = (dev->dsr->async_ring_pages.num_pages - 1) *
			 PAGE_SIZE / sizeof(struct pvrdma_eqe);
	unsigned int head;

	dev_dbg(&dev->pdev->dev, "interrupt 1 (async event) handler\n");

	/*
	 * Don't process events until the IB device is registered. Otherwise
	 * we'll try to ib_dispatch_event() on an invalid device.
	 */
	if (!dev->ib_active)
		return IRQ_HANDLED;

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_eqe *eqe;

		eqe = get_eqe(dev, head);

		switch (eqe->type) {
		case PVRDMA_EVENT_QP_FATAL:
		case PVRDMA_EVENT_QP_REQ_ERR:
		case PVRDMA_EVENT_QP_ACCESS_ERR:
		case PVRDMA_EVENT_COMM_EST:
		case PVRDMA_EVENT_SQ_DRAINED:
		case PVRDMA_EVENT_PATH_MIG:
		case PVRDMA_EVENT_PATH_MIG_ERR:
		case PVRDMA_EVENT_QP_LAST_WQE_REACHED:
			pvrdma_qp_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_CQ_ERR:
			pvrdma_cq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_SRQ_ERR:
		case PVRDMA_EVENT_SRQ_LIMIT_REACHED:
			pvrdma_srq_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_PORT_ACTIVE:
		case PVRDMA_EVENT_PORT_ERR:
		case PVRDMA_EVENT_LID_CHANGE:
		case PVRDMA_EVENT_PKEY_CHANGE:
		case PVRDMA_EVENT_SM_CHANGE:
		case PVRDMA_EVENT_CLIENT_REREGISTER:
		case PVRDMA_EVENT_GID_CHANGE:
			pvrdma_dev_event(dev, eqe->info, eqe->type);
			break;

		case PVRDMA_EVENT_DEVICE_FATAL:
			pvrdma_dev_event(dev, 1, eqe->type);
			break;

		default:
			break;
		}

		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static inline struct pvrdma_cqne *get_cqne(struct pvrdma_dev *dev,
					   unsigned int i)
{
	return (struct pvrdma_cqne *)pvrdma_page_dir_get_ptr(
					&dev->cq_pdir,
					PAGE_SIZE +
					sizeof(struct pvrdma_cqne) * i);
}

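/*
 * Vectors 2 and up: drain the CQ notification ring and invoke the
 * completion handler of each referenced CQ.
 */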
static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
{
	struct pvrdma_dev *dev = dev_id;
	struct pvrdma_ring *ring = &dev->cq_ring_state->rx;
	int ring_slots = (dev->dsr->cq_ring_pages.num_pages - 1) * PAGE_SIZE /
			 sizeof(struct pvrdma_cqne);
	unsigned int head;
	unsigned long flags;

	dev_dbg(&dev->pdev->dev, "interrupt x (completion) handler\n");

	while (pvrdma_idx_ring_has_data(ring, ring_slots, &head) > 0) {
		struct pvrdma_cqne *cqne;
		struct pvrdma_cq *cq;

		cqne = get_cqne(dev, head);
		spin_lock_irqsave(&dev->cq_tbl_lock, flags);
		cq = dev->cq_tbl[cqne->info % dev->dsr->caps.max_cq];
		if (cq)
			refcount_inc(&cq->refcnt);
		spin_unlock_irqrestore(&dev->cq_tbl_lock, flags);

		if (cq && cq->ibcq.comp_handler)
			cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);

		if (cq) {
			if (refcount_dec_and_test(&cq->refcnt))
				complete(&cq->free);
		}
		pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
	}

	return IRQ_HANDLED;
}

static void pvrdma_free_irq(struct pvrdma_dev *dev)
{
	int i;

	dev_dbg(&dev->pdev->dev, "freeing interrupts\n");
	for (i = 0; i < dev->nr_vectors; i++)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
}

static void pvrdma_enable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "enable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, 0);
}

static void pvrdma_disable_intrs(struct pvrdma_dev *dev)
{
	dev_dbg(&dev->pdev->dev, "disable interrupts\n");
	pvrdma_write_reg(dev, PVRDMA_REG_IMR, ~0);
}

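/*
 * Allocate interrupt vectors (MSI-X preferred, falling back to MSI or
 * legacy INTx) and attach a handler to each: vector 0 for command
 * responses, vector 1 for async events and the rest for CQ notifications.
 */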
static int pvrdma_alloc_intrs(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int ret = 0, i;

	ret = pci_alloc_irq_vectors(pdev, 1, PVRDMA_MAX_INTERRUPTS,
				    PCI_IRQ_MSIX);
	if (ret < 0) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1,
					    PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (ret < 0)
			return ret;
	}
	dev->nr_vectors = ret;

	ret = request_irq(pci_irq_vector(dev->pdev, 0), pvrdma_intr0_handler,
			  pdev->msix_enabled ? 0 : IRQF_SHARED, DRV_NAME, dev);
	if (ret) {
		dev_err(&dev->pdev->dev,
			"failed to request interrupt 0\n");
		goto out_free_vectors;
	}

	for (i = 1; i < dev->nr_vectors; i++) {
		ret = request_irq(pci_irq_vector(dev->pdev, i),
				  i == 1 ? pvrdma_intr1_handler :
					   pvrdma_intrx_handler,
				  0, DRV_NAME, dev);
		if (ret) {
			dev_err(&dev->pdev->dev,
				"failed to request interrupt %d\n", i);
			goto free_irqs;
		}
	}

	return 0;

free_irqs:
	while (--i >= 0)
		free_irq(pci_irq_vector(dev->pdev, i), dev);
out_free_vectors:
	pci_free_irq_vectors(pdev);
	return ret;
}

static void pvrdma_free_slots(struct pvrdma_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	if (dev->resp_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->resp_slot,
				  dev->dsr->resp_slot_dma);
	if (dev->cmd_slot)
		dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->cmd_slot,
				  dev->dsr->cmd_slot_dma);
}

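/*
 * GID table management: adding a GID posts a CREATE_BIND command to the
 * device and mirrors the entry in the driver's sgid_tbl; deleting one
 * posts DESTROY_BIND and clears the cached entry.
 */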
static int pvrdma_add_gid_at_index(struct pvrdma_dev *dev,
				   const union ib_gid *gid,
				   u8 gid_type,
				   int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_create_bind *cmd_bind = &req.create_bind;

	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_bind, 0, sizeof(*cmd_bind));
	cmd_bind->hdr.cmd = PVRDMA_CMD_CREATE_BIND;
	memcpy(cmd_bind->new_gid, gid->raw, 16);
	cmd_bind->mtu = ib_mtu_enum_to_int(IB_MTU_1024);
	cmd_bind->vlan = 0xfff;
	cmd_bind->index = index;
	cmd_bind->gid_type = gid_type;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not create binding, error: %d\n", ret);
		return -EFAULT;
	}
	memcpy(&dev->sgid_tbl[index], gid, sizeof(*gid));
	return 0;
}

static int pvrdma_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct pvrdma_dev *dev = to_vdev(attr->device);

	return pvrdma_add_gid_at_index(dev, &attr->gid,
				       ib_gid_type_to_pvrdma(attr->gid_type),
				       attr->index);
}

static int pvrdma_del_gid_at_index(struct pvrdma_dev *dev, int index)
{
	int ret;
	union pvrdma_cmd_req req;
	struct pvrdma_cmd_destroy_bind *cmd_dest = &req.destroy_bind;

	/* Update sgid table. */
	if (!dev->sgid_tbl) {
		dev_warn(&dev->pdev->dev, "sgid table not initialized\n");
		return -EINVAL;
	}

	memset(cmd_dest, 0, sizeof(*cmd_dest));
	cmd_dest->hdr.cmd = PVRDMA_CMD_DESTROY_BIND;
	memcpy(cmd_dest->dest_gid, &dev->sgid_tbl[index], 16);
	cmd_dest->index = index;

	ret = pvrdma_cmd_post(dev, &req, NULL, 0);
	if (ret < 0) {
		dev_warn(&dev->pdev->dev,
			 "could not destroy binding, error: %d\n", ret);
		return -EFAULT;
	}
	memset(&dev->sgid_tbl[index], 0, 16);
	return 0;
}

static int pvrdma_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct pvrdma_dev *dev = to_vdev(attr->device);

	dev_dbg(&dev->pdev->dev, "removing gid at index %u from %s",
		attr->index, dev->netdev->name);

	return pvrdma_del_gid_at_index(dev, attr->index);
}

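/*
 * Netdevice tracking: the device is paired with the vmxnet3 adapter in the
 * same PCI slot. Link changes on that netdev are forwarded to IB clients
 * as port events, and register/unregister events update the cached netdev
 * pointer. The notifier queues each event to event_wq, where it is matched
 * against the devices on pvrdma_device_list.
 */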
static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
					  struct net_device *ndev,
					  unsigned long event)
{
	struct pci_dev *pdev_net;
	unsigned int slot;

	switch (event) {
	case NETDEV_REBOOT:
	case NETDEV_DOWN:
		pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
		break;
	case NETDEV_UP:
		pvrdma_write_reg(dev, PVRDMA_REG_CTL,
				 PVRDMA_DEVICE_CTL_UNQUIESCE);

		mb();

		if (pvrdma_read_reg(dev, PVRDMA_REG_ERR))
			dev_err(&dev->pdev->dev,
				"failed to activate device during link up\n");
		else
			pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
		break;
	case NETDEV_UNREGISTER:
		dev_put(dev->netdev);
		dev->netdev = NULL;
		break;
	case NETDEV_REGISTER:
		/* vmxnet3 will have same bus, slot. But func will be 0 */
		slot = PCI_SLOT(dev->pdev->devfn);
		pdev_net = pci_get_slot(dev->pdev->bus,
					PCI_DEVFN(slot, 0));
		if ((dev->netdev == NULL) &&
		    (pci_get_drvdata(pdev_net) == ndev)) {
			/* this is our netdev */
			dev->netdev = ndev;
			dev_hold(ndev);
		}
		pci_dev_put(pdev_net);
		break;

	default:
		dev_dbg(&dev->pdev->dev, "ignore netdevice event %ld on %s\n",
			event, dev_name(&dev->ib_dev.dev));
		break;
	}
}

static void pvrdma_netdevice_event_work(struct work_struct *work)
{
	struct pvrdma_netdevice_work *netdev_work;
	struct pvrdma_dev *dev;

	netdev_work = container_of(work, struct pvrdma_netdevice_work, work);

	mutex_lock(&pvrdma_device_list_lock);
	list_for_each_entry(dev, &pvrdma_device_list, device_link) {
		if ((netdev_work->event == NETDEV_REGISTER) ||
		    (dev->netdev == netdev_work->event_netdev)) {
			pvrdma_netdevice_event_handle(dev,
						      netdev_work->event_netdev,
						      netdev_work->event);
			break;
		}
	}
	mutex_unlock(&pvrdma_device_list_lock);

	kfree(netdev_work);
}

static int pvrdma_netdevice_event(struct notifier_block *this,
				  unsigned long event, void *ptr)
{
	struct net_device *event_netdev = netdev_notifier_info_to_dev(ptr);
	struct pvrdma_netdevice_work *netdev_work;

	netdev_work = kmalloc(sizeof(*netdev_work), GFP_ATOMIC);
	if (!netdev_work)
		return NOTIFY_BAD;

	INIT_WORK(&netdev_work->work, pvrdma_netdevice_event_work);
	netdev_work->event_netdev = event_netdev;
	netdev_work->event = event;
	queue_work(event_wq, &netdev_work->work);

	return NOTIFY_DONE;
}

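/*
 * PCI probe: enable and map the device, set up the device shared region
 * (DSR), command/response slots and event rings, pair with the vmxnet3
 * netdev, allocate interrupts and tables, activate the device, and finally
 * register with the IB core and the netdevice notifier chain.
 */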
static int pvrdma_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *id)
{
	struct pci_dev *pdev_net;
	struct pvrdma_dev *dev;
	int ret;
	unsigned long start;
	unsigned long len;
	dma_addr_t slot_dma = 0;

	dev_dbg(&pdev->dev, "initializing driver %s\n", pci_name(pdev));

	/* Allocate zero-out device */
	dev = (struct pvrdma_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev) {
		dev_err(&pdev->dev, "failed to allocate IB device\n");
		return -ENOMEM;
	}

	mutex_lock(&pvrdma_device_list_lock);
	list_add(&dev->device_link, &pvrdma_device_list);
	mutex_unlock(&pvrdma_device_list_lock);

	ret = pvrdma_init_device(dev);
	if (ret)
		goto err_free_device;

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto err_free_device;
	}

	dev_dbg(&pdev->dev, "PCI resource flags BAR0 %#lx\n",
		pci_resource_flags(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 0));
	dev_dbg(&pdev->dev, "PCI resource flags BAR1 %#lx\n",
		pci_resource_flags(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource len %#llx\n",
		(unsigned long long)pci_resource_len(pdev, 1));
	dev_dbg(&pdev->dev, "PCI resource start %#llx\n",
		(unsigned long long)pci_resource_start(pdev, 1));

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    !(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "PCI BAR region not MMIO\n");
		ret = -ENOMEM;
		goto err_free_device;
	}

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "cannot request PCI resources\n");
		goto err_disable_pdev;
	}

	/* Enable 64-Bit DMA */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed\n");
			goto err_free_resource;
		}
	} else {
		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (ret != 0) {
			dev_err(&pdev->dev,
				"pci_set_dma_mask failed\n");
			goto err_free_resource;
		}
	}

	pci_set_master(pdev);

	/* Map register space */
	start = pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	len = pci_resource_len(dev->pdev, PVRDMA_PCI_RESOURCE_REG);
	dev->regs = ioremap(start, len);
	if (!dev->regs) {
		dev_err(&pdev->dev, "register mapping failed\n");
		ret = -ENOMEM;
		goto err_free_resource;
	}

	/* Setup per-device UAR. */
	dev->driver_uar.index = 0;
	dev->driver_uar.pfn =
		pci_resource_start(dev->pdev, PVRDMA_PCI_RESOURCE_UAR) >>
		PAGE_SHIFT;
	dev->driver_uar.map =
		ioremap(dev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!dev->driver_uar.map) {
		dev_err(&pdev->dev, "failed to remap UAR pages\n");
		ret = -ENOMEM;
		goto err_unmap_regs;
	}

	dev->dsr_version = pvrdma_read_reg(dev, PVRDMA_REG_VERSION);
	dev_info(&pdev->dev, "device version %d, driver version %d\n",
		 dev->dsr_version, PVRDMA_VERSION);

	dev->dsr = dma_alloc_coherent(&pdev->dev, sizeof(*dev->dsr),
				      &dev->dsrbase, GFP_KERNEL);
	if (!dev->dsr) {
		dev_err(&pdev->dev, "failed to allocate shared region\n");
		ret = -ENOMEM;
		goto err_uar_unmap;
	}

	/* Setup the shared region */
	dev->dsr->driver_version = PVRDMA_VERSION;
	dev->dsr->gos_info.gos_bits = sizeof(void *) == 4 ?
		PVRDMA_GOS_BITS_32 :
		PVRDMA_GOS_BITS_64;
	dev->dsr->gos_info.gos_type = PVRDMA_GOS_TYPE_LINUX;
	dev->dsr->gos_info.gos_ver = 1;
	dev->dsr->uar_pfn = dev->driver_uar.pfn;

	/* Command slot. */
	dev->cmd_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					   &slot_dma, GFP_KERNEL);
	if (!dev->cmd_slot) {
		ret = -ENOMEM;
		goto err_free_dsr;
	}

	dev->dsr->cmd_slot_dma = (u64)slot_dma;

	/* Response slot. */
	dev->resp_slot = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
					    &slot_dma, GFP_KERNEL);
	if (!dev->resp_slot) {
		ret = -ENOMEM;
		goto err_free_slots;
	}

	dev->dsr->resp_slot_dma = (u64)slot_dma;

	/* Async event ring */
	dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
				   dev->dsr->async_ring_pages.num_pages, true);
	if (ret)
		goto err_free_slots;
	dev->async_ring_state = dev->async_pdir.pages[0];
	dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;

	/* CQ notification ring */
	dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
	ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
				   dev->dsr->cq_ring_pages.num_pages, true);
	if (ret)
		goto err_free_async_ring;
	dev->cq_ring_state = dev->cq_pdir.pages[0];
	dev->dsr->cq_ring_pages.pdir_dma = dev->cq_pdir.dir_dma;

	/*
	 * Write the PA of the shared region to the device. The writes must be
	 * ordered such that the high bits are written last. When the writes
	 * complete, the device will have filled out the capabilities.
	 */

	pvrdma_write_reg(dev, PVRDMA_REG_DSRLOW, (u32)dev->dsrbase);
	pvrdma_write_reg(dev, PVRDMA_REG_DSRHIGH,
			 (u32)((u64)(dev->dsrbase) >> 32));

	/* Make sure the write is complete before reading status. */
	mb();

	/* The driver supports RoCE V1 and V2. */
	if (!PVRDMA_SUPPORTED(dev)) {
		dev_err(&pdev->dev, "driver needs RoCE v1 or v2 support\n");
		ret = -EFAULT;
		goto err_free_cq_ring;
	}

	/* Paired vmxnet3 will have same bus, slot. But func will be 0 */
	pdev_net = pci_get_slot(pdev->bus, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
	if (!pdev_net) {
		dev_err(&pdev->dev, "failed to find paired net device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	if (pdev_net->vendor != PCI_VENDOR_ID_VMWARE ||
	    pdev_net->device != PCI_DEVICE_ID_VMWARE_VMXNET3) {
		dev_err(&pdev->dev, "failed to find paired vmxnet3 device\n");
		pci_dev_put(pdev_net);
		ret = -ENODEV;
		goto err_free_cq_ring;
	}

	dev->netdev = pci_get_drvdata(pdev_net);
	pci_dev_put(pdev_net);
	if (!dev->netdev) {
		dev_err(&pdev->dev, "failed to get vmxnet3 device\n");
		ret = -ENODEV;
		goto err_free_cq_ring;
	}
	dev_hold(dev->netdev);

	dev_info(&pdev->dev, "paired device to %s\n", dev->netdev->name);

	/* Interrupt setup */
	ret = pvrdma_alloc_intrs(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate interrupts\n");
		ret = -ENOMEM;
		goto err_free_cq_ring;
	}

	/* Allocate UAR table. */
	ret = pvrdma_uar_table_init(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to allocate UAR table\n");
		ret = -ENOMEM;
		goto err_free_intrs;
	}

	/* Allocate GID table */
	dev->sgid_tbl = kcalloc(dev->dsr->caps.gid_tbl_len,
				sizeof(union ib_gid), GFP_KERNEL);
	if (!dev->sgid_tbl) {
		ret = -ENOMEM;
		goto err_free_uar_table;
	}
	dev_dbg(&pdev->dev, "gid table len %d\n", dev->dsr->caps.gid_tbl_len);

	pvrdma_enable_intrs(dev);

	/* Activate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_ACTIVATE);

	/* Make sure the write is complete before reading status. */
	mb();

	/* Check if device was successfully activated */
	ret = pvrdma_read_reg(dev, PVRDMA_REG_ERR);
	if (ret != 0) {
		dev_err(&pdev->dev, "failed to activate device\n");
		ret = -EFAULT;
		goto err_disable_intr;
	}

	/* Register IB device */
	ret = pvrdma_register_device(dev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register IB device\n");
		goto err_disable_intr;
	}

	dev->nb_netdev.notifier_call = pvrdma_netdevice_event;
	ret = register_netdevice_notifier(&dev->nb_netdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register netdevice events\n");
		goto err_unreg_ibdev;
	}

	dev_info(&pdev->dev, "attached to device\n");
	return 0;

err_unreg_ibdev:
	ib_unregister_device(&dev->ib_dev);
err_disable_intr:
	pvrdma_disable_intrs(dev);
	kfree(dev->sgid_tbl);
err_free_uar_table:
	pvrdma_uar_table_cleanup(dev);
err_free_intrs:
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);
	dev_put(dev->netdev);
err_free_cq_ring:
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
err_free_slots:
	pvrdma_free_slots(dev);
err_free_dsr:
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);
err_uar_unmap:
	iounmap(dev->driver_uar.map);
err_unmap_regs:
	iounmap(dev->regs);
err_free_resource:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
err_free_device:
	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);
	ib_dealloc_device(&dev->ib_dev);
	return ret;
}

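/*
 * PCI remove: tear down in roughly the reverse order of probe, resetting
 * the device and releasing rings, slots, tables and PCI resources.
 */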
static void pvrdma_pci_remove(struct pci_dev *pdev)
{
	struct pvrdma_dev *dev = pci_get_drvdata(pdev);

	if (!dev)
		return;

	dev_info(&pdev->dev, "detaching from device\n");

	unregister_netdevice_notifier(&dev->nb_netdev);
	dev->nb_netdev.notifier_call = NULL;

	flush_workqueue(event_wq);

	if (dev->netdev) {
		dev_put(dev->netdev);
		dev->netdev = NULL;
	}

	/* Unregister ib device */
	ib_unregister_device(&dev->ib_dev);

	mutex_lock(&pvrdma_device_list_lock);
	list_del(&dev->device_link);
	mutex_unlock(&pvrdma_device_list_lock);

	pvrdma_disable_intrs(dev);
	pvrdma_free_irq(dev);
	pci_free_irq_vectors(pdev);

	/* Deactivate pvrdma device */
	pvrdma_write_reg(dev, PVRDMA_REG_CTL, PVRDMA_DEVICE_CTL_RESET);
	pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
	pvrdma_page_dir_cleanup(dev, &dev->async_pdir);
	pvrdma_free_slots(dev);
	dma_free_coherent(&pdev->dev, sizeof(*dev->dsr), dev->dsr,
			  dev->dsrbase);
	iounmap(dev->regs);
	kfree(dev->sgid_tbl);
	kfree(dev->cq_tbl);
	kfree(dev->srq_tbl);
	kfree(dev->qp_tbl);
	pvrdma_uar_table_cleanup(dev);
	iounmap(dev->driver_uar.map);

	ib_dealloc_device(&dev->ib_dev);

	/* Free pci resources */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static const struct pci_device_id pvrdma_pci_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_PVRDMA), },
	{ 0 },
};

MODULE_DEVICE_TABLE(pci, pvrdma_pci_table);

static struct pci_driver pvrdma_driver = {
	.name = DRV_NAME,
	.id_table = pvrdma_pci_table,
	.probe = pvrdma_pci_probe,
	.remove = pvrdma_pci_remove,
};

static int __init pvrdma_init(void)
{
	int err;

	event_wq = alloc_ordered_workqueue("pvrdma_event_wq", WQ_MEM_RECLAIM);
	if (!event_wq)
		return -ENOMEM;

	err = pci_register_driver(&pvrdma_driver);
	if (err)
		destroy_workqueue(event_wq);

	return err;
}

static void __exit pvrdma_cleanup(void)
{
	pci_unregister_driver(&pvrdma_driver);

	destroy_workqueue(event_wq);
}

module_init(pvrdma_init);
module_exit(pvrdma_cleanup);

MODULE_AUTHOR("VMware, Inc");
MODULE_DESCRIPTION("VMware Paravirtual RDMA driver");
MODULE_LICENSE("Dual BSD/GPL");