/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/export.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include <rdma/mthca-abi.h>
#include "mthca_memfree.h"

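/*
 * Device and port attribute queries below are implemented by
 * building a subnet management packet (SMP) and sending it to the
 * HCA's own management agent through the MAD_IFC firmware command.
 * init_query_mad() fills in the header common to all of these
 * LID-routed SubnGet() requests.
 */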
static void init_query_mad(struct ib_smp *mad)
{
        mad->base_version  = 1;
        mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        mad->class_version = 1;
        mad->method        = IB_MGMT_METHOD_GET;
}

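/*
 * Device attributes are a mix of values read from firmware with a
 * NodeInfo MAD (vendor/part ID, hardware revision, system image
 * GUID) and limits cached in struct mthca_dev at init time.  The
 * driver defines no private query_device extension, so any udata
 * passed in is rejected.
 */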
static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
                              struct ib_udata *uhw)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        struct mthca_dev *mdev = to_mdev(ibdev);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        props->fw_ver              = mdev->fw_ver;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(mdev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        props->device_cap_flags    = mdev->device_cap_flags;
        props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
        props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);

        props->max_mr_size         = ~0ull;
        props->page_size_cap       = mdev->limits.page_size_cap;
        props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
        props->max_qp_wr           = mdev->limits.max_wqes;
        props->max_send_sge        = mdev->limits.max_sg;
        props->max_recv_sge        = mdev->limits.max_sg;
        props->max_sge_rd          = mdev->limits.max_sg;
        props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
        props->max_cqe             = mdev->limits.max_cqes;
        props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
        props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
        props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
        props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
        props->max_srq_wr          = mdev->limits.max_srq_wqes;
        props->max_srq_sge         = mdev->limits.max_srq_sge;
        props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
        props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
                                        IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->max_pkeys           = mdev->limits.pkey_table_len;
        props->max_mcast_grp       = mdev->limits.num_mgms + mdev->limits.num_amgms;
        props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        /*
         * If Sinai memory key optimization is being used, then only
         * the 8-bit key portion will change.  For other HCAs, the
         * unused index bits will also be used for FMR remapping.
         */
        if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                props->max_map_per_fmr = 255;
        else
                props->max_map_per_fmr =
                        (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;

        err = 0;
 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

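/*
 * Port attributes come from a PortInfo MAD; the fixed byte offsets
 * into out_mad->data below follow the PortInfo attribute layout
 * defined by the InfiniBand Architecture Specification.
 */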
static int mthca_query_port(struct ib_device *ibdev,
                            u8 port, struct ib_port_attr *props)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        /* props being zeroed by the caller, avoid zeroing it here */

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        props->lid               = be16_to_cpup((__be16 *) (out_mad->data + 16));
        props->lmc               = out_mad->data[34] & 0x7;
        props->sm_lid            = be16_to_cpup((__be16 *) (out_mad->data + 18));
        props->sm_sl             = out_mad->data[36] & 0xf;
        props->state             = out_mad->data[32] & 0xf;
        props->phys_state        = out_mad->data[33] >> 4;
        props->port_cap_flags    = be32_to_cpup((__be32 *) (out_mad->data + 20));
        props->gid_tbl_len       = to_mdev(ibdev)->limits.gid_table_len;
        props->max_msg_sz        = 0x80000000;
        props->pkey_tbl_len      = to_mdev(ibdev)->limits.pkey_table_len;
        props->bad_pkey_cntr     = be16_to_cpup((__be16 *) (out_mad->data + 46));
        props->qkey_viol_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 48));
        props->active_width      = out_mad->data[31] & 0xf;
        props->active_speed      = out_mad->data[35] >> 4;
        props->max_mtu           = out_mad->data[41] & 0xf;
        props->active_mtu        = out_mad->data[36] >> 4;
        props->subnet_timeout    = out_mad->data[51] & 0x1f;
        props->max_vl_num        = out_mad->data[37] >> 4;
        props->init_type_reply   = out_mad->data[41] >> 4;

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mthca_modify_device(struct ib_device *ibdev,
                               int mask,
                               struct ib_device_modify *props)
{
        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
                if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                        return -ERESTARTSYS;
                memcpy(ibdev->node_desc, props->node_desc,
                       IB_DEVICE_NODE_DESC_MAX);
                mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        }

        return 0;
}

static int mthca_modify_port(struct ib_device *ibdev,
                             u8 port, int port_modify_mask,
                             struct ib_port_modify *props)
{
        struct mthca_set_ib_param set_ib;
        struct ib_port_attr attr;
        int err;

        if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                return -ERESTARTSYS;

        err = ib_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        set_ib.set_si_guid     = 0;
        set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

        set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
out:
        mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        return err;
}

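/*
 * The P_Key table is read 32 entries at a time: the attribute
 * modifier selects a block of 32 entries and the requested P_Key
 * is then picked out of the returned block.
 */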
static int mthca_query_pkey(struct ib_device *ibdev,
                            u8 port, u16 index, u16 *pkey)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

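/*
 * A port GID is the 8-byte subnet prefix from PortInfo followed by
 * an 8-byte GUID from the GuidInfo table, so building one takes
 * two MADs.  GuidInfo returns GUIDs in blocks of eight.
 */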
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
                           int index, union ib_gid *gid)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw, out_mad->data + 8, 8);

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

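/*
 * Userspace context setup: allocate a UAR for the process, set up
 * its userspace doorbell table (used only on mem-free HCAs) and
 * report the QP table size and UAR context size back through the
 * mthca userspace ABI.  New contexts are refused while the device
 * is not marked active.
 */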
static int mthca_alloc_ucontext(struct ib_ucontext *uctx,
                                struct ib_udata *udata)
{
        struct ib_device *ibdev = uctx->device;
        struct mthca_alloc_ucontext_resp uresp = {};
        struct mthca_ucontext *context = to_mucontext(uctx);
        int                              err;

        if (!(to_mdev(ibdev)->active))
                return -EAGAIN;

        uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
        if (mthca_is_memfree(to_mdev(ibdev)))
                uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
        else
                uresp.uarc_size = 0;

        err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
        if (err)
                return err;

        context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
        if (IS_ERR(context->db_tab)) {
                err = PTR_ERR(context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                return err;
        }

        if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
                mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                return -EFAULT;
        }

        context->reg_mr_warned = 0;

        return 0;
}

static void mthca_dealloc_ucontext(struct ib_ucontext *context)
{
        mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
                                  to_mucontext(context)->db_tab);
        mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
}

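/*
 * mmap() on the uverbs file maps the context's UAR (User Access
 * Region) so userspace can ring doorbells directly.  The UAR is a
 * single page of device registers and must be mapped noncached.
 */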
static int mthca_mmap_uar(struct ib_ucontext *context,
                          struct vm_area_struct *vma)
{
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (io_remap_pfn_range(vma, vma->vm_start,
                               to_mucontext(context)->uar.pfn,
                               PAGE_SIZE, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

static int mthca_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
        struct ib_device *ibdev = ibpd->device;
        struct mthca_pd *pd = to_mpd(ibpd);
        int err;

        err = mthca_pd_alloc(to_mdev(ibdev), !udata, pd);
        if (err)
                return err;

        if (udata) {
                if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
                        mthca_pd_free(to_mdev(ibdev), pd);
                        return -EFAULT;
                }
        }

        return 0;
}

static void mthca_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
        mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
}

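/*
 * The ib_ah itself is allocated by the IB core, sized via the
 * INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah) entry in mthca_dev_ops
 * below; this callback only initializes the driver-specific part.
 */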
static int mthca_ah_create(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr,
                           u32 flags, struct ib_udata *udata)
{
        struct mthca_ah *ah = to_mah(ibah);

        return mthca_create_ah(to_mdev(ibah->device), to_mpd(ibah->pd), ah_attr,
                               ah);
}

static void mthca_ah_destroy(struct ib_ah *ah, u32 flags)
{
        mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
}

static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
                                       struct ib_srq_init_attr *init_attr,
                                       struct ib_udata *udata)
{
        struct mthca_create_srq ucmd;
        struct mthca_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mthca_ucontext, ibucontext);
        struct mthca_srq *srq;
        int err;

        if (init_attr->srq_type != IB_SRQT_BASIC)
                return ERR_PTR(-EOPNOTSUPP);

        srq = kmalloc(sizeof *srq, GFP_KERNEL);
        if (!srq)
                return ERR_PTR(-ENOMEM);

        if (udata) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_free;
                }

                err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                        context->db_tab, ucmd.db_index,
                                        ucmd.db_page);

                if (err)
                        goto err_free;

                srq->mr.ibmr.lkey = ucmd.lkey;
                srq->db_index     = ucmd.db_index;
        }

        err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
                              &init_attr->attr, srq, udata);

        if (err && udata)
                mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
                                    context->db_tab, ucmd.db_index);

        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
                mthca_free_srq(to_mdev(pd->device), srq);
                err = -EFAULT;
                goto err_free;
        }

        return &srq->ibsrq;

err_free:
        kfree(srq);

        return ERR_PTR(err);
}

static int mthca_destroy_srq(struct ib_srq *srq, struct ib_udata *udata)
{
        if (udata) {
                struct mthca_ucontext *context =
                        rdma_udata_to_drv_context(
                                udata,
                                struct mthca_ucontext,
                                ibucontext);

                mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
                                    context->db_tab, to_msrq(srq)->db_index);
        }

        mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
        kfree(srq);

        return 0;
}

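/*
 * For userspace QPs the consumer allocates the work queues itself
 * and passes their lkey and doorbell indices in through struct
 * mthca_create_qp, so the driver only has to map the doorbell
 * pages.  Special QPs (SMI/GSI) may only be created in the kernel.
 */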
static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata)
{
        struct mthca_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mthca_ucontext, ibucontext);
        struct mthca_create_qp ucmd;
        struct mthca_qp *qp;
        int err;

        if (init_attr->create_flags)
                return ERR_PTR(-EINVAL);

        switch (init_attr->qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        {
                qp = kzalloc(sizeof(*qp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                if (udata) {
                        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                                kfree(qp);
                                return ERR_PTR(-EFAULT);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.sq_db_index, ucmd.sq_db_page);
                        if (err) {
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.rq_db_index, ucmd.rq_db_page);
                        if (err) {
                                mthca_unmap_user_db(to_mdev(pd->device),
                                                    &context->uar,
                                                    context->db_tab,
                                                    ucmd.sq_db_index);
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        qp->mr.ibmr.lkey = ucmd.lkey;
                        qp->sq.db_index  = ucmd.sq_db_index;
                        qp->rq.db_index  = ucmd.rq_db_index;
                }

                err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
                                     to_mcq(init_attr->send_cq),
                                     to_mcq(init_attr->recv_cq),
                                     init_attr->qp_type, init_attr->sq_sig_type,
                                     &init_attr->cap, qp, udata);

                if (err && udata) {
                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.sq_db_index);
                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.rq_db_index);
                }

                qp->ibqp.qp_num = qp->qpn;
                break;
        }
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        {
                /* Don't allow userspace to create special QPs */
                if (udata)
                        return ERR_PTR(-EINVAL);

                qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

                err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
                                      to_mcq(init_attr->send_cq),
                                      to_mcq(init_attr->recv_cq),
                                      init_attr->sq_sig_type, &init_attr->cap,
                                      qp->ibqp.qp_num, init_attr->port_num,
                                      to_msqp(qp), udata);
                break;
        }
        default:
                /* Don't support raw QPs */
                return ERR_PTR(-ENOSYS);
        }

        if (err) {
                kfree(qp);
                return ERR_PTR(err);
        }

        init_attr->cap.max_send_wr     = qp->sq.max;
        init_attr->cap.max_recv_wr     = qp->rq.max;
        init_attr->cap.max_send_sge    = qp->sq.max_gs;
        init_attr->cap.max_recv_sge    = qp->rq.max_gs;
        init_attr->cap.max_inline_data = qp->max_inline_data;

        return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp, struct ib_udata *udata)
{
        if (udata) {
                struct mthca_ucontext *context =
                        rdma_udata_to_drv_context(
                                udata,
                                struct mthca_ucontext,
                                ibucontext);

                mthca_unmap_user_db(to_mdev(qp->device),
                                    &context->uar,
                                    context->db_tab,
                                    to_mqp(qp)->sq.db_index);
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &context->uar,
                                    context->db_tab,
                                    to_mqp(qp)->rq.db_index);
        }
        mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
        kfree(qp);
        return 0;
}

static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
                                     const struct ib_cq_init_attr *attr,
                                     struct ib_udata *udata)
{
        int entries = attr->cqe;
        struct mthca_create_cq ucmd;
        struct mthca_cq *cq;
        int nent;
        int err;
        struct mthca_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mthca_ucontext, ibucontext);

        if (attr->flags)
                return ERR_PTR(-EINVAL);

        if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
                return ERR_PTR(-EINVAL);

        if (udata) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                        return ERR_PTR(-EFAULT);

                err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
                                        context->db_tab, ucmd.set_db_index,
                                        ucmd.set_db_page);
                if (err)
                        return ERR_PTR(err);

                err = mthca_map_user_db(to_mdev(ibdev), &context->uar,
                                        context->db_tab, ucmd.arm_db_index,
                                        ucmd.arm_db_page);
                if (err)
                        goto err_unmap_set;
        }

        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
        if (!cq) {
                err = -ENOMEM;
                goto err_unmap_arm;
        }

        if (udata) {
                cq->buf.mr.ibmr.lkey = ucmd.lkey;
                cq->set_ci_db_index  = ucmd.set_db_index;
                cq->arm_db_index     = ucmd.arm_db_index;
        }

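        /*
         * The hardware wants a power-of-two number of CQ entries, and
         * the depth reported back to the consumer is nent - 1, so pick
         * the smallest power of two strictly greater than the request
         * (mthca_resize_cq() does the same via
         * roundup_pow_of_two(entries + 1)).
         */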
        for (nent = 1; nent <= entries; nent <<= 1)
                ; /* nothing */

        err = mthca_init_cq(to_mdev(ibdev), nent, context,
                            udata ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
                            cq);
        if (err)
                goto err_free;

        if (udata && ib_copy_to_udata(udata, &cq->cqn, sizeof(__u32))) {
                mthca_free_cq(to_mdev(ibdev), cq);
                err = -EFAULT;
                goto err_free;
        }

        cq->resize_buf = NULL;

        return &cq->ibcq;

err_free:
        kfree(cq);

err_unmap_arm:
        if (udata)
                mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
                                    context->db_tab, ucmd.arm_db_index);

err_unmap_set:
        if (udata)
                mthca_unmap_user_db(to_mdev(ibdev), &context->uar,
                                    context->db_tab, ucmd.set_db_index);

        return ERR_PTR(err);
}

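/*
 * Kernel CQ resize is a small state machine guarded by cq->lock:
 * the buffer goes from CQ_RESIZE_ALLOC (memory reserved) to
 * CQ_RESIZE_READY (new buffer usable).  After the RESIZE_CQ
 * firmware command succeeds, mthca_resize_cq() either copies the
 * outstanding CQEs over and swaps the buffers itself, or, if the
 * completion path has already switched buffers, just frees the
 * leftover one.
 */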
static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
                                  int entries)
{
        int ret;

        spin_lock_irq(&cq->lock);
        if (cq->resize_buf) {
                ret = -EBUSY;
                goto unlock;
        }

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf) {
                ret = -ENOMEM;
                goto unlock;
        }

        cq->resize_buf->state = CQ_RESIZE_ALLOC;

        ret = 0;

unlock:
        spin_unlock_irq(&cq->lock);

        if (ret)
                return ret;

        ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
        if (ret) {
                spin_lock_irq(&cq->lock);
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                spin_unlock_irq(&cq->lock);
                return ret;
        }

        cq->resize_buf->cqe = entries - 1;

        spin_lock_irq(&cq->lock);
        cq->resize_buf->state = CQ_RESIZE_READY;
        spin_unlock_irq(&cq->lock);

        return 0;
}

static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(ibcq->device);
        struct mthca_cq *cq = to_mcq(ibcq);
        struct mthca_resize_cq ucmd;
        u32 lkey;
        int ret;

        if (entries < 1 || entries > dev->limits.max_cqes)
                return -EINVAL;

        mutex_lock(&cq->mutex);

        entries = roundup_pow_of_two(entries + 1);
        if (entries == ibcq->cqe + 1) {
                ret = 0;
                goto out;
        }

        if (cq->is_kernel) {
                ret = mthca_alloc_resize_buf(dev, cq, entries);
                if (ret)
                        goto out;
                lkey = cq->resize_buf->buf.mr.ibmr.lkey;
        } else {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        ret = -EFAULT;
                        goto out;
                }
                lkey = ucmd.lkey;
        }

        ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));

        if (ret) {
                if (cq->resize_buf) {
                        mthca_free_cq_buf(dev, &cq->resize_buf->buf,
                                          cq->resize_buf->cqe);
                        kfree(cq->resize_buf);
                        spin_lock_irq(&cq->lock);
                        cq->resize_buf = NULL;
                        spin_unlock_irq(&cq->lock);
                }
                goto out;
        }

        if (cq->is_kernel) {
                struct mthca_cq_buf tbuf;
                int tcqe;

                spin_lock_irq(&cq->lock);
                if (cq->resize_buf->state == CQ_RESIZE_READY) {
                        mthca_cq_resize_copy_cqes(cq);
                        tbuf         = cq->buf;
                        tcqe         = cq->ibcq.cqe;
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;
                } else {
                        tbuf = cq->resize_buf->buf;
                        tcqe = cq->resize_buf->cqe;
                }

                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                spin_unlock_irq(&cq->lock);

                mthca_free_cq_buf(dev, &tbuf, tcqe);
        } else
                ibcq->cqe = entries - 1;

out:
        mutex_unlock(&cq->mutex);

        return ret;
}

static int mthca_destroy_cq(struct ib_cq *cq, struct ib_udata *udata)
{
        if (udata) {
                struct mthca_ucontext *context =
                        rdma_udata_to_drv_context(
                                udata,
                                struct mthca_ucontext,
                                ibucontext);

                mthca_unmap_user_db(to_mdev(cq->device),
                                    &context->uar,
                                    context->db_tab,
                                    to_mcq(cq)->arm_db_index);
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &context->uar,
                                    context->db_tab,
                                    to_mcq(cq)->set_ci_db_index);
        }
        mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
        kfree(cq);

        return 0;
}

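/*
 * Translate IB verbs access flags into MPT (memory protection
 * table) entry flags.  Local read access is always granted, as the
 * IB spec makes it implicit for every memory region.
 */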
static inline u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
               MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mthca_mr *mr;
        int err;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mthca_mr_alloc_notrans(to_mdev(pd->device),
                                     to_mpd(pd)->pd_num,
                                     convert_access(acc), mr);

        if (err) {
                kfree(mr);
                return ERR_PTR(err);
        }

        mr->umem = NULL;

        return &mr->ibmr;
}

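/*
 * User memory registration: pin the region with ib_umem_get(),
 * feed the resulting DMA addresses into the MTT (memory
 * translation table) in chunks sized to suit mthca_write_mtt(),
 * then allocate an MPT entry pointing at that MTT.
 */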
static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                       u64 virt, int acc, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(pd->device);
        struct sg_dma_page_iter sg_iter;
        struct mthca_ucontext *context = rdma_udata_to_drv_context(
                udata, struct mthca_ucontext, ibucontext);
        struct mthca_mr *mr;
        struct mthca_reg_mr ucmd;
        u64 *pages;
        int n, i;
        int err = 0;
        int write_mtt_size;

        if (udata->inlen < sizeof ucmd) {
                if (!context->reg_mr_warned) {
                        mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
                                   current->comm);
                        mthca_warn(dev, "  Update libmthca to fix this.\n");
                }
                ++context->reg_mr_warned;
                ucmd.mr_attrs = 0;
        } else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                return ERR_PTR(-EFAULT);

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->umem = ib_umem_get(udata, start, length, acc,
                               ucmd.mr_attrs & MTHCA_MR_DMASYNC);

        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err;
        }

        n = ib_umem_num_pages(mr->umem);

        mr->mtt = mthca_alloc_mtt(dev, n);
        if (IS_ERR(mr->mtt)) {
                err = PTR_ERR(mr->mtt);
                goto err_umem;
        }

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto err_mtt;
        }

        i = n = 0;

        write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

        for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
                pages[i++] = sg_page_iter_dma_address(&sg_iter);

                /*
                 * Be friendly to write_mtt and pass it chunks
                 * of appropriate size.
                 */
                if (i == write_mtt_size) {
                        err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
                        if (err)
                                goto mtt_done;
                        n += i;
                        i = 0;
                }
        }

        if (i)
                err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
        free_page((unsigned long) pages);
        if (err)
                goto err_mtt;

        err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, PAGE_SHIFT, virt, length,
                             convert_access(acc), mr);

        if (err)
                goto err_mtt;

        return &mr->ibmr;

err_mtt:
        mthca_free_mtt(dev, mr->mtt);

err_umem:
        ib_umem_release(mr->umem);

err:
        kfree(mr);
        return ERR_PTR(err);
}

static int mthca_dereg_mr(struct ib_mr *mr, struct ib_udata *udata)
{
        struct mthca_mr *mmr = to_mmr(mr);

        mthca_free_mr(to_mdev(mr->device), mmr);
        if (mmr->umem)
                ib_umem_release(mmr->umem);
        kfree(mmr);

        return 0;
}

static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                                      struct ib_fmr_attr *fmr_attr)
{
        struct mthca_fmr *fmr;
        int err;

        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
        err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
                             convert_access(mr_access_flags), fmr);

        if (err) {
                kfree(fmr);
                return ERR_PTR(err);
        }

        return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
        struct mthca_fmr *mfmr = to_mfmr(fmr);
        int err;

        err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
        if (err)
                return err;

        kfree(mfmr);
        return 0;
}

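/*
 * All FMRs on the list must belong to the same device; the first
 * pass below only checks that.  On mem-free HCAs the unmap updates
 * MPT entries from the CPU, so a write barrier is needed to make
 * those stores visible before SYNC_TPT tells the hardware to
 * resync its TPT caches.
 */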
static int mthca_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;
        int err;
        struct mthca_dev *mdev = NULL;

        list_for_each_entry(fmr, fmr_list, list) {
                if (mdev && to_mdev(fmr->device) != mdev)
                        return -EINVAL;
                mdev = to_mdev(fmr->device);
        }

        if (!mdev)
                return 0;

        if (mthca_is_memfree(mdev)) {
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

                wmb();
        } else
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

        err = mthca_SYNC_TPT(mdev);
        return err;
}

static ssize_t hw_rev_show(struct device *device,
                           struct device_attribute *attr, char *buf)
{
        struct mthca_dev *dev =
                rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

        return sprintf(buf, "%x\n", dev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
                             struct device_attribute *attr, char *buf)
{
        struct mthca_dev *dev =
                rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

        switch (dev->pdev->device) {
        case PCI_DEVICE_ID_MELLANOX_TAVOR:
                return sprintf(buf, "MT23108\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
                return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL:
                return sprintf(buf, "MT25208\n");
        case PCI_DEVICE_ID_MELLANOX_SINAI:
        case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
                return sprintf(buf, "MT25204\n");
        default:
                return sprintf(buf, "unknown\n");
        }
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t board_id_show(struct device *device,
                             struct device_attribute *attr, char *buf)
{
        struct mthca_dev *dev =
                rdma_device_to_drv_device(device, struct mthca_dev, ib_dev);

        return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *mthca_dev_attributes[] = {
        &dev_attr_hw_rev.attr,
        &dev_attr_hca_type.attr,
        &dev_attr_board_id.attr,
        NULL
};

static const struct attribute_group mthca_attr_group = {
        .attrs = mthca_dev_attributes,
};

static int mthca_init_node_data(struct mthca_dev *dev)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);

        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        if (mthca_is_memfree(dev))
                dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
                                struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;

        err = ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;

        return 0;
}

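/*
 * fw_ver packs major.minor.subminor into the low 48 bits as three
 * 16-bit fields: major in bits 47:32, minor in 31:16 and subminor
 * in 15:0.
 */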
static void get_dev_fw_str(struct ib_device *device, char *str)
{
        struct mthca_dev *dev =
                container_of(device, struct mthca_dev, ib_dev);
        snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
                 (int) (dev->fw_ver >> 32),
                 (int) (dev->fw_ver >> 16) & 0xffff,
                 (int) dev->fw_ver & 0xffff);
}

static const struct ib_device_ops mthca_dev_ops = {
        .alloc_pd = mthca_alloc_pd,
        .alloc_ucontext = mthca_alloc_ucontext,
        .attach_mcast = mthca_multicast_attach,
        .create_ah = mthca_ah_create,
        .create_cq = mthca_create_cq,
        .create_qp = mthca_create_qp,
        .dealloc_pd = mthca_dealloc_pd,
        .dealloc_ucontext = mthca_dealloc_ucontext,
        .dereg_mr = mthca_dereg_mr,
        .destroy_ah = mthca_ah_destroy,
        .destroy_cq = mthca_destroy_cq,
        .destroy_qp = mthca_destroy_qp,
        .detach_mcast = mthca_multicast_detach,
        .get_dev_fw_str = get_dev_fw_str,
        .get_dma_mr = mthca_get_dma_mr,
        .get_port_immutable = mthca_port_immutable,
        .mmap = mthca_mmap_uar,
        .modify_device = mthca_modify_device,
        .modify_port = mthca_modify_port,
        .modify_qp = mthca_modify_qp,
        .poll_cq = mthca_poll_cq,
        .process_mad = mthca_process_mad,
        .query_ah = mthca_ah_query,
        .query_device = mthca_query_device,
        .query_gid = mthca_query_gid,
        .query_pkey = mthca_query_pkey,
        .query_port = mthca_query_port,
        .query_qp = mthca_query_qp,
        .reg_user_mr = mthca_reg_user_mr,
        .resize_cq = mthca_resize_cq,

        INIT_RDMA_OBJ_SIZE(ib_ah, mthca_ah, ibah),
        INIT_RDMA_OBJ_SIZE(ib_pd, mthca_pd, ibpd),
        INIT_RDMA_OBJ_SIZE(ib_ucontext, mthca_ucontext, ibucontext),
};

static const struct ib_device_ops mthca_dev_arbel_srq_ops = {
        .create_srq = mthca_create_srq,
        .destroy_srq = mthca_destroy_srq,
        .modify_srq = mthca_modify_srq,
        .post_srq_recv = mthca_arbel_post_srq_recv,
        .query_srq = mthca_query_srq,
};

static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
        .create_srq = mthca_create_srq,
        .destroy_srq = mthca_destroy_srq,
        .modify_srq = mthca_modify_srq,
        .post_srq_recv = mthca_tavor_post_srq_recv,
        .query_srq = mthca_query_srq,
};

static const struct ib_device_ops mthca_dev_arbel_fmr_ops = {
        .alloc_fmr = mthca_alloc_fmr,
        .dealloc_fmr = mthca_dealloc_fmr,
        .map_phys_fmr = mthca_arbel_map_phys_fmr,
        .unmap_fmr = mthca_unmap_fmr,
};

static const struct ib_device_ops mthca_dev_tavor_fmr_ops = {
        .alloc_fmr = mthca_alloc_fmr,
        .dealloc_fmr = mthca_dealloc_fmr,
        .map_phys_fmr = mthca_tavor_map_phys_fmr,
        .unmap_fmr = mthca_unmap_fmr,
};

static const struct ib_device_ops mthca_dev_arbel_ops = {
        .post_recv = mthca_arbel_post_receive,
        .post_send = mthca_arbel_post_send,
        .req_notify_cq = mthca_arbel_arm_cq,
};

static const struct ib_device_ops mthca_dev_tavor_ops = {
        .post_recv = mthca_tavor_post_receive,
        .post_send = mthca_tavor_post_send,
        .req_notify_cq = mthca_tavor_arm_cq,
};

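/*
 * Register with the IB core.  The op tables are composed per HCA
 * family: mem-free HCAs (Arbel/Sinai) and Tavor-style HCAs get
 * different post_send/post_recv/req_notify_cq implementations, and
 * the SRQ and FMR op sets are only installed when the device
 * supports those features.
 */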
int mthca_register_device(struct mthca_dev *dev)
{
        int ret;

        ret = mthca_init_node_data(dev);
        if (ret)
                return ret;

        dev->ib_dev.owner                = THIS_MODULE;

        dev->ib_dev.uverbs_abi_ver       = MTHCA_UVERBS_ABI_VERSION;
        dev->ib_dev.uverbs_cmd_mask      =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
                (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
        dev->ib_dev.node_type            = RDMA_NODE_IB_CA;
        dev->ib_dev.phys_port_cnt        = dev->limits.num_ports;
        dev->ib_dev.num_comp_vectors     = 1;
        dev->ib_dev.dev.parent           = &dev->pdev->dev;

        if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
                dev->ib_dev.uverbs_cmd_mask     |=
                        (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
                        (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
                        (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
                        (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

                if (mthca_is_memfree(dev))
                        ib_set_device_ops(&dev->ib_dev,
                                          &mthca_dev_arbel_srq_ops);
                else
                        ib_set_device_ops(&dev->ib_dev,
                                          &mthca_dev_tavor_srq_ops);
        }

        if (dev->mthca_flags & MTHCA_FLAG_FMR) {
                if (mthca_is_memfree(dev))
                        ib_set_device_ops(&dev->ib_dev,
                                          &mthca_dev_arbel_fmr_ops);
                else
                        ib_set_device_ops(&dev->ib_dev,
                                          &mthca_dev_tavor_fmr_ops);
        }

        ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);

        if (mthca_is_memfree(dev))
                ib_set_device_ops(&dev->ib_dev, &mthca_dev_arbel_ops);
        else
                ib_set_device_ops(&dev->ib_dev, &mthca_dev_tavor_ops);

        mutex_init(&dev->cap_mask_mutex);

        rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
        dev->ib_dev.driver_id = RDMA_DRIVER_MTHCA;
        ret = ib_register_device(&dev->ib_dev, "mthca%d");
        if (ret)
                return ret;

        mthca_start_catas_poll(dev);

        return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
        mthca_stop_catas_poll(dev);
        ib_unregister_device(&dev->ib_dev);
}