/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_user_verbs.h>

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/export.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include <rdma/mthca-abi.h>
#include "mthca_memfree.h"

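/*
 * Prepare an SMP for a LID-routed SubnGet query; callers set attr_id
 * (and attr_mod where needed) before passing it to mthca_MAD_IFC().
 */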
static void init_query_mad(struct ib_smp *mad)
{
        mad->base_version  = 1;
        mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        mad->class_version = 1;
        mad->method        = IB_MGMT_METHOD_GET;
}

static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props,
                              struct ib_udata *uhw)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;
        struct mthca_dev *mdev = to_mdev(ibdev);

        if (uhw->inlen || uhw->outlen)
                return -EINVAL;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        props->fw_ver              = mdev->fw_ver;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(mdev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

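        /*
         * The fixed offsets below index into the NodeInfo attribute
         * payload per the IBA layout: device ID at byte 30, HW revision
         * at byte 32, and the 24-bit vendor OUI in the word at byte 36
         * (hence the 0xffffff mask).
         */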
        props->device_cap_flags    = mdev->device_cap_flags;
        props->vendor_id           = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id      = be16_to_cpup((__be16 *) (out_mad->data + 30));
        props->hw_ver              = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);

        props->max_mr_size         = ~0ull;
        props->page_size_cap       = mdev->limits.page_size_cap;
        props->max_qp              = mdev->limits.num_qps - mdev->limits.reserved_qps;
        props->max_qp_wr           = mdev->limits.max_wqes;
        props->max_send_sge        = mdev->limits.max_sg;
        props->max_recv_sge        = mdev->limits.max_sg;
        props->max_sge_rd          = mdev->limits.max_sg;
        props->max_cq              = mdev->limits.num_cqs - mdev->limits.reserved_cqs;
        props->max_cqe             = mdev->limits.max_cqes;
        props->max_mr              = mdev->limits.num_mpts - mdev->limits.reserved_mrws;
        props->max_pd              = mdev->limits.num_pds - mdev->limits.reserved_pds;
        props->max_qp_rd_atom      = 1 << mdev->qp_table.rdb_shift;
        props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma;
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
        props->max_srq             = mdev->limits.num_srqs - mdev->limits.reserved_srqs;
        props->max_srq_wr          = mdev->limits.max_srq_wqes;
        props->max_srq_sge         = mdev->limits.max_srq_sge;
        props->local_ca_ack_delay  = mdev->limits.local_ca_ack_delay;
        props->atomic_cap          = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ?
                                        IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->max_pkeys           = mdev->limits.pkey_table_len;
        props->max_mcast_grp       = mdev->limits.num_mgms + mdev->limits.num_amgms;
        props->max_mcast_qp_attach = MTHCA_QP_PER_MGM;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        /*
         * If Sinai memory key optimization is being used, then only
         * the 8-bit key portion will change.  For other HCAs, the
         * unused index bits will also be used for FMR remapping.
         */
        if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT)
                props->max_map_per_fmr = 255;
        else
                props->max_map_per_fmr =
                        (1 << (32 - ilog2(mdev->limits.num_mpts))) - 1;

        err = 0;
 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mthca_query_port(struct ib_device *ibdev,
                            u8 port, struct ib_port_attr *props)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        /* props is zeroed by the caller; avoid zeroing it here. */

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

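        /* Decode the PortInfo attribute payload by its fixed IBA byte offsets. */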
        props->lid               = be16_to_cpup((__be16 *) (out_mad->data + 16));
        props->lmc               = out_mad->data[34] & 0x7;
        props->sm_lid            = be16_to_cpup((__be16 *) (out_mad->data + 18));
        props->sm_sl             = out_mad->data[36] & 0xf;
        props->state             = out_mad->data[32] & 0xf;
        props->phys_state        = out_mad->data[33] >> 4;
        props->port_cap_flags    = be32_to_cpup((__be32 *) (out_mad->data + 20));
        props->gid_tbl_len       = to_mdev(ibdev)->limits.gid_table_len;
        props->max_msg_sz        = 0x80000000;
        props->pkey_tbl_len      = to_mdev(ibdev)->limits.pkey_table_len;
        props->bad_pkey_cntr     = be16_to_cpup((__be16 *) (out_mad->data + 46));
        props->qkey_viol_cntr    = be16_to_cpup((__be16 *) (out_mad->data + 48));
        props->active_width      = out_mad->data[31] & 0xf;
        props->active_speed      = out_mad->data[35] >> 4;
        props->max_mtu           = out_mad->data[41] & 0xf;
        props->active_mtu        = out_mad->data[36] >> 4;
        props->subnet_timeout    = out_mad->data[51] & 0x1f;
        props->max_vl_num        = out_mad->data[37] >> 4;
        props->init_type_reply   = out_mad->data[41] >> 4;

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mthca_modify_device(struct ib_device *ibdev,
                               int mask,
                               struct ib_device_modify *props)
{
        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
                if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                        return -ERESTARTSYS;
                memcpy(ibdev->node_desc, props->node_desc,
                       IB_DEVICE_NODE_DESC_MAX);
                mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        }

        return 0;
}

static int mthca_modify_port(struct ib_device *ibdev,
                             u8 port, int port_modify_mask,
                             struct ib_port_modify *props)
{
        struct mthca_set_ib_param set_ib;
        struct ib_port_attr attr;
        int err;

        if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
                return -ERESTARTSYS;

        err = ib_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        set_ib.set_si_guid     = 0;
        set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR);

        set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
out:
        mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        return err;
}

static int mthca_query_pkey(struct ib_device *ibdev,
                            u8 port, u16 index, u16 *pkey)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

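/*
 * A full GID is assembled from two queries: bytes 0-7 are the subnet
 * prefix taken from PortInfo, and bytes 8-15 are the port GUID from
 * the indexed GUIDInfo block (eight GUIDs per block).
 */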
static int mthca_query_gid(struct ib_device *ibdev, u8 port,
                           int index, union ib_gid *gid)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw, out_mad->data + 8, 8);

        init_query_mad(in_mad);
        in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
                            port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

 out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
                                                struct ib_udata *udata)
{
        struct mthca_alloc_ucontext_resp uresp;
        struct mthca_ucontext           *context;
        int                              err;

        if (!(to_mdev(ibdev)->active))
                return ERR_PTR(-EAGAIN);

        memset(&uresp, 0, sizeof uresp);

        uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
        if (mthca_is_memfree(to_mdev(ibdev)))
                uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
        else
                uresp.uarc_size = 0;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
        if (err) {
                kfree(context);
                return ERR_PTR(err);
        }

        context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
        if (IS_ERR(context->db_tab)) {
                err = PTR_ERR(context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(err);
        }

        if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
                mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
                mthca_uar_free(to_mdev(ibdev), &context->uar);
                kfree(context);
                return ERR_PTR(-EFAULT);
        }

        context->reg_mr_warned = 0;

        return &context->ibucontext;
}

static int mthca_dealloc_ucontext(struct ib_ucontext *context)
{
        mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
                                  to_mucontext(context)->db_tab);
        mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
        kfree(to_mucontext(context));

        return 0;
}

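/*
 * Map the context's UAR (doorbell) page into userspace.  The mapping
 * must be exactly one page long and is made non-cached so doorbell
 * writes are posted straight to the device.
 */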
static int mthca_mmap_uar(struct ib_ucontext *context,
                          struct vm_area_struct *vma)
{
        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (io_remap_pfn_range(vma, vma->vm_start,
                               to_mucontext(context)->uar.pfn,
                               PAGE_SIZE, vma->vm_page_prot))
                return -EAGAIN;

        return 0;
}

static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
                                    struct ib_ucontext *context,
                                    struct ib_udata *udata)
{
        struct mthca_pd *pd;
        int err;

        pd = kmalloc(sizeof *pd, GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context) {
                if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
                        mthca_pd_free(to_mdev(ibdev), pd);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
        }

        return &pd->ibpd;
}

static int mthca_dealloc_pd(struct ib_pd *pd)
{
        mthca_pd_free(to_mdev(pd->device), to_mpd(pd));
        kfree(pd);

        return 0;
}

static struct ib_ah *mthca_ah_create(struct ib_pd *pd,
                                     struct rdma_ah_attr *ah_attr,
                                     u32 flags,
                                     struct ib_udata *udata)
{
        int err;
        struct mthca_ah *ah;

        ah = kmalloc(sizeof *ah, GFP_ATOMIC);
        if (!ah)
                return ERR_PTR(-ENOMEM);

        err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah);
        if (err) {
                kfree(ah);
                return ERR_PTR(err);
        }

        return &ah->ibah;
}

static int mthca_ah_destroy(struct ib_ah *ah, u32 flags)
{
        mthca_destroy_ah(to_mdev(ah->device), to_mah(ah));
        kfree(ah);

        return 0;
}

static struct ib_srq *mthca_create_srq(struct ib_pd *pd,
                                       struct ib_srq_init_attr *init_attr,
                                       struct ib_udata *udata)
{
        struct mthca_create_srq ucmd;
        struct mthca_ucontext *context = NULL;
        struct mthca_srq *srq;
        int err;

        if (init_attr->srq_type != IB_SRQT_BASIC)
                return ERR_PTR(-EOPNOTSUPP);

        srq = kmalloc(sizeof *srq, GFP_KERNEL);
        if (!srq)
                return ERR_PTR(-ENOMEM);

        if (udata) {
                context = to_mucontext(pd->uobject->context);

                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        err = -EFAULT;
                        goto err_free;
                }

                err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                        context->db_tab, ucmd.db_index,
                                        ucmd.db_page);

                if (err)
                        goto err_free;

                srq->mr.ibmr.lkey = ucmd.lkey;
                srq->db_index     = ucmd.db_index;
        }

        err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd),
                              &init_attr->attr, srq, udata);

        if (err && udata)
                mthca_unmap_user_db(to_mdev(pd->device), &context->uar,
                                    context->db_tab, ucmd.db_index);

        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &srq->srqn, sizeof (__u32))) {
                mthca_free_srq(to_mdev(pd->device), srq);
                err = -EFAULT;
                goto err_free;
        }

        return &srq->ibsrq;

err_free:
        kfree(srq);

        return ERR_PTR(err);
}

static int mthca_destroy_srq(struct ib_srq *srq)
{
        struct mthca_ucontext *context;

        if (srq->uobject) {
                context = to_mucontext(srq->uobject->context);

                mthca_unmap_user_db(to_mdev(srq->device), &context->uar,
                                    context->db_tab, to_msrq(srq)->db_index);
        }

        mthca_free_srq(to_mdev(srq->device), to_msrq(srq));
        kfree(srq);

        return 0;
}

static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                                     struct ib_qp_init_attr *init_attr,
                                     struct ib_udata *udata)
{
        struct mthca_create_qp ucmd;
        struct mthca_qp *qp;
        int err;

        if (init_attr->create_flags)
                return ERR_PTR(-EINVAL);

        switch (init_attr->qp_type) {
        case IB_QPT_RC:
        case IB_QPT_UC:
        case IB_QPT_UD:
        {
                struct mthca_ucontext *context;

                qp = kzalloc(sizeof(*qp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                if (udata) {
                        context = to_mucontext(pd->uobject->context);

                        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                                kfree(qp);
                                return ERR_PTR(-EFAULT);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.sq_db_index, ucmd.sq_db_page);
                        if (err) {
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
                                                context->db_tab,
                                                ucmd.rq_db_index, ucmd.rq_db_page);
                        if (err) {
                                mthca_unmap_user_db(to_mdev(pd->device),
                                                    &context->uar,
                                                    context->db_tab,
                                                    ucmd.sq_db_index);
                                kfree(qp);
                                return ERR_PTR(err);
                        }

                        qp->mr.ibmr.lkey = ucmd.lkey;
                        qp->sq.db_index  = ucmd.sq_db_index;
                        qp->rq.db_index  = ucmd.rq_db_index;
                }

                err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
                                     to_mcq(init_attr->send_cq),
                                     to_mcq(init_attr->recv_cq),
                                     init_attr->qp_type, init_attr->sq_sig_type,
                                     &init_attr->cap, qp, udata);

                if (err && udata) {
                        context = to_mucontext(pd->uobject->context);

                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.sq_db_index);
                        mthca_unmap_user_db(to_mdev(pd->device),
                                            &context->uar,
                                            context->db_tab,
                                            ucmd.rq_db_index);
                }

                qp->ibqp.qp_num = qp->qpn;
                break;
        }
        case IB_QPT_SMI:
        case IB_QPT_GSI:
        {
                /* Don't allow userspace to create special QPs */
                if (udata)
                        return ERR_PTR(-EINVAL);

                qp = kzalloc(sizeof(struct mthca_sqp), GFP_KERNEL);
                if (!qp)
                        return ERR_PTR(-ENOMEM);

                qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;

                err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
                                      to_mcq(init_attr->send_cq),
                                      to_mcq(init_attr->recv_cq),
                                      init_attr->sq_sig_type, &init_attr->cap,
                                      qp->ibqp.qp_num, init_attr->port_num,
                                      to_msqp(qp), udata);
                break;
        }
        default:
                /* Don't support raw QPs */
                return ERR_PTR(-ENOSYS);
        }

        if (err) {
                kfree(qp);
                return ERR_PTR(err);
        }

        init_attr->cap.max_send_wr     = qp->sq.max;
        init_attr->cap.max_recv_wr     = qp->rq.max;
        init_attr->cap.max_send_sge    = qp->sq.max_gs;
        init_attr->cap.max_recv_sge    = qp->rq.max_gs;
        init_attr->cap.max_inline_data = qp->max_inline_data;

        return &qp->ibqp;
}

static int mthca_destroy_qp(struct ib_qp *qp)
{
        if (qp->uobject) {
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->sq.db_index);
                mthca_unmap_user_db(to_mdev(qp->device),
                                    &to_mucontext(qp->uobject->context)->uar,
                                    to_mucontext(qp->uobject->context)->db_tab,
                                    to_mqp(qp)->rq.db_index);
        }
        mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
        kfree(qp);
        return 0;
}

static struct ib_cq *mthca_create_cq(struct ib_device *ibdev,
                                     const struct ib_cq_init_attr *attr,
                                     struct ib_ucontext *context,
                                     struct ib_udata *udata)
{
        int entries = attr->cqe;
        struct mthca_create_cq ucmd;
        struct mthca_cq *cq;
        int nent;
        int err;

        if (attr->flags)
                return ERR_PTR(-EINVAL);

        if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes)
                return ERR_PTR(-EINVAL);

        if (context) {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                        return ERR_PTR(-EFAULT);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.set_db_index, ucmd.set_db_page);
                if (err)
                        return ERR_PTR(err);

                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                        to_mucontext(context)->db_tab,
                                        ucmd.arm_db_index, ucmd.arm_db_page);
                if (err)
                        goto err_unmap_set;
        }

        cq = kmalloc(sizeof *cq, GFP_KERNEL);
        if (!cq) {
                err = -ENOMEM;
                goto err_unmap_arm;
        }

        if (context) {
                cq->buf.mr.ibmr.lkey = ucmd.lkey;
                cq->set_ci_db_index  = ucmd.set_db_index;
                cq->arm_db_index     = ucmd.arm_db_index;
        }

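        /*
         * Round the requested depth up to the next power of two; the
         * loop leaves nent strictly greater than entries, so one slot
         * stays spare and the usable depth is reported as nent - 1.
         */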
        for (nent = 1; nent <= entries; nent <<= 1)
                ; /* nothing */

        err = mthca_init_cq(to_mdev(ibdev), nent,
                            context ? to_mucontext(context) : NULL,
                            context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
                            cq);
        if (err)
                goto err_free;

        if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
                mthca_free_cq(to_mdev(ibdev), cq);
                err = -EFAULT;
                goto err_free;
        }

        cq->resize_buf = NULL;

        return &cq->ibcq;

err_free:
        kfree(cq);

err_unmap_arm:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.arm_db_index);

err_unmap_set:
        if (context)
                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
                                    to_mucontext(context)->db_tab, ucmd.set_db_index);

        return ERR_PTR(err);
}

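/*
 * Stage a resize buffer for a kernel CQ: the slot is claimed under the
 * CQ lock (hence GFP_ATOMIC), the buffer itself is allocated outside
 * the lock, and state moves from CQ_RESIZE_ALLOC to CQ_RESIZE_READY
 * only once the buffer is usable.
 */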
static int mthca_alloc_resize_buf(struct mthca_dev *dev, struct mthca_cq *cq,
                                  int entries)
{
        int ret;

        spin_lock_irq(&cq->lock);
        if (cq->resize_buf) {
                ret = -EBUSY;
                goto unlock;
        }

        cq->resize_buf = kmalloc(sizeof *cq->resize_buf, GFP_ATOMIC);
        if (!cq->resize_buf) {
                ret = -ENOMEM;
                goto unlock;
        }

        cq->resize_buf->state = CQ_RESIZE_ALLOC;

        ret = 0;

unlock:
        spin_unlock_irq(&cq->lock);

        if (ret)
                return ret;

        ret = mthca_alloc_cq_buf(dev, &cq->resize_buf->buf, entries);
        if (ret) {
                spin_lock_irq(&cq->lock);
                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                spin_unlock_irq(&cq->lock);
                return ret;
        }

        cq->resize_buf->cqe = entries - 1;

        spin_lock_irq(&cq->lock);
        cq->resize_buf->state = CQ_RESIZE_READY;
        spin_unlock_irq(&cq->lock);

        return 0;
}

static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(ibcq->device);
        struct mthca_cq *cq = to_mcq(ibcq);
        struct mthca_resize_cq ucmd;
        u32 lkey;
        int ret;

        if (entries < 1 || entries > dev->limits.max_cqes)
                return -EINVAL;

        mutex_lock(&cq->mutex);

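        /*
         * As at creation time, one entry beyond the requested depth is
         * reserved, so size up by one and round to a power of two; the
         * CQ reports a usable depth of entries - 1.
         */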
        entries = roundup_pow_of_two(entries + 1);
        if (entries == ibcq->cqe + 1) {
                ret = 0;
                goto out;
        }

        if (cq->is_kernel) {
                ret = mthca_alloc_resize_buf(dev, cq, entries);
                if (ret)
                        goto out;
                lkey = cq->resize_buf->buf.mr.ibmr.lkey;
        } else {
                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) {
                        ret = -EFAULT;
                        goto out;
                }
                lkey = ucmd.lkey;
        }

        ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));

        if (ret) {
                if (cq->resize_buf) {
                        mthca_free_cq_buf(dev, &cq->resize_buf->buf,
                                          cq->resize_buf->cqe);
                        kfree(cq->resize_buf);
                        spin_lock_irq(&cq->lock);
                        cq->resize_buf = NULL;
                        spin_unlock_irq(&cq->lock);
                }
                goto out;
        }

        if (cq->is_kernel) {
                struct mthca_cq_buf tbuf;
                int tcqe;

                spin_lock_irq(&cq->lock);
                if (cq->resize_buf->state == CQ_RESIZE_READY) {
                        mthca_cq_resize_copy_cqes(cq);
                        tbuf         = cq->buf;
                        tcqe         = cq->ibcq.cqe;
                        cq->buf      = cq->resize_buf->buf;
                        cq->ibcq.cqe = cq->resize_buf->cqe;
                } else {
                        tbuf = cq->resize_buf->buf;
                        tcqe = cq->resize_buf->cqe;
                }

                kfree(cq->resize_buf);
                cq->resize_buf = NULL;
                spin_unlock_irq(&cq->lock);

                mthca_free_cq_buf(dev, &tbuf, tcqe);
        } else
                ibcq->cqe = entries - 1;

out:
        mutex_unlock(&cq->mutex);

        return ret;
}

static int mthca_destroy_cq(struct ib_cq *cq)
{
        if (cq->uobject) {
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->arm_db_index);
                mthca_unmap_user_db(to_mdev(cq->device),
                                    &to_mucontext(cq->uobject->context)->uar,
                                    to_mucontext(cq->uobject->context)->db_tab,
                                    to_mcq(cq)->set_ci_db_index);
        }
        mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
        kfree(cq);

        return 0;
}

static inline u32 convert_access(int acc)
{
        return (acc & IB_ACCESS_REMOTE_ATOMIC ? MTHCA_MPT_FLAG_ATOMIC       : 0) |
               (acc & IB_ACCESS_REMOTE_WRITE  ? MTHCA_MPT_FLAG_REMOTE_WRITE : 0) |
               (acc & IB_ACCESS_REMOTE_READ   ? MTHCA_MPT_FLAG_REMOTE_READ  : 0) |
               (acc & IB_ACCESS_LOCAL_WRITE   ? MTHCA_MPT_FLAG_LOCAL_WRITE  : 0) |
               MTHCA_MPT_FLAG_LOCAL_READ;
}

static struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mthca_mr *mr;
        int err;

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mthca_mr_alloc_notrans(to_mdev(pd->device),
                                     to_mpd(pd)->pd_num,
                                     convert_access(acc), mr);

        if (err) {
                kfree(mr);
                return ERR_PTR(err);
        }

        mr->umem = NULL;

        return &mr->ibmr;
}

static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                       u64 virt, int acc, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(pd->device);
        struct scatterlist *sg;
        struct mthca_mr *mr;
        struct mthca_reg_mr ucmd;
        u64 *pages;
        int shift, n, len;
        int i, k, entry;
        int err = 0;
        int write_mtt_size;

        if (udata->inlen < sizeof ucmd) {
                if (!to_mucontext(pd->uobject->context)->reg_mr_warned) {
                        mthca_warn(dev, "Process '%s' did not pass in MR attrs.\n",
                                   current->comm);
                        mthca_warn(dev, "  Update libmthca to fix this.\n");
                }
                ++to_mucontext(pd->uobject->context)->reg_mr_warned;
                ucmd.mr_attrs = 0;
        } else if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
                return ERR_PTR(-EFAULT);

        mr = kmalloc(sizeof *mr, GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,
                               ucmd.mr_attrs & MTHCA_MR_DMASYNC);

        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err;
        }

        shift = mr->umem->page_shift;
        n = mr->umem->nmap;

        mr->mtt = mthca_alloc_mtt(dev, n);
        if (IS_ERR(mr->mtt)) {
                err = PTR_ERR(mr->mtt);
                goto err_umem;
        }

        pages = (u64 *) __get_free_page(GFP_KERNEL);
        if (!pages) {
                err = -ENOMEM;
                goto err_mtt;
        }

        i = n = 0;

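        /*
         * Cap each mthca_write_mtt() call at the smaller of what the
         * firmware command will accept in one chunk and what fits in
         * the scratch page allocated above.
         */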
        write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));

        for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {
                len = sg_dma_len(sg) >> shift;
                for (k = 0; k < len; ++k) {
                        pages[i++] = sg_dma_address(sg) + (k << shift);
                        /*
                         * Be friendly to write_mtt and pass it chunks
                         * of appropriate size.
                         */
                        if (i == write_mtt_size) {
                                err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
                                if (err)
                                        goto mtt_done;
                                n += i;
                                i = 0;
                        }
                }
        }

        if (i)
                err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
mtt_done:
        free_page((unsigned long) pages);
        if (err)
                goto err_mtt;

        err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, virt, length,
                             convert_access(acc), mr);

        if (err)
                goto err_mtt;

        return &mr->ibmr;

err_mtt:
        mthca_free_mtt(dev, mr->mtt);

err_umem:
        ib_umem_release(mr->umem);

err:
        kfree(mr);
        return ERR_PTR(err);
}

static int mthca_dereg_mr(struct ib_mr *mr)
{
        struct mthca_mr *mmr = to_mmr(mr);

        mthca_free_mr(to_mdev(mr->device), mmr);
        if (mmr->umem)
                ib_umem_release(mmr->umem);
        kfree(mmr);

        return 0;
}

static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
                                      struct ib_fmr_attr *fmr_attr)
{
        struct mthca_fmr *fmr;
        int err;

        fmr = kmalloc(sizeof *fmr, GFP_KERNEL);
        if (!fmr)
                return ERR_PTR(-ENOMEM);

        memcpy(&fmr->attr, fmr_attr, sizeof *fmr_attr);
        err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
                             convert_access(mr_access_flags), fmr);

        if (err) {
                kfree(fmr);
                return ERR_PTR(err);
        }

        return &fmr->ibmr;
}

static int mthca_dealloc_fmr(struct ib_fmr *fmr)
{
        struct mthca_fmr *mfmr = to_mfmr(fmr);
        int err;

        err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
        if (err)
                return err;

        kfree(mfmr);
        return 0;
}

static int mthca_unmap_fmr(struct list_head *fmr_list)
{
        struct ib_fmr *fmr;
        int err;
        struct mthca_dev *mdev = NULL;

        list_for_each_entry(fmr, fmr_list, list) {
                if (mdev && to_mdev(fmr->device) != mdev)
                        return -EINVAL;
                mdev = to_mdev(fmr->device);
        }

        if (!mdev)
                return 0;

        if (mthca_is_memfree(mdev)) {
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));

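                /* Order the unmap writes before the SYNC_TPT command below. */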
                wmb();
        } else
                list_for_each_entry(fmr, fmr_list, list)
                        mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));

        err = mthca_SYNC_TPT(mdev);
        return err;
}

static ssize_t hw_rev_show(struct device *device,
                           struct device_attribute *attr, char *buf)
{
        struct mthca_dev *dev =
                container_of(device, struct mthca_dev, ib_dev.dev);
        return sprintf(buf, "%x\n", dev->rev_id);
}
static DEVICE_ATTR_RO(hw_rev);

static ssize_t hca_type_show(struct device *device,
                             struct device_attribute *attr, char *buf)
{
        struct mthca_dev *dev =
                container_of(device, struct mthca_dev, ib_dev.dev);
        switch (dev->pdev->device) {
        case PCI_DEVICE_ID_MELLANOX_TAVOR:
                return sprintf(buf, "MT23108\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT:
                return sprintf(buf, "MT25208 (MT23108 compat mode)\n");
        case PCI_DEVICE_ID_MELLANOX_ARBEL:
                return sprintf(buf, "MT25208\n");
        case PCI_DEVICE_ID_MELLANOX_SINAI:
        case PCI_DEVICE_ID_MELLANOX_SINAI_OLD:
                return sprintf(buf, "MT25204\n");
        default:
                return sprintf(buf, "unknown\n");
        }
}
static DEVICE_ATTR_RO(hca_type);

static ssize_t board_id_show(struct device *device,
                             struct device_attribute *attr, char *buf)
{
        struct mthca_dev *dev =
                container_of(device, struct mthca_dev, ib_dev.dev);
        return sprintf(buf, "%.*s\n", MTHCA_BOARD_ID_LEN, dev->board_id);
}
static DEVICE_ATTR_RO(board_id);

static struct attribute *mthca_dev_attributes[] = {
        &dev_attr_hw_rev.attr,
        &dev_attr_hca_type.attr,
        &dev_attr_board_id.attr,
        NULL
};

static const struct attribute_group mthca_attr_group = {
        .attrs = mthca_dev_attributes,
};

static int mthca_init_node_data(struct mthca_dev *dev)
{
        struct ib_smp *in_mad  = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX);

        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mthca_MAD_IFC(dev, 1, 1,
                            1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        if (mthca_is_memfree(dev))
                dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num,
                                struct ib_port_immutable *immutable)
{
        struct ib_port_attr attr;
        int err;

        immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;

        err = ib_query_port(ibdev, port_num, &attr);
        if (err)
                return err;

        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->max_mad_size = IB_MGMT_MAD_SIZE;

        return 0;
}

static void get_dev_fw_str(struct ib_device *device, char *str)
{
        struct mthca_dev *dev =
                container_of(device, struct mthca_dev, ib_dev);
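        /* fw_ver packs major:minor:subminor into bits 63:32, 31:16 and 15:0. */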
        snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
                 (int) (dev->fw_ver >> 32),
                 (int) (dev->fw_ver >> 16) & 0xffff,
                 (int) dev->fw_ver & 0xffff);
}

static const struct ib_device_ops mthca_dev_ops = {
        .alloc_pd = mthca_alloc_pd,
        .alloc_ucontext = mthca_alloc_ucontext,
        .attach_mcast = mthca_multicast_attach,
        .create_ah = mthca_ah_create,
        .create_cq = mthca_create_cq,
        .create_qp = mthca_create_qp,
        .dealloc_pd = mthca_dealloc_pd,
        .dealloc_ucontext = mthca_dealloc_ucontext,
        .dereg_mr = mthca_dereg_mr,
        .destroy_ah = mthca_ah_destroy,
        .destroy_cq = mthca_destroy_cq,
        .destroy_qp = mthca_destroy_qp,
        .detach_mcast = mthca_multicast_detach,
        .get_dev_fw_str = get_dev_fw_str,
        .get_dma_mr = mthca_get_dma_mr,
        .get_port_immutable = mthca_port_immutable,
        .mmap = mthca_mmap_uar,
        .modify_device = mthca_modify_device,
        .modify_port = mthca_modify_port,
        .modify_qp = mthca_modify_qp,
        .poll_cq = mthca_poll_cq,
        .process_mad = mthca_process_mad,
        .query_ah = mthca_ah_query,
        .query_device = mthca_query_device,
        .query_gid = mthca_query_gid,
        .query_pkey = mthca_query_pkey,
        .query_port = mthca_query_port,
        .query_qp = mthca_query_qp,
        .reg_user_mr = mthca_reg_user_mr,
        .resize_cq = mthca_resize_cq,
};

static const struct ib_device_ops mthca_dev_arbel_srq_ops = {
        .create_srq = mthca_create_srq,
        .destroy_srq = mthca_destroy_srq,
        .modify_srq = mthca_modify_srq,
        .post_srq_recv = mthca_arbel_post_srq_recv,
        .query_srq = mthca_query_srq,
};

static const struct ib_device_ops mthca_dev_tavor_srq_ops = {
        .create_srq = mthca_create_srq,
        .destroy_srq = mthca_destroy_srq,
        .modify_srq = mthca_modify_srq,
        .post_srq_recv = mthca_tavor_post_srq_recv,
        .query_srq = mthca_query_srq,
};

static const struct ib_device_ops mthca_dev_arbel_fmr_ops = {
        .alloc_fmr = mthca_alloc_fmr,
        .dealloc_fmr = mthca_dealloc_fmr,
        .map_phys_fmr = mthca_arbel_map_phys_fmr,
        .unmap_fmr = mthca_unmap_fmr,
};

static const struct ib_device_ops mthca_dev_tavor_fmr_ops = {
        .alloc_fmr = mthca_alloc_fmr,
        .dealloc_fmr = mthca_dealloc_fmr,
        .map_phys_fmr = mthca_tavor_map_phys_fmr,
        .unmap_fmr = mthca_unmap_fmr,
};

static const struct ib_device_ops mthca_dev_arbel_ops = {
        .post_recv = mthca_arbel_post_receive,
        .post_send = mthca_arbel_post_send,
        .req_notify_cq = mthca_arbel_arm_cq,
};

static const struct ib_device_ops mthca_dev_tavor_ops = {
        .post_recv = mthca_tavor_post_receive,
        .post_send = mthca_tavor_post_send,
        .req_notify_cq = mthca_tavor_arm_cq,
};

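/*
 * Device registration composes the ops tables above: the common table
 * plus the SRQ, FMR, and fast-path variants matching the hardware,
 * Arbel-family (mem-free) HCAs versus Tavor-family HCAs.
 */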
int mthca_register_device(struct mthca_dev *dev)
{
        int ret;

        ret = mthca_init_node_data(dev);
        if (ret)
                return ret;

        dev->ib_dev.owner                = THIS_MODULE;

        dev->ib_dev.uverbs_abi_ver       = MTHCA_UVERBS_ABI_VERSION;
        dev->ib_dev.uverbs_cmd_mask      =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT)         |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)        |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT)          |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD)            |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD)          |
                (1ull << IB_USER_VERBS_CMD_REG_MR)              |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR)            |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_RESIZE_CQ)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ)          |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP)           |
                (1ull << IB_USER_VERBS_CMD_QUERY_QP)            |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP)           |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP)          |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)        |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST);
        dev->ib_dev.node_type            = RDMA_NODE_IB_CA;
        dev->ib_dev.phys_port_cnt        = dev->limits.num_ports;
        dev->ib_dev.num_comp_vectors     = 1;
        dev->ib_dev.dev.parent           = &dev->pdev->dev;

        if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
                dev->ib_dev.uverbs_cmd_mask     |=
                        (1ull << IB_USER_VERBS_CMD_CREATE_SRQ)          |
                        (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)          |
                        (1ull << IB_USER_VERBS_CMD_QUERY_SRQ)           |
                        (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

                if (mthca_is_memfree(dev))
                        ib_set_device_ops(&dev->ib_dev,
                                          &mthca_dev_arbel_srq_ops);
                else
                        ib_set_device_ops(&dev->ib_dev,
                                          &mthca_dev_tavor_srq_ops);
        }

        if (dev->mthca_flags & MTHCA_FLAG_FMR) {
                if (mthca_is_memfree(dev))
                        ib_set_device_ops(&dev->ib_dev,
                                          &mthca_dev_arbel_fmr_ops);
                else
                        ib_set_device_ops(&dev->ib_dev,
                                          &mthca_dev_tavor_fmr_ops);
        }

        ib_set_device_ops(&dev->ib_dev, &mthca_dev_ops);

        if (mthca_is_memfree(dev))
                ib_set_device_ops(&dev->ib_dev, &mthca_dev_arbel_ops);
        else
                ib_set_device_ops(&dev->ib_dev, &mthca_dev_tavor_ops);

        mutex_init(&dev->cap_mask_mutex);

        rdma_set_device_sysfs_group(&dev->ib_dev, &mthca_attr_group);
        dev->ib_dev.driver_id = RDMA_DRIVER_MTHCA;
        ret = ib_register_device(&dev->ib_dev, "mthca%d", NULL);
        if (ret)
                return ret;

        mthca_start_catas_poll(dev);

        return 0;
}

void mthca_unregister_device(struct mthca_dev *dev)
{
        mthca_stop_catas_poll(dev);
        ib_unregister_device(&dev->ib_dev);
}