/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 PathScale, Inc.  All rights reserved.
 * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>

#include <asm/uaccess.h>

#include "uverbs.h"
#include "core_priv.h"

struct uverbs_lock_class {
	struct lock_class_key	key;
	char			name[16];
};

static struct uverbs_lock_class pd_lock_class	= { .name = "PD-uobj" };
static struct uverbs_lock_class mr_lock_class	= { .name = "MR-uobj" };
static struct uverbs_lock_class mw_lock_class	= { .name = "MW-uobj" };
static struct uverbs_lock_class cq_lock_class	= { .name = "CQ-uobj" };
static struct uverbs_lock_class qp_lock_class	= { .name = "QP-uobj" };
static struct uverbs_lock_class ah_lock_class	= { .name = "AH-uobj" };
static struct uverbs_lock_class srq_lock_class	= { .name = "SRQ-uobj" };
static struct uverbs_lock_class xrcd_lock_class = { .name = "XRCD-uobj" };
static struct uverbs_lock_class rule_lock_class = { .name = "RULE-uobj" };

/*
 * The ib_uobject locking scheme is as follows:
 *
 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
 *   needs to be held during all idr operations.  When an object is
 *   looked up, a reference must be taken on the object's kref before
 *   dropping this lock.
 *
 * - Each object also has an rwsem.  This rwsem must be held for
 *   reading while an operation that uses the object is performed.
 *   For example, while registering an MR, the associated PD's
 *   uobject.mutex must be held for reading.  The rwsem must be held
 *   for writing while initializing or destroying an object.
 *
 * - In addition, each object has a "live" flag.  If this flag is not
 *   set, then lookups of the object will fail even if it is found in
 *   the idr.  This handles a reader that blocks and does not acquire
 *   the rwsem until after the object is destroyed.  The destroy
 *   operation will set the live flag to 0 and then drop the rwsem;
 *   this will allow the reader to acquire the rwsem, see that the
 *   live flag is 0, and then drop the rwsem and its reference to the
 *   object.  The underlying storage will not be freed until the last
 *   reference to the object is dropped.
 */

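/*
 * Editorial sketch (not part of the original file): the interleaving
 * the scheme above is designed to handle, with a destroyer and a late
 * reader racing on the same hypothetical uobject "obj":
 *
 *	destroyer				late reader
 *	---------				-----------
 *	down_write(&obj->mutex);		obj = idr_find(idr, id);
 *	...destroy hardware object...		kref_get(&obj->ref);
 *	obj->live = 0;				down_read(&obj->mutex);  <- blocks
 *	up_write(&obj->mutex);			...wakes up...
 *	idr_remove(idr, obj->id);		if (!obj->live)          <- true
 *	put_uobj(obj);				        bail out;
 *
 * The kfree() in release_uobj() only runs once both sides have dropped
 * their krefs, so the late reader never touches freed memory.
 */
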
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
		      struct ib_ucontext *context, struct uverbs_lock_class *c)
{
	uobj->user_handle = user_handle;
	uobj->context     = context;
	kref_init(&uobj->ref);
	init_rwsem(&uobj->mutex);
	lockdep_set_class_and_name(&uobj->mutex, &c->key, c->name);
	uobj->live        = 0;
}

static void release_uobj(struct kref *kref)
{
	kfree(container_of(kref, struct ib_uobject, ref));
}

static void put_uobj(struct ib_uobject *uobj)
{
	kref_put(&uobj->ref, release_uobj);
}

static void put_uobj_read(struct ib_uobject *uobj)
{
	up_read(&uobj->mutex);
	put_uobj(uobj);
}

static void put_uobj_write(struct ib_uobject *uobj)
{
	up_write(&uobj->mutex);
	put_uobj(uobj);
}

static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(&ib_uverbs_idr_lock);

	ret = idr_alloc(idr, uobj, 0, 0, GFP_NOWAIT);
	if (ret >= 0)
		uobj->id = ret;

	spin_unlock(&ib_uverbs_idr_lock);
	idr_preload_end();

	return ret < 0 ? ret : 0;
}

void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
{
	spin_lock(&ib_uverbs_idr_lock);
	idr_remove(idr, uobj->id);
	spin_unlock(&ib_uverbs_idr_lock);
}

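/*
 * Editorial note with a usage sketch (not in the original file): the
 * idr entry above is allocated under a spinlock, where sleeping
 * GFP_KERNEL allocations are not allowed.  idr_preload(GFP_KERNEL)
 * fills a per-cpu cache while sleeping is still legal, and the
 * GFP_NOWAIT idr_alloc() then draws from that cache.  A typical caller
 * pairs the helpers like this (idr choice is illustrative):
 *
 *	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);	// uobj->id assigned
 *	if (ret)
 *		goto err_free;
 *	...
 *	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);	// on teardown
 */
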
static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	spin_lock(&ib_uverbs_idr_lock);
	uobj = idr_find(idr, id);
	if (uobj) {
		if (uobj->context == context)
			kref_get(&uobj->ref);
		else
			uobj = NULL;
	}
	spin_unlock(&ib_uverbs_idr_lock);

	return uobj;
}

static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
					struct ib_ucontext *context, int nested)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	if (nested)
		down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
	else
		down_read(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_read(uobj);
		return NULL;
	}

	return uobj;
}

static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
					 struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = __idr_get_uobj(idr, id, context);
	if (!uobj)
		return NULL;

	down_write(&uobj->mutex);
	if (!uobj->live) {
		put_uobj_write(uobj);
		return NULL;
	}

	return uobj;
}

static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
			  int nested)
{
	struct ib_uobject *uobj;

	uobj = idr_read_uobj(idr, id, context, nested);
	return uobj ? uobj->object : NULL;
}

static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
}

static void put_pd_read(struct ib_pd *pd)
{
	put_uobj_read(pd->uobject);
}

static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
{
	return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
}

static void put_cq_read(struct ib_cq *cq)
{
	put_uobj_read(cq->uobject);
}

static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
}

static void put_ah_read(struct ib_ah *ah)
{
	put_uobj_read(ah->uobject);
}

static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}

static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
{
	struct ib_uobject *uobj;

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
	return uobj ? uobj->object : NULL;
}

static void put_qp_read(struct ib_qp *qp)
{
	put_uobj_read(qp->uobject);
}

static void put_qp_write(struct ib_qp *qp)
{
	put_uobj_write(qp->uobject);
}

static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
	return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
}

static void put_srq_read(struct ib_srq *srq)
{
	put_uobj_read(srq->uobject);
}

static struct ib_xrcd *idr_read_xrcd(int xrcd_handle, struct ib_ucontext *context,
				     struct ib_uobject **uobj)
{
	*uobj = idr_read_uobj(&ib_uverbs_xrcd_idr, xrcd_handle, context, 0);
	return *uobj ? (*uobj)->object : NULL;
}

static void put_xrcd_read(struct ib_uobject *uobj)
{
	put_uobj_read(uobj);
}

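/*
 * Editorial sketch (not in the original source): how a command handler
 * typically pins an object with the read-side helpers above.  The
 * function below is illustrative only and unused; "pd_handle" stands
 * for whatever handle userspace passed in its command structure.
 */
static int __maybe_unused example_use_pd(struct ib_ucontext *context,
					 int pd_handle)
{
	struct ib_pd *pd;

	pd = idr_read_pd(pd_handle, context);	/* takes kref + read lock */
	if (!pd)
		return -EINVAL;

	/* ... use the PD; it cannot be destroyed while we hold it ... */

	put_pd_read(pd);			/* drops read lock + kref */
	return 0;
}
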
ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
			      const char __user *buf,
			      int in_len, int out_len)
{
	struct ib_uverbs_get_context      cmd;
	struct ib_uverbs_get_context_resp resp;
	struct ib_udata                   udata;
	struct ib_device                 *ibdev = file->device->ib_dev;
	struct ib_ucontext               *ucontext;
	struct file                      *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->mutex);

	if (file->ucontext) {
		ret = -EINVAL;
		goto err;
	}

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ucontext = ibdev->alloc_ucontext(ibdev, &udata);
	if (IS_ERR(ucontext)) {
		ret = PTR_ERR(ucontext);
		goto err;
	}

	ucontext->device = ibdev;
	INIT_LIST_HEAD(&ucontext->pd_list);
	INIT_LIST_HEAD(&ucontext->mr_list);
	INIT_LIST_HEAD(&ucontext->mw_list);
	INIT_LIST_HEAD(&ucontext->cq_list);
	INIT_LIST_HEAD(&ucontext->qp_list);
	INIT_LIST_HEAD(&ucontext->srq_list);
	INIT_LIST_HEAD(&ucontext->ah_list);
	INIT_LIST_HEAD(&ucontext->xrcd_list);
	INIT_LIST_HEAD(&ucontext->rule_list);
	ucontext->closing = 0;

	resp.num_comp_vectors = file->device->num_comp_vectors;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		goto err_free;
	resp.async_fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 1);
	if (IS_ERR(filp)) {
		ret = PTR_ERR(filp);
		goto err_fd;
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_file;
	}

	file->async_file = filp->private_data;

	INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
			      ib_uverbs_event_handler);
	ret = ib_register_event_handler(&file->event_handler);
	if (ret)
		goto err_file;

	kref_get(&file->async_file->ref);
	kref_get(&file->ref);
	file->ucontext = ucontext;

	fd_install(resp.async_fd, filp);

	mutex_unlock(&file->mutex);

	return in_len;

err_file:
	fput(filp);

err_fd:
	put_unused_fd(resp.async_fd);

err_free:
	ibdev->dealloc_ucontext(ucontext);

err:
	mutex_unlock(&file->mutex);
	return ret;
}

ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
			       const char __user *buf,
			       int in_len, int out_len)
{
	struct ib_uverbs_query_device      cmd;
	struct ib_uverbs_query_device_resp resp;
	struct ib_device_attr              attr;
	int                                ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_device(file->device->ib_dev, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.fw_ver                    = attr.fw_ver;
	resp.node_guid                 = file->device->ib_dev->node_guid;
	resp.sys_image_guid            = attr.sys_image_guid;
	resp.max_mr_size               = attr.max_mr_size;
	resp.page_size_cap             = attr.page_size_cap;
	resp.vendor_id                 = attr.vendor_id;
	resp.vendor_part_id            = attr.vendor_part_id;
	resp.hw_ver                    = attr.hw_ver;
	resp.max_qp                    = attr.max_qp;
	resp.max_qp_wr                 = attr.max_qp_wr;
	resp.device_cap_flags          = attr.device_cap_flags;
	resp.max_sge                   = attr.max_sge;
	resp.max_sge_rd                = attr.max_sge_rd;
	resp.max_cq                    = attr.max_cq;
	resp.max_cqe                   = attr.max_cqe;
	resp.max_mr                    = attr.max_mr;
	resp.max_pd                    = attr.max_pd;
	resp.max_qp_rd_atom            = attr.max_qp_rd_atom;
	resp.max_ee_rd_atom            = attr.max_ee_rd_atom;
	resp.max_res_rd_atom           = attr.max_res_rd_atom;
	resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
	resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
	resp.atomic_cap                = attr.atomic_cap;
	resp.max_ee                    = attr.max_ee;
	resp.max_rdd                   = attr.max_rdd;
	resp.max_mw                    = attr.max_mw;
	resp.max_raw_ipv6_qp           = attr.max_raw_ipv6_qp;
	resp.max_raw_ethy_qp           = attr.max_raw_ethy_qp;
	resp.max_mcast_grp             = attr.max_mcast_grp;
	resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
	resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
	resp.max_ah                    = attr.max_ah;
	resp.max_fmr                   = attr.max_fmr;
	resp.max_map_per_fmr           = attr.max_map_per_fmr;
	resp.max_srq                   = attr.max_srq;
	resp.max_srq_wr                = attr.max_srq_wr;
	resp.max_srq_sge               = attr.max_srq_sge;
	resp.max_pkeys                 = attr.max_pkeys;
	resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
	resp.phys_port_cnt             = file->device->ib_dev->phys_port_cnt;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_query_port      cmd;
	struct ib_uverbs_query_port_resp resp;
	struct ib_port_attr              attr;
	int                              ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.state           = attr.state;
	resp.max_mtu         = attr.max_mtu;
	resp.active_mtu      = attr.active_mtu;
	resp.gid_tbl_len     = attr.gid_tbl_len;
	resp.port_cap_flags  = attr.port_cap_flags;
	resp.max_msg_sz      = attr.max_msg_sz;
	resp.bad_pkey_cntr   = attr.bad_pkey_cntr;
	resp.qkey_viol_cntr  = attr.qkey_viol_cntr;
	resp.pkey_tbl_len    = attr.pkey_tbl_len;
	resp.lid             = attr.lid;
	resp.sm_lid          = attr.sm_lid;
	resp.lmc             = attr.lmc;
	resp.max_vl_num      = attr.max_vl_num;
	resp.sm_sl           = attr.sm_sl;
	resp.subnet_timeout  = attr.subnet_timeout;
	resp.init_type_reply = attr.init_type_reply;
	resp.active_width    = attr.active_width;
	resp.active_speed    = attr.active_speed;
	resp.phys_state      = attr.phys_state;
	resp.link_layer      = rdma_port_get_link_layer(file->device->ib_dev,
							cmd.port_num);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
			   const char __user *buf,
			   int in_len, int out_len)
{
	struct ib_uverbs_alloc_pd      cmd;
	struct ib_uverbs_alloc_pd_resp resp;
	struct ib_udata                udata;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	int                            ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &pd_lock_class);
	down_write(&uobj->mutex);

	pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
					    file->ucontext, &udata);
	if (IS_ERR(pd)) {
		ret = PTR_ERR(pd);
		goto err;
	}

	pd->device  = file->device->ib_dev;
	pd->uobject = uobj;
	atomic_set(&pd->usecnt, 0);

	uobj->object = pd;
	ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.pd_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->pd_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

err_idr:
	ib_dealloc_pd(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
			     const char __user *buf,
			     int in_len, int out_len)
{
	struct ib_uverbs_dealloc_pd cmd;
	struct ib_uobject          *uobj;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	ret = ib_dealloc_pd(uobj->object);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_pd_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

struct xrcd_table_entry {
	struct rb_node  node;
	struct ib_xrcd *xrcd;
	struct inode   *inode;
};

static int xrcd_table_insert(struct ib_uverbs_device *dev,
			     struct inode *inode,
			     struct ib_xrcd *xrcd)
{
	struct xrcd_table_entry *entry, *scan;
	struct rb_node **p = &dev->xrcd_tree.rb_node;
	struct rb_node *parent = NULL;

	entry = kmalloc(sizeof *entry, GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->xrcd  = xrcd;
	entry->inode = inode;

	while (*p) {
		parent = *p;
		scan = rb_entry(parent, struct xrcd_table_entry, node);

		if (inode < scan->inode) {
			p = &(*p)->rb_left;
		} else if (inode > scan->inode) {
			p = &(*p)->rb_right;
		} else {
			kfree(entry);
			return -EEXIST;
		}
	}

	rb_link_node(&entry->node, parent, p);
	rb_insert_color(&entry->node, &dev->xrcd_tree);
	igrab(inode);
	return 0;
}

static struct xrcd_table_entry *xrcd_table_search(struct ib_uverbs_device *dev,
						  struct inode *inode)
{
	struct xrcd_table_entry *entry;
	struct rb_node *p = dev->xrcd_tree.rb_node;

	while (p) {
		entry = rb_entry(p, struct xrcd_table_entry, node);

		if (inode < entry->inode)
			p = p->rb_left;
		else if (inode > entry->inode)
			p = p->rb_right;
		else
			return entry;
	}

	return NULL;
}

static struct ib_xrcd *find_xrcd(struct ib_uverbs_device *dev, struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (!entry)
		return NULL;

	return entry->xrcd;
}

static void xrcd_table_delete(struct ib_uverbs_device *dev,
			      struct inode *inode)
{
	struct xrcd_table_entry *entry;

	entry = xrcd_table_search(dev, inode);
	if (entry) {
		iput(inode);
		rb_erase(&entry->node, &dev->xrcd_tree);
		kfree(entry);
	}
}

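/*
 * Editorial note with a sketch (not in the original file): the rbtree
 * above is keyed by inode so that two processes opening an XRC domain
 * through the same file end up sharing one ib_xrcd:
 *
 *	fd A (process 1) --\
 *	                    +--> inode X --> xrcd_tree --> ib_xrcd
 *	fd B (process 2) --/
 *
 * In ib_uverbs_open_xrcd() below, find_xrcd() returning non-NULL is the
 * "already open" path; a NULL result with O_CREAT set allocates a new
 * domain and inserts it here via xrcd_table_insert().
 */
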
ssize_t ib_uverbs_open_xrcd(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_open_xrcd	cmd;
	struct ib_uverbs_open_xrcd_resp	resp;
	struct ib_udata			udata;
	struct ib_uxrcd_object         *obj;
	struct ib_xrcd                 *xrcd = NULL;
	struct fd			f = {NULL, 0};
	struct inode                   *inode = NULL;
	int				ret = 0;
	int				new_xrcd = 0;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	mutex_lock(&file->device->xrcd_tree_mutex);

	if (cmd.fd != -1) {
		/* search for file descriptor */
		f = fdget(cmd.fd);
		if (!f.file) {
			ret = -EBADF;
			goto err_tree_mutex_unlock;
		}

		inode = file_inode(f.file);
		xrcd = find_xrcd(file->device, inode);
		if (!xrcd && !(cmd.oflags & O_CREAT)) {
			/* no file descriptor. Need CREATE flag */
			ret = -EAGAIN;
			goto err_tree_mutex_unlock;
		}

		if (xrcd && cmd.oflags & O_EXCL) {
			ret = -EINVAL;
			goto err_tree_mutex_unlock;
		}
	}

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj) {
		ret = -ENOMEM;
		goto err_tree_mutex_unlock;
	}

	init_uobj(&obj->uobject, 0, file->ucontext, &xrcd_lock_class);

	down_write(&obj->uobject.mutex);

	if (!xrcd) {
		xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
							file->ucontext, &udata);
		if (IS_ERR(xrcd)) {
			ret = PTR_ERR(xrcd);
			goto err;
		}

		xrcd->inode  = inode;
		xrcd->device = file->device->ib_dev;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
		new_xrcd = 1;
	}

	atomic_set(&obj->refcnt, 0);
	obj->uobject.object = xrcd;
	ret = idr_add_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);
	if (ret)
		goto err_idr;

	memset(&resp, 0, sizeof resp);
	resp.xrcd_handle = obj->uobject.id;

	if (inode) {
		if (new_xrcd) {
			/* create new inode/xrcd table entry */
			ret = xrcd_table_insert(file->device, inode, xrcd);
			if (ret)
				goto err_insert_xrcd;
		}
		atomic_inc(&xrcd->usecnt);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (f.file)
		fdput(f);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->xrcd_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;
	up_write(&obj->uobject.mutex);

	mutex_unlock(&file->device->xrcd_tree_mutex);
	return in_len;

err_copy:
	if (inode) {
		if (new_xrcd)
			xrcd_table_delete(file->device, inode);
		atomic_dec(&xrcd->usecnt);
	}

err_insert_xrcd:
	idr_remove_uobj(&ib_uverbs_xrcd_idr, &obj->uobject);

err_idr:
	ib_dealloc_xrcd(xrcd);

err:
	put_uobj_write(&obj->uobject);

err_tree_mutex_unlock:
	if (f.file)
		fdput(f);

	mutex_unlock(&file->device->xrcd_tree_mutex);

	return ret;
}

ssize_t ib_uverbs_close_xrcd(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_close_xrcd cmd;
	struct ib_uobject           *uobj;
	struct ib_xrcd              *xrcd = NULL;
	struct inode                *inode = NULL;
	struct ib_uxrcd_object      *obj;
	int                         live;
	int                         ret = 0;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	mutex_lock(&file->device->xrcd_tree_mutex);
	uobj = idr_write_uobj(&ib_uverbs_xrcd_idr, cmd.xrcd_handle, file->ucontext);
	if (!uobj) {
		ret = -EINVAL;
		goto out;
	}

	xrcd  = uobj->object;
	inode = xrcd->inode;
	obj   = container_of(uobj, struct ib_uxrcd_object, uobject);
	if (atomic_read(&obj->refcnt)) {
		put_uobj_write(uobj);
		ret = -EBUSY;
		goto out;
	}

	if (!inode || atomic_dec_and_test(&xrcd->usecnt)) {
		ret = ib_dealloc_xrcd(uobj->object);
		if (!ret)
			uobj->live = 0;
	}

	live = uobj->live;
	if (inode && ret)
		atomic_inc(&xrcd->usecnt);

	put_uobj_write(uobj);

	if (ret)
		goto out;

	if (inode && !live)
		xrcd_table_delete(file->device, inode);

	idr_remove_uobj(&ib_uverbs_xrcd_idr, uobj);
	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);
	ret = in_len;

out:
	mutex_unlock(&file->device->xrcd_tree_mutex);
	return ret;
}

void ib_uverbs_dealloc_xrcd(struct ib_uverbs_device *dev,
			    struct ib_xrcd *xrcd)
{
	struct inode *inode;

	inode = xrcd->inode;
	if (inode && !atomic_dec_and_test(&xrcd->usecnt))
		return;

	ib_dealloc_xrcd(xrcd);

	if (inode)
		xrcd_table_delete(dev, inode);
}

ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
			 const char __user *buf, int in_len,
			 int out_len)
{
	struct ib_uverbs_reg_mr      cmd;
	struct ib_uverbs_reg_mr_resp resp;
	struct ib_udata              udata;
	struct ib_uobject           *uobj;
	struct ib_pd                *pd;
	struct ib_mr                *mr;
	int                          ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
		return -EINVAL;

	ret = ib_check_mr_access(cmd.access_flags);
	if (ret)
		return ret;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mr_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
				     cmd.access_flags, &udata);
	if (IS_ERR(mr)) {
		ret = PTR_ERR(mr);
		goto err_put;
	}

	mr->device  = pd->device;
	mr->pd      = pd;
	mr->uobject = uobj;
	atomic_inc(&pd->usecnt);
	atomic_set(&mr->usecnt, 0);

	uobj->object = mr;
	ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
	if (ret)
		goto err_unreg;

	memset(&resp, 0, sizeof resp);
	resp.lkey      = mr->lkey;
	resp.rkey      = mr->rkey;
	resp.mr_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mr_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

err_unreg:
	ib_dereg_mr(mr);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

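/*
 * Editorial worked example (not in the original file) for the alignment
 * check in ib_uverbs_reg_mr() above: the user virtual address and the
 * requested HCA virtual address may differ, but they must share the
 * same offset within a page, because the HCA maps whole pages.  With
 * 4 KiB pages (~PAGE_MASK == 0xfff):
 *
 *	cmd.start  = 0x601234  ->  start  & ~PAGE_MASK = 0x234
 *	cmd.hca_va = 0x010234  ->  hca_va & ~PAGE_MASK = 0x234   OK
 *	cmd.hca_va = 0x010200  ->  offset 0x200 != 0x234         -EINVAL
 */
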
ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_rereg_mr      cmd;
	struct ib_uverbs_rereg_mr_resp resp;
	struct ib_udata                udata;
	struct ib_pd                  *pd = NULL;
	struct ib_mr                  *mr;
	struct ib_pd                  *old_pd;
	int                            ret;
	struct ib_uobject             *uobj;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof(cmd),
		   (unsigned long) cmd.response + sizeof(resp),
		   in_len - sizeof(cmd), out_len - sizeof(resp));

	if (cmd.flags & ~IB_MR_REREG_SUPPORTED || !cmd.flags)
		return -EINVAL;

	if ((cmd.flags & IB_MR_REREG_TRANS) &&
	    (!cmd.start || !cmd.hca_va || 0 >= cmd.length ||
	     (cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK)))
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	if (cmd.flags & IB_MR_REREG_ACCESS) {
		ret = ib_check_mr_access(cmd.access_flags);
		if (ret)
			goto put_uobjs;
	}

	if (cmd.flags & IB_MR_REREG_PD) {
		pd = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd) {
			ret = -EINVAL;
			goto put_uobjs;
		}
	}

	if (atomic_read(&mr->usecnt)) {
		ret = -EBUSY;
		goto put_uobj_pd;
	}

	old_pd = mr->pd;
	ret = mr->device->rereg_user_mr(mr, cmd.flags, cmd.start,
					cmd.length, cmd.hca_va,
					cmd.access_flags, pd, &udata);
	if (!ret) {
		if (cmd.flags & IB_MR_REREG_PD) {
			atomic_inc(&pd->usecnt);
			mr->pd = pd;
			atomic_dec(&old_pd->usecnt);
		}
	} else {
		goto put_uobj_pd;
	}

	memset(&resp, 0, sizeof(resp));
	resp.lkey = mr->lkey;
	resp.rkey = mr->rkey;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp)))
		ret = -EFAULT;
	else
		ret = in_len;

put_uobj_pd:
	if (cmd.flags & IB_MR_REREG_PD)
		put_pd_read(pd);

put_uobjs:
	put_uobj_write(mr->uobject);

	return ret;
}

ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_dereg_mr cmd;
	struct ib_mr             *mr;
	struct ib_uobject        *uobj;
	int                       ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mr = uobj->object;

	ret = ib_dereg_mr(mr);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mr_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_alloc_mw(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_alloc_mw      cmd;
	struct ib_uverbs_alloc_mw_resp resp;
	struct ib_uobject             *uobj;
	struct ib_pd                  *pd;
	struct ib_mw                  *mw;
	int                            ret;

	if (out_len < sizeof(resp))
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, 0, file->ucontext, &mw_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_free;
	}

	mw = pd->device->alloc_mw(pd, cmd.mw_type);
	if (IS_ERR(mw)) {
		ret = PTR_ERR(mw);
		goto err_put;
	}

	mw->device  = pd->device;
	mw->pd      = pd;
	mw->uobject = uobj;
	atomic_inc(&pd->usecnt);

	uobj->object = mw;
	ret = idr_add_uobj(&ib_uverbs_mw_idr, uobj);
	if (ret)
		goto err_unalloc;

	memset(&resp, 0, sizeof(resp));
	resp.rkey      = mw->rkey;
	resp.mw_handle = uobj->id;

	if (copy_to_user((void __user *)(unsigned long)cmd.response,
			 &resp, sizeof(resp))) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->mw_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

err_unalloc:
	ib_dealloc_mw(mw);

err_put:
	put_pd_read(pd);

err_free:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_dealloc_mw(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_dealloc_mw cmd;
	struct ib_mw               *mw;
	struct ib_uobject          *uobj;
	int                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof(cmd)))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_mw_idr, cmd.mw_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;

	mw = uobj->object;

	ret = ib_dealloc_mw(mw);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_mw_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
				      const char __user *buf, int in_len,
				      int out_len)
{
	struct ib_uverbs_create_comp_channel	   cmd;
	struct ib_uverbs_create_comp_channel_resp  resp;
	struct file				  *filp;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	ret = get_unused_fd_flags(O_CLOEXEC);
	if (ret < 0)
		return ret;
	resp.fd = ret;

	filp = ib_uverbs_alloc_event_file(file, 0);
	if (IS_ERR(filp)) {
		put_unused_fd(resp.fd);
		return PTR_ERR(filp);
	}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		put_unused_fd(resp.fd);
		fput(filp);
		return -EFAULT;
	}

	fd_install(resp.fd, filp);
	return in_len;
}

ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_cq      cmd;
	struct ib_uverbs_create_cq_resp resp;
	struct ib_udata                 udata;
	struct ib_ucq_object           *obj;
	struct ib_uverbs_event_file    *ev_file = NULL;
	struct ib_cq                   *cq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	if (cmd.comp_vector >= file->device->num_comp_vectors)
		return -EINVAL;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_class);
	down_write(&obj->uobject.mutex);

	if (cmd.comp_channel >= 0) {
		ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
		if (!ev_file) {
			ret = -EINVAL;
			goto err;
		}
	}

	obj->uverbs_file	   = file;
	obj->comp_events_reported  = 0;
	obj->async_events_reported = 0;
	INIT_LIST_HEAD(&obj->comp_list);
	INIT_LIST_HEAD(&obj->async_list);

	cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
					     cmd.comp_vector,
					     file->ucontext, &udata);
	if (IS_ERR(cq)) {
		ret = PTR_ERR(cq);
		goto err_file;
	}

	cq->device        = file->device->ib_dev;
	cq->uobject       = &obj->uobject;
	cq->comp_handler  = ib_uverbs_comp_handler;
	cq->event_handler = ib_uverbs_cq_event_handler;
	cq->cq_context    = ev_file;
	atomic_set(&cq->usecnt, 0);

	obj->uobject.object = cq;
	ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
	if (ret)
		goto err_free;

	memset(&resp, 0, sizeof resp);
	resp.cq_handle = obj->uobject.id;
	resp.cqe       = cq->cqe;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
	mutex_unlock(&file->mutex);

	obj->uobject.live = 1;

	up_write(&obj->uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);

err_free:
	ib_destroy_cq(cq);

err_file:
	if (ev_file)
		ib_uverbs_release_ucq(file, ev_file, obj);

err:
	put_uobj_write(&obj->uobject);
	return ret;
}

ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_resize_cq	cmd;
	struct ib_uverbs_resize_cq_resp	resp;
	struct ib_udata                 udata;
	struct ib_cq			*cq;
	int				ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
	if (ret)
		goto out;

	resp.cqe = cq->cqe;

	/* only the cqe field of the response is copied back */
	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp.cqe))
		ret = -EFAULT;

out:
	put_cq_read(cq);

	return ret ? ret : in_len;
}

static int copy_wc_to_user(void __user *dest, struct ib_wc *wc)
{
	struct ib_uverbs_wc tmp;

	tmp.wr_id		= wc->wr_id;
	tmp.status		= wc->status;
	tmp.opcode		= wc->opcode;
	tmp.vendor_err		= wc->vendor_err;
	tmp.byte_len		= wc->byte_len;
	tmp.ex.imm_data		= (__u32 __force) wc->ex.imm_data;
	tmp.qp_num		= wc->qp->qp_num;
	tmp.src_qp		= wc->src_qp;
	tmp.wc_flags		= wc->wc_flags;
	tmp.pkey_index		= wc->pkey_index;
	tmp.slid		= wc->slid;
	tmp.sl			= wc->sl;
	tmp.dlid_path_bits	= wc->dlid_path_bits;
	tmp.port_num		= wc->port_num;
	tmp.reserved		= 0;

	if (copy_to_user(dest, &tmp, sizeof tmp))
		return -EFAULT;

	return 0;
}

ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len,
			  int out_len)
{
	struct ib_uverbs_poll_cq       cmd;
	struct ib_uverbs_poll_cq_resp  resp;
	u8 __user                     *header_ptr;
	u8 __user                     *data_ptr;
	struct ib_cq                  *cq;
	struct ib_wc                   wc;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	/* we copy a struct ib_uverbs_poll_cq_resp to user space */
	header_ptr = (void __user *)(unsigned long) cmd.response;
	data_ptr = header_ptr + sizeof resp;

	memset(&resp, 0, sizeof resp);
	while (resp.count < cmd.ne) {
		ret = ib_poll_cq(cq, 1, &wc);
		if (ret < 0)
			goto out_put;
		if (!ret)
			break;

		ret = copy_wc_to_user(data_ptr, &wc);
		if (ret)
			goto out_put;

		data_ptr += sizeof(struct ib_uverbs_wc);
		++resp.count;
	}

	if (copy_to_user(header_ptr, &resp, sizeof resp)) {
		ret = -EFAULT;
		goto out_put;
	}

	ret = in_len;

out_put:
	put_cq_read(cq);
	return ret;
}

ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_req_notify_cq cmd;
	struct ib_cq                  *cq;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
	if (!cq)
		return -EINVAL;

	ib_req_notify_cq(cq, cmd.solicited_only ?
			 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);

	put_cq_read(cq);

	return in_len;
}

ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_cq      cmd;
	struct ib_uverbs_destroy_cq_resp resp;
	struct ib_uobject		*uobj;
	struct ib_cq			*cq;
	struct ib_ucq_object		*obj;
	struct ib_uverbs_event_file	*ev_file;
	int				 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	cq      = uobj->object;
	ev_file = cq->cq_context;
	obj     = container_of(cq->uobject, struct ib_ucq_object, uobject);

	ret = ib_destroy_cq(cq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_cq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_ucq(file, ev_file, obj);

	memset(&resp, 0, sizeof resp);
	resp.comp_events_reported  = obj->comp_events_reported;
	resp.async_events_reported = obj->async_events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_qp      cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_device	       *device;
	struct ib_pd                   *pd = NULL;
	struct ib_xrcd		       *xrcd = NULL;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_cq                   *scq = NULL, *rcq = NULL;
	struct ib_srq                  *srq = NULL;
	struct ib_qp                   *qp;
	struct ib_qp_init_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (cmd.qp_type == IB_QPT_RAW_PACKET && !capable(CAP_NET_RAW))
		return -EPERM;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kzalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd.qp_type == IB_QPT_XRC_TGT) {
		xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
		if (!xrcd) {
			ret = -EINVAL;
			goto err_put;
		}
		device = xrcd->device;
	} else {
		if (cmd.qp_type == IB_QPT_XRC_INI) {
			cmd.max_recv_wr = cmd.max_recv_sge = 0;
		} else {
			if (cmd.is_srq) {
				srq = idr_read_srq(cmd.srq_handle, file->ucontext);
				if (!srq || srq->srq_type != IB_SRQT_BASIC) {
					ret = -EINVAL;
					goto err_put;
				}
			}

			if (cmd.recv_cq_handle != cmd.send_cq_handle) {
				rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext, 0);
				if (!rcq) {
					ret = -EINVAL;
					goto err_put;
				}
			}
		}

		scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, !!rcq);
		rcq = rcq ?: scq;
		pd  = idr_read_pd(cmd.pd_handle, file->ucontext);
		if (!pd || !scq) {
			ret = -EINVAL;
			goto err_put;
		}

		device = pd->device;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.send_cq       = scq;
	attr.recv_cq       = rcq;
	attr.srq           = srq;
	attr.xrcd	   = xrcd;
	attr.sq_sig_type   = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
	attr.qp_type       = cmd.qp_type;
	attr.create_flags  = 0;

	attr.cap.max_send_wr     = cmd.max_send_wr;
	attr.cap.max_recv_wr     = cmd.max_recv_wr;
	attr.cap.max_send_sge    = cmd.max_send_sge;
	attr.cap.max_recv_sge    = cmd.max_recv_sge;
	attr.cap.max_inline_data = cmd.max_inline_data;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	if (cmd.qp_type == IB_QPT_XRC_TGT)
		qp = ib_create_qp(pd, &attr);
	else
		qp = device->create_qp(pd, &attr, &udata);

	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	if (cmd.qp_type != IB_QPT_XRC_TGT) {
		qp->real_qp	  = qp;
		qp->device	  = device;
		qp->pd		  = pd;
		qp->send_cq	  = attr.send_cq;
		qp->recv_cq	  = attr.recv_cq;
		qp->srq		  = attr.srq;
		qp->event_handler = attr.event_handler;
		qp->qp_context	  = attr.qp_context;
		qp->qp_type	  = attr.qp_type;
		atomic_set(&qp->usecnt, 0);
		atomic_inc(&pd->usecnt);
		atomic_inc(&attr.send_cq->usecnt);
		if (attr.recv_cq)
			atomic_inc(&attr.recv_cq->usecnt);
		if (attr.srq)
			atomic_inc(&attr.srq->usecnt);
	}
	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn             = qp->qp_num;
	resp.qp_handle       = obj->uevent.uobject.id;
	resp.max_recv_sge    = attr.cap.max_recv_sge;
	resp.max_send_sge    = attr.cap.max_send_sge;
	resp.max_recv_wr     = attr.cap.max_recv_wr;
	resp.max_send_wr     = attr.cap.max_send_wr;
	resp.max_inline_data = attr.cap.max_inline_data;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (xrcd) {
		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object,
					  uobject);
		atomic_inc(&obj->uxrcd->refcnt);
		put_xrcd_read(xrcd_uobj);
	}

	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	if (xrcd)
		put_xrcd_read(xrcd_uobj);
	if (pd)
		put_pd_read(pd);
	if (scq)
		put_cq_read(scq);
	if (rcq && rcq != scq)
		put_cq_read(rcq);
	if (srq)
		put_srq_read(srq);

	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_open_qp(struct ib_uverbs_file *file,
			  const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_open_qp        cmd;
	struct ib_uverbs_create_qp_resp resp;
	struct ib_udata                 udata;
	struct ib_uqp_object           *obj;
	struct ib_xrcd		       *xrcd;
	struct ib_uobject	       *uninitialized_var(xrcd_uobj);
	struct ib_qp                   *qp;
	struct ib_qp_open_attr          attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	xrcd = idr_read_xrcd(cmd.pd_handle, file->ucontext, &xrcd_uobj);
	if (!xrcd) {
		ret = -EINVAL;
		goto err_put;
	}

	attr.event_handler = ib_uverbs_qp_event_handler;
	attr.qp_context    = file;
	attr.qp_num        = cmd.qpn;
	attr.qp_type       = cmd.qp_type;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);
	INIT_LIST_HEAD(&obj->mcast_list);

	qp = ib_open_qp(xrcd, &attr);
	if (IS_ERR(qp)) {
		ret = PTR_ERR(qp);
		goto err_put;
	}

	qp->uobject = &obj->uevent.uobject;

	obj->uevent.uobject.object = qp;
	ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.qpn       = qp->qp_num;
	resp.qp_handle = obj->uevent.uobject.id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_remove;
	}

	obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
	atomic_inc(&obj->uxrcd->refcnt);
	put_xrcd_read(xrcd_uobj);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return in_len;

err_remove:
	idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_qp(qp);

err_put:
	put_xrcd_read(xrcd_uobj);
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
			   const char __user *buf, int in_len,
			   int out_len)
{
	struct ib_uverbs_query_qp      cmd;
	struct ib_uverbs_query_qp_resp resp;
	struct ib_qp                   *qp;
	struct ib_qp_attr              *attr;
	struct ib_qp_init_attr         *init_attr;
	int                            ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	attr      = kmalloc(sizeof *attr, GFP_KERNEL);
	init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto out;
	}

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);

	put_qp_read(qp);

	if (ret)
		goto out;

	memset(&resp, 0, sizeof resp);

	resp.qp_state            = attr->qp_state;
	resp.cur_qp_state        = attr->cur_qp_state;
	resp.path_mtu            = attr->path_mtu;
	resp.path_mig_state      = attr->path_mig_state;
	resp.qkey                = attr->qkey;
	resp.rq_psn              = attr->rq_psn;
	resp.sq_psn              = attr->sq_psn;
	resp.dest_qp_num         = attr->dest_qp_num;
	resp.qp_access_flags     = attr->qp_access_flags;
	resp.pkey_index          = attr->pkey_index;
	resp.alt_pkey_index      = attr->alt_pkey_index;
	resp.sq_draining         = attr->sq_draining;
	resp.max_rd_atomic       = attr->max_rd_atomic;
	resp.max_dest_rd_atomic  = attr->max_dest_rd_atomic;
	resp.min_rnr_timer       = attr->min_rnr_timer;
	resp.port_num            = attr->port_num;
	resp.timeout             = attr->timeout;
	resp.retry_cnt           = attr->retry_cnt;
	resp.rnr_retry           = attr->rnr_retry;
	resp.alt_port_num        = attr->alt_port_num;
	resp.alt_timeout         = attr->alt_timeout;

	memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
	resp.dest.flow_label        = attr->ah_attr.grh.flow_label;
	resp.dest.sgid_index        = attr->ah_attr.grh.sgid_index;
	resp.dest.hop_limit         = attr->ah_attr.grh.hop_limit;
	resp.dest.traffic_class     = attr->ah_attr.grh.traffic_class;
	resp.dest.dlid              = attr->ah_attr.dlid;
	resp.dest.sl                = attr->ah_attr.sl;
	resp.dest.src_path_bits     = attr->ah_attr.src_path_bits;
	resp.dest.static_rate       = attr->ah_attr.static_rate;
	resp.dest.is_global         = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
	resp.dest.port_num          = attr->ah_attr.port_num;

	memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
	resp.alt_dest.flow_label    = attr->alt_ah_attr.grh.flow_label;
	resp.alt_dest.sgid_index    = attr->alt_ah_attr.grh.sgid_index;
	resp.alt_dest.hop_limit     = attr->alt_ah_attr.grh.hop_limit;
	resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
	resp.alt_dest.dlid          = attr->alt_ah_attr.dlid;
	resp.alt_dest.sl            = attr->alt_ah_attr.sl;
	resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
	resp.alt_dest.static_rate   = attr->alt_ah_attr.static_rate;
	resp.alt_dest.is_global     = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
	resp.alt_dest.port_num      = attr->alt_ah_attr.port_num;

	resp.max_send_wr            = init_attr->cap.max_send_wr;
	resp.max_recv_wr            = init_attr->cap.max_recv_wr;
	resp.max_send_sge           = init_attr->cap.max_send_sge;
	resp.max_recv_sge           = init_attr->cap.max_recv_sge;
	resp.max_inline_data        = init_attr->cap.max_inline_data;
	resp.sq_sig_all             = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	kfree(attr);
	kfree(init_attr);

	return ret ? ret : in_len;
}

/* Remove ignored fields set in the attribute mask */
static int modify_qp_mask(enum ib_qp_type qp_type, int mask)
{
	switch (qp_type) {
	case IB_QPT_XRC_INI:
		return mask & ~(IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER);
	case IB_QPT_XRC_TGT:
		return mask & ~(IB_QP_MAX_QP_RD_ATOMIC | IB_QP_RETRY_CNT |
				IB_QP_RNR_RETRY);
	default:
		return mask;
	}
}

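/*
 * Editorial example (not in the original file): an XRC target QP has no
 * send side, so userspace libraries routinely pass a full RTR/RTS
 * attribute mask and rely on the filtering above.  For IB_QPT_XRC_TGT:
 *
 *	mask in :  IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
 *	           IB_QP_RNR_RETRY | IB_QP_MAX_QP_RD_ATOMIC | IB_QP_SQ_PSN
 *	mask out:  IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_SQ_PSN
 *
 * so drivers never see initiator-only attributes on a target QP.
 */
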
ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_modify_qp cmd;
	struct ib_udata            udata;
	struct ib_qp              *qp;
	struct ib_qp_attr         *attr;
	int                        ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	attr = kmalloc(sizeof *attr, GFP_KERNEL);
	if (!attr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		ret = -EINVAL;
		goto out;
	}

	attr->qp_state            = cmd.qp_state;
	attr->cur_qp_state        = cmd.cur_qp_state;
	attr->path_mtu            = cmd.path_mtu;
	attr->path_mig_state      = cmd.path_mig_state;
	attr->qkey                = cmd.qkey;
	attr->rq_psn              = cmd.rq_psn;
	attr->sq_psn              = cmd.sq_psn;
	attr->dest_qp_num         = cmd.dest_qp_num;
	attr->qp_access_flags     = cmd.qp_access_flags;
	attr->pkey_index          = cmd.pkey_index;
	attr->alt_pkey_index      = cmd.alt_pkey_index;
	attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
	attr->max_rd_atomic       = cmd.max_rd_atomic;
	attr->max_dest_rd_atomic  = cmd.max_dest_rd_atomic;
	attr->min_rnr_timer       = cmd.min_rnr_timer;
	attr->port_num            = cmd.port_num;
	attr->timeout             = cmd.timeout;
	attr->retry_cnt           = cmd.retry_cnt;
	attr->rnr_retry           = cmd.rnr_retry;
	attr->alt_port_num        = cmd.alt_port_num;
	attr->alt_timeout         = cmd.alt_timeout;

	memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
	attr->ah_attr.grh.flow_label        = cmd.dest.flow_label;
	attr->ah_attr.grh.sgid_index        = cmd.dest.sgid_index;
	attr->ah_attr.grh.hop_limit         = cmd.dest.hop_limit;
	attr->ah_attr.grh.traffic_class     = cmd.dest.traffic_class;
	attr->ah_attr.dlid                  = cmd.dest.dlid;
	attr->ah_attr.sl                    = cmd.dest.sl;
	attr->ah_attr.src_path_bits         = cmd.dest.src_path_bits;
	attr->ah_attr.static_rate           = cmd.dest.static_rate;
	attr->ah_attr.ah_flags              = cmd.dest.is_global ? IB_AH_GRH : 0;
	attr->ah_attr.port_num              = cmd.dest.port_num;

	memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
	attr->alt_ah_attr.grh.flow_label    = cmd.alt_dest.flow_label;
	attr->alt_ah_attr.grh.sgid_index    = cmd.alt_dest.sgid_index;
	attr->alt_ah_attr.grh.hop_limit     = cmd.alt_dest.hop_limit;
	attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
	attr->alt_ah_attr.dlid              = cmd.alt_dest.dlid;
	attr->alt_ah_attr.sl                = cmd.alt_dest.sl;
	attr->alt_ah_attr.src_path_bits     = cmd.alt_dest.src_path_bits;
	attr->alt_ah_attr.static_rate       = cmd.alt_dest.static_rate;
	attr->alt_ah_attr.ah_flags          = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
	attr->alt_ah_attr.port_num          = cmd.alt_dest.port_num;

	if (qp->real_qp == qp) {
		ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
		if (ret)
			goto release_qp;
		ret = qp->device->modify_qp(qp, attr,
			modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
	} else {
		ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
	}

	if (ret)
		goto release_qp;

	ret = in_len;

release_qp:
	put_qp_read(qp);

out:
	kfree(attr);

	return ret;
}

ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_destroy_qp      cmd;
	struct ib_uverbs_destroy_qp_resp resp;
	struct ib_uobject		*uobj;
	struct ib_qp			*qp;
	struct ib_uqp_object		*obj;
	int				 ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	memset(&resp, 0, sizeof resp);

	uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	qp  = uobj->object;
	obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);

	if (!list_empty(&obj->mcast_list)) {
		put_uobj_write(uobj);
		return -EBUSY;
	}

	ret = ib_destroy_qp(qp);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (obj->uxrcd)
		atomic_dec(&obj->uxrcd->refcnt);

	idr_remove_uobj(&ib_uverbs_qp_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, &obj->uevent);

	resp.events_reported = obj->uevent.events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_send      cmd;
	struct ib_uverbs_post_send_resp resp;
	struct ib_uverbs_send_wr       *user_wr;
	struct ib_send_wr              *wr = NULL, *last, *next, *bad_wr;
	struct ib_qp                   *qp;
	int                             i, sg_ind;
	int				is_ud;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
	    cmd.sge_count * sizeof (struct ib_uverbs_sge))
		return -EINVAL;

	if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
		return -EINVAL;

	user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
	if (!user_wr)
		return -ENOMEM;

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	is_ud = qp->qp_type == IB_QPT_UD;
	sg_ind = 0;
	last = NULL;
	for (i = 0; i < cmd.wr_count; ++i) {
		if (copy_from_user(user_wr,
				   buf + sizeof cmd + i * cmd.wqe_size,
				   cmd.wqe_size)) {
			ret = -EFAULT;
			goto out_put;
		}

		if (user_wr->num_sge + sg_ind > cmd.sge_count) {
			ret = -EINVAL;
			goto out_put;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto out_put;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next       = NULL;
		next->wr_id      = user_wr->wr_id;
		next->num_sge    = user_wr->num_sge;
		next->opcode     = user_wr->opcode;
		next->send_flags = user_wr->send_flags;

		if (is_ud) {
			next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
						     file->ucontext);
			if (!next->wr.ud.ah) {
				ret = -EINVAL;
				goto out_put;
			}
			next->wr.ud.remote_qpn  = user_wr->wr.ud.remote_qpn;
			next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
			if (next->opcode == IB_WR_SEND_WITH_IMM)
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
		} else {
			switch (next->opcode) {
			case IB_WR_RDMA_WRITE_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
				next->wr.rdma.remote_addr =
					user_wr->wr.rdma.remote_addr;
				next->wr.rdma.rkey        =
					user_wr->wr.rdma.rkey;
				break;
			case IB_WR_SEND_WITH_IMM:
				next->ex.imm_data =
					(__be32 __force) user_wr->ex.imm_data;
				break;
			case IB_WR_SEND_WITH_INV:
				next->ex.invalidate_rkey =
					user_wr->ex.invalidate_rkey;
				break;
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				next->wr.atomic.remote_addr =
					user_wr->wr.atomic.remote_addr;
				next->wr.atomic.compare_add =
					user_wr->wr.atomic.compare_add;
				next->wr.atomic.swap = user_wr->wr.atomic.swap;
				next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
				break;
			default:
				break;
			}
		}

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + sizeof cmd +
					   cmd.wr_count * cmd.wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto out_put;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	resp.bad_wr = 0;
	ret = qp->device->post_send(qp->real_qp, wr, &bad_wr);
	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out_put:
	put_qp_read(qp);

	while (wr) {
		if (is_ud && wr->wr.ud.ah)
			put_ah_read(wr->wr.ud.ah);
		next = wr->next;
		kfree(wr);
		wr = next;
	}

out:
	kfree(user_wr);

	return ret ? ret : in_len;
}

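/*
 * Editorial sketch (not in the original file) of the post-send command
 * layout the loop above unmarshals.  Everything after the fixed header
 * is variable sized: wr_count work requests of wqe_size bytes each,
 * followed by the scatter/gather entries for all WRs, in order:
 *
 *	+--------------------------+  buf
 *	| ib_uverbs_post_send cmd  |
 *	+--------------------------+  buf + sizeof cmd
 *	| WR 0 (wqe_size bytes)    |
 *	| WR 1 (wqe_size bytes)    |
 *	| ...                      |
 *	+--------------------------+  buf + sizeof cmd + wr_count * wqe_size
 *	| SGE 0, SGE 1, ...        |  each WR's num_sge entries, back to back
 *	+--------------------------+
 */
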
static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
						    int in_len,
						    u32 wr_count,
						    u32 sge_count,
						    u32 wqe_size)
{
	struct ib_uverbs_recv_wr *user_wr;
	struct ib_recv_wr        *wr = NULL, *last, *next;
	int                       sg_ind;
	int                       i;
	int                       ret;

	if (in_len < wqe_size * wr_count +
	    sge_count * sizeof (struct ib_uverbs_sge))
		return ERR_PTR(-EINVAL);

	if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
		return ERR_PTR(-EINVAL);

	user_wr = kmalloc(wqe_size, GFP_KERNEL);
	if (!user_wr)
		return ERR_PTR(-ENOMEM);

	sg_ind = 0;
	last = NULL;
	for (i = 0; i < wr_count; ++i) {
		if (copy_from_user(user_wr, buf + i * wqe_size,
				   wqe_size)) {
			ret = -EFAULT;
			goto err;
		}

		if (user_wr->num_sge + sg_ind > sge_count) {
			ret = -EINVAL;
			goto err;
		}

		next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
			       user_wr->num_sge * sizeof (struct ib_sge),
			       GFP_KERNEL);
		if (!next) {
			ret = -ENOMEM;
			goto err;
		}

		if (!last)
			wr = next;
		else
			last->next = next;
		last = next;

		next->next    = NULL;
		next->wr_id   = user_wr->wr_id;
		next->num_sge = user_wr->num_sge;

		if (next->num_sge) {
			next->sg_list = (void *) next +
				ALIGN(sizeof *next, sizeof (struct ib_sge));
			if (copy_from_user(next->sg_list,
					   buf + wr_count * wqe_size +
					   sg_ind * sizeof (struct ib_sge),
					   next->num_sge * sizeof (struct ib_sge))) {
				ret = -EFAULT;
				goto err;
			}
			sg_ind += next->num_sge;
		} else
			next->sg_list = NULL;
	}

	kfree(user_wr);
	return wr;

err:
	kfree(user_wr);

	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ERR_PTR(ret);
}

ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_post_recv      cmd;
	struct ib_uverbs_post_recv_resp resp;
	struct ib_recv_wr              *wr, *next, *bad_wr;
	struct ib_qp                   *qp;
	ssize_t                         ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		goto out;

	resp.bad_wr = 0;
	ret = qp->device->post_recv(qp->real_qp, wr, &bad_wr);

	put_qp_read(qp);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
				const char __user *buf, int in_len,
				int out_len)
{
	struct ib_uverbs_post_srq_recv      cmd;
	struct ib_uverbs_post_srq_recv_resp resp;
	struct ib_recv_wr                  *wr, *next, *bad_wr;
	struct ib_srq                      *srq;
	ssize_t                             ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
				       in_len - sizeof cmd, cmd.wr_count,
				       cmd.sge_count, cmd.wqe_size);
	if (IS_ERR(wr))
		return PTR_ERR(wr);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		goto out;

	resp.bad_wr = 0;
	ret = srq->device->post_srq_recv(srq, wr, &bad_wr);

	put_srq_read(srq);

	if (ret)
		for (next = wr; next; next = next->next) {
			++resp.bad_wr;
			if (next == bad_wr)
				break;
		}

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

out:
	while (wr) {
		next = wr->next;
		kfree(wr);
		wr = next;
	}

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
			    const char __user *buf, int in_len,
			    int out_len)
{
	struct ib_uverbs_create_ah	 cmd;
	struct ib_uverbs_create_ah_resp	 resp;
	struct ib_uobject		*uobj;
	struct ib_pd			*pd;
	struct ib_ah			*ah;
	struct ib_ah_attr		attr;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
	if (!uobj)
		return -ENOMEM;

	init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_class);
	down_write(&uobj->mutex);

	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err;
	}

	attr.dlid 	       = cmd.attr.dlid;
	attr.sl 	       = cmd.attr.sl;
	attr.src_path_bits     = cmd.attr.src_path_bits;
	attr.static_rate       = cmd.attr.static_rate;
	attr.ah_flags          = cmd.attr.is_global ? IB_AH_GRH : 0;
	attr.port_num 	       = cmd.attr.port_num;
	attr.grh.flow_label    = cmd.attr.grh.flow_label;
	attr.grh.sgid_index    = cmd.attr.grh.sgid_index;
	attr.grh.hop_limit     = cmd.attr.grh.hop_limit;
	attr.grh.traffic_class = cmd.attr.grh.traffic_class;
	memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);

	ah = ib_create_ah(pd, &attr);
	if (IS_ERR(ah)) {
		ret = PTR_ERR(ah);
		goto err_put;
	}

	ah->uobject  = uobj;
	uobj->object = ah;

	ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
	if (ret)
		goto err_destroy;

	resp.ah_handle = uobj->id;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->ah_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);

	return in_len;

err_copy:
	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

err_destroy:
	ib_destroy_ah(ah);

err_put:
	put_pd_read(pd);

err:
	put_uobj_write(uobj);
	return ret;
}

ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_destroy_ah cmd;
	struct ib_ah		   *ah;
	struct ib_uobject	   *uobj;
	int			    ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	ah = uobj->object;

	ret = ib_destroy_ah(ah);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	idr_remove_uobj(&ib_uverbs_ah_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return in_len;
}

ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_attach_mcast cmd;
	struct ib_qp                 *qp;
	struct ib_uqp_object         *obj;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			ret = 0;
			goto out_put;
		}

	mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
	if (!mcast) {
		ret = -ENOMEM;
		goto out_put;
	}

	mcast->lid = cmd.mlid;
	memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);

	ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
	if (!ret)
		list_add_tail(&mcast->list, &obj->mcast_list);
	else
		kfree(mcast);

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
			       const char __user *buf, int in_len,
			       int out_len)
{
	struct ib_uverbs_detach_mcast cmd;
	struct ib_uqp_object         *obj;
	struct ib_qp                 *qp;
	struct ib_uverbs_mcast_entry *mcast;
	int                           ret = -EINVAL;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	qp = idr_write_qp(cmd.qp_handle, file->ucontext);
	if (!qp)
		return -EINVAL;

	ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
	if (ret)
		goto out_put;

	obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);

	list_for_each_entry(mcast, &obj->mcast_list, list)
		if (cmd.mlid == mcast->lid &&
		    !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
			list_del(&mcast->list);
			kfree(mcast);
			break;
		}

out_put:
	put_qp_write(qp);

	return ret ? ret : in_len;
}

static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
				union ib_flow_spec *ib_spec)
{
	if (kern_spec->reserved)
		return -EINVAL;

	ib_spec->type = kern_spec->type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		ib_spec->eth.size = sizeof(struct ib_flow_spec_eth);
		if (ib_spec->eth.size != kern_spec->eth.size)
			return -EINVAL;
		memcpy(&ib_spec->eth.val, &kern_spec->eth.val,
		       sizeof(struct ib_flow_eth_filter));
		memcpy(&ib_spec->eth.mask, &kern_spec->eth.mask,
		       sizeof(struct ib_flow_eth_filter));
		break;
	case IB_FLOW_SPEC_IPV4:
		ib_spec->ipv4.size = sizeof(struct ib_flow_spec_ipv4);
		if (ib_spec->ipv4.size != kern_spec->ipv4.size)
			return -EINVAL;
		memcpy(&ib_spec->ipv4.val, &kern_spec->ipv4.val,
		       sizeof(struct ib_flow_ipv4_filter));
		memcpy(&ib_spec->ipv4.mask, &kern_spec->ipv4.mask,
		       sizeof(struct ib_flow_ipv4_filter));
		break;
	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		ib_spec->tcp_udp.size = sizeof(struct ib_flow_spec_tcp_udp);
		if (ib_spec->tcp_udp.size != kern_spec->tcp_udp.size)
			return -EINVAL;
		memcpy(&ib_spec->tcp_udp.val, &kern_spec->tcp_udp.val,
		       sizeof(struct ib_flow_tcp_udp_filter));
		memcpy(&ib_spec->tcp_udp.mask, &kern_spec->tcp_udp.mask,
		       sizeof(struct ib_flow_tcp_udp_filter));
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

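/*
 * Editorial sketch (not in the original file) of the variable-length
 * flow attribute this helper consumes.  Userspace sends one
 * ib_uverbs_flow_attr header followed by num_of_specs packed specs,
 * each beginning with its own {type, size}:
 *
 *	+---------------------------+
 *	| ib_uverbs_flow_attr       |  num_of_specs = 2, size = total
 *	+---------------------------+
 *	| spec 0: IB_FLOW_SPEC_ETH  |  eth.size bytes (val + mask)
 *	+---------------------------+
 *	| spec 1: IB_FLOW_SPEC_IPV4 |  ipv4.size bytes (val + mask)
 *	+---------------------------+
 *
 * ib_uverbs_ex_create_flow() below walks this buffer spec by spec,
 * converting each with kern_spec_to_ib_spec() and verifying that the
 * advertised sizes add up exactly.
 */
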
int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
			     struct ib_udata *ucore,
			     struct ib_udata *uhw)
{
	struct ib_uverbs_create_flow	  cmd;
	struct ib_uverbs_create_flow_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_flow			 *flow_id;
	struct ib_uverbs_flow_attr	 *kern_flow_attr;
	struct ib_flow_attr		 *flow_attr;
	struct ib_qp			 *qp;
	int err = 0;
	void *kern_spec;
	void *ib_spec;
	int i;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	if (ucore->outlen < sizeof(resp))
		return -ENOSPC;

	err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (err)
		return err;

	ucore->inbuf += sizeof(cmd);
	ucore->inlen -= sizeof(cmd);

	if (cmd.comp_mask)
		return -EINVAL;

	if ((cmd.flow_attr.type == IB_FLOW_ATTR_SNIFFER &&
	     !capable(CAP_NET_ADMIN)) || !capable(CAP_NET_RAW))
		return -EPERM;

	if (cmd.flow_attr.num_of_specs > IB_FLOW_SPEC_SUPPORT_LAYERS)
		return -EINVAL;

	if (cmd.flow_attr.size > ucore->inlen ||
	    cmd.flow_attr.size >
	    (cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
		return -EINVAL;

	if (cmd.flow_attr.reserved[0] ||
	    cmd.flow_attr.reserved[1])
		return -EINVAL;

	if (cmd.flow_attr.num_of_specs) {
		kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
					 GFP_KERNEL);
		if (!kern_flow_attr)
			return -ENOMEM;

		memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
		err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
					 cmd.flow_attr.size);
		if (err)
			goto err_free_attr;
	} else {
		kern_flow_attr = &cmd.flow_attr;
	}

	uobj = kmalloc(sizeof(*uobj), GFP_KERNEL);
	if (!uobj) {
		err = -ENOMEM;
		goto err_free_attr;
	}
	init_uobj(uobj, 0, file->ucontext, &rule_lock_class);
	down_write(&uobj->mutex);

	qp = idr_read_qp(cmd.qp_handle, file->ucontext);
	if (!qp) {
		err = -EINVAL;
		goto err_uobj;
	}

	flow_attr = kmalloc(sizeof(*flow_attr) + cmd.flow_attr.size, GFP_KERNEL);
	if (!flow_attr) {
		err = -ENOMEM;
		goto err_put;
	}

	flow_attr->type = kern_flow_attr->type;
	flow_attr->priority = kern_flow_attr->priority;
	flow_attr->num_of_specs = kern_flow_attr->num_of_specs;
	flow_attr->port = kern_flow_attr->port;
	flow_attr->flags = kern_flow_attr->flags;
	flow_attr->size = sizeof(*flow_attr);

	kern_spec = kern_flow_attr + 1;
	ib_spec = flow_attr + 1;
	for (i = 0; i < flow_attr->num_of_specs &&
	     cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
	     cmd.flow_attr.size >=
	     ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
		err = kern_spec_to_ib_spec(kern_spec, ib_spec);
		if (err)
			goto err_free;
		flow_attr->size +=
			((union ib_flow_spec *) ib_spec)->size;
		cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
		kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
		ib_spec += ((union ib_flow_spec *) ib_spec)->size;
	}
	if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
		pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
			i, cmd.flow_attr.size);
		err = -EINVAL;
		goto err_free;
	}
	flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
	if (IS_ERR(flow_id)) {
		err = PTR_ERR(flow_id);
		goto err_free;
	}
	flow_id->qp = qp;
	flow_id->uobject = uobj;
	uobj->object = flow_id;

	err = idr_add_uobj(&ib_uverbs_rule_idr, uobj);
	if (err)
		goto destroy_flow;

	memset(&resp, 0, sizeof(resp));
	resp.flow_handle = uobj->id;

	err = ib_copy_to_udata(ucore,
			       &resp, sizeof(resp));
	if (err)
		goto err_copy;

	put_qp_read(qp);
	mutex_lock(&file->mutex);
	list_add_tail(&uobj->list, &file->ucontext->rule_list);
	mutex_unlock(&file->mutex);

	uobj->live = 1;

	up_write(&uobj->mutex);
	kfree(flow_attr);
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);
destroy_flow:
	ib_destroy_flow(flow_id);
err_free:
	kfree(flow_attr);
err_put:
	put_qp_read(qp);
err_uobj:
	put_uobj_write(uobj);
err_free_attr:
	if (cmd.flow_attr.num_of_specs)
		kfree(kern_flow_attr);
	return err;
}

int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
			      struct ib_udata *ucore,
			      struct ib_udata *uhw)
{
	struct ib_uverbs_destroy_flow	cmd;
	struct ib_flow			*flow_id;
	struct ib_uobject		*uobj;
	int				ret;

	if (ucore->inlen < sizeof(cmd))
		return -EINVAL;

	ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
	if (ret)
		return ret;

	if (cmd.comp_mask)
		return -EINVAL;

	uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
			      file->ucontext);
	if (!uobj)
		return -EINVAL;
	flow_id = uobj->object;

	ret = ib_destroy_flow(flow_id);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	idr_remove_uobj(&ib_uverbs_rule_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	put_uobj(uobj);

	return ret;
}

static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
				struct ib_uverbs_create_xsrq *cmd,
				struct ib_udata *udata)
{
	struct ib_uverbs_create_srq_resp resp;
	struct ib_usrq_object           *obj;
	struct ib_pd                    *pd;
	struct ib_srq                   *srq;
	struct ib_uobject               *uninitialized_var(xrcd_uobj);
	struct ib_srq_init_attr          attr;
	int ret;

	obj = kmalloc(sizeof *obj, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	init_uobj(&obj->uevent.uobject, cmd->user_handle, file->ucontext, &srq_lock_class);
	down_write(&obj->uevent.uobject.mutex);

	if (cmd->srq_type == IB_SRQT_XRC) {
		attr.ext.xrc.xrcd = idr_read_xrcd(cmd->xrcd_handle, file->ucontext, &xrcd_uobj);
		if (!attr.ext.xrc.xrcd) {
			ret = -EINVAL;
			goto err;
		}

		obj->uxrcd = container_of(xrcd_uobj, struct ib_uxrcd_object, uobject);
		atomic_inc(&obj->uxrcd->refcnt);

		attr.ext.xrc.cq = idr_read_cq(cmd->cq_handle, file->ucontext, 0);
		if (!attr.ext.xrc.cq) {
			ret = -EINVAL;
			goto err_put_xrcd;
		}
	}

	pd = idr_read_pd(cmd->pd_handle, file->ucontext);
	if (!pd) {
		ret = -EINVAL;
		goto err_put_cq;
	}

	attr.event_handler  = ib_uverbs_srq_event_handler;
	attr.srq_context    = file;
	attr.srq_type       = cmd->srq_type;
	attr.attr.max_wr    = cmd->max_wr;
	attr.attr.max_sge   = cmd->max_sge;
	attr.attr.srq_limit = cmd->srq_limit;

	obj->uevent.events_reported = 0;
	INIT_LIST_HEAD(&obj->uevent.event_list);

	srq = pd->device->create_srq(pd, &attr, udata);
	if (IS_ERR(srq)) {
		ret = PTR_ERR(srq);
		goto err_put;
	}

	srq->device        = pd->device;
	srq->pd            = pd;
	srq->srq_type	   = cmd->srq_type;
	srq->uobject       = &obj->uevent.uobject;
	srq->event_handler = attr.event_handler;
	srq->srq_context   = attr.srq_context;

	if (cmd->srq_type == IB_SRQT_XRC) {
		srq->ext.xrc.cq   = attr.ext.xrc.cq;
		srq->ext.xrc.xrcd = attr.ext.xrc.xrcd;
		atomic_inc(&attr.ext.xrc.cq->usecnt);
		atomic_inc(&attr.ext.xrc.xrcd->usecnt);
	}

	atomic_inc(&pd->usecnt);
	atomic_set(&srq->usecnt, 0);

	obj->uevent.uobject.object = srq;
	ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);
	if (ret)
		goto err_destroy;

	memset(&resp, 0, sizeof resp);
	resp.srq_handle = obj->uevent.uobject.id;
	resp.max_wr     = attr.attr.max_wr;
	resp.max_sge    = attr.attr.max_sge;
	if (cmd->srq_type == IB_SRQT_XRC)
		resp.srqn = srq->ext.xrc.srq_num;

	if (copy_to_user((void __user *) (unsigned long) cmd->response,
			 &resp, sizeof resp)) {
		ret = -EFAULT;
		goto err_copy;
	}

	if (cmd->srq_type == IB_SRQT_XRC) {
		put_uobj_read(xrcd_uobj);
		put_cq_read(attr.ext.xrc.cq);
	}
	put_pd_read(pd);

	mutex_lock(&file->mutex);
	list_add_tail(&obj->uevent.uobject.list, &file->ucontext->srq_list);
	mutex_unlock(&file->mutex);

	obj->uevent.uobject.live = 1;

	up_write(&obj->uevent.uobject.mutex);

	return 0;

err_copy:
	idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uevent.uobject);

err_destroy:
	ib_destroy_srq(srq);

err_put:
	put_pd_read(pd);

err_put_cq:
	if (cmd->srq_type == IB_SRQT_XRC)
		put_cq_read(attr.ext.xrc.cq);

err_put_xrcd:
	if (cmd->srq_type == IB_SRQT_XRC) {
		atomic_dec(&obj->uxrcd->refcnt);
		put_uobj_read(xrcd_uobj);
	}

err:
	put_uobj_write(&obj->uevent.uobject);
	return ret;
}

ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_create_srq      cmd;
	struct ib_uverbs_create_xsrq     xcmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	xcmd.response	 = cmd.response;
	xcmd.user_handle = cmd.user_handle;
	xcmd.srq_type	 = IB_SRQT_BASIC;
	xcmd.pd_handle	 = cmd.pd_handle;
	xcmd.max_wr	 = cmd.max_wr;
	xcmd.max_sge	 = cmd.max_sge;
	xcmd.srq_limit	 = cmd.srq_limit;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &xcmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_create_xsrq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len, int out_len)
{
	struct ib_uverbs_create_xsrq     cmd;
	struct ib_uverbs_create_srq_resp resp;
	struct ib_udata                  udata;
	int ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd,
		   (unsigned long) cmd.response + sizeof resp,
		   in_len - sizeof cmd, out_len - sizeof resp);

	ret = __uverbs_create_xsrq(file, &cmd, &udata);
	if (ret)
		return ret;

	return in_len;
}

ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
			     const char __user *buf, int in_len,
			     int out_len)
{
	struct ib_uverbs_modify_srq cmd;
	struct ib_udata             udata;
	struct ib_srq              *srq;
	struct ib_srq_attr          attr;
	int                         ret;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
		   out_len);

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	attr.max_wr    = cmd.max_wr;
	attr.srq_limit = cmd.srq_limit;

	ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);

	put_srq_read(srq);

	return ret ? ret : in_len;
}

ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
			    const char __user *buf,
			    int in_len, int out_len)
{
	struct ib_uverbs_query_srq      cmd;
	struct ib_uverbs_query_srq_resp resp;
	struct ib_srq_attr              attr;
	struct ib_srq                   *srq;
	int                             ret;

	if (out_len < sizeof resp)
		return -ENOSPC;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	srq = idr_read_srq(cmd.srq_handle, file->ucontext);
	if (!srq)
		return -EINVAL;

	ret = ib_query_srq(srq, &attr);

	put_srq_read(srq);

	if (ret)
		return ret;

	memset(&resp, 0, sizeof resp);

	resp.max_wr    = attr.max_wr;
	resp.max_sge   = attr.max_sge;
	resp.srq_limit = attr.srq_limit;

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		return -EFAULT;

	return in_len;
}

ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
			      const char __user *buf, int in_len,
			      int out_len)
{
	struct ib_uverbs_destroy_srq      cmd;
	struct ib_uverbs_destroy_srq_resp resp;
	struct ib_uobject		 *uobj;
	struct ib_srq			 *srq;
	struct ib_uevent_object		 *obj;
	int				  ret = -EINVAL;
	struct ib_usrq_object		 *us;
	enum ib_srq_type		  srq_type;

	if (copy_from_user(&cmd, buf, sizeof cmd))
		return -EFAULT;

	uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
	if (!uobj)
		return -EINVAL;
	srq = uobj->object;
	obj = container_of(uobj, struct ib_uevent_object, uobject);
	srq_type = srq->srq_type;

	ret = ib_destroy_srq(srq);
	if (!ret)
		uobj->live = 0;

	put_uobj_write(uobj);

	if (ret)
		return ret;

	if (srq_type == IB_SRQT_XRC) {
		us = container_of(obj, struct ib_usrq_object, uevent);
		atomic_dec(&us->uxrcd->refcnt);
	}

	idr_remove_uobj(&ib_uverbs_srq_idr, uobj);

	mutex_lock(&file->mutex);
	list_del(&uobj->list);
	mutex_unlock(&file->mutex);

	ib_uverbs_release_uevent(file, obj);

	memset(&resp, 0, sizeof resp);
	resp.events_reported = obj->events_reported;

	put_uobj(uobj);

	if (copy_to_user((void __user *) (unsigned long) cmd.response,
			 &resp, sizeof resp))
		ret = -EFAULT;

	return ret ? ret : in_len;
}