2 * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#include <rdma/uverbs_std_types.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <linux/bug.h>
#include <linux/file.h>
#include <rdma/restrack.h>
#include "rdma_core.h"
#include "uverbs.h"
42 static int uverbs_free_ah(struct ib_uobject *uobject,
43 enum rdma_remove_reason why)
45 return rdma_destroy_ah((struct ib_ah *)uobject->object);
48 static int uverbs_free_flow(struct ib_uobject *uobject,
49 enum rdma_remove_reason why)
51 return ib_destroy_flow((struct ib_flow *)uobject->object);
54 static int uverbs_free_mw(struct ib_uobject *uobject,
55 enum rdma_remove_reason why)
57 return uverbs_dealloc_mw((struct ib_mw *)uobject->object);
60 static int uverbs_free_qp(struct ib_uobject *uobject,
61 enum rdma_remove_reason why)
63 struct ib_qp *qp = uobject->object;
64 struct ib_uqp_object *uqp =
65 container_of(uobject, struct ib_uqp_object, uevent.uobject);
68 if (why == RDMA_REMOVE_DESTROY) {
69 if (!list_empty(&uqp->mcast_list))
71 } else if (qp == qp->real_qp) {
72 ib_uverbs_detach_umcast(qp, uqp);
75 ret = ib_destroy_qp(qp);
76 if (ret && why == RDMA_REMOVE_DESTROY)
80 atomic_dec(&uqp->uxrcd->refcnt);
82 ib_uverbs_release_uevent(uobject->context->ufile, &uqp->uevent);
86 static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
87 enum rdma_remove_reason why)
89 struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object;
90 struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
93 ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
94 if (!ret || why != RDMA_REMOVE_DESTROY)
99 static int uverbs_free_wq(struct ib_uobject *uobject,
100 enum rdma_remove_reason why)
102 struct ib_wq *wq = uobject->object;
103 struct ib_uwq_object *uwq =
104 container_of(uobject, struct ib_uwq_object, uevent.uobject);
107 ret = ib_destroy_wq(wq);
108 if (!ret || why != RDMA_REMOVE_DESTROY)
109 ib_uverbs_release_uevent(uobject->context->ufile, &uwq->uevent);
113 static int uverbs_free_srq(struct ib_uobject *uobject,
114 enum rdma_remove_reason why)
116 struct ib_srq *srq = uobject->object;
117 struct ib_uevent_object *uevent =
118 container_of(uobject, struct ib_uevent_object, uobject);
119 enum ib_srq_type srq_type = srq->srq_type;
122 ret = ib_destroy_srq(srq);
124 if (ret && why == RDMA_REMOVE_DESTROY)
127 if (srq_type == IB_SRQT_XRC) {
128 struct ib_usrq_object *us =
129 container_of(uevent, struct ib_usrq_object, uevent);
131 atomic_dec(&us->uxrcd->refcnt);
134 ib_uverbs_release_uevent(uobject->context->ufile, uevent);
138 static int uverbs_free_cq(struct ib_uobject *uobject,
139 enum rdma_remove_reason why)
141 struct ib_cq *cq = uobject->object;
142 struct ib_uverbs_event_queue *ev_queue = cq->cq_context;
143 struct ib_ucq_object *ucq =
144 container_of(uobject, struct ib_ucq_object, uobject);
147 ret = ib_destroy_cq(cq);
148 if (!ret || why != RDMA_REMOVE_DESTROY)
149 ib_uverbs_release_ucq(uobject->context->ufile, ev_queue ?
150 container_of(ev_queue,
151 struct ib_uverbs_completion_event_file,
157 static int uverbs_free_mr(struct ib_uobject *uobject,
158 enum rdma_remove_reason why)
160 return ib_dereg_mr((struct ib_mr *)uobject->object);
163 static int uverbs_free_xrcd(struct ib_uobject *uobject,
164 enum rdma_remove_reason why)
166 struct ib_xrcd *xrcd = uobject->object;
167 struct ib_uxrcd_object *uxrcd =
168 container_of(uobject, struct ib_uxrcd_object, uobject);
171 mutex_lock(&uobject->context->ufile->device->xrcd_tree_mutex);
172 if (why == RDMA_REMOVE_DESTROY && atomic_read(&uxrcd->refcnt))
175 ret = ib_uverbs_dealloc_xrcd(uobject->context->ufile->device,
177 mutex_unlock(&uobject->context->ufile->device->xrcd_tree_mutex);
182 static int uverbs_free_pd(struct ib_uobject *uobject,
183 enum rdma_remove_reason why)
185 struct ib_pd *pd = uobject->object;
187 if (why == RDMA_REMOVE_DESTROY && atomic_read(&pd->usecnt))
190 ib_dealloc_pd((struct ib_pd *)uobject->object);
194 static int uverbs_hot_unplug_completion_event_file(struct ib_uobject_file *uobj_file,
195 enum rdma_remove_reason why)
197 struct ib_uverbs_completion_event_file *comp_event_file =
198 container_of(uobj_file, struct ib_uverbs_completion_event_file,
200 struct ib_uverbs_event_queue *event_queue = &comp_event_file->ev_queue;
202 spin_lock_irq(&event_queue->lock);
203 event_queue->is_closed = 1;
204 spin_unlock_irq(&event_queue->lock);
206 if (why == RDMA_REMOVE_DRIVER_REMOVE) {
207 wake_up_interruptible(&event_queue->poll_wait);
208 kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
/*
 * This spec is used in order to pass information to the hardware driver in a
 * legacy way. Every verb that could get driver specific data should get this
 * spec.
 */
static const struct uverbs_attr_def uverbs_uhw_compat_in =
	UVERBS_ATTR_PTR_IN_SZ(UVERBS_UHW_IN, 0, UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ));
static const struct uverbs_attr_def uverbs_uhw_compat_out =
	UVERBS_ATTR_PTR_OUT_SZ(UVERBS_UHW_OUT, 0, UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ));
223 static void create_udata(struct uverbs_attr_bundle *ctx,
224 struct ib_udata *udata)
227 * This is for ease of conversion. The purpose is to convert all drivers
228 * to use uverbs_attr_bundle instead of ib_udata.
229 * Assume attr == 0 is input and attr == 1 is output.
231 const struct uverbs_attr *uhw_in =
232 uverbs_attr_get(ctx, UVERBS_UHW_IN);
233 const struct uverbs_attr *uhw_out =
234 uverbs_attr_get(ctx, UVERBS_UHW_OUT);
236 if (!IS_ERR(uhw_in)) {
237 udata->inlen = uhw_in->ptr_attr.len;
238 if (uverbs_attr_ptr_is_inline(uhw_in))
239 udata->inbuf = &uhw_in->uattr->data;
241 udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
247 if (!IS_ERR(uhw_out)) {
248 udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
249 udata->outlen = uhw_out->ptr_attr.len;
251 udata->outbuf = NULL;
256 static int uverbs_create_cq_handler(struct ib_device *ib_dev,
257 struct ib_uverbs_file *file,
258 struct uverbs_attr_bundle *attrs)
260 struct ib_ucontext *ucontext = file->ucontext;
261 struct ib_ucq_object *obj;
265 struct ib_cq_init_attr attr = {};
267 struct ib_uverbs_completion_event_file *ev_file = NULL;
268 const struct uverbs_attr *ev_file_attr;
269 struct ib_uobject *ev_file_uobj;
271 if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_CREATE_CQ))
274 ret = uverbs_copy_from(&attr.comp_vector, attrs, CREATE_CQ_COMP_VECTOR);
276 ret = uverbs_copy_from(&attr.cqe, attrs, CREATE_CQ_CQE);
278 ret = uverbs_copy_from(&user_handle, attrs, CREATE_CQ_USER_HANDLE);
282 /* Optional param, if it doesn't exist, we get -ENOENT and skip it */
283 if (uverbs_copy_from(&attr.flags, attrs, CREATE_CQ_FLAGS) == -EFAULT)
286 ev_file_attr = uverbs_attr_get(attrs, CREATE_CQ_COMP_CHANNEL);
287 if (!IS_ERR(ev_file_attr)) {
288 ev_file_uobj = ev_file_attr->obj_attr.uobject;
290 ev_file = container_of(ev_file_uobj,
291 struct ib_uverbs_completion_event_file,
293 uverbs_uobject_get(ev_file_uobj);
296 if (attr.comp_vector >= ucontext->ufile->device->num_comp_vectors) {
301 obj = container_of(uverbs_attr_get(attrs, CREATE_CQ_HANDLE)->obj_attr.uobject,
302 typeof(*obj), uobject);
303 obj->uverbs_file = ucontext->ufile;
304 obj->comp_events_reported = 0;
305 obj->async_events_reported = 0;
306 INIT_LIST_HEAD(&obj->comp_list);
307 INIT_LIST_HEAD(&obj->async_list);
309 /* Temporary, only until drivers get the new uverbs_attr_bundle */
310 create_udata(attrs, &uhw);
312 cq = ib_dev->create_cq(ib_dev, &attr, ucontext, &uhw);
319 cq->uobject = &obj->uobject;
320 cq->comp_handler = ib_uverbs_comp_handler;
321 cq->event_handler = ib_uverbs_cq_event_handler;
322 cq->cq_context = ev_file ? &ev_file->ev_queue : NULL;
323 obj->uobject.object = cq;
324 obj->uobject.user_handle = user_handle;
325 atomic_set(&cq->usecnt, 0);
326 cq->res.type = RDMA_RESTRACK_CQ;
327 rdma_restrack_add(&cq->res);
329 ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe,
340 uverbs_uobject_put(ev_file_uobj);
344 static DECLARE_UVERBS_METHOD(
345 uverbs_method_cq_create, UVERBS_CQ_CREATE, uverbs_create_cq_handler,
346 &UVERBS_ATTR_IDR(CREATE_CQ_HANDLE, UVERBS_OBJECT_CQ, UVERBS_ACCESS_NEW,
347 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
348 &UVERBS_ATTR_PTR_IN(CREATE_CQ_CQE, u32,
349 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
350 &UVERBS_ATTR_PTR_IN(CREATE_CQ_USER_HANDLE, u64,
351 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
352 &UVERBS_ATTR_FD(CREATE_CQ_COMP_CHANNEL, UVERBS_OBJECT_COMP_CHANNEL,
354 &UVERBS_ATTR_PTR_IN(CREATE_CQ_COMP_VECTOR, u32,
355 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
356 &UVERBS_ATTR_PTR_IN(CREATE_CQ_FLAGS, u32),
357 &UVERBS_ATTR_PTR_OUT(CREATE_CQ_RESP_CQE, u32,
358 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
359 &uverbs_uhw_compat_in, &uverbs_uhw_compat_out);
361 static int uverbs_destroy_cq_handler(struct ib_device *ib_dev,
362 struct ib_uverbs_file *file,
363 struct uverbs_attr_bundle *attrs)
365 struct ib_uverbs_destroy_cq_resp resp;
366 struct ib_uobject *uobj =
367 uverbs_attr_get(attrs, DESTROY_CQ_HANDLE)->obj_attr.uobject;
368 struct ib_ucq_object *obj = container_of(uobj, struct ib_ucq_object,
372 if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_DESTROY_CQ))
375 ret = rdma_explicit_destroy(uobj);
379 resp.comp_events_reported = obj->comp_events_reported;
380 resp.async_events_reported = obj->async_events_reported;
382 return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp, sizeof(resp));
/*
 * Method spec for CQ destroy: mandatory CQ handle with destroy access,
 * plus a mandatory response buffer for the event counters.
 */
static DECLARE_UVERBS_METHOD(
	uverbs_method_cq_destroy, UVERBS_CQ_DESTROY, uverbs_destroy_cq_handler,
	&UVERBS_ATTR_IDR(DESTROY_CQ_HANDLE, UVERBS_OBJECT_CQ,
			 UVERBS_ACCESS_DESTROY,
			 UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
	&UVERBS_ATTR_PTR_OUT(DESTROY_CQ_RESP, struct ib_uverbs_destroy_cq_resp,
			     UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));
393 DECLARE_UVERBS_OBJECT(uverbs_object_comp_channel,
394 UVERBS_OBJECT_COMP_CHANNEL,
395 &UVERBS_TYPE_ALLOC_FD(0,
396 sizeof(struct ib_uverbs_completion_event_file),
397 uverbs_hot_unplug_completion_event_file,
399 "[infinibandevent]", O_RDONLY));
401 DECLARE_UVERBS_OBJECT(uverbs_object_cq, UVERBS_OBJECT_CQ,
402 &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object), 0,
404 &uverbs_method_cq_create,
405 &uverbs_method_cq_destroy);
407 DECLARE_UVERBS_OBJECT(uverbs_object_qp, UVERBS_OBJECT_QP,
408 &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), 0,
/* MW objects use destruction order 0 so they go before the MR below. */
DECLARE_UVERBS_OBJECT(uverbs_object_mw, UVERBS_OBJECT_MW,
		      &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_mw));
DECLARE_UVERBS_OBJECT(uverbs_object_mr, UVERBS_OBJECT_MR,
		      /* 1 is used in order to free the MR after all the MWs */
		      &UVERBS_TYPE_ALLOC_IDR(1, uverbs_free_mr));
418 DECLARE_UVERBS_OBJECT(uverbs_object_srq, UVERBS_OBJECT_SRQ,
419 &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object), 0,
/* AH objects: plain IDR type freed via uverbs_free_ah(). */
DECLARE_UVERBS_OBJECT(uverbs_object_ah, UVERBS_OBJECT_AH,
		      &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_ah));
/* Flow objects: plain IDR type freed via uverbs_free_flow(). */
DECLARE_UVERBS_OBJECT(uverbs_object_flow, UVERBS_OBJECT_FLOW,
		      &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_flow));
428 DECLARE_UVERBS_OBJECT(uverbs_object_wq, UVERBS_OBJECT_WQ,
429 &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), 0,
/* RWQ indirection tables: plain IDR type freed via uverbs_free_rwq_ind_tbl(). */
DECLARE_UVERBS_OBJECT(uverbs_object_rwq_ind_table,
		      UVERBS_OBJECT_RWQ_IND_TBL,
		      &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_rwq_ind_tbl));
436 DECLARE_UVERBS_OBJECT(uverbs_object_xrcd, UVERBS_OBJECT_XRCD,
437 &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object), 0,
DECLARE_UVERBS_OBJECT(uverbs_object_pd, UVERBS_OBJECT_PD,
		      /* 2 is used in order to free the PD after MRs */
		      &UVERBS_TYPE_ALLOC_IDR(2, uverbs_free_pd));
/* The device object has no allocation type (NULL): it is never instantiated. */
DECLARE_UVERBS_OBJECT(uverbs_object_device, UVERBS_OBJECT_DEVICE, NULL);
446 DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects,
447 &uverbs_object_device,
450 &uverbs_object_comp_channel,
458 &uverbs_object_rwq_ind_table,
459 &uverbs_object_xrcd);