drivers/infiniband/core/uverbs_std_types.c
/*
 * Copyright (c) 2017, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/uverbs_std_types.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <linux/bug.h>
#include <linux/file.h>
#include <rdma/restrack.h>
#include "rdma_core.h"
#include "uverbs.h"

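/*
 * Cleanup callbacks for the standard uverbs object types. Each one runs
 * when its uobject is removed; @why says whether this is an explicit
 * destroy requested by userspace (RDMA_REMOVE_DESTROY) or a forced
 * cleanup such as context teardown or driver removal, so a callback can
 * refuse a busy object in the first case but must tear it down in the
 * others.
 */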
static int uverbs_free_ah(struct ib_uobject *uobject,
                          enum rdma_remove_reason why)
{
        return rdma_destroy_ah((struct ib_ah *)uobject->object);
}

static int uverbs_free_flow(struct ib_uobject *uobject,
                            enum rdma_remove_reason why)
{
        return ib_destroy_flow((struct ib_flow *)uobject->object);
}

static int uverbs_free_mw(struct ib_uobject *uobject,
                          enum rdma_remove_reason why)
{
        return uverbs_dealloc_mw((struct ib_mw *)uobject->object);
}

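/*
 * An explicit destroy of a QP that still has multicast attachments is
 * refused with -EBUSY; on forced cleanup the attachments of the real QP
 * are detached here so the QP can always be destroyed.
 */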
static int uverbs_free_qp(struct ib_uobject *uobject,
                          enum rdma_remove_reason why)
{
        struct ib_qp *qp = uobject->object;
        struct ib_uqp_object *uqp =
                container_of(uobject, struct ib_uqp_object, uevent.uobject);
        int ret;

        if (why == RDMA_REMOVE_DESTROY) {
                if (!list_empty(&uqp->mcast_list))
                        return -EBUSY;
        } else if (qp == qp->real_qp) {
                ib_uverbs_detach_umcast(qp, uqp);
        }

        ret = ib_destroy_qp(qp);
        if (ret && why == RDMA_REMOVE_DESTROY)
                return ret;

        if (uqp->uxrcd)
                atomic_dec(&uqp->uxrcd->refcnt);

        ib_uverbs_release_uevent(uobject->context->ufile, &uqp->uevent);
        return ret;
}

static int uverbs_free_rwq_ind_tbl(struct ib_uobject *uobject,
                                   enum rdma_remove_reason why)
{
        struct ib_rwq_ind_table *rwq_ind_tbl = uobject->object;
        struct ib_wq **ind_tbl = rwq_ind_tbl->ind_tbl;
        int ret;

        ret = ib_destroy_rwq_ind_table(rwq_ind_tbl);
        if (!ret || why != RDMA_REMOVE_DESTROY)
                kfree(ind_tbl);
        return ret;
}

static int uverbs_free_wq(struct ib_uobject *uobject,
                          enum rdma_remove_reason why)
{
        struct ib_wq *wq = uobject->object;
        struct ib_uwq_object *uwq =
                container_of(uobject, struct ib_uwq_object, uevent.uobject);
        int ret;

        ret = ib_destroy_wq(wq);
        if (!ret || why != RDMA_REMOVE_DESTROY)
                ib_uverbs_release_uevent(uobject->context->ufile, &uwq->uevent);
        return ret;
}

static int uverbs_free_srq(struct ib_uobject *uobject,
                           enum rdma_remove_reason why)
{
        struct ib_srq *srq = uobject->object;
        struct ib_uevent_object *uevent =
                container_of(uobject, struct ib_uevent_object, uobject);
        enum ib_srq_type srq_type = srq->srq_type;
        int ret;

        ret = ib_destroy_srq(srq);

        if (ret && why == RDMA_REMOVE_DESTROY)
                return ret;

        if (srq_type == IB_SRQT_XRC) {
                struct ib_usrq_object *us =
                        container_of(uevent, struct ib_usrq_object, uevent);

                atomic_dec(&us->uxrcd->refcnt);
        }

        ib_uverbs_release_uevent(uobject->context->ufile, uevent);
        return ret;
}

static int uverbs_free_cq(struct ib_uobject *uobject,
                          enum rdma_remove_reason why)
{
        struct ib_cq *cq = uobject->object;
        struct ib_uverbs_event_queue *ev_queue = cq->cq_context;
        struct ib_ucq_object *ucq =
                container_of(uobject, struct ib_ucq_object, uobject);
        int ret;

        ret = ib_destroy_cq(cq);
        if (!ret || why != RDMA_REMOVE_DESTROY)
                ib_uverbs_release_ucq(uobject->context->ufile, ev_queue ?
                                      container_of(ev_queue,
                                                   struct ib_uverbs_completion_event_file,
                                                   ev_queue) : NULL,
                                      ucq);
        return ret;
}

static int uverbs_free_mr(struct ib_uobject *uobject,
                          enum rdma_remove_reason why)
{
        return ib_dereg_mr((struct ib_mr *)uobject->object);
}

static int uverbs_free_xrcd(struct ib_uobject *uobject,
                            enum rdma_remove_reason why)
{
        struct ib_xrcd *xrcd = uobject->object;
        struct ib_uxrcd_object *uxrcd =
                container_of(uobject, struct ib_uxrcd_object, uobject);
        int ret;

        mutex_lock(&uobject->context->ufile->device->xrcd_tree_mutex);
        if (why == RDMA_REMOVE_DESTROY && atomic_read(&uxrcd->refcnt))
                ret = -EBUSY;
        else
                ret = ib_uverbs_dealloc_xrcd(uobject->context->ufile->device,
                                             xrcd, why);
        mutex_unlock(&uobject->context->ufile->device->xrcd_tree_mutex);

        return ret;
}

static int uverbs_free_pd(struct ib_uobject *uobject,
                          enum rdma_remove_reason why)
{
        struct ib_pd *pd = uobject->object;

        if (why == RDMA_REMOVE_DESTROY && atomic_read(&pd->usecnt))
                return -EBUSY;

        ib_dealloc_pd(pd);
        return 0;
}

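/*
 * Completion event channels are FD-based uobjects. On removal, mark the
 * event queue closed; if the device is being hot-unplugged
 * (RDMA_REMOVE_DRIVER_REMOVE), also wake any pollers and raise SIGIO so
 * userspace notices that the file is dead.
 */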
static int uverbs_hot_unplug_completion_event_file(struct ib_uobject_file *uobj_file,
                                                   enum rdma_remove_reason why)
{
        struct ib_uverbs_completion_event_file *comp_event_file =
                container_of(uobj_file, struct ib_uverbs_completion_event_file,
                             uobj_file);
        struct ib_uverbs_event_queue *event_queue = &comp_event_file->ev_queue;

        spin_lock_irq(&event_queue->lock);
        event_queue->is_closed = 1;
        spin_unlock_irq(&event_queue->lock);

        if (why == RDMA_REMOVE_DRIVER_REMOVE) {
                wake_up_interruptible(&event_queue->poll_wait);
                kill_fasync(&event_queue->async_queue, SIGIO, POLL_IN);
        }
        return 0;
}

/*
 * These specs are used to pass information to the hardware driver in a
 * legacy way. Every verb that could get driver-specific data should get
 * them.
 */
static const struct uverbs_attr_def uverbs_uhw_compat_in =
        UVERBS_ATTR_PTR_IN_SZ(UVERBS_UHW_IN, 0, UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ));
static const struct uverbs_attr_def uverbs_uhw_compat_out =
        UVERBS_ATTR_PTR_OUT_SZ(UVERBS_UHW_OUT, 0, UA_FLAGS(UVERBS_ATTR_SPEC_F_MIN_SZ));

static void create_udata(struct uverbs_attr_bundle *ctx,
                         struct ib_udata *udata)
{
        /*
         * This is for ease of conversion. The purpose is to convert all drivers
         * to use uverbs_attr_bundle instead of ib_udata.
         * Assume attr == 0 is input and attr == 1 is output.
         */
        const struct uverbs_attr *uhw_in =
                uverbs_attr_get(ctx, UVERBS_UHW_IN);
        const struct uverbs_attr *uhw_out =
                uverbs_attr_get(ctx, UVERBS_UHW_OUT);

        if (!IS_ERR(uhw_in)) {
                udata->inlen = uhw_in->ptr_attr.len;
                if (uverbs_attr_ptr_is_inline(uhw_in))
                        udata->inbuf = &uhw_in->uattr->data;
                else
                        udata->inbuf = u64_to_user_ptr(uhw_in->ptr_attr.data);
        } else {
                udata->inbuf = NULL;
                udata->inlen = 0;
        }

        if (!IS_ERR(uhw_out)) {
                udata->outbuf = u64_to_user_ptr(uhw_out->ptr_attr.data);
                udata->outlen = uhw_out->ptr_attr.len;
        } else {
                udata->outbuf = NULL;
                udata->outlen = 0;
        }
}

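/*
 * Ioctl-based CQ creation: copy the mandatory attributes (comp vector,
 * CQE count, user handle), honour the optional flags and completion
 * channel, build a compat ib_udata from the UHW attributes, call the
 * driver's create_cq(), and report the actual CQE count back to
 * userspace.
 */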
static int uverbs_create_cq_handler(struct ib_device *ib_dev,
                                    struct ib_uverbs_file *file,
                                    struct uverbs_attr_bundle *attrs)
{
        struct ib_ucontext *ucontext = file->ucontext;
        struct ib_ucq_object           *obj;
        struct ib_udata uhw;
        int ret;
        u64 user_handle;
        struct ib_cq_init_attr attr = {};
        struct ib_cq                   *cq;
        struct ib_uverbs_completion_event_file    *ev_file = NULL;
        const struct uverbs_attr *ev_file_attr;
        struct ib_uobject *ev_file_uobj;

        if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_CREATE_CQ))
                return -EOPNOTSUPP;

        ret = uverbs_copy_from(&attr.comp_vector, attrs, CREATE_CQ_COMP_VECTOR);
        if (!ret)
                ret = uverbs_copy_from(&attr.cqe, attrs, CREATE_CQ_CQE);
        if (!ret)
                ret = uverbs_copy_from(&user_handle, attrs, CREATE_CQ_USER_HANDLE);
        if (ret)
                return ret;

        /* Optional param; if it isn't present we get -ENOENT and skip it. */
        if (uverbs_copy_from(&attr.flags, attrs, CREATE_CQ_FLAGS) == -EFAULT)
                return -EFAULT;

        ev_file_attr = uverbs_attr_get(attrs, CREATE_CQ_COMP_CHANNEL);
        if (!IS_ERR(ev_file_attr)) {
                ev_file_uobj = ev_file_attr->obj_attr.uobject;

                ev_file = container_of(ev_file_uobj,
                                       struct ib_uverbs_completion_event_file,
                                       uobj_file.uobj);
                uverbs_uobject_get(ev_file_uobj);
        }

        if (attr.comp_vector >= ucontext->ufile->device->num_comp_vectors) {
                ret = -EINVAL;
                goto err_event_file;
        }

        obj = container_of(uverbs_attr_get(attrs, CREATE_CQ_HANDLE)->obj_attr.uobject,
                           typeof(*obj), uobject);
        obj->uverbs_file           = ucontext->ufile;
        obj->comp_events_reported  = 0;
        obj->async_events_reported = 0;
        INIT_LIST_HEAD(&obj->comp_list);
        INIT_LIST_HEAD(&obj->async_list);

        /* Temporary, only until drivers get the new uverbs_attr_bundle */
        create_udata(attrs, &uhw);

        cq = ib_dev->create_cq(ib_dev, &attr, ucontext, &uhw);
        if (IS_ERR(cq)) {
                ret = PTR_ERR(cq);
                goto err_event_file;
        }

        cq->device        = ib_dev;
        cq->uobject       = &obj->uobject;
        cq->comp_handler  = ib_uverbs_comp_handler;
        cq->event_handler = ib_uverbs_cq_event_handler;
        cq->cq_context    = ev_file ? &ev_file->ev_queue : NULL;
        obj->uobject.object = cq;
        obj->uobject.user_handle = user_handle;
        atomic_set(&cq->usecnt, 0);
        cq->res.type = RDMA_RESTRACK_CQ;
        rdma_restrack_add(&cq->res);

        ret = uverbs_copy_to(attrs, CREATE_CQ_RESP_CQE, &cq->cqe,
                             sizeof(cq->cqe));
        if (ret)
                goto err_cq;

        return 0;
err_cq:
        ib_destroy_cq(cq);

err_event_file:
        if (ev_file)
                uverbs_uobject_put(ev_file_uobj);
        return ret;
}

static DECLARE_UVERBS_METHOD(
        uverbs_method_cq_create, UVERBS_CQ_CREATE, uverbs_create_cq_handler,
        &UVERBS_ATTR_IDR(CREATE_CQ_HANDLE, UVERBS_OBJECT_CQ, UVERBS_ACCESS_NEW,
                         UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
        &UVERBS_ATTR_PTR_IN(CREATE_CQ_CQE, u32,
                            UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
        &UVERBS_ATTR_PTR_IN(CREATE_CQ_USER_HANDLE, u64,
                            UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
        &UVERBS_ATTR_FD(CREATE_CQ_COMP_CHANNEL, UVERBS_OBJECT_COMP_CHANNEL,
                        UVERBS_ACCESS_READ),
        &UVERBS_ATTR_PTR_IN(CREATE_CQ_COMP_VECTOR, u32,
                            UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
        &UVERBS_ATTR_PTR_IN(CREATE_CQ_FLAGS, u32),
        &UVERBS_ATTR_PTR_OUT(CREATE_CQ_RESP_CQE, u32,
                             UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
        &uverbs_uhw_compat_in, &uverbs_uhw_compat_out);

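/*
 * Ioctl-based CQ destruction: rdma_explicit_destroy() drives the normal
 * removal path, which ends up in uverbs_free_cq(); afterwards the
 * completion/async event counts recorded on the ucq object are returned
 * to userspace.
 */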
static int uverbs_destroy_cq_handler(struct ib_device *ib_dev,
                                     struct ib_uverbs_file *file,
                                     struct uverbs_attr_bundle *attrs)
{
        struct ib_uverbs_destroy_cq_resp resp;
        struct ib_uobject *uobj =
                uverbs_attr_get(attrs, DESTROY_CQ_HANDLE)->obj_attr.uobject;
        struct ib_ucq_object *obj = container_of(uobj, struct ib_ucq_object,
                                                 uobject);
        int ret;

        if (!(ib_dev->uverbs_cmd_mask & 1ULL << IB_USER_VERBS_CMD_DESTROY_CQ))
                return -EOPNOTSUPP;

        ret = rdma_explicit_destroy(uobj);
        if (ret)
                return ret;

        resp.comp_events_reported  = obj->comp_events_reported;
        resp.async_events_reported = obj->async_events_reported;

        return uverbs_copy_to(attrs, DESTROY_CQ_RESP, &resp, sizeof(resp));
}

static DECLARE_UVERBS_METHOD(
        uverbs_method_cq_destroy, UVERBS_CQ_DESTROY, uverbs_destroy_cq_handler,
        &UVERBS_ATTR_IDR(DESTROY_CQ_HANDLE, UVERBS_OBJECT_CQ,
                         UVERBS_ACCESS_DESTROY,
                         UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)),
        &UVERBS_ATTR_PTR_OUT(DESTROY_CQ_RESP, struct ib_uverbs_destroy_cq_resp,
                             UA_FLAGS(UVERBS_ATTR_SPEC_F_MANDATORY)));

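/*
 * Default object type declarations. The numeric order argument passed to
 * UVERBS_TYPE_ALLOC_IDR()/UVERBS_TYPE_ALLOC_IDR_SZ() is the destruction
 * order used during context cleanup: types with a higher order are freed
 * later, which is why MRs (1) are freed after MWs (0) and PDs (2) after
 * MRs.
 */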
DECLARE_UVERBS_OBJECT(uverbs_object_comp_channel,
                      UVERBS_OBJECT_COMP_CHANNEL,
                      &UVERBS_TYPE_ALLOC_FD(0,
                                              sizeof(struct ib_uverbs_completion_event_file),
                                              uverbs_hot_unplug_completion_event_file,
                                              &uverbs_event_fops,
                                              "[infinibandevent]", O_RDONLY));

DECLARE_UVERBS_OBJECT(uverbs_object_cq, UVERBS_OBJECT_CQ,
                      &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_ucq_object), 0,
                                                  uverbs_free_cq),
                      &uverbs_method_cq_create,
                      &uverbs_method_cq_destroy);

DECLARE_UVERBS_OBJECT(uverbs_object_qp, UVERBS_OBJECT_QP,
                      &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uqp_object), 0,
                                                  uverbs_free_qp));

DECLARE_UVERBS_OBJECT(uverbs_object_mw, UVERBS_OBJECT_MW,
                      &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_mw));

DECLARE_UVERBS_OBJECT(uverbs_object_mr, UVERBS_OBJECT_MR,
                      /* 1 is used in order to free the MR after all the MWs */
                      &UVERBS_TYPE_ALLOC_IDR(1, uverbs_free_mr));

DECLARE_UVERBS_OBJECT(uverbs_object_srq, UVERBS_OBJECT_SRQ,
                      &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_usrq_object), 0,
                                                  uverbs_free_srq));

DECLARE_UVERBS_OBJECT(uverbs_object_ah, UVERBS_OBJECT_AH,
                      &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_ah));

DECLARE_UVERBS_OBJECT(uverbs_object_flow, UVERBS_OBJECT_FLOW,
                      &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_flow));

DECLARE_UVERBS_OBJECT(uverbs_object_wq, UVERBS_OBJECT_WQ,
                      &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uwq_object), 0,
                                                  uverbs_free_wq));

DECLARE_UVERBS_OBJECT(uverbs_object_rwq_ind_table,
                      UVERBS_OBJECT_RWQ_IND_TBL,
                      &UVERBS_TYPE_ALLOC_IDR(0, uverbs_free_rwq_ind_tbl));

DECLARE_UVERBS_OBJECT(uverbs_object_xrcd, UVERBS_OBJECT_XRCD,
                      &UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uxrcd_object), 0,
                                                  uverbs_free_xrcd));

DECLARE_UVERBS_OBJECT(uverbs_object_pd, UVERBS_OBJECT_PD,
                      /* 2 is used in order to free the PD after MRs */
                      &UVERBS_TYPE_ALLOC_IDR(2, uverbs_free_pd));

DECLARE_UVERBS_OBJECT(uverbs_object_device, UVERBS_OBJECT_DEVICE, NULL);

DECLARE_UVERBS_OBJECT_TREE(uverbs_default_objects,
                           &uverbs_object_device,
                           &uverbs_object_pd,
                           &uverbs_object_mr,
                           &uverbs_object_comp_channel,
                           &uverbs_object_cq,
                           &uverbs_object_qp,
                           &uverbs_object_ah,
                           &uverbs_object_mw,
                           &uverbs_object_srq,
                           &uverbs_object_flow,
                           &uverbs_object_wq,
                           &uverbs_object_rwq_ind_table,
                           &uverbs_object_xrcd);