/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
#include <linux/cdev.h>
#include <linux/anon_inodes.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>

#include <linux/uaccess.h>

#include <rdma/ib_verbs.h>
#include <rdma/uverbs_std_types.h>
#include <rdma/rdma_netlink.h>

#include "uverbs.h"
#include "core_priv.h"
#include "rdma_core.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
        IB_UVERBS_MAJOR       = 231,
        IB_UVERBS_BASE_MINOR  = 192,
        IB_UVERBS_MAX_DEVICES = RDMA_MAX_PORTS,
        IB_UVERBS_NUM_FIXED_MINOR = 32,
        IB_UVERBS_NUM_DYNAMIC_MINOR = IB_UVERBS_MAX_DEVICES - IB_UVERBS_NUM_FIXED_MINOR,
};

#define IB_UVERBS_BASE_DEV      MKDEV(IB_UVERBS_MAJOR, IB_UVERBS_BASE_MINOR)

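/*
 * Illustrative note (not in the original source): with the values above, the
 * first IB_UVERBS_NUM_FIXED_MINOR (32) devices land in the statically
 * registered region, char device 231:192 through 231:223, and every later
 * devnum is mapped into the dynamically allocated region instead:
 *
 *	if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
 *		base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
 *	else
 *		base = IB_UVERBS_BASE_DEV + devnum;	// e.g. devnum 0 -> 231:192
 *
 * This mirrors the assignment done in ib_uverbs_add_one() below.
 */
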
static dev_t dynamic_uverbs_dev;
static struct class *uverbs_class;

static DEFINE_IDA(uverbs_ida);
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device, void *client_data);

/*
 * Must be called with the ufile->device->disassociate_srcu held, and the lock
 * must be held until use of the ucontext is finished.
 */
struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile)
{
        /*
         * We do not hold the hw_destroy_rwsem lock for this flow, instead
         * srcu is used. It does not matter if someone races this with
         * get_context, we get NULL or valid ucontext.
         */
        struct ib_ucontext *ucontext = smp_load_acquire(&ufile->ucontext);

        if (!srcu_dereference(ufile->device->ib_dev,
                              &ufile->device->disassociate_srcu))
                return ERR_PTR(-EIO);

        if (!ucontext)
                return ERR_PTR(-EINVAL);

        return ucontext;
}
EXPORT_SYMBOL(ib_uverbs_get_ucontext_file);

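/*
 * Illustrative caller sketch (not part of the original file): a user of
 * ib_uverbs_get_ucontext_file() is expected to bracket the call with the
 * disassociate_srcu read lock, exactly as ib_uverbs_mmap() does below.
 * The function name example_touch_ucontext is hypothetical.
 *
 *	static int example_touch_ucontext(struct ib_uverbs_file *ufile)
 *	{
 *		struct ib_ucontext *ucontext;
 *		int srcu_key, ret = 0;
 *
 *		srcu_key = srcu_read_lock(&ufile->device->disassociate_srcu);
 *		ucontext = ib_uverbs_get_ucontext_file(ufile);
 *		if (IS_ERR(ucontext))
 *			ret = PTR_ERR(ucontext);
 *		// ... use ucontext only while the srcu read lock is held ...
 *		srcu_read_unlock(&ufile->device->disassociate_srcu, srcu_key);
 *		return ret;
 *	}
 */
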
int uverbs_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd = mw->pd;
        int ret;

        ret = mw->device->ops.dealloc_mw(mw);
        if (!ret)
                atomic_dec(&pd->usecnt);
        return ret;
}

static void ib_uverbs_release_dev(struct device *device)
{
        struct ib_uverbs_device *dev =
                        container_of(device, struct ib_uverbs_device, dev);

        uverbs_destroy_api(dev->uapi);
        cleanup_srcu_struct(&dev->disassociate_srcu);
        mutex_destroy(&dev->lists_mutex);
        mutex_destroy(&dev->xrcd_tree_mutex);
        kfree(dev);
}

static void ib_uverbs_release_async_event_file(struct kref *ref)
{
        struct ib_uverbs_async_event_file *file =
                container_of(ref, struct ib_uverbs_async_event_file, ref);

        kfree(file);
}

void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
                           struct ib_uverbs_completion_event_file *ev_file,
                           struct ib_ucq_object *uobj)
{
        struct ib_uverbs_event *evt, *tmp;

        if (ev_file) {
                spin_lock_irq(&ev_file->ev_queue.lock);
                list_for_each_entry_safe(evt, tmp, &uobj->comp_list, obj_list) {
                        list_del(&evt->list);
                        kfree(evt);
                }
                spin_unlock_irq(&ev_file->ev_queue.lock);

                uverbs_uobject_put(&ev_file->uobj);
        }

        spin_lock_irq(&file->async_file->ev_queue.lock);
        list_for_each_entry_safe(evt, tmp, &uobj->async_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->async_file->ev_queue.lock);
}

void ib_uverbs_release_uevent(struct ib_uverbs_file *file,
                              struct ib_uevent_object *uobj)
{
        struct ib_uverbs_event *evt, *tmp;

        spin_lock_irq(&file->async_file->ev_queue.lock);
        list_for_each_entry_safe(evt, tmp, &uobj->event_list, obj_list) {
                list_del(&evt->list);
                kfree(evt);
        }
        spin_unlock_irq(&file->async_file->ev_queue.lock);
}

void ib_uverbs_detach_umcast(struct ib_qp *qp,
                             struct ib_uqp_object *uobj)
{
        struct ib_uverbs_mcast_entry *mcast, *tmp;

        list_for_each_entry_safe(mcast, tmp, &uobj->mcast_list, list) {
                ib_detach_mcast(qp, &mcast->gid, mcast->lid);
                list_del(&mcast->list);
                kfree(mcast);
        }
}

static void ib_uverbs_comp_dev(struct ib_uverbs_device *dev)
{
        complete(&dev->comp);
}

void ib_uverbs_release_file(struct kref *ref)
{
        struct ib_uverbs_file *file =
                container_of(ref, struct ib_uverbs_file, ref);
        struct ib_device *ib_dev;
        int srcu_key;

        release_ufile_idr_uobject(file);

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
        ib_dev = srcu_dereference(file->device->ib_dev,
                                  &file->device->disassociate_srcu);
        if (ib_dev && !ib_dev->ops.disassociate_ucontext)
                module_put(ib_dev->ops.owner);
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);

        if (atomic_dec_and_test(&file->device->refcount))
                ib_uverbs_comp_dev(file->device);

        if (file->async_file)
                kref_put(&file->async_file->ref,
                         ib_uverbs_release_async_event_file);
        put_device(&file->device->dev);

        if (file->disassociate_page)
                __free_pages(file->disassociate_page, 0);
        mutex_destroy(&file->umap_lock);
        mutex_destroy(&file->ucontext_lock);
        kfree(file);
}

static ssize_t ib_uverbs_event_read(struct ib_uverbs_event_queue *ev_queue,
                                    struct ib_uverbs_file *uverbs_file,
                                    struct file *filp, char __user *buf,
                                    size_t count, loff_t *pos,
                                    size_t eventsz)
{
        struct ib_uverbs_event *event;
        ssize_t ret = 0;

        spin_lock_irq(&ev_queue->lock);

        while (list_empty(&ev_queue->event_list)) {
                spin_unlock_irq(&ev_queue->lock);

                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(ev_queue->poll_wait,
                                             (!list_empty(&ev_queue->event_list) ||
                        /* The barriers built into wait_event_interruptible()
                         * and wake_up() guarantee this will see the null set
                         * immediately after the event is set
                         */
                                              !uverbs_file->device->ib_dev)))
                        return -ERESTARTSYS;

                /* If device was disassociated and no event exists set an error */
                if (list_empty(&ev_queue->event_list) &&
                    !uverbs_file->device->ib_dev)
                        return -EIO;

                spin_lock_irq(&ev_queue->lock);
        }

        event = list_entry(ev_queue->event_list.next, struct ib_uverbs_event, list);

        if (eventsz > count) {
                ret   = -EINVAL;
                event = NULL;
        } else {
                list_del(ev_queue->event_list.next);
                if (event->counter) {
                        ++(*event->counter);
                        list_del(&event->obj_list);
                }
        }

        spin_unlock_irq(&ev_queue->lock);

        if (event) {
                if (copy_to_user(buf, event, eventsz))
                        ret = -EFAULT;
                else
                        ret = eventsz;
        }

        kfree(event);

        return ret;
}

static ssize_t ib_uverbs_async_event_read(struct file *filp, char __user *buf,
                                          size_t count, loff_t *pos)
{
        struct ib_uverbs_async_event_file *file = filp->private_data;

        return ib_uverbs_event_read(&file->ev_queue, file->uverbs_file, filp,
                                    buf, count, pos,
                                    sizeof(struct ib_uverbs_async_event_desc));
}

static ssize_t ib_uverbs_comp_event_read(struct file *filp, char __user *buf,
                                         size_t count, loff_t *pos)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return ib_uverbs_event_read(&comp_ev_file->ev_queue,
                                    comp_ev_file->uobj.ufile, filp,
                                    buf, count, pos,
                                    sizeof(struct ib_uverbs_comp_event_desc));
}

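/*
 * Illustrative userspace sketch (not part of this file): a completion channel
 * fd produced by this layer is consumed by reading fixed-size
 * struct ib_uverbs_comp_event_desc records, which is what libibverbs does
 * under ibv_get_cq_event(). The fd name below is hypothetical.
 *
 *	struct ib_uverbs_comp_event_desc desc;
 *
 *	if (read(comp_channel_fd, &desc, sizeof(desc)) == sizeof(desc))
 *		printf("completion on CQ handle %llu\n",
 *		       (unsigned long long)desc.cq_handle);
 */
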
static __poll_t ib_uverbs_event_poll(struct ib_uverbs_event_queue *ev_queue,
                                     struct file *filp,
                                     struct poll_table_struct *wait)
{
        __poll_t pollflags = 0;

        poll_wait(filp, &ev_queue->poll_wait, wait);

        spin_lock_irq(&ev_queue->lock);
        if (!list_empty(&ev_queue->event_list))
                pollflags = EPOLLIN | EPOLLRDNORM;
        spin_unlock_irq(&ev_queue->lock);

        return pollflags;
}

static __poll_t ib_uverbs_async_event_poll(struct file *filp,
                                           struct poll_table_struct *wait)
{
        return ib_uverbs_event_poll(filp->private_data, filp, wait);
}

static __poll_t ib_uverbs_comp_event_poll(struct file *filp,
                                          struct poll_table_struct *wait)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return ib_uverbs_event_poll(&comp_ev_file->ev_queue, filp, wait);
}

static int ib_uverbs_async_event_fasync(int fd, struct file *filp, int on)
{
        struct ib_uverbs_event_queue *ev_queue = filp->private_data;

        return fasync_helper(fd, filp, on, &ev_queue->async_queue);
}

static int ib_uverbs_comp_event_fasync(int fd, struct file *filp, int on)
{
        struct ib_uverbs_completion_event_file *comp_ev_file =
                filp->private_data;

        return fasync_helper(fd, filp, on, &comp_ev_file->ev_queue.async_queue);
}

static int ib_uverbs_async_event_close(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_async_event_file *file = filp->private_data;
        struct ib_uverbs_file *uverbs_file = file->uverbs_file;
        struct ib_uverbs_event *entry, *tmp;
        int closed_already = 0;

        mutex_lock(&uverbs_file->device->lists_mutex);
        spin_lock_irq(&file->ev_queue.lock);
        closed_already = file->ev_queue.is_closed;
        file->ev_queue.is_closed = 1;
        list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
                if (entry->counter)
                        list_del(&entry->obj_list);
                kfree(entry);
        }
        spin_unlock_irq(&file->ev_queue.lock);
        if (!closed_already) {
                list_del(&file->list);
                ib_unregister_event_handler(&uverbs_file->event_handler);
        }
        mutex_unlock(&uverbs_file->device->lists_mutex);

        kref_put(&uverbs_file->ref, ib_uverbs_release_file);
        kref_put(&file->ref, ib_uverbs_release_async_event_file);

        return 0;
}

static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
{
        struct ib_uobject *uobj = filp->private_data;
        struct ib_uverbs_completion_event_file *file = container_of(
                uobj, struct ib_uverbs_completion_event_file, uobj);
        struct ib_uverbs_event *entry, *tmp;

        spin_lock_irq(&file->ev_queue.lock);
        list_for_each_entry_safe(entry, tmp, &file->ev_queue.event_list, list) {
                if (entry->counter)
                        list_del(&entry->obj_list);
                kfree(entry);
        }
        file->ev_queue.is_closed = 1;
        spin_unlock_irq(&file->ev_queue.lock);

        uverbs_close_fd(filp);

        return 0;
}

const struct file_operations uverbs_event_fops = {
        .owner   = THIS_MODULE,
        .read    = ib_uverbs_comp_event_read,
        .poll    = ib_uverbs_comp_event_poll,
        .release = ib_uverbs_comp_event_close,
        .fasync  = ib_uverbs_comp_event_fasync,
        .llseek  = no_llseek,
};

static const struct file_operations uverbs_async_event_fops = {
        .owner   = THIS_MODULE,
        .read    = ib_uverbs_async_event_read,
        .poll    = ib_uverbs_async_event_poll,
        .release = ib_uverbs_async_event_close,
        .fasync  = ib_uverbs_async_event_fasync,
        .llseek  = no_llseek,
};

void ib_uverbs_comp_handler(struct ib_cq *cq, void *cq_context)
{
        struct ib_uverbs_event_queue *ev_queue = cq_context;
        struct ib_ucq_object *uobj;
        struct ib_uverbs_event *entry;
        unsigned long flags;

        if (!ev_queue)
                return;

        spin_lock_irqsave(&ev_queue->lock, flags);
        if (ev_queue->is_closed) {
                spin_unlock_irqrestore(&ev_queue->lock, flags);
                return;
        }

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry) {
                spin_unlock_irqrestore(&ev_queue->lock, flags);
                return;
        }

        uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);

        entry->desc.comp.cq_handle = cq->uobject->user_handle;
        entry->counter             = &uobj->comp_events_reported;

        list_add_tail(&entry->list, &ev_queue->event_list);
        list_add_tail(&entry->obj_list, &uobj->comp_list);
        spin_unlock_irqrestore(&ev_queue->lock, flags);

        wake_up_interruptible(&ev_queue->poll_wait);
        kill_fasync(&ev_queue->async_queue, SIGIO, POLL_IN);
}

static void ib_uverbs_async_handler(struct ib_uverbs_file *file,
                                    __u64 element, __u64 event,
                                    struct list_head *obj_list,
                                    u32 *counter)
{
        struct ib_uverbs_event *entry;
        unsigned long flags;

        spin_lock_irqsave(&file->async_file->ev_queue.lock, flags);
        if (file->async_file->ev_queue.is_closed) {
                spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
                return;
        }

        entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry) {
                spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);
                return;
        }

        entry->desc.async.element    = element;
        entry->desc.async.event_type = event;
        entry->desc.async.reserved   = 0;
        entry->counter               = counter;

        list_add_tail(&entry->list, &file->async_file->ev_queue.event_list);
        if (obj_list)
                list_add_tail(&entry->obj_list, obj_list);
        spin_unlock_irqrestore(&file->async_file->ev_queue.lock, flags);

        wake_up_interruptible(&file->async_file->ev_queue.poll_wait);
        kill_fasync(&file->async_file->ev_queue.async_queue, SIGIO, POLL_IN);
}

void ib_uverbs_cq_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_ucq_object *uobj = container_of(event->element.cq->uobject,
                                                  struct ib_ucq_object, uobject);

        ib_uverbs_async_handler(uobj->uobject.ufile, uobj->uobject.user_handle,
                                event->event, &uobj->async_list,
                                &uobj->async_events_reported);
}

void ib_uverbs_qp_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_uevent_object *uobj;

        /* for XRC target qp's, check that qp is live */
        if (!event->element.qp->uobject)
                return;

        uobj = container_of(event->element.qp->uobject,
                            struct ib_uevent_object, uobject);

        ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
                                event->event, &uobj->event_list,
                                &uobj->events_reported);
}

void ib_uverbs_wq_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_uevent_object *uobj = container_of(event->element.wq->uobject,
                                                     struct ib_uevent_object, uobject);

        ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
                                event->event, &uobj->event_list,
                                &uobj->events_reported);
}

void ib_uverbs_srq_event_handler(struct ib_event *event, void *context_ptr)
{
        struct ib_uevent_object *uobj;

        uobj = container_of(event->element.srq->uobject,
                            struct ib_uevent_object, uobject);

        ib_uverbs_async_handler(context_ptr, uobj->uobject.user_handle,
                                event->event, &uobj->event_list,
                                &uobj->events_reported);
}

void ib_uverbs_event_handler(struct ib_event_handler *handler,
                             struct ib_event *event)
{
        struct ib_uverbs_file *file =
                container_of(handler, struct ib_uverbs_file, event_handler);

        ib_uverbs_async_handler(file, event->element.port_num, event->event,
                                NULL, NULL);
}

void ib_uverbs_free_async_event_file(struct ib_uverbs_file *file)
{
        kref_put(&file->async_file->ref, ib_uverbs_release_async_event_file);
        file->async_file = NULL;
}

void ib_uverbs_init_event_queue(struct ib_uverbs_event_queue *ev_queue)
{
        spin_lock_init(&ev_queue->lock);
        INIT_LIST_HEAD(&ev_queue->event_list);
        init_waitqueue_head(&ev_queue->poll_wait);
        ev_queue->is_closed   = 0;
        ev_queue->async_queue = NULL;
}

struct file *ib_uverbs_alloc_async_event_file(struct ib_uverbs_file *uverbs_file,
                                              struct ib_device *ib_dev)
{
        struct ib_uverbs_async_event_file *ev_file;
        struct file *filp;

        ev_file = kzalloc(sizeof(*ev_file), GFP_KERNEL);
        if (!ev_file)
                return ERR_PTR(-ENOMEM);

        ib_uverbs_init_event_queue(&ev_file->ev_queue);
        ev_file->uverbs_file = uverbs_file;
        kref_get(&ev_file->uverbs_file->ref);
        kref_init(&ev_file->ref);
        filp = anon_inode_getfile("[infinibandevent]", &uverbs_async_event_fops,
                                  ev_file, O_RDONLY);
        if (IS_ERR(filp))
                goto err_put_refs;

        mutex_lock(&uverbs_file->device->lists_mutex);
        list_add_tail(&ev_file->list,
                      &uverbs_file->device->uverbs_events_file_list);
        mutex_unlock(&uverbs_file->device->lists_mutex);

        WARN_ON(uverbs_file->async_file);
        uverbs_file->async_file = ev_file;
        kref_get(&uverbs_file->async_file->ref);
        INIT_IB_EVENT_HANDLER(&uverbs_file->event_handler,
                              ib_dev,
                              ib_uverbs_event_handler);
        ib_register_event_handler(&uverbs_file->event_handler);
        /* At this point the async file is fully set up */

        return filp;

err_put_refs:
        kref_put(&ev_file->uverbs_file->ref, ib_uverbs_release_file);
        kref_put(&ev_file->ref, ib_uverbs_release_async_event_file);
        return filp;
}

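/*
 * Illustrative note (not from the original source, based on the surrounding
 * uverbs flow): this function is invoked while handling the GET_CONTEXT
 * command, and the resulting fd is handed back to userspace in that command's
 * response. libibverbs then read()s struct ib_uverbs_async_event_desc records
 * from it to surface ibv_get_async_event().
 */
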
static ssize_t verify_hdr(struct ib_uverbs_cmd_hdr *hdr,
                          struct ib_uverbs_ex_cmd_hdr *ex_hdr, size_t count,
                          const struct uverbs_api_write_method *method_elm)
{
        if (method_elm->is_ex) {
                count -= sizeof(*hdr) + sizeof(*ex_hdr);

                if ((hdr->in_words + ex_hdr->provider_in_words) * 8 != count)
                        return -EINVAL;

                if (hdr->in_words * 8 < method_elm->req_size)
                        return -ENOSPC;

                if (ex_hdr->cmd_hdr_reserved)
                        return -EINVAL;

                if (ex_hdr->response) {
                        if (!hdr->out_words && !ex_hdr->provider_out_words)
                                return -EINVAL;

                        if (hdr->out_words * 8 < method_elm->resp_size)
                                return -ENOSPC;

                        if (!access_ok(u64_to_user_ptr(ex_hdr->response),
                                       (hdr->out_words + ex_hdr->provider_out_words) * 8))
                                return -EFAULT;
                } else {
                        if (hdr->out_words || ex_hdr->provider_out_words)
                                return -EINVAL;
                }

                return 0;
        }

        /* not extended command */
        if (hdr->in_words * 4 != count)
                return -EINVAL;

        if (count < method_elm->req_size + sizeof(hdr)) {
                /*
                 * rdma-core v18 and v19 have a bug where they send DESTROY_CQ
                 * with a 16 byte write instead of 24. Old kernels didn't
                 * check the size so they allowed this. Now that the size is
                 * checked provide a compatibility work around to not break
                 * those userspaces.
                 */
                if (hdr->command == IB_USER_VERBS_CMD_DESTROY_CQ &&
                    count == 16) {
                        hdr->in_words = 6;
                        return 0;
                }
                return -ENOSPC;
        }

        if (hdr->out_words * 4 < method_elm->resp_size)
                return -ENOSPC;

        return 0;
}

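/*
 * Worked example (illustrative, not from the original source): for an
 * extended command, hdr.in_words and ex_hdr.provider_in_words count 8-byte
 * units of payload after both headers, while a legacy command's in_words
 * counts 4-byte units including the header itself. So a legacy request with
 * hdr.in_words == 6 means 24 bytes total on the wire, which is exactly the
 * DESTROY_CQ compatibility case patched up above (16-byte buggy writes are
 * promoted back to the expected 24-byte size).
 */
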
static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
                               size_t count, loff_t *pos)
{
        struct ib_uverbs_file *file = filp->private_data;
        const struct uverbs_api_write_method *method_elm;
        struct uverbs_api *uapi = file->device->uapi;
        struct ib_uverbs_ex_cmd_hdr ex_hdr;
        struct ib_uverbs_cmd_hdr hdr;
        struct uverbs_attr_bundle bundle;
        int srcu_key;
        ssize_t ret;

        if (!ib_safe_file_access(filp)) {
                pr_err_once("uverbs_write: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
                            task_tgid_vnr(current), current->comm);
                return -EACCES;
        }

        if (count < sizeof(hdr))
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;

        method_elm = uapi_get_method(uapi, hdr.command);
        if (IS_ERR(method_elm))
                return PTR_ERR(method_elm);

        if (method_elm->is_ex) {
                if (count < (sizeof(hdr) + sizeof(ex_hdr)))
                        return -EINVAL;
                if (copy_from_user(&ex_hdr, buf + sizeof(hdr), sizeof(ex_hdr)))
                        return -EFAULT;
        }

        ret = verify_hdr(&hdr, &ex_hdr, count, method_elm);
        if (ret)
                return ret;

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);

        buf += sizeof(hdr);

        memset(bundle.attr_present, 0, sizeof(bundle.attr_present));
        bundle.ufile = file;
        bundle.context = NULL; /* only valid if bundle has uobject */
        if (!method_elm->is_ex) {
                size_t in_len = hdr.in_words * 4 - sizeof(hdr);
                size_t out_len = hdr.out_words * 4;
                u64 response = 0;

                if (method_elm->has_udata) {
                        bundle.driver_udata.inlen =
                                in_len - method_elm->req_size;
                        in_len = method_elm->req_size;
                        if (bundle.driver_udata.inlen)
                                bundle.driver_udata.inbuf = buf + in_len;
                        else
                                bundle.driver_udata.inbuf = NULL;
                } else {
                        memset(&bundle.driver_udata, 0,
                               sizeof(bundle.driver_udata));
                }

                if (method_elm->has_resp) {
                        /*
                         * The macros check that if has_resp is set
                         * then the command request structure starts
                         * with a '__aligned u64 response' member.
                         */
                        ret = get_user(response, (const u64 __user *)buf);
                        if (ret)
                                goto out_unlock;

                        if (method_elm->has_udata) {
                                bundle.driver_udata.outlen =
                                        out_len - method_elm->resp_size;
                                out_len = method_elm->resp_size;
                                if (bundle.driver_udata.outlen)
                                        bundle.driver_udata.outbuf =
                                                u64_to_user_ptr(response +
                                                                out_len);
                                else
                                        bundle.driver_udata.outbuf = NULL;
                        }
                } else {
                        bundle.driver_udata.outlen = 0;
                        bundle.driver_udata.outbuf = NULL;
                }

                ib_uverbs_init_udata_buf_or_null(
                        &bundle.ucore, buf, u64_to_user_ptr(response),
                        in_len, out_len);
        } else {
                buf += sizeof(ex_hdr);

                ib_uverbs_init_udata_buf_or_null(&bundle.ucore, buf,
                                        u64_to_user_ptr(ex_hdr.response),
                                        hdr.in_words * 8, hdr.out_words * 8);

                ib_uverbs_init_udata_buf_or_null(
                        &bundle.driver_udata, buf + bundle.ucore.inlen,
                        u64_to_user_ptr(ex_hdr.response) + bundle.ucore.outlen,
                        ex_hdr.provider_in_words * 8,
                        ex_hdr.provider_out_words * 8);
        }

        ret = method_elm->handler(&bundle);
out_unlock:
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
        return (ret) ? : count;
}

static const struct vm_operations_struct rdma_umap_ops;

static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct ib_uverbs_file *file = filp->private_data;
        struct ib_ucontext *ucontext;
        int ret = 0;
        int srcu_key;

        srcu_key = srcu_read_lock(&file->device->disassociate_srcu);
        ucontext = ib_uverbs_get_ucontext_file(file);
        if (IS_ERR(ucontext)) {
                ret = PTR_ERR(ucontext);
                goto out;
        }
        vma->vm_ops = &rdma_umap_ops;
        ret = ucontext->device->ops.mmap(ucontext, vma);
out:
        srcu_read_unlock(&file->device->disassociate_srcu, srcu_key);
        return ret;
}

/*
 * The VMA has been dup'd, initialize the vm_private_data with a new tracking
 * struct
 */
static void rdma_umap_open(struct vm_area_struct *vma)
{
        struct ib_uverbs_file *ufile = vma->vm_file->private_data;
        struct rdma_umap_priv *opriv = vma->vm_private_data;
        struct rdma_umap_priv *priv;

        if (!opriv)
                return;

        /* We are racing with disassociation */
        if (!down_read_trylock(&ufile->hw_destroy_rwsem))
                goto out_zap;
        /*
         * Disassociation already completed, the VMA should already be zapped.
         */
        if (!ufile->ucontext)
                goto out_unlock;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                goto out_unlock;
        rdma_umap_priv_init(priv, vma, opriv->entry);

        up_read(&ufile->hw_destroy_rwsem);
        return;

out_unlock:
        up_read(&ufile->hw_destroy_rwsem);
out_zap:
        /*
         * We can't allow the VMA to be created with the actual IO pages, that
         * would break our API contract, and it can't be stopped at this
         * point, so zap it.
         */
        vma->vm_private_data = NULL;
        zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}

static void rdma_umap_close(struct vm_area_struct *vma)
{
        struct ib_uverbs_file *ufile = vma->vm_file->private_data;
        struct rdma_umap_priv *priv = vma->vm_private_data;

        if (!priv)
                return;

        /*
         * The vma holds a reference on the struct file that created it, which
         * in turn means that the ib_uverbs_file is guaranteed to exist at
         * this point.
         */
        mutex_lock(&ufile->umap_lock);
        if (priv->entry)
                rdma_user_mmap_entry_put(priv->entry);

        list_del(&priv->list);
        mutex_unlock(&ufile->umap_lock);
        kfree(priv);
}

/*
 * Once zap_vma_ptes has been called, touches to the VMA will come here and
 * we return a dummy writable zero page for all the pfns.
 */
static vm_fault_t rdma_umap_fault(struct vm_fault *vmf)
{
        struct ib_uverbs_file *ufile = vmf->vma->vm_file->private_data;
        struct rdma_umap_priv *priv = vmf->vma->vm_private_data;
        vm_fault_t ret = 0;

        if (!priv)
                return VM_FAULT_SIGBUS;

        /* Read only pages can just use the system zero page. */
        if (!(vmf->vma->vm_flags & (VM_WRITE | VM_MAYWRITE))) {
                vmf->page = ZERO_PAGE(vmf->address);
                get_page(vmf->page);
                return 0;
        }

        mutex_lock(&ufile->umap_lock);
        if (!ufile->disassociate_page)
                ufile->disassociate_page =
                        alloc_pages(vmf->gfp_mask | __GFP_ZERO, 0);

        if (ufile->disassociate_page) {
                /*
                 * This VMA is forced to always be shared so this doesn't have
                 * to worry about COW.
                 */
                vmf->page = ufile->disassociate_page;
                get_page(vmf->page);
        } else {
                ret = VM_FAULT_SIGBUS;
        }
        mutex_unlock(&ufile->umap_lock);

        return ret;
}

static const struct vm_operations_struct rdma_umap_ops = {
        .open = rdma_umap_open,
        .close = rdma_umap_close,
        .fault = rdma_umap_fault,
};

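/*
 * Illustrative lifecycle note (not from the original source): after hot
 * unplug the sequence for an existing mapping is roughly
 *
 *	uverbs_user_mmap_disassociate()
 *		-> zap_vma_ptes()		// PTEs dropped, BAR unreachable
 *	userspace touches the mapping
 *		-> rdma_umap_fault()		// zero page faulted in instead
 *
 * so applications keep running against harmless zero memory rather than
 * taking a machine check on vanished IO space.
 */
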
void uverbs_user_mmap_disassociate(struct ib_uverbs_file *ufile)
{
        struct rdma_umap_priv *priv, *next_priv;

        lockdep_assert_held(&ufile->hw_destroy_rwsem);

        while (1) {
                struct mm_struct *mm = NULL;

                /* Get an arbitrary mm pointer that hasn't been cleaned yet */
                mutex_lock(&ufile->umap_lock);
                while (!list_empty(&ufile->umaps)) {
                        int ret;

                        priv = list_first_entry(&ufile->umaps,
                                                struct rdma_umap_priv, list);
                        mm = priv->vma->vm_mm;
                        ret = mmget_not_zero(mm);
                        if (!ret) {
                                list_del_init(&priv->list);
                                mm = NULL;
                                continue;
                        }
                        break;
                }
                mutex_unlock(&ufile->umap_lock);
                if (!mm)
                        return;

                /*
                 * The umap_lock is nested under mmap_sem since it is used
                 * within the vma_ops callbacks, so we have to clean the list
                 * one mm at a time to get the lock ordering right. Typically
                 * there will only be one mm, so no big deal.
                 */
                down_read(&mm->mmap_sem);
                if (!mmget_still_valid(mm))
                        goto skip_mm;
                mutex_lock(&ufile->umap_lock);
                list_for_each_entry_safe (priv, next_priv, &ufile->umaps,
                                          list) {
                        struct vm_area_struct *vma = priv->vma;

                        if (vma->vm_mm != mm)
                                continue;
                        list_del_init(&priv->list);

                        zap_vma_ptes(vma, vma->vm_start,
                                     vma->vm_end - vma->vm_start);

                        if (priv->entry) {
                                rdma_user_mmap_entry_put(priv->entry);
                                priv->entry = NULL;
                        }
                }
                mutex_unlock(&ufile->umap_lock);
        skip_mm:
                up_read(&mm->mmap_sem);
                mmput(mm);
        }
}

/*
 * ib_uverbs_open() does not need the BKL:
 *
 *  - the ib_uverbs_device structures are properly reference counted and
 *    everything else is purely local to the file being created, so
 *    races against other open calls are not a problem;
 *  - there is no ioctl method to race against;
 *  - the open method will either immediately return -ENXIO, or all
 *    required initialization will be done.
 */
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_device *dev;
        struct ib_uverbs_file *file;
        struct ib_device *ib_dev;
        int ret;
        int module_dependent;
        int srcu_key;

        dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
        if (!atomic_inc_not_zero(&dev->refcount))
                return -ENXIO;

        get_device(&dev->dev);
        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        mutex_lock(&dev->lists_mutex);
        ib_dev = srcu_dereference(dev->ib_dev,
                                  &dev->disassociate_srcu);
        if (!ib_dev) {
                ret = -EIO;
                goto err;
        }

        if (!rdma_dev_access_netns(ib_dev, current->nsproxy->net_ns)) {
                ret = -EPERM;
                goto err;
        }

        /* In case IB device supports disassociate ucontext, there is no hard
         * dependency between uverbs device and its low level device.
         */
        module_dependent = !(ib_dev->ops.disassociate_ucontext);

        if (module_dependent) {
                if (!try_module_get(ib_dev->ops.owner)) {
                        ret = -ENODEV;
                        goto err;
                }
        }

        file = kzalloc(sizeof(*file), GFP_KERNEL);
        if (!file) {
                ret = -ENOMEM;
                if (module_dependent)
                        goto err_module;

                goto err;
        }

        file->device = dev;
        kref_init(&file->ref);
        mutex_init(&file->ucontext_lock);

        spin_lock_init(&file->uobjects_lock);
        INIT_LIST_HEAD(&file->uobjects);
        init_rwsem(&file->hw_destroy_rwsem);
        mutex_init(&file->umap_lock);
        INIT_LIST_HEAD(&file->umaps);

        filp->private_data = file;
        list_add_tail(&file->list, &dev->uverbs_file_list);
        mutex_unlock(&dev->lists_mutex);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        setup_ufile_idr_uobject(file);

        return stream_open(inode, filp);

err_module:
        module_put(ib_dev->ops.owner);

err:
        mutex_unlock(&dev->lists_mutex);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);
        if (atomic_dec_and_test(&dev->refcount))
                ib_uverbs_comp_dev(dev);

        put_device(&dev->dev);
        return ret;
}

static int ib_uverbs_close(struct inode *inode, struct file *filp)
{
        struct ib_uverbs_file *file = filp->private_data;

        uverbs_destroy_ufile_hw(file, RDMA_REMOVE_CLOSE);

        mutex_lock(&file->device->lists_mutex);
        list_del_init(&file->list);
        mutex_unlock(&file->device->lists_mutex);

        kref_put(&file->ref, ib_uverbs_release_file);

        return 0;
}

static const struct file_operations uverbs_fops = {
        .owner   = THIS_MODULE,
        .write   = ib_uverbs_write,
        .open    = ib_uverbs_open,
        .release = ib_uverbs_close,
        .llseek  = no_llseek,
        .unlocked_ioctl = ib_uverbs_ioctl,
        .compat_ioctl = ib_uverbs_ioctl,
};

static const struct file_operations uverbs_mmap_fops = {
        .owner   = THIS_MODULE,
        .write   = ib_uverbs_write,
        .mmap    = ib_uverbs_mmap,
        .open    = ib_uverbs_open,
        .release = ib_uverbs_close,
        .llseek  = no_llseek,
        .unlocked_ioctl = ib_uverbs_ioctl,
        .compat_ioctl = ib_uverbs_ioctl,
};

static int ib_uverbs_get_nl_info(struct ib_device *ibdev, void *client_data,
                                 struct ib_client_nl_info *res)
{
        struct ib_uverbs_device *uverbs_dev = client_data;
        int ret;

        if (res->port != -1)
                return -EINVAL;

        res->abi = ibdev->ops.uverbs_abi_ver;
        res->cdev = &uverbs_dev->dev;

        /*
         * To support DRIVER_ID binding in userspace some of the drivers need
         * upgrading to expose their PCI dependent revision information
         * through get_context instead of relying on modalias matching. When
         * the drivers are fixed they can drop this flag.
         */
        if (!ibdev->ops.uverbs_no_driver_id_binding) {
                ret = nla_put_u32(res->nl_msg, RDMA_NLDEV_ATTR_UVERBS_DRIVER_ID,
                                  ibdev->ops.driver_id);
                if (ret)
                        return ret;
        }
        return 0;
}

static struct ib_client uverbs_client = {
        .name   = "uverbs",
        .no_kverbs_req = true,
        .add    = ib_uverbs_add_one,
        .remove = ib_uverbs_remove_one,
        .get_nl_info = ib_uverbs_get_nl_info,
};
MODULE_ALIAS_RDMA_CLIENT("uverbs");

static ssize_t ibdev_show(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct ib_uverbs_device *dev =
                        container_of(device, struct ib_uverbs_device, dev);
        int ret = -ENODEV;
        int srcu_key;
        struct ib_device *ib_dev;

        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
        if (ib_dev)
                ret = sprintf(buf, "%s\n", dev_name(&ib_dev->dev));
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        return ret;
}
static DEVICE_ATTR_RO(ibdev);

static ssize_t abi_version_show(struct device *device,
                                struct device_attribute *attr, char *buf)
{
        struct ib_uverbs_device *dev =
                        container_of(device, struct ib_uverbs_device, dev);
        int ret = -ENODEV;
        int srcu_key;
        struct ib_device *ib_dev;

        srcu_key = srcu_read_lock(&dev->disassociate_srcu);
        ib_dev = srcu_dereference(dev->ib_dev, &dev->disassociate_srcu);
        if (ib_dev)
                ret = sprintf(buf, "%u\n", ib_dev->ops.uverbs_abi_ver);
        srcu_read_unlock(&dev->disassociate_srcu, srcu_key);

        return ret;
}
static DEVICE_ATTR_RO(abi_version);

static struct attribute *ib_dev_attrs[] = {
        &dev_attr_abi_version.attr,
        &dev_attr_ibdev.attr,
        NULL,
};

static const struct attribute_group dev_attr_group = {
        .attrs = ib_dev_attrs,
};

static CLASS_ATTR_STRING(abi_version, S_IRUGO,
                         __stringify(IB_USER_VERBS_ABI_VERSION));

static int ib_uverbs_create_uapi(struct ib_device *device,
                                 struct ib_uverbs_device *uverbs_dev)
{
        struct uverbs_api *uapi;

        uapi = uverbs_alloc_api(device);
        if (IS_ERR(uapi))
                return PTR_ERR(uapi);

        uverbs_dev->uapi = uapi;
        return 0;
}

static void ib_uverbs_add_one(struct ib_device *device)
{
        int devnum;
        dev_t base;
        struct ib_uverbs_device *uverbs_dev;
        int ret;

        if (!device->ops.alloc_ucontext)
                return;

        uverbs_dev = kzalloc(sizeof(*uverbs_dev), GFP_KERNEL);
        if (!uverbs_dev)
                return;

        ret = init_srcu_struct(&uverbs_dev->disassociate_srcu);
        if (ret) {
                kfree(uverbs_dev);
                return;
        }

        device_initialize(&uverbs_dev->dev);
        uverbs_dev->dev.class = uverbs_class;
        uverbs_dev->dev.parent = device->dev.parent;
        uverbs_dev->dev.release = ib_uverbs_release_dev;
        uverbs_dev->groups[0] = &dev_attr_group;
        uverbs_dev->dev.groups = uverbs_dev->groups;
        atomic_set(&uverbs_dev->refcount, 1);
        init_completion(&uverbs_dev->comp);
        uverbs_dev->xrcd_tree = RB_ROOT;
        mutex_init(&uverbs_dev->xrcd_tree_mutex);
        mutex_init(&uverbs_dev->lists_mutex);
        INIT_LIST_HEAD(&uverbs_dev->uverbs_file_list);
        INIT_LIST_HEAD(&uverbs_dev->uverbs_events_file_list);
        rcu_assign_pointer(uverbs_dev->ib_dev, device);
        uverbs_dev->num_comp_vectors = device->num_comp_vectors;

        devnum = ida_alloc_max(&uverbs_ida, IB_UVERBS_MAX_DEVICES - 1,
                               GFP_KERNEL);
        if (devnum < 0)
                goto err;
        uverbs_dev->devnum = devnum;
        if (devnum >= IB_UVERBS_NUM_FIXED_MINOR)
                base = dynamic_uverbs_dev + devnum - IB_UVERBS_NUM_FIXED_MINOR;
        else
                base = IB_UVERBS_BASE_DEV + devnum;

        if (ib_uverbs_create_uapi(device, uverbs_dev))
                goto err_uapi;

        uverbs_dev->dev.devt = base;
        dev_set_name(&uverbs_dev->dev, "uverbs%d", uverbs_dev->devnum);

        cdev_init(&uverbs_dev->cdev,
                  device->ops.mmap ? &uverbs_mmap_fops : &uverbs_fops);
        uverbs_dev->cdev.owner = THIS_MODULE;

        ret = cdev_device_add(&uverbs_dev->cdev, &uverbs_dev->dev);
        if (ret)
                goto err_uapi;

        ib_set_client_data(device, &uverbs_client, uverbs_dev);
        return;

err_uapi:
        ida_free(&uverbs_ida, devnum);
err:
        if (atomic_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
        wait_for_completion(&uverbs_dev->comp);
        put_device(&uverbs_dev->dev);
        return;
}

static void ib_uverbs_free_hw_resources(struct ib_uverbs_device *uverbs_dev,
                                        struct ib_device *ib_dev)
{
        struct ib_uverbs_file *file;
        struct ib_uverbs_async_event_file *event_file;
        struct ib_event event;

        /* Pending running commands to terminate */
        uverbs_disassociate_api_pre(uverbs_dev);
        event.event = IB_EVENT_DEVICE_FATAL;
        event.element.port_num = 0;
        event.device = ib_dev;

        mutex_lock(&uverbs_dev->lists_mutex);
        while (!list_empty(&uverbs_dev->uverbs_file_list)) {
                file = list_first_entry(&uverbs_dev->uverbs_file_list,
                                        struct ib_uverbs_file, list);
                list_del_init(&file->list);
                kref_get(&file->ref);

                /* We must release the mutex before going ahead and calling
                 * uverbs_cleanup_ufile, as it might end up indirectly calling
                 * uverbs_close, for example due to freeing the resources (e.g.
                 * mmput).
                 */
                mutex_unlock(&uverbs_dev->lists_mutex);

                ib_uverbs_event_handler(&file->event_handler, &event);
                uverbs_destroy_ufile_hw(file, RDMA_REMOVE_DRIVER_REMOVE);
                kref_put(&file->ref, ib_uverbs_release_file);

                mutex_lock(&uverbs_dev->lists_mutex);
        }

        while (!list_empty(&uverbs_dev->uverbs_events_file_list)) {
                event_file = list_first_entry(&uverbs_dev->
                                              uverbs_events_file_list,
                                              struct ib_uverbs_async_event_file,
                                              list);
                spin_lock_irq(&event_file->ev_queue.lock);
                event_file->ev_queue.is_closed = 1;
                spin_unlock_irq(&event_file->ev_queue.lock);

                list_del(&event_file->list);
                ib_unregister_event_handler(
                        &event_file->uverbs_file->event_handler);
                event_file->uverbs_file->event_handler.device = NULL;

                wake_up_interruptible(&event_file->ev_queue.poll_wait);
                kill_fasync(&event_file->ev_queue.async_queue, SIGIO, POLL_IN);
        }
        mutex_unlock(&uverbs_dev->lists_mutex);

        uverbs_disassociate_api(uverbs_dev->uapi);
}

static void ib_uverbs_remove_one(struct ib_device *device, void *client_data)
{
        struct ib_uverbs_device *uverbs_dev = client_data;
        int wait_clients = 1;

        if (!uverbs_dev)
                return;

        cdev_device_del(&uverbs_dev->cdev, &uverbs_dev->dev);
        ida_free(&uverbs_ida, uverbs_dev->devnum);

        if (device->ops.disassociate_ucontext) {
                /* We disassociate HW resources and immediately return.
                 * Userspace will see an EIO errno for all future access.
                 * Upon returning, ib_device may be freed internally and is not
                 * valid any more.
                 * uverbs_device is still available until all clients close
                 * their files, then the uverbs device ref count will be zero
                 * and its resources will be freed.
                 * Note: At this point no more files can be opened since the
                 * cdev was deleted, however active clients can still issue
                 * commands and close their open files.
                 */
                ib_uverbs_free_hw_resources(uverbs_dev, device);
                wait_clients = 0;
        }

        if (atomic_dec_and_test(&uverbs_dev->refcount))
                ib_uverbs_comp_dev(uverbs_dev);
        if (wait_clients)
                wait_for_completion(&uverbs_dev->comp);

        put_device(&uverbs_dev->dev);
}

static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
        if (mode)
                *mode = 0666;
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

static int __init ib_uverbs_init(void)
{
        int ret;

        ret = register_chrdev_region(IB_UVERBS_BASE_DEV,
                                     IB_UVERBS_NUM_FIXED_MINOR,
                                     "infiniband_verbs");
        if (ret) {
                pr_err("user_verbs: couldn't register device number\n");
                goto out;
        }

        ret = alloc_chrdev_region(&dynamic_uverbs_dev, 0,
                                  IB_UVERBS_NUM_DYNAMIC_MINOR,
                                  "infiniband_verbs");
        if (ret) {
                pr_err("couldn't register dynamic device number\n");
                goto out_alloc;
        }

        uverbs_class = class_create(THIS_MODULE, "infiniband_verbs");
        if (IS_ERR(uverbs_class)) {
                ret = PTR_ERR(uverbs_class);
                pr_err("user_verbs: couldn't create class infiniband_verbs\n");
                goto out_chrdev;
        }

        uverbs_class->devnode = uverbs_devnode;

        ret = class_create_file(uverbs_class, &class_attr_abi_version.attr);
        if (ret) {
                pr_err("user_verbs: couldn't create abi_version attribute\n");
                goto out_class;
        }

        ret = ib_register_client(&uverbs_client);
        if (ret) {
                pr_err("user_verbs: couldn't register client\n");
                goto out_class;
        }

        return 0;

out_class:
        class_destroy(uverbs_class);

out_chrdev:
        unregister_chrdev_region(dynamic_uverbs_dev,
                                 IB_UVERBS_NUM_DYNAMIC_MINOR);

out_alloc:
        unregister_chrdev_region(IB_UVERBS_BASE_DEV,
                                 IB_UVERBS_NUM_FIXED_MINOR);

out:
        return ret;
}

static void __exit ib_uverbs_cleanup(void)
{
        ib_unregister_client(&uverbs_client);
        class_destroy(uverbs_class);
        unregister_chrdev_region(IB_UVERBS_BASE_DEV,
                                 IB_UVERBS_NUM_FIXED_MINOR);
        unregister_chrdev_region(dynamic_uverbs_dev,
                                 IB_UVERBS_NUM_DYNAMIC_MINOR);
        mmu_notifier_synchronize();
}

module_init(ib_uverbs_init);
module_exit(ib_uverbs_cleanup);