drivers/virt/acrn/irqfd.c
// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN HSM irqfd: use eventfd objects to inject virtual interrupts
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *      Shuo Liu <shuo.a.liu@intel.com>
 *      Yakui Zhao <yakui.zhao@intel.com>
 */

#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>

#include "acrn_drv.h"

static LIST_HEAD(acrn_irqfd_clients);

/**
 * struct hsm_irqfd - Properties of HSM irqfd
 * @vm:         Associated VM pointer
 * @wait:       Wait-queue entry
 * @shutdown:   Async shutdown work
 * @eventfd:    Associated eventfd
 * @list:       Entry in the VM's irqfd list (&acrn_vm.irqfds)
 * @pt:         Structure for select/poll on the associated eventfd
 * @msi:        MSI data
 */
struct hsm_irqfd {
        struct acrn_vm          *vm;
        wait_queue_entry_t      wait;
        struct work_struct      shutdown;
        struct eventfd_ctx      *eventfd;
        struct list_head        list;
        poll_table              pt;
        struct acrn_msi_entry   msi;
};

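/* Inject the MSI described by the irqfd into its associated VM */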
static void acrn_irqfd_inject(struct hsm_irqfd *irqfd)
{
        struct acrn_vm *vm = irqfd->vm;

        acrn_msi_inject(vm, irqfd->msi.msi_addr,
                        irqfd->msi.msi_data);
}

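/*
 * Detach an irqfd from its eventfd and free it. The caller must hold
 * the VM's irqfds_lock.
 */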
static void hsm_irqfd_shutdown(struct hsm_irqfd *irqfd)
{
        u64 cnt;

        lockdep_assert_held(&irqfd->vm->irqfds_lock);

        /* Remove from the VM's list and detach from the eventfd's wait queue */
        list_del_init(&irqfd->list);
        eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);
        eventfd_ctx_put(irqfd->eventfd);
        kfree(irqfd);
}

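/*
 * Deferred shutdown, scheduled from the wait-queue callback where the
 * irqfd cannot be torn down directly. Skips irqfds that have already
 * been removed from the VM's list.
 */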
static void hsm_irqfd_shutdown_work(struct work_struct *work)
{
        struct hsm_irqfd *irqfd;
        struct acrn_vm *vm;

        irqfd = container_of(work, struct hsm_irqfd, shutdown);
        vm = irqfd->vm;
        mutex_lock(&vm->irqfds_lock);
        if (!list_empty(&irqfd->list))
                hsm_irqfd_shutdown(irqfd);
        mutex_unlock(&vm->irqfds_lock);
}

/* Called with wqh->lock held and interrupts disabled */
static int hsm_irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode,
                            int sync, void *key)
{
        unsigned long poll_bits = (unsigned long)key;
        struct hsm_irqfd *irqfd;
        struct acrn_vm *vm;

        irqfd = container_of(wait, struct hsm_irqfd, wait);
        vm = irqfd->vm;
        if (poll_bits & POLLIN)
                /* An event has been signaled, inject an interrupt */
                acrn_irqfd_inject(irqfd);

        if (poll_bits & POLLHUP)
                /* The eventfd is closing; defer shutdown since wqh->lock is held */
                queue_work(vm->irqfd_wq, &irqfd->shutdown);

        return 0;
}

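/* poll_table callback: register the irqfd on the eventfd's wait queue */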
static void hsm_irqfd_poll_func(struct file *file, wait_queue_head_t *wqh,
                                poll_table *pt)
{
        struct hsm_irqfd *irqfd;

        irqfd = container_of(pt, struct hsm_irqfd, pt);
        add_wait_queue(wqh, &irqfd->wait);
}

/*
 * Assign an eventfd to a VM and create an HSM irqfd associated with the
 * eventfd. The properties of the HSM irqfd are built from a &struct
 * acrn_irqfd.
 */
static int acrn_irqfd_assign(struct acrn_vm *vm, struct acrn_irqfd *args)
{
        struct eventfd_ctx *eventfd = NULL;
        struct hsm_irqfd *irqfd, *tmp;
        __poll_t events;
        struct fd f;
        int ret = 0;

        irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
        if (!irqfd)
                return -ENOMEM;

        irqfd->vm = vm;
        memcpy(&irqfd->msi, &args->msi, sizeof(args->msi));
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->shutdown, hsm_irqfd_shutdown_work);

        f = fdget(args->fd);
        if (!f.file) {
                ret = -EBADF;
                goto out;
        }

        eventfd = eventfd_ctx_fileget(f.file);
        if (IS_ERR(eventfd)) {
                ret = PTR_ERR(eventfd);
                goto fail;
        }

        irqfd->eventfd = eventfd;

        /*
         * Install custom wake-up handling so that we are notified whenever
         * the underlying eventfd is signaled.
         */
        init_waitqueue_func_entry(&irqfd->wait, hsm_irqfd_wakeup);
        init_poll_funcptr(&irqfd->pt, hsm_irqfd_poll_func);

        /* Reject an eventfd that is already assigned to this VM */
        mutex_lock(&vm->irqfds_lock);
        list_for_each_entry(tmp, &vm->irqfds, list) {
                if (irqfd->eventfd != tmp->eventfd)
                        continue;
                ret = -EBUSY;
                mutex_unlock(&vm->irqfds_lock);
                goto fail;
        }
        list_add_tail(&irqfd->list, &vm->irqfds);
        mutex_unlock(&vm->irqfds_lock);

        /* Check whether an event is already pending on the eventfd */
        events = vfs_poll(f.file, &irqfd->pt);

        if (events & EPOLLIN)
                acrn_irqfd_inject(irqfd);

        fdput(f);
        return 0;
fail:
        if (eventfd && !IS_ERR(eventfd))
                eventfd_ctx_put(eventfd);

        fdput(f);
out:
        kfree(irqfd);
        return ret;
}

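/*
 * Find the irqfd bound to the eventfd referenced by @args->fd and shut
 * it down.
 */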
static int acrn_irqfd_deassign(struct acrn_vm *vm,
                               struct acrn_irqfd *args)
{
        struct hsm_irqfd *irqfd, *tmp;
        struct eventfd_ctx *eventfd;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        mutex_lock(&vm->irqfds_lock);
        list_for_each_entry_safe(irqfd, tmp, &vm->irqfds, list) {
                if (irqfd->eventfd == eventfd) {
                        hsm_irqfd_shutdown(irqfd);
                        break;
                }
        }
        mutex_unlock(&vm->irqfds_lock);
        eventfd_ctx_put(eventfd);

        return 0;
}

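/*
 * Assign or deassign an irqfd for @vm: deassign if ACRN_IRQFD_FLAG_DEASSIGN
 * is set in @args->flags, otherwise assign.
 */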
int acrn_irqfd_config(struct acrn_vm *vm, struct acrn_irqfd *args)
{
        int ret;

        if (args->flags & ACRN_IRQFD_FLAG_DEASSIGN)
                ret = acrn_irqfd_deassign(vm, args);
        else
                ret = acrn_irqfd_assign(vm, args);

        return ret;
}

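/*
 * Set up per-VM irqfd state: the irqfd list, its lock and a dedicated
 * workqueue for deferred irqfd shutdown.
 */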
int acrn_irqfd_init(struct acrn_vm *vm)
{
        INIT_LIST_HEAD(&vm->irqfds);
        mutex_init(&vm->irqfds_lock);
        vm->irqfd_wq = alloc_workqueue("acrn_irqfd-%u", 0, 0, vm->vmid);
        if (!vm->irqfd_wq)
                return -ENOMEM;

        dev_dbg(acrn_dev.this_device, "VM %u irqfd init.\n", vm->vmid);
        return 0;
}

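/*
 * Tear down per-VM irqfd state: destroy the workqueue (flushing pending
 * shutdown work) and shut down any remaining irqfds.
 */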
void acrn_irqfd_deinit(struct acrn_vm *vm)
{
        struct hsm_irqfd *irqfd, *next;

        dev_dbg(acrn_dev.this_device, "VM %u irqfd deinit.\n", vm->vmid);
        destroy_workqueue(vm->irqfd_wq);
        mutex_lock(&vm->irqfds_lock);
        list_for_each_entry_safe(irqfd, next, &vm->irqfds, list)
                hsm_irqfd_shutdown(irqfd);
        mutex_unlock(&vm->irqfds_lock);
}