/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *      Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

static void
irqfd_inject(struct work_struct *work)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(work, struct kvm_kernel_irqfd, inject);
        struct kvm *kvm = irqfd->kvm;

        if (!irqfd->resampler) {
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
                                false);
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
                                false);
        } else
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            irqfd->gsi, 1, false);
}
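
/*
 * For an ordinary irqfd the pulse above (assert, then immediately
 * de-assert) models an edge-triggered interrupt.  With a resampler the
 * line is only asserted here; it is de-asserted from
 * irqfd_resampler_ack() once the guest acks the interrupt.
 *
 * An illustrative userspace sketch of driving this path through the
 * KVM_IRQFD ioctl (vm_fd and the GSI number are assumptions of the
 * example; error handling omitted):
 *
 *      int efd = eventfd(0, EFD_CLOEXEC);
 *      struct kvm_irqfd args = {
 *              .fd  = efd,
 *              .gsi = 10,
 *      };
 *      ioctl(vm_fd, KVM_IRQFD, &args);
 *
 *      uint64_t one = 1;
 *      write(efd, &one, sizeof(one));  // injects GSI 10 into the guest
 */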

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
        struct kvm_kernel_irqfd_resampler *resampler;
        struct kvm *kvm;
        struct kvm_kernel_irqfd *irqfd;
        int idx;

        resampler = container_of(kian,
                        struct kvm_kernel_irqfd_resampler, notifier);
        kvm = resampler->kvm;

        kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                    resampler->notifier.gsi, 0, false);

        idx = srcu_read_lock(&kvm->irq_srcu);

        list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
                eventfd_signal(irqfd->resamplefd, 1);

        srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
        struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
        struct kvm *kvm = resampler->kvm;

        mutex_lock(&kvm->irqfds.resampler_lock);

        list_del_rcu(&irqfd->resampler_link);
        synchronize_srcu(&kvm->irq_srcu);

        if (list_empty(&resampler->list)) {
                list_del(&resampler->link);
                kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            resampler->notifier.gsi, 0, false);
                kfree(resampler);
        }

        mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(work, struct kvm_kernel_irqfd, shutdown);
        u64 cnt;

        /*
         * Synchronize with the wait-queue and unhook ourselves to prevent
         * further events.
         */
        eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

        /*
         * We know no new events will be scheduled at this point, so block
         * until all previously outstanding events have completed
         */
        flush_work(&irqfd->inject);

        if (irqfd->resampler) {
                irqfd_resampler_shutdown(irqfd);
                eventfd_ctx_put(irqfd->resamplefd);
        }

        /*
         * It is now safe to release the object's resources
         */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
        irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
        eventfd_ctx_put(irqfd->eventfd);
        kfree(irqfd);
}

/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
        return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
        BUG_ON(!irqfd_is_active(irqfd));

        list_del_init(&irqfd->list);

        queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

int __attribute__((weak)) kvm_arch_set_irq_inatomic(
                                struct kvm_kernel_irq_routing_entry *irq,
                                struct kvm *kvm, int irq_source_id,
                                int level,
                                bool line_status)
{
        return -EWOULDBLOCK;
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(wait, struct kvm_kernel_irqfd, wait);
        __poll_t flags = key_to_poll(key);
        struct kvm_kernel_irq_routing_entry irq;
        struct kvm *kvm = irqfd->kvm;
        unsigned seq;
        int idx;

        if (flags & EPOLLIN) {
                idx = srcu_read_lock(&kvm->irq_srcu);
                do {
                        seq = read_seqcount_begin(&irqfd->irq_entry_sc);
                        irq = irqfd->irq_entry;
                } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
                /* An event has been signaled, inject an interrupt */
                if (kvm_arch_set_irq_inatomic(&irq, kvm,
                                              KVM_USERSPACE_IRQ_SOURCE_ID, 1,
                                              false) == -EWOULDBLOCK)
                        schedule_work(&irqfd->inject);
                srcu_read_unlock(&kvm->irq_srcu, idx);
        }

        if (flags & EPOLLHUP) {
                /* The eventfd is closing, detach from KVM */
                unsigned long flags;

                spin_lock_irqsave(&kvm->irqfds.lock, flags);

                /*
                 * We must check if someone deactivated the irqfd before
                 * we could acquire the irqfds.lock since the item is
                 * deactivated from the KVM side before it is unhooked from
                 * the wait-queue.  If it is already deactivated, we can
                 * simply return knowing the other side will clean up for us.
                 * We cannot race against the irqfd going away since the
                 * other side is required to acquire wqh->lock, which we hold.
                 */
                if (irqfd_is_active(irqfd))
                        irqfd_deactivate(irqfd);

                spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
        }

        return 0;
}
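
/*
 * Note on the seqcount loop in irqfd_wakeup(): the wakeup runs with
 * wqh->lock held and interrupts disabled, so it cannot take
 * irqfds.lock, under which irqfd_update() rewrites irq_entry.  Instead
 * it copies the routing entry by value and retries until
 * read_seqcount_retry() confirms the snapshot is consistent, i.e. no
 * writer ran in between.
 */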

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
                        poll_table *pt)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(pt, struct kvm_kernel_irqfd, pt);
        add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
        struct kvm_kernel_irq_routing_entry *e;
        struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
        int n_entries;

        n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

        write_seqcount_begin(&irqfd->irq_entry_sc);

        e = entries;
        if (n_entries == 1)
                irqfd->irq_entry = *e;
        else
                irqfd->irq_entry.type = 0;

        write_seqcount_end(&irqfd->irq_entry_sc);
}

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
                                struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
                                struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
                                struct kvm *kvm, unsigned int host_irq,
                                uint32_t guest_irq, bool set)
{
        return 0;
}
#endif

static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
        struct kvm_kernel_irqfd *irqfd, *tmp;
        struct fd f;
        struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
        int ret;
        __poll_t events;
        int idx;

        if (!kvm_arch_intc_initialized(kvm))
                return -EAGAIN;

        irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
        if (!irqfd)
                return -ENOMEM;

        irqfd->kvm = kvm;
        irqfd->gsi = args->gsi;
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->inject, irqfd_inject);
        INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
        seqcount_init(&irqfd->irq_entry_sc);

        f = fdget(args->fd);
        if (!f.file) {
                ret = -EBADF;
                goto out;
        }

        eventfd = eventfd_ctx_fileget(f.file);
        if (IS_ERR(eventfd)) {
                ret = PTR_ERR(eventfd);
                goto fail;
        }

        irqfd->eventfd = eventfd;

        if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
                struct kvm_kernel_irqfd_resampler *resampler;

                resamplefd = eventfd_ctx_fdget(args->resamplefd);
                if (IS_ERR(resamplefd)) {
                        ret = PTR_ERR(resamplefd);
                        goto fail;
                }

                irqfd->resamplefd = resamplefd;
                INIT_LIST_HEAD(&irqfd->resampler_link);

                mutex_lock(&kvm->irqfds.resampler_lock);

                list_for_each_entry(resampler,
                                    &kvm->irqfds.resampler_list, link) {
                        if (resampler->notifier.gsi == irqfd->gsi) {
                                irqfd->resampler = resampler;
                                break;
                        }
                }

                if (!irqfd->resampler) {
                        resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
                        if (!resampler) {
                                ret = -ENOMEM;
                                mutex_unlock(&kvm->irqfds.resampler_lock);
                                goto fail;
                        }

                        resampler->kvm = kvm;
                        INIT_LIST_HEAD(&resampler->list);
                        resampler->notifier.gsi = irqfd->gsi;
                        resampler->notifier.irq_acked = irqfd_resampler_ack;
                        INIT_LIST_HEAD(&resampler->link);

                        list_add(&resampler->link, &kvm->irqfds.resampler_list);
                        kvm_register_irq_ack_notifier(kvm,
                                                      &resampler->notifier);
                        irqfd->resampler = resampler;
                }

                list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
                synchronize_srcu(&kvm->irq_srcu);

                mutex_unlock(&kvm->irqfds.resampler_lock);
        }

        /*
         * Install our own custom wake-up handling so we are notified via
         * a callback whenever someone signals the underlying eventfd
         */
        init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
        init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

        spin_lock_irq(&kvm->irqfds.lock);

        ret = 0;
        list_for_each_entry(tmp, &kvm->irqfds.items, list) {
                if (irqfd->eventfd != tmp->eventfd)
                        continue;
                /* This fd is used for another irq already. */
                ret = -EBUSY;
                spin_unlock_irq(&kvm->irqfds.lock);
                goto fail;
        }

        idx = srcu_read_lock(&kvm->irq_srcu);
        irqfd_update(kvm, irqfd);
        srcu_read_unlock(&kvm->irq_srcu, idx);

        list_add_tail(&irqfd->list, &kvm->irqfds.items);

        spin_unlock_irq(&kvm->irqfds.lock);

        /*
         * Check if there was an event already pending on the eventfd
         * before we registered, and trigger it as if we didn't miss it.
         */
        events = f.file->f_op->poll(f.file, &irqfd->pt);

        if (events & EPOLLIN)
                schedule_work(&irqfd->inject);

        /*
         * The irqfd is now fully initialized, so it is finally safe to
         * drop the file; dropping it any earlier could have raced
         * against the EPOLLHUP.
         */
        fdput(f);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
        if (kvm_arch_has_irq_bypass()) {
                irqfd->consumer.token = (void *)irqfd->eventfd;
                irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
                irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
                irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
                irqfd->consumer.start = kvm_arch_irq_bypass_start;
                ret = irq_bypass_register_consumer(&irqfd->consumer);
                if (ret)
                        pr_info("irq bypass consumer (token %p) registration fails: %d\n",
                                irqfd->consumer.token, ret);
        }
#endif

        return 0;

fail:
        if (irqfd->resampler)
                irqfd_resampler_shutdown(irqfd);

        if (resamplefd && !IS_ERR(resamplefd))
                eventfd_ctx_put(resamplefd);

        if (eventfd && !IS_ERR(eventfd))
                eventfd_ctx_put(eventfd);

        fdput(f);

out:
        kfree(irqfd);
        return ret;
}
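
/*
 * An illustrative sketch of the resample variant of kvm_irqfd_assign(),
 * used for level-triggered interrupts (vm_fd and the GSI are
 * assumptions of the example): the irqfd asserts the line, and the
 * resamplefd is signalled on guest EOI so userspace can re-check the
 * device and write the irqfd again if the level is still high.
 *
 *      struct kvm_irqfd args = {
 *              .fd         = irq_efd,
 *              .resamplefd = resample_efd,
 *              .gsi        = 10,
 *              .flags      = KVM_IRQFD_FLAG_RESAMPLE,
 *      };
 *      ioctl(vm_fd, KVM_IRQFD, &args);
 */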

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        struct kvm_irq_ack_notifier *kian;
        int gsi, idx;

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
        if (gsi != -1)
                hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                         link)
                        if (kian->gsi == gsi) {
                                srcu_read_unlock(&kvm->irq_srcu, idx);
                                return true;
                        }

        srcu_read_unlock(&kvm->irq_srcu, idx);

        return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
        struct kvm_irq_ack_notifier *kian;

        hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                 link)
                if (kian->gsi == gsi)
                        kian->irq_acked(kian);
}

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        int gsi, idx;

        trace_kvm_ack_irq(irqchip, pin);

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
        if (gsi != -1)
                kvm_notify_acked_gsi(kvm, gsi);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
        mutex_unlock(&kvm->irq_lock);
        kvm_arch_post_irq_ack_notifier_list_update(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                    struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_del_init_rcu(&kian->link);
        mutex_unlock(&kvm->irq_lock);
        synchronize_srcu(&kvm->irq_srcu);
        kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
        spin_lock_init(&kvm->irqfds.lock);
        INIT_LIST_HEAD(&kvm->irqfds.items);
        INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
        mutex_init(&kvm->irqfds.resampler_lock);
#endif
        INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * Shut down any irqfds that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
        struct kvm_kernel_irqfd *irqfd, *tmp;
        struct eventfd_ctx *eventfd;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
                if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
                        /*
                         * This clearing of irq_entry.type is needed for when
                         * another thread calls kvm_irq_routing_update before
                         * we flush the workqueue below (we synchronize with
                         * kvm_irq_routing_update using irqfds.lock).
                         */
                        write_seqcount_begin(&irqfd->irq_entry_sc);
                        irqfd->irq_entry.type = 0;
                        write_seqcount_end(&irqfd->irq_entry_sc);
                        irqfd_deactivate(irqfd);
                }
        }

        spin_unlock_irq(&kvm->irqfds.lock);
        eventfd_ctx_put(eventfd);

        /*
         * Block until we know all outstanding shutdown jobs have completed
         * so that we guarantee there will not be any more interrupts on this
         * gsi once this deassign function returns.
         */
        flush_workqueue(irqfd_cleanup_wq);

        return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
                return -EINVAL;

        if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
                return kvm_irqfd_deassign(kvm, args);

        return kvm_irqfd_assign(kvm, args);
}

/*
 * This function is called as the kvm VM fd is being released.  Shut down
 * all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
        struct kvm_kernel_irqfd *irqfd, *tmp;

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
                irqfd_deactivate(irqfd);

        spin_unlock_irq(&kvm->irqfds.lock);

        /*
         * Block until we know all outstanding shutdown jobs have completed
         * since we do not take a kvm* reference.
         */
        flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
        struct kvm_kernel_irqfd *irqfd;

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
                irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
                if (irqfd->producer) {
                        int ret = kvm_arch_update_irqfd_routing(
                                        irqfd->kvm, irqfd->producer->irq,
                                        irqfd->gsi, 1);
                        WARN_ON(ret);
                }
#endif
        }

        spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * Create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances.  We need our own isolated
 * queue to ease flushing work items when a VM exits.
 */
int kvm_irqfd_init(void)
{
        irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
        if (!irqfd_cleanup_wq)
                return -ENOMEM;

        return 0;
}

void kvm_irqfd_exit(void)
{
        destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

struct _ioeventfd {
        struct list_head     list;
        u64                  addr;
        int                  length;
        struct eventfd_ctx  *eventfd;
        u64                  datamatch;
        struct kvm_io_device dev;
        u8                   bus_idx;
        bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
        return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
        eventfd_ctx_put(p->eventfd);
        list_del(&p->list);
        kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
        u64 _val;

        if (addr != p->addr)
                /* address must be precise for a hit */
                return false;

        if (!p->length)
                /* length = 0 means only look at the address, so always a hit */
                return true;

        if (len != p->length)
                /* address-range must be precise for a hit */
                return false;

        if (p->wildcard)
                /* all else equal, wildcard is always a hit */
                return true;

        /* otherwise, we have to actually compare the data */

        BUG_ON(!IS_ALIGNED((unsigned long)val, len));

        switch (len) {
        case 1:
                _val = *(u8 *)val;
                break;
        case 2:
                _val = *(u16 *)val;
                break;
        case 4:
                _val = *(u32 *)val;
                break;
        case 8:
                _val = *(u64 *)val;
                break;
        default:
                return false;
        }

        return _val == p->datamatch;
}
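
/*
 * To summarize the checks above, a write hits this ioeventfd only if
 * the address matches exactly and one of the following holds: the
 * ioeventfd was registered with length 0 (address-only match), the
 * access length matches and the ioeventfd is a wildcard, or the access
 * length matches and the written value equals datamatch.
 */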

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
                int len, const void *val)
{
        struct _ioeventfd *p = to_ioeventfd(this);

        if (!ioeventfd_in_range(p, addr, len, val))
                return -EOPNOTSUPP;

        eventfd_signal(p->eventfd, 1);
        return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
        struct _ioeventfd *p = to_ioeventfd(this);

        ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
        .write      = ioeventfd_write,
        .destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
        struct _ioeventfd *_p;

        list_for_each_entry(_p, &kvm->ioeventfds, list)
                if (_p->bus_idx == p->bus_idx &&
                    _p->addr == p->addr &&
                    (!_p->length || !p->length ||
                     (_p->length == p->length &&
                      (_p->wildcard || p->wildcard ||
                       _p->datamatch == p->datamatch))))
                        return true;

        return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
        if (flags & KVM_IOEVENTFD_FLAG_PIO)
                return KVM_PIO_BUS;
        if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
                return KVM_VIRTIO_CCW_NOTIFY_BUS;
        return KVM_MMIO_BUS;
}

static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
                                enum kvm_bus bus_idx,
                                struct kvm_ioeventfd *args)
{
        struct eventfd_ctx *eventfd;
        struct _ioeventfd *p;
        int ret;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p) {
                ret = -ENOMEM;
                goto fail;
        }

        INIT_LIST_HEAD(&p->list);
        p->addr    = args->addr;
        p->bus_idx = bus_idx;
        p->length  = args->len;
        p->eventfd = eventfd;

        /* The datamatch feature is optional, otherwise this is a wildcard */
        if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
                p->datamatch = args->datamatch;
        else
                p->wildcard = true;

        mutex_lock(&kvm->slots_lock);

        /* Verify that there isn't a match already */
        if (ioeventfd_check_collision(kvm, p)) {
                ret = -EEXIST;
                goto unlock_fail;
        }

        kvm_iodevice_init(&p->dev, &ioeventfd_ops);

        ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
                                      &p->dev);
        if (ret < 0)
                goto unlock_fail;

        kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
        list_add_tail(&p->list, &kvm->ioeventfds);

        mutex_unlock(&kvm->slots_lock);

        return 0;

unlock_fail:
        mutex_unlock(&kvm->slots_lock);

fail:
        kfree(p);
        eventfd_ctx_put(eventfd);

        return ret;
}
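
/*
 * An illustrative userspace sketch of registering an ioeventfd via the
 * KVM_IOEVENTFD ioctl (vm_fd and the MMIO address are assumptions of
 * the example; error handling omitted):
 *
 *      int efd = eventfd(0, EFD_CLOEXEC);
 *      struct kvm_ioeventfd args = {
 *              .addr      = 0xd0000000,
 *              .len       = 4,
 *              .fd        = efd,
 *              .flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *              .datamatch = 1,
 *      };
 *      ioctl(vm_fd, KVM_IOEVENTFD, &args);
 *
 * After this, a 4-byte guest write of 1 to that address signals efd
 * from ioeventfd_write() without a heavyweight exit to userspace.
 */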

static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
                           struct kvm_ioeventfd *args)
{
        struct _ioeventfd        *p, *tmp;
        struct eventfd_ctx       *eventfd;
        struct kvm_io_bus        *bus;
        int                       ret = -ENOENT;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        mutex_lock(&kvm->slots_lock);

        list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
                bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

                if (p->bus_idx != bus_idx ||
                    p->eventfd != eventfd  ||
                    p->addr != args->addr  ||
                    p->length != args->len ||
                    p->wildcard != wildcard)
                        continue;

                if (!p->wildcard && p->datamatch != args->datamatch)
                        continue;

                kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
                bus = kvm_get_bus(kvm, bus_idx);
                if (bus)
                        bus->ioeventfd_count--;
                ioeventfd_release(p);
                ret = 0;
                break;
        }

        mutex_unlock(&kvm->slots_lock);

        eventfd_ctx_put(eventfd);

        return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
        int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

        if (!args->len && bus_idx == KVM_MMIO_BUS)
                kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

        return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        enum kvm_bus              bus_idx;
        int ret;

        bus_idx = ioeventfd_bus_from_flags(args->flags);
        /* must be natural-word sized, or 0 to ignore length */
        switch (args->len) {
        case 0:
        case 1:
        case 2:
        case 4:
        case 8:
                break;
        default:
                return -EINVAL;
        }

        /* check for range overflow */
        if (args->addr + args->len < args->addr)
                return -EINVAL;

        /* check for extra flags that we don't understand */
        if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
                return -EINVAL;

        /* ioeventfd with no length can't be combined with DATAMATCH */
        if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
                return -EINVAL;

        ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
        if (ret)
                goto fail;

        /* When length is ignored, MMIO is also put on a separate bus, for
         * faster lookups.
         */
        if (!args->len && bus_idx == KVM_MMIO_BUS) {
                ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
                if (ret < 0)
                        goto fast_fail;
        }

        return 0;

fast_fail:
        kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
        return ret;
}
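
/*
 * The extra registration on KVM_FAST_MMIO_BUS above exists because a
 * length-ignoring (wildcard, len == 0) ioeventfd can be matched on the
 * fast MMIO path, where only the faulting address is known and the
 * written data never has to be fetched or compared before signalling
 * the eventfd.
 */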

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
                return kvm_deassign_ioeventfd(kvm, args);

        return kvm_assign_ioeventfd(kvm, args);
}