// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/vmalloc.h>
#include <linux/reboot.h>
#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/file.h>
#include <linux/syscore_ops.h>
#include <linux/cpu.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/sched/stat.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/anon_inodes.h>
#include <linux/profile.h>
#include <linux/kvm_para.h>
#include <linux/pagemap.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/srcu.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/bsearch.h>
#include <linux/io.h>
#include <linux/lockdep.h>
#include <linux/kthread.h>
#include <linux/suspend.h>

#include <asm/processor.h>
#include <asm/ioctl.h>
#include <linux/uaccess.h>

#include "coalesced_mmio.h"
#include "async_pf.h"
#include "kvm_mm.h"
#include "vfio.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kvm.h>

#include <linux/kvm_dirty_ring.h>

/* Worst case buffer size needed for holding an integer. */
#define ITOA_MAX_LEN 12
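/* 12 = sign + ten decimal digits of a 32-bit value + NUL terminator. */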

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

/* Architectures should define their poll value according to the halt latency */
unsigned int halt_poll_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_ns, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns);

/* Default doubles per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow);

/* The start value to grow halt_poll_ns from */
unsigned int halt_poll_ns_grow_start = 10000; /* 10us */
module_param(halt_poll_ns_grow_start, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_grow_start);

/* Default resets per-vcpu halt_poll_ns. */
unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, uint, 0644);
EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);

/*
 * Ordering of locks:
 *
 *      kvm->lock --> kvm->slots_lock --> kvm->irq_lock
 */
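
/*
 * Illustrative nesting sketch (not from this file): a path that needed all
 * three locks would have to take them in the order above, e.g.
 *
 *      mutex_lock(&kvm->lock);
 *      mutex_lock(&kvm->slots_lock);
 *      mutex_lock(&kvm->irq_lock);
 *      ...
 *      mutex_unlock(&kvm->irq_lock);
 *      mutex_unlock(&kvm->slots_lock);
 *      mutex_unlock(&kvm->lock);
 *
 * Taking them in any other relative order risks an ABBA deadlock.
 */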

DEFINE_MUTEX(kvm_lock);
static DEFINE_RAW_SPINLOCK(kvm_count_lock);
LIST_HEAD(vm_list);

static cpumask_var_t cpus_hardware_enabled;
static int kvm_usage_count;
static atomic_t hardware_enable_failed;

static struct kmem_cache *kvm_vcpu_cache;

static __read_mostly struct preempt_ops kvm_preempt_ops;
static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_running_vcpu);

struct dentry *kvm_debugfs_dir;
EXPORT_SYMBOL_GPL(kvm_debugfs_dir);

static const struct file_operations stat_fops_per_vm;

static struct file_operations kvm_chardev_ops;

static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
                           unsigned long arg);
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
                                  unsigned long arg);
#define KVM_COMPAT(c)   .compat_ioctl   = (c)
#else
/*
 * For architectures that don't implement a compat infrastructure,
 * adopt a double line of defense:
 * - Prevent a compat task from opening /dev/kvm
 * - If the open has been done by a 64bit task, and the KVM fd
 *   passed to a compat task, let the ioctls fail.
 */
static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
                                unsigned long arg) { return -EINVAL; }

static int kvm_no_compat_open(struct inode *inode, struct file *file)
{
        return is_compat_task() ? -ENODEV : 0;
}
#define KVM_COMPAT(c)   .compat_ioctl   = kvm_no_compat_ioctl,  \
                        .open           = kvm_no_compat_open
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);

static void kvm_io_bus_destroy(struct kvm_io_bus *bus);

__visible bool kvm_rebooting;
EXPORT_SYMBOL_GPL(kvm_rebooting);

#define KVM_EVENT_CREATE_VM 0
#define KVM_EVENT_DESTROY_VM 1
static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;

static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);

__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                                                   unsigned long start, unsigned long end)
{
}

__weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
{
}

bool kvm_is_zone_device_page(struct page *page)
{
        /*
         * The metadata used by is_zone_device_page() to determine whether or
         * not a page is ZONE_DEVICE is guaranteed to be valid if and only if
         * the device has been pinned, e.g. by get_user_pages().  WARN if the
         * page_count() is zero to help detect bad usage of this helper.
         */
        if (WARN_ON_ONCE(!page_count(page)))
                return false;

        return is_zone_device_page(page);
}

/*
 * Returns a 'struct page' if the pfn is "valid" and backed by a refcounted
 * page, NULL otherwise.  Note, the list of refcounted PG_reserved page types
 * is likely incomplete; it has been compiled purely through people wanting to
 * back guests with a certain type of memory and encountering issues.
 */
struct page *kvm_pfn_to_refcounted_page(kvm_pfn_t pfn)
{
        struct page *page;

        if (!pfn_valid(pfn))
                return NULL;

        page = pfn_to_page(pfn);
        if (!PageReserved(page))
                return page;

        /* The ZERO_PAGE(s) is marked PG_reserved, but is refcounted. */
        if (is_zero_pfn(pfn))
                return page;

        /*
         * ZONE_DEVICE pages currently set PG_reserved, but from a refcounting
         * perspective they are "normal" pages, albeit with slightly different
         * usage rules.
         */
        if (kvm_is_zone_device_page(page))
                return page;

        return NULL;
}

/*
 * Switches to specified vcpu, until a matching vcpu_put()
 */
void vcpu_load(struct kvm_vcpu *vcpu)
{
        int cpu = get_cpu();

        __this_cpu_write(kvm_running_vcpu, vcpu);
        preempt_notifier_register(&vcpu->preempt_notifier);
        kvm_arch_vcpu_load(vcpu, cpu);
        put_cpu();
}
EXPORT_SYMBOL_GPL(vcpu_load);

void vcpu_put(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        kvm_arch_vcpu_put(vcpu);
        preempt_notifier_unregister(&vcpu->preempt_notifier);
        __this_cpu_write(kvm_running_vcpu, NULL);
        preempt_enable();
}
EXPORT_SYMBOL_GPL(vcpu_put);
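
/*
 * Usage sketch (hypothetical caller): vCPU ioctl paths bracket work on a
 * vCPU with vcpu_load()/vcpu_put() so that the architecture state is loaded
 * on the current physical CPU while the work runs:
 *
 *      vcpu_load(vcpu);
 *      r = do_vcpu_work(vcpu);         // hypothetical helper
 *      vcpu_put(vcpu);
 */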

/* TODO: merge with kvm_arch_vcpu_should_kick */
static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
{
        int mode = kvm_vcpu_exiting_guest_mode(vcpu);

        /*
         * We need to wait for the VCPU to reenable interrupts and get out of
         * READING_SHADOW_PAGE_TABLES mode.
         */
        if (req & KVM_REQUEST_WAIT)
                return mode != OUTSIDE_GUEST_MODE;

        /*
         * Need to kick a running VCPU, but otherwise there is nothing to do.
         */
        return mode == IN_GUEST_MODE;
}

static void ack_kick(void *_completed)
{
}

static inline bool kvm_kick_many_cpus(struct cpumask *cpus, bool wait)
{
        if (cpumask_empty(cpus))
                return false;

        smp_call_function_many(cpus, ack_kick, NULL, wait);
        return true;
}

static void kvm_make_vcpu_request(struct kvm_vcpu *vcpu, unsigned int req,
                                  struct cpumask *tmp, int current_cpu)
{
        int cpu;

        if (likely(!(req & KVM_REQUEST_NO_ACTION)))
                __kvm_make_request(req, vcpu);

        if (!(req & KVM_REQUEST_NO_WAKEUP) && kvm_vcpu_wake_up(vcpu))
                return;

        /*
         * Note, the vCPU could get migrated to a different pCPU at any point
         * after kvm_request_needs_ipi(), which could result in sending an IPI
         * to the previous pCPU.  But, that's OK because the purpose of the IPI
         * is to ensure the vCPU returns to OUTSIDE_GUEST_MODE, which is
         * satisfied if the vCPU migrates. Entering READING_SHADOW_PAGE_TABLES
         * after this point is also OK, as the requirement is only that KVM wait
         * for vCPUs that were reading SPTEs _before_ any changes were
         * finalized. See kvm_vcpu_kick() for more details on handling requests.
         */
        if (kvm_request_needs_ipi(vcpu, req)) {
                cpu = READ_ONCE(vcpu->cpu);
                if (cpu != -1 && cpu != current_cpu)
                        __cpumask_set_cpu(cpu, tmp);
        }
}

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
                                 unsigned long *vcpu_bitmap)
{
        struct kvm_vcpu *vcpu;
        struct cpumask *cpus;
        int i, me;
        bool called;

        me = get_cpu();

        cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
        cpumask_clear(cpus);

        for_each_set_bit(i, vcpu_bitmap, KVM_MAX_VCPUS) {
                vcpu = kvm_get_vcpu(kvm, i);
                if (!vcpu)
                        continue;
                kvm_make_vcpu_request(vcpu, req, cpus, me);
        }

        called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
        put_cpu();

        return called;
}

bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
                                      struct kvm_vcpu *except)
{
        struct kvm_vcpu *vcpu;
        struct cpumask *cpus;
        unsigned long i;
        bool called;
        int me;

        me = get_cpu();

        cpus = this_cpu_cpumask_var_ptr(cpu_kick_mask);
        cpumask_clear(cpus);

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu == except)
                        continue;
                kvm_make_vcpu_request(vcpu, req, cpus, me);
        }

        called = kvm_kick_many_cpus(cpus, !!(req & KVM_REQUEST_WAIT));
        put_cpu();

        return called;
}

bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
{
        return kvm_make_all_cpus_request_except(kvm, req, NULL);
}
EXPORT_SYMBOL_GPL(kvm_make_all_cpus_request);
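
/*
 * Example (mirrors kvm_flush_remote_tlbs() below): set a request on every
 * vCPU and kick those in guest mode; the return value says whether any
 * vCPU actually had to be kicked:
 *
 *      if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 *              ;       // at least one vCPU received an IPI
 */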

#ifndef CONFIG_HAVE_KVM_ARCH_TLB_FLUSH_ALL
void kvm_flush_remote_tlbs(struct kvm *kvm)
{
        ++kvm->stat.generic.remote_tlb_flush_requests;

        /*
         * We want to publish modifications to the page tables before reading
         * mode. Pairs with a memory barrier in arch-specific code.
         * - x86: smp_mb__after_srcu_read_unlock in vcpu_enter_guest
         * and smp_mb in walk_shadow_page_lockless_begin/end.
         * - powerpc: smp_mb in kvmppc_prepare_to_enter.
         *
         * There is already an smp_mb__after_atomic() before
         * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
         * barrier here.
         */
        if (!kvm_arch_flush_remote_tlb(kvm)
            || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
                ++kvm->stat.generic.remote_tlb_flush;
}
EXPORT_SYMBOL_GPL(kvm_flush_remote_tlbs);
#endif

static void kvm_flush_shadow_all(struct kvm *kvm)
{
        kvm_arch_flush_shadow_all(kvm);
        kvm_arch_guest_memory_reclaimed(kvm);
}

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
static inline void *mmu_memory_cache_alloc_obj(struct kvm_mmu_memory_cache *mc,
                                               gfp_t gfp_flags)
{
        gfp_flags |= mc->gfp_zero;

        if (mc->kmem_cache)
                return kmem_cache_alloc(mc->kmem_cache, gfp_flags);
        else
                return (void *)__get_free_page(gfp_flags);
}

int __kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int capacity, int min)
{
        gfp_t gfp = mc->gfp_custom ? mc->gfp_custom : GFP_KERNEL_ACCOUNT;
        void *obj;

        if (mc->nobjs >= min)
                return 0;

        if (unlikely(!mc->objects)) {
                if (WARN_ON_ONCE(!capacity))
                        return -EIO;

                mc->objects = kvmalloc_array(sizeof(void *), capacity, gfp);
                if (!mc->objects)
                        return -ENOMEM;

                mc->capacity = capacity;
        }

        /* It is illegal to request a different capacity across topups. */
        if (WARN_ON_ONCE(mc->capacity != capacity))
                return -EIO;

        while (mc->nobjs < mc->capacity) {
                obj = mmu_memory_cache_alloc_obj(mc, gfp);
                if (!obj)
                        return mc->nobjs >= min ? 0 : -ENOMEM;
                mc->objects[mc->nobjs++] = obj;
        }
        return 0;
}

int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min)
{
        return __kvm_mmu_topup_memory_cache(mc, KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE, min);
}

int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc)
{
        return mc->nobjs;
}

void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs) {
                if (mc->kmem_cache)
                        kmem_cache_free(mc->kmem_cache, mc->objects[--mc->nobjs]);
                else
                        free_page((unsigned long)mc->objects[--mc->nobjs]);
        }

        kvfree(mc->objects);

        mc->objects = NULL;
        mc->capacity = 0;
}

void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        if (WARN_ON(!mc->nobjs))
                p = mmu_memory_cache_alloc_obj(mc, GFP_ATOMIC | __GFP_ACCOUNT);
        else
                p = mc->objects[--mc->nobjs];
        BUG_ON(!p);
        return p;
}
#endif
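
/*
 * Usage sketch for the cache helpers above (assumed caller, not from this
 * file): arch code tops up the cache while sleeping is still allowed, then
 * allocates from it with mmu_lock held, where the GFP_ATOMIC fallback is a
 * last resort:
 *
 *      r = kvm_mmu_topup_memory_cache(mc, min);
 *      if (r)
 *              return r;
 *      ...
 *      KVM_MMU_LOCK(kvm);
 *      obj = kvm_mmu_memory_cache_alloc(mc);   // cannot fail after topup
 *      KVM_MMU_UNLOCK(kvm);
 */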

static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
{
        mutex_init(&vcpu->mutex);
        vcpu->cpu = -1;
        vcpu->kvm = kvm;
        vcpu->vcpu_id = id;
        vcpu->pid = NULL;
#ifndef __KVM_HAVE_ARCH_WQP
        rcuwait_init(&vcpu->wait);
#endif
        kvm_async_pf_vcpu_init(vcpu);

        kvm_vcpu_set_in_spin_loop(vcpu, false);
        kvm_vcpu_set_dy_eligible(vcpu, false);
        vcpu->preempted = false;
        vcpu->ready = false;
        preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
        vcpu->last_used_slot = NULL;

        /* Fill the stats id string for the vcpu */
        snprintf(vcpu->stats_id, sizeof(vcpu->stats_id), "kvm-%d/vcpu-%d",
                 task_pid_nr(current), id);
}

static void kvm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_destroy(vcpu);
        kvm_dirty_ring_free(&vcpu->dirty_ring);

        /*
         * No need for rcu_read_lock as VCPU_RUN is the only place that changes
         * the vcpu->pid pointer, and at destruction time all file descriptors
         * are already gone.
         */
        put_pid(rcu_dereference_protected(vcpu->pid, 1));

        free_page((unsigned long)vcpu->run);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_destroy_vcpus(struct kvm *kvm)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_vcpu_destroy(vcpu);
                xa_erase(&kvm->vcpu_array, i);
        }

        atomic_set(&kvm->online_vcpus, 0);
}
EXPORT_SYMBOL_GPL(kvm_destroy_vcpus);

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
{
        return container_of(mn, struct kvm, mmu_notifier);
}

static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
                                              struct mm_struct *mm,
                                              unsigned long start, unsigned long end)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
        srcu_read_unlock(&kvm->srcu, idx);
}

typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);

typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
                             unsigned long end);

typedef void (*on_unlock_fn_t)(struct kvm *kvm);

struct kvm_hva_range {
        unsigned long start;
        unsigned long end;
        pte_t pte;
        hva_handler_t handler;
        on_lock_fn_t on_lock;
        on_unlock_fn_t on_unlock;
        bool flush_on_ret;
        bool may_block;
};

/*
 * Use a dedicated stub instead of NULL to indicate that there is no callback
 * function/handler.  The compiler technically can't guarantee that a real
 * function will have a non-zero address, and so it will generate code to
 * check for !NULL, whereas comparing against a stub will be elided at compile
 * time (unless the compiler is getting long in the tooth, e.g. gcc 4.9).
 */
static void kvm_null_fn(void)
{

}
#define IS_KVM_NULL_FN(fn) ((fn) == (void *)kvm_null_fn)

/* Iterate over each memslot intersecting [start, last] (inclusive) range */
#define kvm_for_each_memslot_in_hva_range(node, slots, start, last)          \
        for (node = interval_tree_iter_first(&slots->hva_tree, start, last); \
             node;                                                           \
             node = interval_tree_iter_next(node, start, last))

static __always_inline int __kvm_handle_hva_range(struct kvm *kvm,
                                                  const struct kvm_hva_range *range)
{
        bool ret = false, locked = false;
        struct kvm_gfn_range gfn_range;
        struct kvm_memory_slot *slot;
        struct kvm_memslots *slots;
        int i, idx;

        if (WARN_ON_ONCE(range->end <= range->start))
                return 0;

        /* A null handler is allowed if and only if on_lock() is provided. */
        if (WARN_ON_ONCE(IS_KVM_NULL_FN(range->on_lock) &&
                         IS_KVM_NULL_FN(range->handler)))
                return 0;

        idx = srcu_read_lock(&kvm->srcu);

        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
                struct interval_tree_node *node;

                slots = __kvm_memslots(kvm, i);
                kvm_for_each_memslot_in_hva_range(node, slots,
                                                  range->start, range->end - 1) {
                        unsigned long hva_start, hva_end;

                        slot = container_of(node, struct kvm_memory_slot, hva_node[slots->node_idx]);
                        hva_start = max(range->start, slot->userspace_addr);
                        hva_end = min(range->end, slot->userspace_addr +
                                                  (slot->npages << PAGE_SHIFT));

                        /*
                         * To optimize for the likely case where the address
                         * range is covered by zero or one memslots, don't
                         * bother making these conditional (to avoid writes on
                         * the second or later invocation of the handler).
                         */
                        gfn_range.pte = range->pte;
                        gfn_range.may_block = range->may_block;

                        /*
                         * {gfn(page) | page intersects with [hva_start, hva_end)} =
                         * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                         */
                        gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
                        gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);
                        gfn_range.slot = slot;

                        if (!locked) {
                                locked = true;
                                KVM_MMU_LOCK(kvm);
                                if (!IS_KVM_NULL_FN(range->on_lock))
                                        range->on_lock(kvm, range->start, range->end);
                                if (IS_KVM_NULL_FN(range->handler))
                                        break;
                        }
                        ret |= range->handler(kvm, &gfn_range);
                }
        }

        if (range->flush_on_ret && ret)
                kvm_flush_remote_tlbs(kvm);

        if (locked) {
                KVM_MMU_UNLOCK(kvm);
                if (!IS_KVM_NULL_FN(range->on_unlock))
                        range->on_unlock(kvm);
        }

        srcu_read_unlock(&kvm->srcu, idx);

        /* The notifiers are averse to booleans. :-( */
        return (int)ret;
}

static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
                                                unsigned long start,
                                                unsigned long end,
                                                pte_t pte,
                                                hva_handler_t handler)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        const struct kvm_hva_range range = {
                .start          = start,
                .end            = end,
                .pte            = pte,
                .handler        = handler,
                .on_lock        = (void *)kvm_null_fn,
                .on_unlock      = (void *)kvm_null_fn,
                .flush_on_ret   = true,
                .may_block      = false,
        };

        return __kvm_handle_hva_range(kvm, &range);
}

static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
                                                         unsigned long start,
                                                         unsigned long end,
                                                         hva_handler_t handler)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        const struct kvm_hva_range range = {
                .start          = start,
                .end            = end,
                .pte            = __pte(0),
                .handler        = handler,
                .on_lock        = (void *)kvm_null_fn,
                .on_unlock      = (void *)kvm_null_fn,
                .flush_on_ret   = false,
                .may_block      = false,
        };

        return __kvm_handle_hva_range(kvm, &range);
}

static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long address,
                                        pte_t pte)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);

        trace_kvm_set_spte_hva(address);

        /*
         * .change_pte() must be surrounded by .invalidate_range_{start,end}().
         * If mmu_invalidate_in_progress is zero, then no in-progress
         * invalidations, including this one, found a relevant memslot at
         * start(); rechecking memslots here is unnecessary.  Note, a false
         * positive (count elevated by a different invalidation) is sub-optimal
         * but functionally ok.
         */
        WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count));
        if (!READ_ONCE(kvm->mmu_invalidate_in_progress))
                return;

        kvm_handle_hva_range(mn, address, address + 1, pte, kvm_set_spte_gfn);
}

void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
                              unsigned long end)
{
        /*
         * The count increase must become visible at unlock time as no
         * spte can be established without taking the mmu_lock and
         * count is also read inside the mmu_lock critical section.
         */
        kvm->mmu_invalidate_in_progress++;
        if (likely(kvm->mmu_invalidate_in_progress == 1)) {
                kvm->mmu_invalidate_range_start = start;
                kvm->mmu_invalidate_range_end = end;
        } else {
                /*
                 * Fully tracking multiple concurrent ranges has diminishing
                 * returns. Keep things simple and just find the minimal range
                 * which includes the current and new ranges. As there won't be
                 * enough information to subtract a range after its invalidate
                 * completes, any ranges invalidated concurrently will
                 * accumulate and persist until all outstanding invalidates
                 * complete.
                 */
                kvm->mmu_invalidate_range_start =
                        min(kvm->mmu_invalidate_range_start, start);
                kvm->mmu_invalidate_range_end =
                        max(kvm->mmu_invalidate_range_end, end);
        }
}

static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                        const struct mmu_notifier_range *range)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        const struct kvm_hva_range hva_range = {
                .start          = range->start,
                .end            = range->end,
                .pte            = __pte(0),
                .handler        = kvm_unmap_gfn_range,
                .on_lock        = kvm_mmu_invalidate_begin,
                .on_unlock      = kvm_arch_guest_memory_reclaimed,
                .flush_on_ret   = true,
                .may_block      = mmu_notifier_range_blockable(range),
        };

        trace_kvm_unmap_hva_range(range->start, range->end);

        /*
         * Prevent memslot modification between range_start() and range_end()
         * so that conditionally locking provides the same result in both
         * functions.  Without that guarantee, the mmu_invalidate_in_progress
         * adjustments will be imbalanced.
         *
         * Pairs with the decrement in range_end().
         */
        spin_lock(&kvm->mn_invalidate_lock);
        kvm->mn_active_invalidate_count++;
        spin_unlock(&kvm->mn_invalidate_lock);

        /*
         * Invalidate pfn caches _before_ invalidating the secondary MMUs, i.e.
         * before acquiring mmu_lock, to avoid holding mmu_lock while acquiring
         * each cache's lock.  There are relatively few caches in existence at
         * any given time, and the caches themselves can check for hva overlap,
         * i.e. don't need to rely on memslot overlap checks for performance.
         * Because this runs without holding mmu_lock, the pfn caches must use
         * mn_active_invalidate_count (see above) instead of
         * mmu_invalidate_in_progress.
         */
        gfn_to_pfn_cache_invalidate_start(kvm, range->start, range->end,
                                          hva_range.may_block);

        __kvm_handle_hva_range(kvm, &hva_range);

        return 0;
}

void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
                            unsigned long end)
{
        /*
         * This sequence increase will notify the kvm page fault that
         * the page that is going to be mapped in the spte could have
         * been freed.
         */
        kvm->mmu_invalidate_seq++;
        smp_wmb();
        /*
         * The above sequence increase must be visible before the
         * below count decrease, which is ensured by the smp_wmb above
         * in conjunction with the smp_rmb in mmu_invalidate_retry().
         */
        kvm->mmu_invalidate_in_progress--;
}
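
/*
 * Consumer-side sketch: page fault handlers snapshot mmu_invalidate_seq
 * before resolving an hva, then recheck it under mmu_lock via
 * mmu_invalidate_retry() (see kvm_host.h) and retry the fault if the
 * sequence changed, i.e. if an invalidation completed in the meantime.
 */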

static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                        const struct mmu_notifier_range *range)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        const struct kvm_hva_range hva_range = {
                .start          = range->start,
                .end            = range->end,
                .pte            = __pte(0),
                .handler        = (void *)kvm_null_fn,
                .on_lock        = kvm_mmu_invalidate_end,
                .on_unlock      = (void *)kvm_null_fn,
                .flush_on_ret   = false,
                .may_block      = mmu_notifier_range_blockable(range),
        };
        bool wake;

        __kvm_handle_hva_range(kvm, &hva_range);

        /* Pairs with the increment in range_start(). */
        spin_lock(&kvm->mn_invalidate_lock);
        wake = (--kvm->mn_active_invalidate_count == 0);
        spin_unlock(&kvm->mn_invalidate_lock);

        /*
         * There can only be one waiter, since the wait happens under
         * slots_lock.
         */
        if (wake)
                rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait);

        BUG_ON(kvm->mmu_invalidate_in_progress < 0);
}

static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
                                              struct mm_struct *mm,
                                              unsigned long start,
                                              unsigned long end)
{
        trace_kvm_age_hva(start, end);

        return kvm_handle_hva_range(mn, start, end, __pte(0), kvm_age_gfn);
}

static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
                                        struct mm_struct *mm,
                                        unsigned long start,
                                        unsigned long end)
{
        trace_kvm_age_hva(start, end);

        /*
         * Even though we do not flush TLB, this will still adversely
         * affect performance on pre-Haswell Intel EPT, where there is
         * no EPT Access Bit to clear so that we have to tear down EPT
         * tables instead. If we find this unacceptable, we can always
         * add a parameter to kvm_age_hva so that it effectively doesn't
         * do anything on clear_young.
         *
         * Also note that currently we never issue secondary TLB flushes
         * from clear_young, leaving this job up to the regular system
         * cadence. If we find this inaccurate, we might come up with a
         * more sophisticated heuristic later.
         */
        return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}

static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
                                       struct mm_struct *mm,
                                       unsigned long address)
{
        trace_kvm_test_age_hva(address);

        return kvm_handle_hva_range_no_flush(mn, address, address + 1,
                                             kvm_test_age_gfn);
}

static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
{
        struct kvm *kvm = mmu_notifier_to_kvm(mn);
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        kvm_flush_shadow_all(kvm);
        srcu_read_unlock(&kvm->srcu, idx);
}

static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
        .invalidate_range       = kvm_mmu_notifier_invalidate_range,
        .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
        .invalidate_range_end   = kvm_mmu_notifier_invalidate_range_end,
        .clear_flush_young      = kvm_mmu_notifier_clear_flush_young,
        .clear_young            = kvm_mmu_notifier_clear_young,
        .test_young             = kvm_mmu_notifier_test_young,
        .change_pte             = kvm_mmu_notifier_change_pte,
        .release                = kvm_mmu_notifier_release,
};

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
        kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
        return mmu_notifier_register(&kvm->mmu_notifier, current->mm);
}

#else  /* !(CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER) */

static int kvm_init_mmu_notifier(struct kvm *kvm)
{
        return 0;
}

#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */

#ifdef CONFIG_HAVE_KVM_PM_NOTIFIER
static int kvm_pm_notifier_call(struct notifier_block *bl,
                                unsigned long state,
                                void *unused)
{
        struct kvm *kvm = container_of(bl, struct kvm, pm_notifier);

        return kvm_arch_pm_notifier(kvm, state);
}

static void kvm_init_pm_notifier(struct kvm *kvm)
{
        kvm->pm_notifier.notifier_call = kvm_pm_notifier_call;
        /* Suspend KVM before we suspend ftrace, RCU, etc. */
        kvm->pm_notifier.priority = INT_MAX;
        register_pm_notifier(&kvm->pm_notifier);
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
        unregister_pm_notifier(&kvm->pm_notifier);
}
#else /* !CONFIG_HAVE_KVM_PM_NOTIFIER */
static void kvm_init_pm_notifier(struct kvm *kvm)
{
}

static void kvm_destroy_pm_notifier(struct kvm *kvm)
{
}
#endif /* CONFIG_HAVE_KVM_PM_NOTIFIER */

static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        if (!memslot->dirty_bitmap)
                return;

        kvfree(memslot->dirty_bitmap);
        memslot->dirty_bitmap = NULL;
}

/* This does not remove the slot from struct kvm_memslots data structures */
static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        kvm_destroy_dirty_bitmap(slot);

        kvm_arch_free_memslot(kvm, slot);

        kfree(slot);
}

static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
{
        struct hlist_node *idnode;
        struct kvm_memory_slot *memslot;
        int bkt;

        /*
         * The same memslot objects live in both active and inactive sets,
         * arbitrarily free using index '1' so the second invocation of this
         * function isn't operating over a structure with dangling pointers
         * (even though this function isn't actually touching them).
         */
        if (!slots->node_idx)
                return;

        hash_for_each_safe(slots->id_hash, bkt, idnode, memslot, id_node[1])
                kvm_free_memslot(kvm, memslot);
}

static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
{
        switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
        case KVM_STATS_TYPE_INSTANT:
                return 0444;
        case KVM_STATS_TYPE_CUMULATIVE:
        case KVM_STATS_TYPE_PEAK:
        default:
                return 0644;
        }
}

static void kvm_destroy_vm_debugfs(struct kvm *kvm)
{
        int i;
        int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
                                      kvm_vcpu_stats_header.num_desc;

        if (IS_ERR(kvm->debugfs_dentry))
                return;

        debugfs_remove_recursive(kvm->debugfs_dentry);

        if (kvm->debugfs_stat_data) {
                for (i = 0; i < kvm_debugfs_num_entries; i++)
                        kfree(kvm->debugfs_stat_data[i]);
                kfree(kvm->debugfs_stat_data);
        }
}

static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
{
        static DEFINE_MUTEX(kvm_debugfs_lock);
        struct dentry *dent;
        char dir_name[ITOA_MAX_LEN * 2];
        struct kvm_stat_data *stat_data;
        const struct _kvm_stats_desc *pdesc;
        int i, ret = -ENOMEM;
        int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
                                      kvm_vcpu_stats_header.num_desc;

        if (!debugfs_initialized())
                return 0;

        snprintf(dir_name, sizeof(dir_name), "%d-%s", task_pid_nr(current), fdname);
        mutex_lock(&kvm_debugfs_lock);
        dent = debugfs_lookup(dir_name, kvm_debugfs_dir);
        if (dent) {
                pr_warn_ratelimited("KVM: debugfs: duplicate directory %s\n", dir_name);
                dput(dent);
                mutex_unlock(&kvm_debugfs_lock);
                return 0;
        }
        dent = debugfs_create_dir(dir_name, kvm_debugfs_dir);
        mutex_unlock(&kvm_debugfs_lock);
        if (IS_ERR(dent))
                return 0;

        kvm->debugfs_dentry = dent;
        kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries,
                                         sizeof(*kvm->debugfs_stat_data),
                                         GFP_KERNEL_ACCOUNT);
        if (!kvm->debugfs_stat_data)
                goto out_err;

        for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
                pdesc = &kvm_vm_stats_desc[i];
                stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
                if (!stat_data)
                        goto out_err;

                stat_data->kvm = kvm;
                stat_data->desc = pdesc;
                stat_data->kind = KVM_STAT_VM;
                kvm->debugfs_stat_data[i] = stat_data;
                debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
                                    kvm->debugfs_dentry, stat_data,
                                    &stat_fops_per_vm);
        }

        for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
                pdesc = &kvm_vcpu_stats_desc[i];
                stat_data = kzalloc(sizeof(*stat_data), GFP_KERNEL_ACCOUNT);
                if (!stat_data)
                        goto out_err;

                stat_data->kvm = kvm;
                stat_data->desc = pdesc;
                stat_data->kind = KVM_STAT_VCPU;
                kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data;
                debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
                                    kvm->debugfs_dentry, stat_data,
                                    &stat_fops_per_vm);
        }

        ret = kvm_arch_create_vm_debugfs(kvm);
        if (ret)
                goto out_err;

        return 0;
out_err:
        kvm_destroy_vm_debugfs(kvm);
        return ret;
}

/*
 * Called after the VM is otherwise initialized, but just before adding it to
 * the vm_list.
 */
int __weak kvm_arch_post_init_vm(struct kvm *kvm)
{
        return 0;
}

/*
 * Called just after removing the VM from the vm_list, but before doing any
 * other destruction.
 */
void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm)
{
}

/*
 * Called after the per-VM debugfs directory is created.  When called,
 * kvm->debugfs_dentry should already be set up, so arch-specific debugfs
 * entries can be created under it.  Cleanup is done automatically and
 * recursively by kvm_destroy_vm_debugfs(), so a per-arch destroy interface
 * is not needed.
 */
int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
        return 0;
}

static struct kvm *kvm_create_vm(unsigned long type, const char *fdname)
{
        struct kvm *kvm = kvm_arch_alloc_vm();
        struct kvm_memslots *slots;
        int r = -ENOMEM;
        int i, j;

        if (!kvm)
                return ERR_PTR(-ENOMEM);

        /* KVM is pinned via open("/dev/kvm"), the fd passed to this ioctl(). */
        __module_get(kvm_chardev_ops.owner);

        KVM_MMU_LOCK_INIT(kvm);
        mmgrab(current->mm);
        kvm->mm = current->mm;
        kvm_eventfd_init(kvm);
        mutex_init(&kvm->lock);
        mutex_init(&kvm->irq_lock);
        mutex_init(&kvm->slots_lock);
        mutex_init(&kvm->slots_arch_lock);
        spin_lock_init(&kvm->mn_invalidate_lock);
        rcuwait_init(&kvm->mn_memslots_update_rcuwait);
        xa_init(&kvm->vcpu_array);

        INIT_LIST_HEAD(&kvm->gpc_list);
        spin_lock_init(&kvm->gpc_lock);

        INIT_LIST_HEAD(&kvm->devices);
        kvm->max_vcpus = KVM_MAX_VCPUS;

        BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);

        /*
         * Force subsequent debugfs file creations to fail if the VM directory
         * is not created (by kvm_create_vm_debugfs()).
         */
        kvm->debugfs_dentry = ERR_PTR(-ENOENT);

        snprintf(kvm->stats_id, sizeof(kvm->stats_id), "kvm-%d",
                 task_pid_nr(current));

        if (init_srcu_struct(&kvm->srcu))
                goto out_err_no_srcu;
        if (init_srcu_struct(&kvm->irq_srcu))
                goto out_err_no_irq_srcu;

        refcount_set(&kvm->users_count, 1);
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
                for (j = 0; j < 2; j++) {
                        slots = &kvm->__memslots[i][j];

                        atomic_long_set(&slots->last_used_slot, (unsigned long)NULL);
                        slots->hva_tree = RB_ROOT_CACHED;
                        slots->gfn_tree = RB_ROOT;
                        hash_init(slots->id_hash);
                        slots->node_idx = j;

                        /* Generations must be different for each address space. */
                        slots->generation = i;
                }

                rcu_assign_pointer(kvm->memslots[i], &kvm->__memslots[i][0]);
        }

        for (i = 0; i < KVM_NR_BUSES; i++) {
                rcu_assign_pointer(kvm->buses[i],
                        kzalloc(sizeof(struct kvm_io_bus), GFP_KERNEL_ACCOUNT));
                if (!kvm->buses[i])
                        goto out_err_no_arch_destroy_vm;
        }

        kvm->max_halt_poll_ns = halt_poll_ns;

        r = kvm_arch_init_vm(kvm, type);
        if (r)
                goto out_err_no_arch_destroy_vm;

        r = hardware_enable_all();
        if (r)
                goto out_err_no_disable;

#ifdef CONFIG_HAVE_KVM_IRQFD
        INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list);
#endif

        r = kvm_init_mmu_notifier(kvm);
        if (r)
                goto out_err_no_mmu_notifier;

        r = kvm_coalesced_mmio_init(kvm);
        if (r < 0)
                goto out_no_coalesced_mmio;

        r = kvm_create_vm_debugfs(kvm, fdname);
        if (r)
                goto out_err_no_debugfs;

        r = kvm_arch_post_init_vm(kvm);
        if (r)
                goto out_err;

        mutex_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
        mutex_unlock(&kvm_lock);

        preempt_notifier_inc();
        kvm_init_pm_notifier(kvm);

        return kvm;

out_err:
        kvm_destroy_vm_debugfs(kvm);
out_err_no_debugfs:
        kvm_coalesced_mmio_free(kvm);
out_no_coalesced_mmio:
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        if (kvm->mmu_notifier.ops)
                mmu_notifier_unregister(&kvm->mmu_notifier, current->mm);
#endif
out_err_no_mmu_notifier:
        hardware_disable_all();
out_err_no_disable:
        kvm_arch_destroy_vm(kvm);
out_err_no_arch_destroy_vm:
        WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count));
        for (i = 0; i < KVM_NR_BUSES; i++)
                kfree(kvm_get_bus(kvm, i));
        cleanup_srcu_struct(&kvm->irq_srcu);
out_err_no_irq_srcu:
        cleanup_srcu_struct(&kvm->srcu);
out_err_no_srcu:
        kvm_arch_free_vm(kvm);
        mmdrop(current->mm);
        module_put(kvm_chardev_ops.owner);
        return ERR_PTR(r);
}

static void kvm_destroy_devices(struct kvm *kvm)
{
        struct kvm_device *dev, *tmp;

        /*
         * We do not need to take the kvm->lock here, because nobody else
         * has a reference to the struct kvm at this point and therefore
         * cannot access the devices list anyhow.
         */
        list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
                list_del(&dev->vm_node);
                dev->ops->destroy(dev);
        }
}

static void kvm_destroy_vm(struct kvm *kvm)
{
        int i;
        struct mm_struct *mm = kvm->mm;

        kvm_destroy_pm_notifier(kvm);
        kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
        kvm_destroy_vm_debugfs(kvm);
        kvm_arch_sync_events(kvm);
        mutex_lock(&kvm_lock);
        list_del(&kvm->vm_list);
        mutex_unlock(&kvm_lock);
        kvm_arch_pre_destroy_vm(kvm);

        kvm_free_irq_routing(kvm);
        for (i = 0; i < KVM_NR_BUSES; i++) {
                struct kvm_io_bus *bus = kvm_get_bus(kvm, i);

                if (bus)
                        kvm_io_bus_destroy(bus);
                kvm->buses[i] = NULL;
        }
        kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
        mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
        /*
         * At this point, pending calls to invalidate_range_start()
         * have completed but no more MMU notifiers will run, so
         * mn_active_invalidate_count may remain unbalanced.
         * No threads can be waiting in install_new_memslots as the
         * last reference on KVM has been dropped, but freeing
         * memslots would deadlock without this manual intervention.
         */
        WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait));
        kvm->mn_active_invalidate_count = 0;
#else
        kvm_flush_shadow_all(kvm);
#endif
        kvm_arch_destroy_vm(kvm);
        kvm_destroy_devices(kvm);
        for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
                kvm_free_memslots(kvm, &kvm->__memslots[i][0]);
                kvm_free_memslots(kvm, &kvm->__memslots[i][1]);
        }
        cleanup_srcu_struct(&kvm->irq_srcu);
        cleanup_srcu_struct(&kvm->srcu);
        kvm_arch_free_vm(kvm);
        preempt_notifier_dec();
        hardware_disable_all();
        mmdrop(mm);
        module_put(kvm_chardev_ops.owner);
}

void kvm_get_kvm(struct kvm *kvm)
{
        refcount_inc(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm);

/*
 * Make sure the VM is not being destroyed; this is a safe version of
 * kvm_get_kvm().  Returns true if kvm was referenced successfully, false
 * otherwise.
 */
bool kvm_get_kvm_safe(struct kvm *kvm)
{
        return refcount_inc_not_zero(&kvm->users_count);
}
EXPORT_SYMBOL_GPL(kvm_get_kvm_safe);

void kvm_put_kvm(struct kvm *kvm)
{
        if (refcount_dec_and_test(&kvm->users_count))
                kvm_destroy_vm(kvm);
}
EXPORT_SYMBOL_GPL(kvm_put_kvm);

/*
 * Used to put a reference that was taken on behalf of an object associated
 * with a user-visible file descriptor, e.g. a vcpu or device, if installation
 * of the new file descriptor fails and the reference cannot be transferred to
 * its final owner.  In such cases, the caller is still actively using @kvm and
 * will fail miserably if the refcount unexpectedly hits zero.
 */
void kvm_put_kvm_no_destroy(struct kvm *kvm)
{
        WARN_ON(refcount_dec_and_test(&kvm->users_count));
}
EXPORT_SYMBOL_GPL(kvm_put_kvm_no_destroy);
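
/*
 * Pattern sketch (hypothetical caller): take a reference before handing out
 * a new file descriptor that points at the VM, and drop it with
 * kvm_put_kvm_no_destroy() if installing the fd fails:
 *
 *      kvm_get_kvm(kvm);
 *      r = install_some_fd(kvm);       // hypothetical fd install
 *      if (r < 0)
 *              kvm_put_kvm_no_destroy(kvm);
 */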

static int kvm_vm_release(struct inode *inode, struct file *filp)
{
        struct kvm *kvm = filp->private_data;

        kvm_irqfd_release(kvm);

        kvm_put_kvm(kvm);
        return 0;
}

/*
 * Allocation size is twice as large as the actual dirty bitmap size.
 * See kvm_vm_ioctl_get_dirty_log() for why this is needed.
 */
static int kvm_alloc_dirty_bitmap(struct kvm_memory_slot *memslot)
{
        unsigned long dirty_bytes = kvm_dirty_bitmap_bytes(memslot);

        memslot->dirty_bitmap = __vcalloc(2, dirty_bytes, GFP_KERNEL_ACCOUNT);
        if (!memslot->dirty_bitmap)
                return -ENOMEM;

        return 0;
}

static struct kvm_memslots *kvm_get_inactive_memslots(struct kvm *kvm, int as_id)
{
        struct kvm_memslots *active = __kvm_memslots(kvm, as_id);
        int node_idx_inactive = active->node_idx ^ 1;

        return &kvm->__memslots[as_id][node_idx_inactive];
}

/*
 * Helper to get the address space ID when one of the memslot pointers may be
 * NULL.  This also serves as a sanity check that at least one of the pointers
 * is non-NULL, and that their address space IDs don't diverge.
 */
static int kvm_memslots_get_as_id(struct kvm_memory_slot *a,
                                  struct kvm_memory_slot *b)
{
        if (WARN_ON_ONCE(!a && !b))
                return 0;

        if (!a)
                return b->as_id;
        if (!b)
                return a->as_id;

        WARN_ON_ONCE(a->as_id != b->as_id);
        return a->as_id;
}

static void kvm_insert_gfn_node(struct kvm_memslots *slots,
                                struct kvm_memory_slot *slot)
{
        struct rb_root *gfn_tree = &slots->gfn_tree;
        struct rb_node **node, *parent;
        int idx = slots->node_idx;

        parent = NULL;
        for (node = &gfn_tree->rb_node; *node; ) {
                struct kvm_memory_slot *tmp;

                tmp = container_of(*node, struct kvm_memory_slot, gfn_node[idx]);
                parent = *node;
                if (slot->base_gfn < tmp->base_gfn)
                        node = &(*node)->rb_left;
                else if (slot->base_gfn > tmp->base_gfn)
                        node = &(*node)->rb_right;
                else
                        BUG();
        }

        rb_link_node(&slot->gfn_node[idx], parent, node);
        rb_insert_color(&slot->gfn_node[idx], gfn_tree);
}

static void kvm_erase_gfn_node(struct kvm_memslots *slots,
                               struct kvm_memory_slot *slot)
{
        rb_erase(&slot->gfn_node[slots->node_idx], &slots->gfn_tree);
}

static void kvm_replace_gfn_node(struct kvm_memslots *slots,
                                 struct kvm_memory_slot *old,
                                 struct kvm_memory_slot *new)
{
        int idx = slots->node_idx;

        WARN_ON_ONCE(old->base_gfn != new->base_gfn);

        rb_replace_node(&old->gfn_node[idx], &new->gfn_node[idx],
                        &slots->gfn_tree);
}

/*
 * Replace @old with @new in the inactive memslots.
 *
 * With NULL @old this simply adds @new.
 * With NULL @new this simply removes @old.
 *
 * If @new is non-NULL its hva_node[slots_idx] range has to be set
 * appropriately.
 */
static void kvm_replace_memslot(struct kvm *kvm,
                                struct kvm_memory_slot *old,
                                struct kvm_memory_slot *new)
{
        int as_id = kvm_memslots_get_as_id(old, new);
        struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);
        int idx = slots->node_idx;

        if (old) {
                hash_del(&old->id_node[idx]);
                interval_tree_remove(&old->hva_node[idx], &slots->hva_tree);

                if ((long)old == atomic_long_read(&slots->last_used_slot))
                        atomic_long_set(&slots->last_used_slot, (long)new);

                if (!new) {
                        kvm_erase_gfn_node(slots, old);
                        return;
                }
        }

        /*
         * Initialize @new's hva range.  Do this even when replacing an @old
         * slot, kvm_copy_memslot() deliberately does not touch node data.
         */
        new->hva_node[idx].start = new->userspace_addr;
        new->hva_node[idx].last = new->userspace_addr +
                                  (new->npages << PAGE_SHIFT) - 1;

        /*
         * (Re)Add the new memslot.  There is no O(1) interval_tree_replace(),
         * hva_node needs to be swapped with remove+insert even though hva can't
         * change when replacing an existing slot.
         */
        hash_add(slots->id_hash, &new->id_node[idx], new->id);
        interval_tree_insert(&new->hva_node[idx], &slots->hva_tree);

        /*
         * If the memslot gfn is unchanged, rb_replace_node() can be used to
         * switch the node in the gfn tree instead of removing the old and
         * inserting the new as two separate operations. Replacement is a
         * single O(1) operation versus two O(log(n)) operations for
         * remove+insert.
         */
        if (old && old->base_gfn == new->base_gfn) {
                kvm_replace_gfn_node(slots, old, new);
        } else {
                if (old)
                        kvm_erase_gfn_node(slots, old);
                kvm_insert_gfn_node(slots, new);
        }
}

static int check_memory_region_flags(const struct kvm_userspace_memory_region *mem)
{
        u32 valid_flags = KVM_MEM_LOG_DIRTY_PAGES;

#ifdef __KVM_HAVE_READONLY_MEM
        valid_flags |= KVM_MEM_READONLY;
#endif

        if (mem->flags & ~valid_flags)
                return -EINVAL;

        return 0;
}

static void kvm_swap_active_memslots(struct kvm *kvm, int as_id)
{
        struct kvm_memslots *slots = kvm_get_inactive_memslots(kvm, as_id);

1547         /* Grab the generation from the active memslots. */
1548         u64 gen = __kvm_memslots(kvm, as_id)->generation;
1549
1550         WARN_ON(gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
1551         slots->generation = gen | KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1552
1553         /*
1554          * Do not store the new memslots while there are invalidations in
1555          * progress, otherwise the locking in invalidate_range_start and
1556          * invalidate_range_end will be unbalanced.
1557          */
1558         spin_lock(&kvm->mn_invalidate_lock);
1559         prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait);
1560         while (kvm->mn_active_invalidate_count) {
1561                 set_current_state(TASK_UNINTERRUPTIBLE);
1562                 spin_unlock(&kvm->mn_invalidate_lock);
1563                 schedule();
1564                 spin_lock(&kvm->mn_invalidate_lock);
1565         }
1566         finish_rcuwait(&kvm->mn_memslots_update_rcuwait);
1567         rcu_assign_pointer(kvm->memslots[as_id], slots);
1568         spin_unlock(&kvm->mn_invalidate_lock);
1569
1570         /*
1571          * Acquired in kvm_set_memslot.  Must be released before the SRCU
1572          * synchronization below in order to avoid deadlock with another thread
1573          * acquiring slots_arch_lock in an SRCU read-side critical section.
1574          */
1575         mutex_unlock(&kvm->slots_arch_lock);
1576
1577         synchronize_srcu_expedited(&kvm->srcu);
1578
1579         /*
1580          * Increment the new memslot generation a second time, dropping the
1581          * update in-progress flag and incrementing the generation based on
1582          * the number of address spaces.  This provides a unique and easily
1583          * identifiable generation number while the memslots are in flux.
1584          */
1585         gen = slots->generation & ~KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS;
1586
1587         /*
1588          * Generations must be unique even across address spaces.  We do not need
1589          * a global counter for that; instead, the generation space is evenly split
1590          * across address spaces.  For example, with two address spaces, address
1591          * space 0 will use generations 0, 2, 4, ... while address space 1 will
1592          * use generations 1, 3, 5, ...
1593          */
1594         gen += KVM_ADDRESS_SPACE_NUM;
1595
1596         kvm_arch_memslots_updated(kvm, gen);
1597
1598         slots->generation = gen;
1599 }
1600
1601 static int kvm_prepare_memory_region(struct kvm *kvm,
1602                                      const struct kvm_memory_slot *old,
1603                                      struct kvm_memory_slot *new,
1604                                      enum kvm_mr_change change)
1605 {
1606         int r;
1607
1608         /*
1609          * If dirty logging is disabled, nullify the bitmap; the old bitmap
1610          * will be freed on "commit".  If logging is enabled in both old and
1611          * new, reuse the existing bitmap.  If logging is enabled only in the
1612          * new and KVM isn't using a ring buffer, allocate and initialize a
1613          * new bitmap.
1614          */
1615         if (change != KVM_MR_DELETE) {
1616                 if (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))
1617                         new->dirty_bitmap = NULL;
1618                 else if (old && old->dirty_bitmap)
1619                         new->dirty_bitmap = old->dirty_bitmap;
1620                 else if (!kvm->dirty_ring_size) {
1621                         r = kvm_alloc_dirty_bitmap(new);
1622                         if (r)
1623                                 return r;
1624
1625                         if (kvm_dirty_log_manual_protect_and_init_set(kvm))
1626                                 bitmap_set(new->dirty_bitmap, 0, new->npages);
1627                 }
1628         }
1629
1630         r = kvm_arch_prepare_memory_region(kvm, old, new, change);
1631
1632         /* Free the bitmap on failure if it was allocated above. */
1633         if (r && new && new->dirty_bitmap && (!old || !old->dirty_bitmap))
1634                 kvm_destroy_dirty_bitmap(new);
1635
1636         return r;
1637 }
1638
1639 static void kvm_commit_memory_region(struct kvm *kvm,
1640                                      struct kvm_memory_slot *old,
1641                                      const struct kvm_memory_slot *new,
1642                                      enum kvm_mr_change change)
1643 {
1644         /*
1645          * Update the total number of memslot pages before calling the arch
1646          * hook so that architectures can consume the result directly.
1647          */
1648         if (change == KVM_MR_DELETE)
1649                 kvm->nr_memslot_pages -= old->npages;
1650         else if (change == KVM_MR_CREATE)
1651                 kvm->nr_memslot_pages += new->npages;
1652
1653         kvm_arch_commit_memory_region(kvm, old, new, change);
1654
1655         switch (change) {
1656         case KVM_MR_CREATE:
1657                 /* Nothing more to do. */
1658                 break;
1659         case KVM_MR_DELETE:
1660                 /* Free the old memslot and all its metadata. */
1661                 kvm_free_memslot(kvm, old);
1662                 break;
1663         case KVM_MR_MOVE:
1664         case KVM_MR_FLAGS_ONLY:
1665                 /*
1666                  * Free the dirty bitmap as needed; the check below encompasses
1667                  * both the flags and whether a ring buffer is in use.
1668                  */
1669                 if (old->dirty_bitmap && !new->dirty_bitmap)
1670                         kvm_destroy_dirty_bitmap(old);
1671
1672                 /*
1673                  * The final quirk.  Free the detached old slot, but only its
1674                  * memory, not any metadata.  Metadata, including arch specific
1675                  * data, may be reused by @new.
1676                  */
1677                 kfree(old);
1678                 break;
1679         default:
1680                 BUG();
1681         }
1682 }
1683
1684 /*
1685  * Activate @new, which must be installed in the inactive slots by the caller,
1686  * by swapping the active slots and then propagating @new to @old once @old is
1687  * unreachable and can be safely modified.
1688  *
1689  * With NULL @old this simply adds @new to @active (while swapping the sets).
1690  * With NULL @new this simply removes @old from @active and frees it
1691  * (while also swapping the sets).
1692  */
1693 static void kvm_activate_memslot(struct kvm *kvm,
1694                                  struct kvm_memory_slot *old,
1695                                  struct kvm_memory_slot *new)
1696 {
1697         int as_id = kvm_memslots_get_as_id(old, new);
1698
1699         kvm_swap_active_memslots(kvm, as_id);
1700
1701         /* Propagate the new memslot to the now inactive memslots. */
1702         kvm_replace_memslot(kvm, old, new);
1703 }
1704
1705 static void kvm_copy_memslot(struct kvm_memory_slot *dest,
1706                              const struct kvm_memory_slot *src)
1707 {
1708         dest->base_gfn = src->base_gfn;
1709         dest->npages = src->npages;
1710         dest->dirty_bitmap = src->dirty_bitmap;
1711         dest->arch = src->arch;
1712         dest->userspace_addr = src->userspace_addr;
1713         dest->flags = src->flags;
1714         dest->id = src->id;
1715         dest->as_id = src->as_id;
1716 }
1717
1718 static void kvm_invalidate_memslot(struct kvm *kvm,
1719                                    struct kvm_memory_slot *old,
1720                                    struct kvm_memory_slot *invalid_slot)
1721 {
1722         /*
1723          * Mark the current slot INVALID.  As with all memslot modifications,
1724          * this must be done on an unreachable slot to avoid modifying the
1725          * current slot in the active tree.
1726          */
1727         kvm_copy_memslot(invalid_slot, old);
1728         invalid_slot->flags |= KVM_MEMSLOT_INVALID;
1729         kvm_replace_memslot(kvm, old, invalid_slot);
1730
1731         /*
1732          * Activate the slot that is now marked INVALID, but don't propagate
1733          * the slot to the now inactive slots. The slot is either going to be
1734          * deleted or recreated as a new slot.
1735          */
1736         kvm_swap_active_memslots(kvm, old->as_id);
1737
1738         /*
1739          * From this point no new shadow pages pointing to a deleted or moved
1740          * memslot will be created.  Validation of sp->gfn happens in:
1741          *      - gfn_to_hva (kvm_read_guest, gfn_to_pfn)
1742          *      - kvm_is_visible_gfn (mmu_check_root)
1743          */
1744         kvm_arch_flush_shadow_memslot(kvm, old);
1745         kvm_arch_guest_memory_reclaimed(kvm);
1746
1747         /* Was released by kvm_swap_active_memslots, reacquire. */
1748         mutex_lock(&kvm->slots_arch_lock);
1749
1750         /*
1751          * Copy the arch-specific field of the newly-installed slot back to the
1752          * old slot as the arch data could have changed between releasing
1753          * slots_arch_lock in kvm_swap_active_memslots() and re-acquiring the lock
1754          * above.  Writers are required to retrieve memslots *after* acquiring
1755          * slots_arch_lock, thus the active slot's data is guaranteed to be fresh.
1756          */
1757         old->arch = invalid_slot->arch;
1758 }
1759
1760 static void kvm_create_memslot(struct kvm *kvm,
1761                                struct kvm_memory_slot *new)
1762 {
1763         /* Add the new memslot to the inactive set and activate. */
1764         kvm_replace_memslot(kvm, NULL, new);
1765         kvm_activate_memslot(kvm, NULL, new);
1766 }
1767
1768 static void kvm_delete_memslot(struct kvm *kvm,
1769                                struct kvm_memory_slot *old,
1770                                struct kvm_memory_slot *invalid_slot)
1771 {
1772         /*
1773          * Remove the old memslot from the inactive memslots by passing NULL as
1774          * the "new" slot, then do the same for the INVALID version in the active slots.
1775          */
1776         kvm_replace_memslot(kvm, old, NULL);
1777         kvm_activate_memslot(kvm, invalid_slot, NULL);
1778 }
1779
1780 static void kvm_move_memslot(struct kvm *kvm,
1781                              struct kvm_memory_slot *old,
1782                              struct kvm_memory_slot *new,
1783                              struct kvm_memory_slot *invalid_slot)
1784 {
1785         /*
1786          * Replace the old memslot in the inactive slots, and then swap slots
1787          * and replace the current INVALID with the new as well.
1788          */
1789         kvm_replace_memslot(kvm, old, new);
1790         kvm_activate_memslot(kvm, invalid_slot, new);
1791 }
1792
1793 static void kvm_update_flags_memslot(struct kvm *kvm,
1794                                      struct kvm_memory_slot *old,
1795                                      struct kvm_memory_slot *new)
1796 {
1797         /*
1798          * Similar to the MOVE case, but the slot doesn't need to be zapped as
1799          * an intermediate step. Instead, the old memslot is simply replaced
1800          * with a new, updated copy in both memslot sets.
1801          */
1802         kvm_replace_memslot(kvm, old, new);
1803         kvm_activate_memslot(kvm, old, new);
1804 }
1805
1806 static int kvm_set_memslot(struct kvm *kvm,
1807                            struct kvm_memory_slot *old,
1808                            struct kvm_memory_slot *new,
1809                            enum kvm_mr_change change)
1810 {
1811         struct kvm_memory_slot *invalid_slot;
1812         int r;
1813
1814         /*
1815          * Released in kvm_swap_active_memslots.
1816          *
1817          * Must be held from before the current memslots are copied until
1818          * after the new memslots are installed with rcu_assign_pointer,
1819          * then released before the SRCU synchronization in kvm_swap_active_memslots.
1820          *
1821          * When modifying memslots outside of the slots_lock, must be held
1822          * before reading the pointer to the current memslots until after all
1823          * changes to those memslots are complete.
1824          *
1825          * These rules ensure that installing new memslots does not lose
1826          * changes made to the previous memslots.
1827          */
1828         mutex_lock(&kvm->slots_arch_lock);
1829
1830         /*
1831          * Invalidate the old slot if it's being deleted or moved.  This is
1832          * done prior to actually deleting/moving the memslot to allow vCPUs to
1833          * continue running by ensuring there are no mappings or shadow pages
1834          * for the memslot when it is deleted/moved.  Without pre-invalidation
1835          * (and without a lock), a window would exist between effecting the
1836          * delete/move and committing the changes in arch code where KVM or a
1837          * guest could access a non-existent memslot.
1838          *
1839          * Modifications are done on a temporary, unreachable slot.  The old
1840          * slot needs to be preserved in case a later step fails and the
1841          * invalidation needs to be reverted.
1842          */
1843         if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1844                 invalid_slot = kzalloc(sizeof(*invalid_slot), GFP_KERNEL_ACCOUNT);
1845                 if (!invalid_slot) {
1846                         mutex_unlock(&kvm->slots_arch_lock);
1847                         return -ENOMEM;
1848                 }
1849                 kvm_invalidate_memslot(kvm, old, invalid_slot);
1850         }
1851
1852         r = kvm_prepare_memory_region(kvm, old, new, change);
1853         if (r) {
1854                 /*
1855                  * For DELETE/MOVE, revert the above INVALID change.  No
1856                  * modifications required since the original slot was preserved
1857                  * in the inactive slots.  Changing the active memslots also
1858                  * release slots_arch_lock.
1859                  */
1860                 if (change == KVM_MR_DELETE || change == KVM_MR_MOVE) {
1861                         kvm_activate_memslot(kvm, invalid_slot, old);
1862                         kfree(invalid_slot);
1863                 } else {
1864                         mutex_unlock(&kvm->slots_arch_lock);
1865                 }
1866                 return r;
1867         }
1868
1869         /*
1870          * For DELETE and MOVE, the invalid slot is now active as the INVALID
1871          * version of the old slot.  MOVE is particularly special as it reuses
1872          * the old slot and returns a copy of the old slot (in @invalid_slot).
1873          * For CREATE, there is no old slot.  For DELETE and FLAGS_ONLY, the
1874          * old slot is detached but otherwise preserved.
1875          */
1876         if (change == KVM_MR_CREATE)
1877                 kvm_create_memslot(kvm, new);
1878         else if (change == KVM_MR_DELETE)
1879                 kvm_delete_memslot(kvm, old, invalid_slot);
1880         else if (change == KVM_MR_MOVE)
1881                 kvm_move_memslot(kvm, old, new, invalid_slot);
1882         else if (change == KVM_MR_FLAGS_ONLY)
1883                 kvm_update_flags_memslot(kvm, old, new);
1884         else
1885                 BUG();
1886
1887         /* Free the temporary INVALID slot used for DELETE and MOVE. */
1888         if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
1889                 kfree(invalid_slot);
1890
1891         /*
1892          * No need to refresh new->arch, changes after dropping slots_arch_lock
1893          * will directly hit the final, active memslot.  Architectures are
1894          * responsible for knowing that new->arch may be stale.
1895          */
1896         kvm_commit_memory_region(kvm, old, new, change);
1897
1898         return 0;
1899 }
1900
1901 static bool kvm_check_memslot_overlap(struct kvm_memslots *slots, int id,
1902                                       gfn_t start, gfn_t end)
1903 {
1904         struct kvm_memslot_iter iter;
1905
1906         kvm_for_each_memslot_in_gfn_range(&iter, slots, start, end) {
1907                 if (iter.slot->id != id)
1908                         return true;
1909         }
1910
1911         return false;
1912 }
1913
1914 /*
1915  * Allocate some memory and give it an address in the guest physical address
1916  * space.
1917  *
1918  * Discontiguous memory is allowed, mostly for framebuffers.
1919  *
1920  * Must be called holding kvm->slots_lock for write.
1921  */
1922 int __kvm_set_memory_region(struct kvm *kvm,
1923                             const struct kvm_userspace_memory_region *mem)
1924 {
1925         struct kvm_memory_slot *old, *new;
1926         struct kvm_memslots *slots;
1927         enum kvm_mr_change change;
1928         unsigned long npages;
1929         gfn_t base_gfn;
1930         int as_id, id;
1931         int r;
1932
1933         r = check_memory_region_flags(mem);
1934         if (r)
1935                 return r;
1936
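        /*
         * Per the KVM_SET_USER_MEMORY_REGION ABI, the slot field packs the
         * address space ID in bits 16-31 and the slot ID in bits 0-15.
         */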
1937         as_id = mem->slot >> 16;
1938         id = (u16)mem->slot;
1939
1940         /* General sanity checks */
1941         if ((mem->memory_size & (PAGE_SIZE - 1)) ||
1942             (mem->memory_size != (unsigned long)mem->memory_size))
1943                 return -EINVAL;
1944         if (mem->guest_phys_addr & (PAGE_SIZE - 1))
1945                 return -EINVAL;
1946         /* We can read the guest memory with __xxx_user() later on. */
1947         if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
1948             (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||
1949              !access_ok((void __user *)(unsigned long)mem->userspace_addr,
1950                         mem->memory_size))
1951                 return -EINVAL;
1952         if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_MEM_SLOTS_NUM)
1953                 return -EINVAL;
1954         if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
1955                 return -EINVAL;
1956         if ((mem->memory_size >> PAGE_SHIFT) > KVM_MEM_MAX_NR_PAGES)
1957                 return -EINVAL;
1958
1959         slots = __kvm_memslots(kvm, as_id);
1960
1961         /*
1962          * Note, the old memslot (and the pointer itself!) may be invalidated
1963          * and/or destroyed by kvm_set_memslot().
1964          */
1965         old = id_to_memslot(slots, id);
1966
1967         if (!mem->memory_size) {
1968                 if (!old || !old->npages)
1969                         return -EINVAL;
1970
1971                 if (WARN_ON_ONCE(kvm->nr_memslot_pages < old->npages))
1972                         return -EIO;
1973
1974                 return kvm_set_memslot(kvm, old, NULL, KVM_MR_DELETE);
1975         }
1976
1977         base_gfn = (mem->guest_phys_addr >> PAGE_SHIFT);
1978         npages = (mem->memory_size >> PAGE_SHIFT);
1979
1980         if (!old || !old->npages) {
1981                 change = KVM_MR_CREATE;
1982
1983                 /*
1984                  * To simplify KVM internals, the total number of pages across
1985                  * all memslots must fit in an unsigned long.
1986                  */
1987                 if ((kvm->nr_memslot_pages + npages) < kvm->nr_memslot_pages)
1988                         return -EINVAL;
1989         } else { /* Modify an existing slot. */
1990                 if ((mem->userspace_addr != old->userspace_addr) ||
1991                     (npages != old->npages) ||
1992                     ((mem->flags ^ old->flags) & KVM_MEM_READONLY))
1993                         return -EINVAL;
1994
1995                 if (base_gfn != old->base_gfn)
1996                         change = KVM_MR_MOVE;
1997                 else if (mem->flags != old->flags)
1998                         change = KVM_MR_FLAGS_ONLY;
1999                 else /* Nothing to change. */
2000                         return 0;
2001         }
2002
2003         if ((change == KVM_MR_CREATE || change == KVM_MR_MOVE) &&
2004             kvm_check_memslot_overlap(slots, id, base_gfn, base_gfn + npages))
2005                 return -EEXIST;
2006
2007         /* Allocate a slot that will persist in the memslots. */
2008         new = kzalloc(sizeof(*new), GFP_KERNEL_ACCOUNT);
2009         if (!new)
2010                 return -ENOMEM;
2011
2012         new->as_id = as_id;
2013         new->id = id;
2014         new->base_gfn = base_gfn;
2015         new->npages = npages;
2016         new->flags = mem->flags;
2017         new->userspace_addr = mem->userspace_addr;
2018
2019         r = kvm_set_memslot(kvm, old, new, change);
2020         if (r)
2021                 kfree(new);
2022         return r;
2023 }
2024 EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
2025
2026 int kvm_set_memory_region(struct kvm *kvm,
2027                           const struct kvm_userspace_memory_region *mem)
2028 {
2029         int r;
2030
2031         mutex_lock(&kvm->slots_lock);
2032         r = __kvm_set_memory_region(kvm, mem);
2033         mutex_unlock(&kvm->slots_lock);
2034         return r;
2035 }
2036 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
2037
2038 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
2039                                           struct kvm_userspace_memory_region *mem)
2040 {
2041         if ((u16)mem->slot >= KVM_USER_MEM_SLOTS)
2042                 return -EINVAL;
2043
2044         return kvm_set_memory_region(kvm, mem);
2045 }
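
/*
 * Illustrative userspace sketch (not part of this file): registering guest
 * memory through the KVM_SET_USER_MEMORY_REGION ioctl handled above.  The
 * vm_fd and the page-aligned mmap()ed buffer "mem" are assumed to exist.
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot = 0,                              // as_id 0, slot id 0
 *		.flags = KVM_MEM_LOG_DIRTY_PAGES,       // enable dirty logging
 *		.guest_phys_addr = 0x100000,            // page-aligned GPA
 *		.memory_size = 0x200000,                // page-aligned size
 *		.userspace_addr = (unsigned long)mem,   // page-aligned HVA
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
 *		err(1, "KVM_SET_USER_MEMORY_REGION");
 *
 * Re-issuing the ioctl with memory_size == 0 deletes the slot, taking the
 * KVM_MR_DELETE path in __kvm_set_memory_region() above.
 */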
2046
2047 #ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
2048 /**
2049  * kvm_get_dirty_log - get a snapshot of dirty pages
2050  * @kvm:        pointer to kvm instance
2051  * @log:        slot id and address to which we copy the log
2052  * @is_dirty:   set to '1' if any dirty pages were found
2053  * @memslot:    set to the associated memslot, always valid on success
2054  */
2055 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
2056                       int *is_dirty, struct kvm_memory_slot **memslot)
2057 {
2058         struct kvm_memslots *slots;
2059         int i, as_id, id;
2060         unsigned long n;
2061         unsigned long any = 0;
2062
2063         /* Dirty ring tracking is exclusive to dirty log tracking */
2064         if (kvm->dirty_ring_size)
2065                 return -ENXIO;
2066
2067         *memslot = NULL;
2068         *is_dirty = 0;
2069
2070         as_id = log->slot >> 16;
2071         id = (u16)log->slot;
2072         if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2073                 return -EINVAL;
2074
2075         slots = __kvm_memslots(kvm, as_id);
2076         *memslot = id_to_memslot(slots, id);
2077         if (!(*memslot) || !(*memslot)->dirty_bitmap)
2078                 return -ENOENT;
2079
2080         kvm_arch_sync_dirty_log(kvm, *memslot);
2081
2082         n = kvm_dirty_bitmap_bytes(*memslot);
2083
2084         for (i = 0; !any && i < n/sizeof(long); ++i)
2085                 any = (*memslot)->dirty_bitmap[i];
2086
2087         if (copy_to_user(log->dirty_bitmap, (*memslot)->dirty_bitmap, n))
2088                 return -EFAULT;
2089
2090         if (any)
2091                 *is_dirty = 1;
2092         return 0;
2093 }
2094 EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
2095
2096 #else /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2097 /**
2098  * kvm_get_dirty_log_protect - get a snapshot of dirty pages
2099  *      and reenable dirty page tracking for the corresponding pages.
2100  * @kvm:        pointer to kvm instance
2101  * @log:        slot id and address to which we copy the log
2102  *
2103  * Keep in mind that VCPU threads can write to the bitmap concurrently.
2104  * So, to avoid losing track of dirty pages, we keep the following
2105  * order:
2106  *
2107  *    1. Take a snapshot of the bit and clear it if needed.
2108  *    2. Write protect the corresponding page.
2109  *    3. Copy the snapshot to the userspace.
2110  *    4. Upon return caller flushes TLB's if needed.
2111  *
2112  * Between 2 and 4, the guest may write to the page using the remaining TLB
2113  * entry.  This is not a problem because the page is reported dirty using
2114  * the snapshot taken before and step 4 ensures that writes done after
2115  * exiting to userspace will be logged for the next call.
2116  *
2117  */
2118 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log)
2119 {
2120         struct kvm_memslots *slots;
2121         struct kvm_memory_slot *memslot;
2122         int i, as_id, id;
2123         unsigned long n;
2124         unsigned long *dirty_bitmap;
2125         unsigned long *dirty_bitmap_buffer;
2126         bool flush;
2127
2128         /* Dirty ring tracking is exclusive to dirty log tracking */
2129         if (kvm->dirty_ring_size)
2130                 return -ENXIO;
2131
2132         as_id = log->slot >> 16;
2133         id = (u16)log->slot;
2134         if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2135                 return -EINVAL;
2136
2137         slots = __kvm_memslots(kvm, as_id);
2138         memslot = id_to_memslot(slots, id);
2139         if (!memslot || !memslot->dirty_bitmap)
2140                 return -ENOENT;
2141
2142         dirty_bitmap = memslot->dirty_bitmap;
2143
2144         kvm_arch_sync_dirty_log(kvm, memslot);
2145
2146         n = kvm_dirty_bitmap_bytes(memslot);
2147         flush = false;
2148         if (kvm->manual_dirty_log_protect) {
2149                 /*
2150                  * Unlike kvm_get_dirty_log, nothing is cleared or flushed here,
2151                  * because no flush is needed until KVM_CLEAR_DIRTY_LOG.  There
2152                  * is some code duplication between this function and
2153                  * kvm_get_dirty_log, but hopefully all architectures will
2154                  * transition to kvm_get_dirty_log_protect so that
2155                  * kvm_get_dirty_log can be eliminated.
2156                  */
2157                 dirty_bitmap_buffer = dirty_bitmap;
2158         } else {
2159                 dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2160                 memset(dirty_bitmap_buffer, 0, n);
2161
2162                 KVM_MMU_LOCK(kvm);
2163                 for (i = 0; i < n / sizeof(long); i++) {
2164                         unsigned long mask;
2165                         gfn_t offset;
2166
2167                         if (!dirty_bitmap[i])
2168                                 continue;
2169
2170                         flush = true;
2171                         mask = xchg(&dirty_bitmap[i], 0);
2172                         dirty_bitmap_buffer[i] = mask;
2173
2174                         offset = i * BITS_PER_LONG;
2175                         kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2176                                                                 offset, mask);
2177                 }
2178                 KVM_MMU_UNLOCK(kvm);
2179         }
2180
2181         if (flush)
2182                 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
2183
2184         if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
2185                 return -EFAULT;
2186         return 0;
2187 }
2188
2189
2190 /**
2191  * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
2192  * @kvm: kvm instance
2193  * @log: slot id and address to which we copy the log
2194  *
2195  * Steps 1-4 below provide general overview of dirty page logging. See
2196  * kvm_get_dirty_log_protect() function description for additional details.
2197  *
2198  * We call kvm_get_dirty_log_protect() to handle steps 1-3; upon return we
2199  * always flush the TLB (step 4) even if a previous step failed and the dirty
2200  * bitmap may be corrupt. Regardless of the previous outcome, the KVM logging
2201  * API does not preclude a subsequent user-space dirty log read. Flushing the
2202  * TLB ensures writes will be marked dirty for the next log read.
2203  *
2204  *   1. Take a snapshot of the bit and clear it if needed.
2205  *   2. Write protect the corresponding page.
2206  *   3. Copy the snapshot to the userspace.
2207  *   4. Flush TLB's if needed.
2208  */
2209 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
2210                                       struct kvm_dirty_log *log)
2211 {
2212         int r;
2213
2214         mutex_lock(&kvm->slots_lock);
2215
2216         r = kvm_get_dirty_log_protect(kvm, log);
2217
2218         mutex_unlock(&kvm->slots_lock);
2219         return r;
2220 }
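
/*
 * Illustrative userspace sketch (not part of this file): retrieving the
 * dirty bitmap for slot 0.  The buffer must hold one bit per page in the
 * memslot, rounded up to 64-bit granularity; "npages" is assumed known.
 *
 *	struct kvm_dirty_log log = { .slot = 0 };
 *
 *	log.dirty_bitmap = calloc((npages + 63) / 64, 8);
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
 *		err(1, "KVM_GET_DIRTY_LOG");
 *	// bit N set => guest page (base_gfn + N) was written since the last call
 */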
2221
2222 /**
2223  * kvm_clear_dirty_log_protect - clear dirty bits in the bitmap
2224  *      and reenable dirty page tracking for the corresponding pages.
2225  * @kvm:        pointer to kvm instance
2226  * @log:        slot id and address from which to fetch the bitmap of dirty pages
2227  */
2228 static int kvm_clear_dirty_log_protect(struct kvm *kvm,
2229                                        struct kvm_clear_dirty_log *log)
2230 {
2231         struct kvm_memslots *slots;
2232         struct kvm_memory_slot *memslot;
2233         int as_id, id;
2234         gfn_t offset;
2235         unsigned long i, n;
2236         unsigned long *dirty_bitmap;
2237         unsigned long *dirty_bitmap_buffer;
2238         bool flush;
2239
2240         /* Dirty ring tracking is exclusive to dirty log tracking */
2241         if (kvm->dirty_ring_size)
2242                 return -ENXIO;
2243
2244         as_id = log->slot >> 16;
2245         id = (u16)log->slot;
2246         if (as_id >= KVM_ADDRESS_SPACE_NUM || id >= KVM_USER_MEM_SLOTS)
2247                 return -EINVAL;
2248
2249         if (log->first_page & 63)
2250                 return -EINVAL;
2251
2252         slots = __kvm_memslots(kvm, as_id);
2253         memslot = id_to_memslot(slots, id);
2254         if (!memslot || !memslot->dirty_bitmap)
2255                 return -ENOENT;
2256
2257         dirty_bitmap = memslot->dirty_bitmap;
2258
2259         n = ALIGN(log->num_pages, BITS_PER_LONG) / 8;
2260
2261         if (log->first_page > memslot->npages ||
2262             log->num_pages > memslot->npages - log->first_page ||
2263             (log->num_pages < memslot->npages - log->first_page && (log->num_pages & 63)))
2264                 return -EINVAL;
2265
2266         kvm_arch_sync_dirty_log(kvm, memslot);
2267
2268         flush = false;
2269         dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
2270         if (copy_from_user(dirty_bitmap_buffer, log->dirty_bitmap, n))
2271                 return -EFAULT;
2272
2273         KVM_MMU_LOCK(kvm);
2274         for (offset = log->first_page, i = offset / BITS_PER_LONG,
2275                  n = DIV_ROUND_UP(log->num_pages, BITS_PER_LONG); n--;
2276              i++, offset += BITS_PER_LONG) {
2277                 unsigned long mask = *dirty_bitmap_buffer++;
2278                 atomic_long_t *p = (atomic_long_t *) &dirty_bitmap[i];
2279                 if (!mask)
2280                         continue;
2281
2282                 mask &= atomic_long_fetch_andnot(mask, p);
2283
2284                 /*
2285                  * mask contains the bits that really have been cleared.  This
2286                  * never includes any bits beyond the length of the memslot (if
2287                  * the length is not aligned to 64 pages), therefore it is not
2288                  * a problem if userspace sets them in log->dirty_bitmap.
2289                  */
2290                 if (mask) {
2291                         flush = true;
2292                         kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot,
2293                                                                 offset, mask);
2294                 }
2295         }
2296         KVM_MMU_UNLOCK(kvm);
2297
2298         if (flush)
2299                 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
2300
2301         return 0;
2302 }
2303
2304 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm,
2305                                         struct kvm_clear_dirty_log *log)
2306 {
2307         int r;
2308
2309         mutex_lock(&kvm->slots_lock);
2310
2311         r = kvm_clear_dirty_log_protect(kvm, log);
2312
2313         mutex_unlock(&kvm->slots_lock);
2314         return r;
2315 }
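
/*
 * Illustrative userspace sketch (not part of this file): with manual dirty
 * log protection enabled (KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2), userspace
 * re-arms write protection for the pages it has processed:
 *
 *	struct kvm_clear_dirty_log clear = {
 *		.slot = 0,
 *		.first_page = 0,        // must be 64-page aligned
 *		.num_pages = 256,       // multiple of 64 unless it reaches slot end
 *		.dirty_bitmap = bitmap, // bits to clear, as fetched above
 *	};
 *
 *	if (ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear) < 0)
 *		err(1, "KVM_CLEAR_DIRTY_LOG");
 */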
2316 #endif /* CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
2317
2318 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
2319 {
2320         return __gfn_to_memslot(kvm_memslots(kvm), gfn);
2321 }
2322 EXPORT_SYMBOL_GPL(gfn_to_memslot);
2323
2324 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
2325 {
2326         struct kvm_memslots *slots = kvm_vcpu_memslots(vcpu);
2327         u64 gen = slots->generation;
2328         struct kvm_memory_slot *slot;
2329
2330         /*
2331          * This also protects against using a memslot from a different address space,
2332          * since different address spaces have different generation numbers.
2333          */
2334         if (unlikely(gen != vcpu->last_used_slot_gen)) {
2335                 vcpu->last_used_slot = NULL;
2336                 vcpu->last_used_slot_gen = gen;
2337         }
2338
2339         slot = try_get_memslot(vcpu->last_used_slot, gfn);
2340         if (slot)
2341                 return slot;
2342
2343         /*
2344          * Fall back to searching all memslots. We purposely use
2345          * search_memslots() instead of __gfn_to_memslot() to avoid
2346          * thrashing the VM-wide last_used_slot in kvm_memslots.
2347          */
2348         slot = search_memslots(slots, gfn, false);
2349         if (slot) {
2350                 vcpu->last_used_slot = slot;
2351                 return slot;
2352         }
2353
2354         return NULL;
2355 }
2356
2357 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
2358 {
2359         struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);
2360
2361         return kvm_is_visible_memslot(memslot);
2362 }
2363 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
2364
2365 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2366 {
2367         struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2368
2369         return kvm_is_visible_memslot(memslot);
2370 }
2371 EXPORT_SYMBOL_GPL(kvm_vcpu_is_visible_gfn);
2372
2373 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
2374 {
2375         struct vm_area_struct *vma;
2376         unsigned long addr, size;
2377
2378         size = PAGE_SIZE;
2379
2380         addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);
2381         if (kvm_is_error_hva(addr))
2382                 return PAGE_SIZE;
2383
2384         mmap_read_lock(current->mm);
2385         vma = find_vma(current->mm, addr);
2386         if (!vma)
2387                 goto out;
2388
2389         size = vma_kernel_pagesize(vma);
2390
2391 out:
2392         mmap_read_unlock(current->mm);
2393
2394         return size;
2395 }
2396
2397 static bool memslot_is_readonly(const struct kvm_memory_slot *slot)
2398 {
2399         return slot->flags & KVM_MEM_READONLY;
2400 }
2401
2402 static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t gfn,
2403                                        gfn_t *nr_pages, bool write)
2404 {
2405         if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
2406                 return KVM_HVA_ERR_BAD;
2407
2408         if (memslot_is_readonly(slot) && write)
2409                 return KVM_HVA_ERR_RO_BAD;
2410
2411         if (nr_pages)
2412                 *nr_pages = slot->npages - (gfn - slot->base_gfn);
2413
2414         return __gfn_to_hva_memslot(slot, gfn);
2415 }
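
/*
 * Note: __gfn_to_hva_memslot() (defined in linux/kvm_host.h) resolves to
 * slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE, with the
 * offset clamped via array_index_nospec() to avoid speculative overruns.
 */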
2416
2417 static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
2418                                      gfn_t *nr_pages)
2419 {
2420         return __gfn_to_hva_many(slot, gfn, nr_pages, true);
2421 }
2422
2423 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
2424                                         gfn_t gfn)
2425 {
2426         return gfn_to_hva_many(slot, gfn, NULL);
2427 }
2428 EXPORT_SYMBOL_GPL(gfn_to_hva_memslot);
2429
2430 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
2431 {
2432         return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL);
2433 }
2434 EXPORT_SYMBOL_GPL(gfn_to_hva);
2435
2436 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn)
2437 {
2438         return gfn_to_hva_many(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, NULL);
2439 }
2440 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_hva);
2441
2442 /*
2443  * Return the hva of a @gfn and the R/W attribute if possible.
2444  *
2445  * @slot: the kvm_memory_slot which contains @gfn
2446  * @gfn: the gfn to be translated
2447  * @writable: used to return the read/write attribute of the @slot if the hva
2448  * is valid and @writable is not NULL
2449  */
2450 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot,
2451                                       gfn_t gfn, bool *writable)
2452 {
2453         unsigned long hva = __gfn_to_hva_many(slot, gfn, NULL, false);
2454
2455         if (!kvm_is_error_hva(hva) && writable)
2456                 *writable = !memslot_is_readonly(slot);
2457
2458         return hva;
2459 }
2460
2461 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable)
2462 {
2463         struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2464
2465         return gfn_to_hva_memslot_prot(slot, gfn, writable);
2466 }
2467
2468 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable)
2469 {
2470         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
2471
2472         return gfn_to_hva_memslot_prot(slot, gfn, writable);
2473 }
2474
2475 static inline int check_user_page_hwpoison(unsigned long addr)
2476 {
2477         int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
2478
2479         rc = get_user_pages(addr, 1, flags, NULL, NULL);
2480         return rc == -EHWPOISON;
2481 }
2482
2483 /*
2484  * The fast path to get the writable pfn which will be stored in @pfn;
2485  * true indicates success, otherwise false is returned.  It's also the
2486  * only path that may run in atomic context.
2487  */
2488 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
2489                             bool *writable, kvm_pfn_t *pfn)
2490 {
2491         struct page *page[1];
2492
2493         /*
2494          * Fast pin a writable pfn only if it is a write fault request
2495          * or the caller allows mapping a writable pfn for a read fault
2496          * request.
2497          */
2498         if (!(write_fault || writable))
2499                 return false;
2500
2501         if (get_user_page_fast_only(addr, FOLL_WRITE, page)) {
2502                 *pfn = page_to_pfn(page[0]);
2503
2504                 if (writable)
2505                         *writable = true;
2506                 return true;
2507         }
2508
2509         return false;
2510 }
2511
2512 /*
2513  * The slow path to get the pfn of the specified host virtual address;
2514  * 1 indicates success, and -errno is returned if an error is detected.
2515  */
2516 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
2517                            bool *writable, kvm_pfn_t *pfn)
2518 {
2519         unsigned int flags = FOLL_HWPOISON;
2520         struct page *page;
2521         int npages;
2522
2523         might_sleep();
2524
2525         if (writable)
2526                 *writable = write_fault;
2527
2528         if (write_fault)
2529                 flags |= FOLL_WRITE;
2530         if (async)
2531                 flags |= FOLL_NOWAIT;
2532
2533         npages = get_user_pages_unlocked(addr, 1, &page, flags);
2534         if (npages != 1)
2535                 return npages;
2536
2537         /* map read fault as writable if possible */
2538         if (unlikely(!write_fault) && writable) {
2539                 struct page *wpage;
2540
2541                 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) {
2542                         *writable = true;
2543                         put_page(page);
2544                         page = wpage;
2545                 }
2546         }
2547         *pfn = page_to_pfn(page);
2548         return npages;
2549 }
2550
2551 static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2552 {
2553         if (unlikely(!(vma->vm_flags & VM_READ)))
2554                 return false;
2555
2556         if (write_fault && (unlikely(!(vma->vm_flags & VM_WRITE))))
2557                 return false;
2558
2559         return true;
2560 }
2561
2562 static int kvm_try_get_pfn(kvm_pfn_t pfn)
2563 {
2564         struct page *page = kvm_pfn_to_refcounted_page(pfn);
2565
2566         if (!page)
2567                 return 1;
2568
2569         return get_page_unless_zero(page);
2570 }
2571
2572 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2573                                unsigned long addr, bool write_fault,
2574                                bool *writable, kvm_pfn_t *p_pfn)
2575 {
2576         kvm_pfn_t pfn;
2577         pte_t *ptep;
2578         spinlock_t *ptl;
2579         int r;
2580
2581         r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2582         if (r) {
2583                 /*
2584                  * get_user_pages fails for VM_IO and VM_PFNMAP vmas and does
2585                  * not call the fault handler, so do it here.
2586                  */
2587                 bool unlocked = false;
2588                 r = fixup_user_fault(current->mm, addr,
2589                                      (write_fault ? FAULT_FLAG_WRITE : 0),
2590                                      &unlocked);
2591                 if (unlocked)
2592                         return -EAGAIN;
2593                 if (r)
2594                         return r;
2595
2596                 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl);
2597                 if (r)
2598                         return r;
2599         }
2600
2601         if (write_fault && !pte_write(*ptep)) {
2602                 pfn = KVM_PFN_ERR_RO_FAULT;
2603                 goto out;
2604         }
2605
2606         if (writable)
2607                 *writable = pte_write(*ptep);
2608         pfn = pte_pfn(*ptep);
2609
2610         /*
2611          * Get a reference here because callers of *hva_to_pfn* and
2612          * *gfn_to_pfn* ultimately call kvm_release_pfn_clean on the
2613          * returned pfn.  This is only needed if the VMA has VM_MIXEDMAP
2614          * set, but the kvm_try_get_pfn/kvm_release_pfn_clean pair will
2615          * simply do nothing for reserved pfns.
2616          *
2617          * Whoever called remap_pfn_range is also going to call e.g.
2618          * unmap_mapping_range before the underlying pages are freed,
2619          * causing a call to our MMU notifier.
2620          *
2621          * Certain IO or PFNMAP mappings can be backed with valid
2622          * struct pages, but be allocated without refcounting e.g.,
2623          * tail pages of non-compound higher order allocations, which
2624          * would then underflow the refcount when the caller does the
2625          * required put_page. Don't allow those pages here.
2626          */
2627         if (!kvm_try_get_pfn(pfn))
2628                 r = -EFAULT;
2629
2630 out:
2631         pte_unmap_unlock(ptep, ptl);
2632         *p_pfn = pfn;
2633
2634         return r;
2635 }
2636
2637 /*
2638  * Pin guest page in memory and return its pfn.
2639  * @addr: host virtual address which maps memory to the guest
2640  * @atomic: whether this function runs in atomic context and thus must not sleep
2641  * @async: if non-NULL, IO is not waited for; *async is set to true if the
2642  *         page is not present and the fault can be retried asynchronously
2643  * @write_fault: whether we should get a writable host page
2644  * @writable: whether mapping a writable host page is allowed for !@write_fault
2645  *
2646  * The function will map a writable host page for these two cases:
2647  * 1): @write_fault = true
2648  * 2): @write_fault = false && @writable, @writable will tell the caller
2649  *     whether the mapping is writable.
2650  */
2651 kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
2652                      bool write_fault, bool *writable)
2653 {
2654         struct vm_area_struct *vma;
2655         kvm_pfn_t pfn;
2656         int npages, r;
2657
2658         /* we can do it either atomically or asynchronously, not both */
2659         BUG_ON(atomic && async);
2660
2661         if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
2662                 return pfn;
2663
2664         if (atomic)
2665                 return KVM_PFN_ERR_FAULT;
2666
2667         npages = hva_to_pfn_slow(addr, async, write_fault, writable, &pfn);
2668         if (npages == 1)
2669                 return pfn;
2670
2671         mmap_read_lock(current->mm);
2672         if (npages == -EHWPOISON ||
2673               (!async && check_user_page_hwpoison(addr))) {
2674                 pfn = KVM_PFN_ERR_HWPOISON;
2675                 goto exit;
2676         }
2677
2678 retry:
2679         vma = vma_lookup(current->mm, addr);
2680
2681         if (vma == NULL)
2682                 pfn = KVM_PFN_ERR_FAULT;
2683         else if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
2684                 r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn);
2685                 if (r == -EAGAIN)
2686                         goto retry;
2687                 if (r < 0)
2688                         pfn = KVM_PFN_ERR_FAULT;
2689         } else {
2690                 if (async && vma_is_valid(vma, write_fault))
2691                         *async = true;
2692                 pfn = KVM_PFN_ERR_FAULT;
2693         }
2694 exit:
2695         mmap_read_unlock(current->mm);
2696         return pfn;
2697 }
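
/*
 * Informational summary of hva_to_pfn(): the fast path pins a writable page
 * with get_user_page_fast_only(); the slow path uses get_user_pages_unlocked()
 * and may opportunistically upgrade a read fault to a writable mapping; and
 * VM_IO/VM_PFNMAP vmas fall back to hva_to_pfn_remapped(), which walks the
 * pte directly.
 */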
2698
2699 kvm_pfn_t __gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn,
2700                                bool atomic, bool *async, bool write_fault,
2701                                bool *writable, hva_t *hva)
2702 {
2703         unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
2704
2705         if (hva)
2706                 *hva = addr;
2707
2708         if (addr == KVM_HVA_ERR_RO_BAD) {
2709                 if (writable)
2710                         *writable = false;
2711                 return KVM_PFN_ERR_RO_FAULT;
2712         }
2713
2714         if (kvm_is_error_hva(addr)) {
2715                 if (writable)
2716                         *writable = false;
2717                 return KVM_PFN_NOSLOT;
2718         }
2719
2720         /* Do not map writable pfn in the readonly memslot. */
2721         if (writable && memslot_is_readonly(slot)) {
2722                 *writable = false;
2723                 writable = NULL;
2724         }
2725
2726         return hva_to_pfn(addr, atomic, async, write_fault,
2727                           writable);
2728 }
2729 EXPORT_SYMBOL_GPL(__gfn_to_pfn_memslot);
2730
2731 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
2732                       bool *writable)
2733 {
2734         return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL,
2735                                     write_fault, writable, NULL);
2736 }
2737 EXPORT_SYMBOL_GPL(gfn_to_pfn_prot);
2738
2739 kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
2740 {
2741         return __gfn_to_pfn_memslot(slot, gfn, false, NULL, true, NULL, NULL);
2742 }
2743 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot);
2744
2745 kvm_pfn_t gfn_to_pfn_memslot_atomic(const struct kvm_memory_slot *slot, gfn_t gfn)
2746 {
2747         return __gfn_to_pfn_memslot(slot, gfn, true, NULL, true, NULL, NULL);
2748 }
2749 EXPORT_SYMBOL_GPL(gfn_to_pfn_memslot_atomic);
2750
2751 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn)
2752 {
2753         return gfn_to_pfn_memslot_atomic(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2754 }
2755 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn_atomic);
2756
2757 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
2758 {
2759         return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn);
2760 }
2761 EXPORT_SYMBOL_GPL(gfn_to_pfn);
2762
2763 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
2764 {
2765         return gfn_to_pfn_memslot(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn);
2766 }
2767 EXPORT_SYMBOL_GPL(kvm_vcpu_gfn_to_pfn);
2768
2769 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
2770                             struct page **pages, int nr_pages)
2771 {
2772         unsigned long addr;
2773         gfn_t entry = 0;
2774
2775         addr = gfn_to_hva_many(slot, gfn, &entry);
2776         if (kvm_is_error_hva(addr))
2777                 return -1;
2778
2779         if (entry < nr_pages)
2780                 return 0;
2781
2782         return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
2783 }
2784 EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
2785
2786 /*
2787  * Do not use this helper unless you are absolutely certain the gfn _must_ be
2788  * backed by 'struct page'.  A valid example is if the backing memslot is
2789  * controlled by KVM.  Note, if the returned page is valid, its refcount has
2790  * been elevated by gfn_to_pfn().
2791  */
2792 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
2793 {
2794         struct page *page;
2795         kvm_pfn_t pfn;
2796
2797         pfn = gfn_to_pfn(kvm, gfn);
2798
2799         if (is_error_noslot_pfn(pfn))
2800                 return KVM_ERR_PTR_BAD_PAGE;
2801
2802         page = kvm_pfn_to_refcounted_page(pfn);
2803         if (!page)
2804                 return KVM_ERR_PTR_BAD_PAGE;
2805
2806         return page;
2807 }
2808 EXPORT_SYMBOL_GPL(gfn_to_page);
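
/*
 * A page obtained via gfn_to_page() must eventually be released with
 * kvm_release_page_clean() or kvm_release_page_dirty() to drop the
 * reference taken by gfn_to_pfn().
 */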
2809
2810 void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
2811 {
2812         if (dirty)
2813                 kvm_release_pfn_dirty(pfn);
2814         else
2815                 kvm_release_pfn_clean(pfn);
2816 }
2817
2818 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
2819 {
2820         kvm_pfn_t pfn;
2821         void *hva = NULL;
2822         struct page *page = KVM_UNMAPPED_PAGE;
2823
2824         if (!map)
2825                 return -EINVAL;
2826
2827         pfn = gfn_to_pfn(vcpu->kvm, gfn);
2828         if (is_error_noslot_pfn(pfn))
2829                 return -EINVAL;
2830
2831         if (pfn_valid(pfn)) {
2832                 page = pfn_to_page(pfn);
2833                 hva = kmap(page);
2834 #ifdef CONFIG_HAS_IOMEM
2835         } else {
2836                 hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
2837 #endif
2838         }
2839
2840         if (!hva)
2841                 return -EFAULT;
2842
2843         map->page = page;
2844         map->hva = hva;
2845         map->pfn = pfn;
2846         map->gfn = gfn;
2847
2848         return 0;
2849 }
2850 EXPORT_SYMBOL_GPL(kvm_vcpu_map);
2851
2852 void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
2853 {
2854         if (!map)
2855                 return;
2856
2857         if (!map->hva)
2858                 return;
2859
2860         if (map->page != KVM_UNMAPPED_PAGE)
2861                 kunmap(map->page);
2862 #ifdef CONFIG_HAS_IOMEM
2863         else
2864                 memunmap(map->hva);
2865 #endif
2866
2867         if (dirty)
2868                 kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
2869
2870         kvm_release_pfn(map->pfn, dirty);
2871
2872         map->hva = NULL;
2873         map->page = NULL;
2874 }
2875 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
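
/*
 * Illustrative in-kernel usage sketch (hypothetical caller, not from this
 * file): map a guest page, modify it, then release it as dirty:
 *
 *	struct kvm_host_map map;
 *
 *	if (kvm_vcpu_map(vcpu, gfn, &map))
 *		return -EFAULT;
 *	memcpy(map.hva + offset, data, len);
 *	kvm_vcpu_unmap(vcpu, &map, true);   // true == mark the page dirty
 */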
2876
2877 static bool kvm_is_ad_tracked_page(struct page *page)
2878 {
2879         /*
2880          * Per page-flags.h, pages tagged PG_reserved "should in general not be
2881          * touched (e.g. set dirty) except by its owner".
2882          */
2883         return !PageReserved(page);
2884 }
2885
2886 static void kvm_set_page_dirty(struct page *page)
2887 {
2888         if (kvm_is_ad_tracked_page(page))
2889                 SetPageDirty(page);
2890 }
2891
2892 static void kvm_set_page_accessed(struct page *page)
2893 {
2894         if (kvm_is_ad_tracked_page(page))
2895                 mark_page_accessed(page);
2896 }
2897
2898 void kvm_release_page_clean(struct page *page)
2899 {
2900         WARN_ON(is_error_page(page));
2901
2902         kvm_set_page_accessed(page);
2903         put_page(page);
2904 }
2905 EXPORT_SYMBOL_GPL(kvm_release_page_clean);
2906
2907 void kvm_release_pfn_clean(kvm_pfn_t pfn)
2908 {
2909         struct page *page;
2910
2911         if (is_error_noslot_pfn(pfn))
2912                 return;
2913
2914         page = kvm_pfn_to_refcounted_page(pfn);
2915         if (!page)
2916                 return;
2917
2918         kvm_release_page_clean(page);
2919 }
2920 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
2921
2922 void kvm_release_page_dirty(struct page *page)
2923 {
2924         WARN_ON(is_error_page(page));
2925
2926         kvm_set_page_dirty(page);
2927         kvm_release_page_clean(page);
2928 }
2929 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
2930
2931 void kvm_release_pfn_dirty(kvm_pfn_t pfn)
2932 {
2933         struct page *page;
2934
2935         if (is_error_noslot_pfn(pfn))
2936                 return;
2937
2938         page = kvm_pfn_to_refcounted_page(pfn);
2939         if (!page)
2940                 return;
2941
2942         kvm_release_page_dirty(page);
2943 }
2944 EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
2945
2946 /*
2947  * Note, checking for an error/noslot pfn is the caller's responsibility when
2948  * directly marking a page dirty/accessed.  Unlike the "release" helpers, the
2949  * "set" helpers are not to be used when the pfn might point at garbage.
2950  */
2951 void kvm_set_pfn_dirty(kvm_pfn_t pfn)
2952 {
2953         if (WARN_ON(is_error_noslot_pfn(pfn)))
2954                 return;
2955
2956         if (pfn_valid(pfn))
2957                 kvm_set_page_dirty(pfn_to_page(pfn));
2958 }
2959 EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
2960
2961 void kvm_set_pfn_accessed(kvm_pfn_t pfn)
2962 {
2963         if (WARN_ON(is_error_noslot_pfn(pfn)))
2964                 return;
2965
2966         if (pfn_valid(pfn))
2967                 kvm_set_page_accessed(pfn_to_page(pfn));
2968 }
2969 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
2970
2971 static int next_segment(unsigned long len, int offset)
2972 {
2973         if (len > PAGE_SIZE - offset)
2974                 return PAGE_SIZE - offset;
2975         else
2976                 return len;
2977 }
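
/*
 * E.g., with PAGE_SIZE == 4096, next_segment(5000, 100) returns 3996 (the
 * rest of the first page); the caller then advances with offset = 0, so the
 * following call returns min(len, PAGE_SIZE) for each remaining page.
 */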
2978
2979 static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
2980                                  void *data, int offset, int len)
2981 {
2982         int r;
2983         unsigned long addr;
2984
2985         addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
2986         if (kvm_is_error_hva(addr))
2987                 return -EFAULT;
2988         r = __copy_from_user(data, (void __user *)addr + offset, len);
2989         if (r)
2990                 return -EFAULT;
2991         return 0;
2992 }
2993
2994 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
2995                         int len)
2996 {
2997         struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
2998
2999         return __kvm_read_guest_page(slot, gfn, data, offset, len);
3000 }
3001 EXPORT_SYMBOL_GPL(kvm_read_guest_page);
3002
3003 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
3004                              int offset, int len)
3005 {
3006         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3007
3008         return __kvm_read_guest_page(slot, gfn, data, offset, len);
3009 }
3010 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
3011
3012 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
3013 {
3014         gfn_t gfn = gpa >> PAGE_SHIFT;
3015         int seg;
3016         int offset = offset_in_page(gpa);
3017         int ret;
3018
3019         while ((seg = next_segment(len, offset)) != 0) {
3020                 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
3021                 if (ret < 0)
3022                         return ret;
3023                 offset = 0;
3024                 len -= seg;
3025                 data += seg;
3026                 ++gfn;
3027         }
3028         return 0;
3029 }
3030 EXPORT_SYMBOL_GPL(kvm_read_guest);
3031
3032 int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data, unsigned long len)
3033 {
3034         gfn_t gfn = gpa >> PAGE_SHIFT;
3035         int seg;
3036         int offset = offset_in_page(gpa);
3037         int ret;
3038
3039         while ((seg = next_segment(len, offset)) != 0) {
3040                 ret = kvm_vcpu_read_guest_page(vcpu, gfn, data, offset, seg);
3041                 if (ret < 0)
3042                         return ret;
3043                 offset = 0;
3044                 len -= seg;
3045                 data += seg;
3046                 ++gfn;
3047         }
3048         return 0;
3049 }
3050 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest);
3051
3052 static int __kvm_read_guest_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
3053                                    void *data, int offset, unsigned long len)
3054 {
3055         int r;
3056         unsigned long addr;
3057
3058         addr = gfn_to_hva_memslot_prot(slot, gfn, NULL);
3059         if (kvm_is_error_hva(addr))
3060                 return -EFAULT;
3061         pagefault_disable();
3062         r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
3063         pagefault_enable();
3064         if (r)
3065                 return -EFAULT;
3066         return 0;
3067 }
3068
3069 int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa,
3070                                void *data, unsigned long len)
3071 {
3072         gfn_t gfn = gpa >> PAGE_SHIFT;
3073         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3074         int offset = offset_in_page(gpa);
3075
3076         return __kvm_read_guest_atomic(slot, gfn, data, offset, len);
3077 }
3078 EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_atomic);
3079
3080 static int __kvm_write_guest_page(struct kvm *kvm,
3081                                   struct kvm_memory_slot *memslot, gfn_t gfn,
3082                                   const void *data, int offset, int len)
3083 {
3084         int r;
3085         unsigned long addr;
3086
3087         addr = gfn_to_hva_memslot(memslot, gfn);
3088         if (kvm_is_error_hva(addr))
3089                 return -EFAULT;
3090         r = __copy_to_user((void __user *)addr + offset, data, len);
3091         if (r)
3092                 return -EFAULT;
3093         mark_page_dirty_in_slot(kvm, memslot, gfn);
3094         return 0;
3095 }
3096
3097 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
3098                          const void *data, int offset, int len)
3099 {
3100         struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
3101
3102         return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
3103 }
3104 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
3105
3106 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
3107                               const void *data, int offset, int len)
3108 {
3109         struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3110
3111         return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
3112 }
3113 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
3114
3115 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
3116                     unsigned long len)
3117 {
3118         gfn_t gfn = gpa >> PAGE_SHIFT;
3119         int seg;
3120         int offset = offset_in_page(gpa);
3121         int ret;
3122
3123         while ((seg = next_segment(len, offset)) != 0) {
3124                 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
3125                 if (ret < 0)
3126                         return ret;
3127                 offset = 0;
3128                 len -= seg;
3129                 data += seg;
3130                 ++gfn;
3131         }
3132         return 0;
3133 }
3134 EXPORT_SYMBOL_GPL(kvm_write_guest);
3135
3136 int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
3137                          unsigned long len)
3138 {
3139         gfn_t gfn = gpa >> PAGE_SHIFT;
3140         int seg;
3141         int offset = offset_in_page(gpa);
3142         int ret;
3143
3144         while ((seg = next_segment(len, offset)) != 0) {
3145                 ret = kvm_vcpu_write_guest_page(vcpu, gfn, data, offset, seg);
3146                 if (ret < 0)
3147                         return ret;
3148                 offset = 0;
3149                 len -= seg;
3150                 data += seg;
3151                 ++gfn;
3152         }
3153         return 0;
3154 }
3155 EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest);
3156
3157 static int __kvm_gfn_to_hva_cache_init(struct kvm_memslots *slots,
3158                                        struct gfn_to_hva_cache *ghc,
3159                                        gpa_t gpa, unsigned long len)
3160 {
3161         int offset = offset_in_page(gpa);
3162         gfn_t start_gfn = gpa >> PAGE_SHIFT;
3163         gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT;
3164         gfn_t nr_pages_needed = end_gfn - start_gfn + 1;
3165         gfn_t nr_pages_avail;
3166
3167         /* Update ghc->generation before performing any error checks. */
3168         ghc->generation = slots->generation;
3169
3170         if (start_gfn > end_gfn) {
3171                 ghc->hva = KVM_HVA_ERR_BAD;
3172                 return -EINVAL;
3173         }
3174
3175         /*
3176          * If the requested region crosses two memslots, we still
3177          * verify that the entire region is valid here.
3178          */
3179         for ( ; start_gfn <= end_gfn; start_gfn += nr_pages_avail) {
3180                 ghc->memslot = __gfn_to_memslot(slots, start_gfn);
3181                 ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn,
3182                                            &nr_pages_avail);
3183                 if (kvm_is_error_hva(ghc->hva))
3184                         return -EFAULT;
3185         }
3186
3187         /* Use the slow path for cross-page reads and writes. */
3188         if (nr_pages_needed == 1)
3189                 ghc->hva += offset;
3190         else
3191                 ghc->memslot = NULL;
3192
3193         ghc->gpa = gpa;
3194         ghc->len = len;
3195         return 0;
3196 }
3197
3198 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3199                               gpa_t gpa, unsigned long len)
3200 {
3201         struct kvm_memslots *slots = kvm_memslots(kvm);
3202         return __kvm_gfn_to_hva_cache_init(slots, ghc, gpa, len);
3203 }
3204 EXPORT_SYMBOL_GPL(kvm_gfn_to_hva_cache_init);
3205
3206 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3207                                   void *data, unsigned int offset,
3208                                   unsigned long len)
3209 {
3210         struct kvm_memslots *slots = kvm_memslots(kvm);
3211         int r;
3212         gpa_t gpa = ghc->gpa + offset;
3213
3214         if (WARN_ON_ONCE(len + offset > ghc->len))
3215                 return -EINVAL;
3216
3217         if (slots->generation != ghc->generation) {
3218                 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3219                         return -EFAULT;
3220         }
3221
3222         if (kvm_is_error_hva(ghc->hva))
3223                 return -EFAULT;
3224
3225         if (unlikely(!ghc->memslot))
3226                 return kvm_write_guest(kvm, gpa, data, len);
3227
3228         r = __copy_to_user((void __user *)ghc->hva + offset, data, len);
3229         if (r)
3230                 return -EFAULT;
3231         mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT);
3232
3233         return 0;
3234 }
3235 EXPORT_SYMBOL_GPL(kvm_write_guest_offset_cached);
3236
3237 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3238                            void *data, unsigned long len)
3239 {
3240         return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len);
3241 }
3242 EXPORT_SYMBOL_GPL(kvm_write_guest_cached);
3243
3244 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3245                                  void *data, unsigned int offset,
3246                                  unsigned long len)
3247 {
3248         struct kvm_memslots *slots = kvm_memslots(kvm);
3249         int r;
3250         gpa_t gpa = ghc->gpa + offset;
3251
3252         if (WARN_ON_ONCE(len + offset > ghc->len))
3253                 return -EINVAL;
3254
3255         if (slots->generation != ghc->generation) {
3256                 if (__kvm_gfn_to_hva_cache_init(slots, ghc, ghc->gpa, ghc->len))
3257                         return -EFAULT;
3258         }
3259
3260         if (kvm_is_error_hva(ghc->hva))
3261                 return -EFAULT;
3262
3263         if (unlikely(!ghc->memslot))
3264                 return kvm_read_guest(kvm, gpa, data, len);
3265
3266         r = __copy_from_user(data, (void __user *)ghc->hva + offset, len);
3267         if (r)
3268                 return -EFAULT;
3269
3270         return 0;
3271 }
3272 EXPORT_SYMBOL_GPL(kvm_read_guest_offset_cached);
3273
3274 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
3275                           void *data, unsigned long len)
3276 {
3277         return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len);
3278 }
3279 EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
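/*
 * Illustrative use of the gfn_to_hva_cache API (a sketch, not a caller from
 * this file; "state" and its gpa are hypothetical):
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 state;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(state)))
 *		return -EFAULT;
 *	...
 *	if (kvm_write_guest_cached(kvm, &ghc, &state, sizeof(state)))
 *		return -EFAULT;
 *
 * The init call resolves the gpa to a host virtual address once; the cached
 * read/write helpers only re-resolve it when slots->generation changes.
 */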
3280
3281 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
3282 {
3283         const void *zero_page = (const void *) __va(page_to_phys(ZERO_PAGE(0)));
3284         gfn_t gfn = gpa >> PAGE_SHIFT;
3285         int seg;
3286         int offset = offset_in_page(gpa);
3287         int ret;
3288
3289         while ((seg = next_segment(len, offset)) != 0) {
3290                 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, seg);
3291                 if (ret < 0)
3292                         return ret;
3293                 offset = 0;
3294                 len -= seg;
3295                 ++gfn;
3296         }
3297         return 0;
3298 }
3299 EXPORT_SYMBOL_GPL(kvm_clear_guest);
3300
3301 void mark_page_dirty_in_slot(struct kvm *kvm,
3302                              const struct kvm_memory_slot *memslot,
3303                              gfn_t gfn)
3304 {
3305         struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
3306
3307 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
3308         if (WARN_ON_ONCE(!vcpu) || WARN_ON_ONCE(vcpu->kvm != kvm))
3309                 return;
3310 #endif
3311
3312         if (memslot && kvm_slot_dirty_track_enabled(memslot)) {
3313                 unsigned long rel_gfn = gfn - memslot->base_gfn;
3314                 u32 slot = (memslot->as_id << 16) | memslot->id;
3315
3316                 if (kvm->dirty_ring_size)
3317                         kvm_dirty_ring_push(&vcpu->dirty_ring,
3318                                             slot, rel_gfn);
3319                 else
3320                         set_bit_le(rel_gfn, memslot->dirty_bitmap);
3321         }
3322 }
3323 EXPORT_SYMBOL_GPL(mark_page_dirty_in_slot);
3324
3325 void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
3326 {
3327         struct kvm_memory_slot *memslot;
3328
3329         memslot = gfn_to_memslot(kvm, gfn);
3330         mark_page_dirty_in_slot(kvm, memslot, gfn);
3331 }
3332 EXPORT_SYMBOL_GPL(mark_page_dirty);
3333
3334 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
3335 {
3336         struct kvm_memory_slot *memslot;
3337
3338         memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
3339         mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn);
3340 }
3341 EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
3342
3343 void kvm_sigset_activate(struct kvm_vcpu *vcpu)
3344 {
3345         if (!vcpu->sigset_active)
3346                 return;
3347
3348         /*
3349          * This does a lockless modification of ->real_blocked, which is fine
3350          * because only current can change ->real_blocked, and all readers of
3351          * ->real_blocked don't care as long as ->real_blocked is always a
3352          * subset of ->blocked.
3353          */
3354         sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
3355 }
3356
3357 void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
3358 {
3359         if (!vcpu->sigset_active)
3360                 return;
3361
3362         sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
3363         sigemptyset(&current->real_blocked);
3364 }
3365
3366 static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
3367 {
3368         unsigned int old, val, grow, grow_start;
3369
3370         old = val = vcpu->halt_poll_ns;
3371         grow_start = READ_ONCE(halt_poll_ns_grow_start);
3372         grow = READ_ONCE(halt_poll_ns_grow);
3373         if (!grow)
3374                 goto out;
3375
3376         val *= grow;
3377         if (val < grow_start)
3378                 val = grow_start;
3379
3380         if (val > vcpu->kvm->max_halt_poll_ns)
3381                 val = vcpu->kvm->max_halt_poll_ns;
3382
3383         vcpu->halt_poll_ns = val;
3384 out:
3385         trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
3386 }
3387
3388 static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
3389 {
3390         unsigned int old, val, shrink, grow_start;
3391
3392         old = val = vcpu->halt_poll_ns;
3393         shrink = READ_ONCE(halt_poll_ns_shrink);
3394         grow_start = READ_ONCE(halt_poll_ns_grow_start);
3395         if (shrink == 0)
3396                 val = 0;
3397         else
3398                 val /= shrink;
3399
3400         if (val < grow_start)
3401                 val = 0;
3402
3403         vcpu->halt_poll_ns = val;
3404         trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
3405 }
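/*
 * Worked example of the grow/shrink arithmetic above, assuming the default
 * module parameters (halt_poll_ns_grow == 2, halt_poll_ns_grow_start ==
 * 10000, halt_poll_ns_shrink == 0): a vCPU with halt_poll_ns == 0 first
 * grows its window to 10000 ns, then to 20000, 40000, ..., capped at
 * vcpu->kvm->max_halt_poll_ns.  With shrink == 0, shrinking resets the
 * window straight to 0 instead of dividing it.
 */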
3406
3407 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
3408 {
3409         int ret = -EINTR;
3410         int idx = srcu_read_lock(&vcpu->kvm->srcu);
3411
3412         if (kvm_arch_vcpu_runnable(vcpu))
3413                 goto out;
3414         if (kvm_cpu_has_pending_timer(vcpu))
3415                 goto out;
3416         if (signal_pending(current))
3417                 goto out;
3418         if (kvm_check_request(KVM_REQ_UNBLOCK, vcpu))
3419                 goto out;
3420
3421         ret = 0;
3422 out:
3423         srcu_read_unlock(&vcpu->kvm->srcu, idx);
3424         return ret;
3425 }
3426
3427 /*
3428  * Block the vCPU until the vCPU is runnable, an event arrives, or a signal is
3429  * pending.  This is mostly used when halting a vCPU, but may also be used
3430  * directly for other vCPU non-runnable states, e.g. x86's Wait-For-SIPI.
3431  */
3432 bool kvm_vcpu_block(struct kvm_vcpu *vcpu)
3433 {
3434         struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
3435         bool waited = false;
3436
3437         vcpu->stat.generic.blocking = 1;
3438
3439         preempt_disable();
3440         kvm_arch_vcpu_blocking(vcpu);
3441         prepare_to_rcuwait(wait);
3442         preempt_enable();
3443
3444         for (;;) {
3445                 set_current_state(TASK_INTERRUPTIBLE);
3446
3447                 if (kvm_vcpu_check_block(vcpu) < 0)
3448                         break;
3449
3450                 waited = true;
3451                 schedule();
3452         }
3453
3454         preempt_disable();
3455         finish_rcuwait(wait);
3456         kvm_arch_vcpu_unblocking(vcpu);
3457         preempt_enable();
3458
3459         vcpu->stat.generic.blocking = 0;
3460
3461         return waited;
3462 }
3463
3464 static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
3465                                           ktime_t end, bool success)
3466 {
3467         struct kvm_vcpu_stat_generic *stats = &vcpu->stat.generic;
3468         u64 poll_ns = ktime_to_ns(ktime_sub(end, start));
3469
3470         ++vcpu->stat.generic.halt_attempted_poll;
3471
3472         if (success) {
3473                 ++vcpu->stat.generic.halt_successful_poll;
3474
3475                 if (!vcpu_valid_wakeup(vcpu))
3476                         ++vcpu->stat.generic.halt_poll_invalid;
3477
3478                 stats->halt_poll_success_ns += poll_ns;
3479                 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_success_hist, poll_ns);
3480         } else {
3481                 stats->halt_poll_fail_ns += poll_ns;
3482                 KVM_STATS_LOG_HIST_UPDATE(stats->halt_poll_fail_hist, poll_ns);
3483         }
3484 }
3485
3486 /*
3487  * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc...  If halt
3488  * polling is enabled, busy wait for a short time before blocking to avoid the
3489  * expensive block+unblock sequence if a wake event arrives soon after the vCPU
3490  * is halted.
3491  */
3492 void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
3493 {
3494         bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
3495         bool do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
3496         ktime_t start, cur, poll_end;
3497         bool waited = false;
3498         u64 halt_ns;
3499
3500         start = cur = poll_end = ktime_get();
3501         if (do_halt_poll) {
3502                 ktime_t stop = ktime_add_ns(start, vcpu->halt_poll_ns);
3503
3504                 do {
3505                         /*
3506                          * Stop polling once the vCPU is runnable, has a
3507                          * pending timer or signal, or KVM_REQ_UNBLOCK is set.
3508                          */
3509                         if (kvm_vcpu_check_block(vcpu) < 0)
3510                                 goto out;
3511                         cpu_relax();
3512                         poll_end = cur = ktime_get();
3513                 } while (kvm_vcpu_can_poll(cur, stop));
3514         }
3515
3516         waited = kvm_vcpu_block(vcpu);
3517
3518         cur = ktime_get();
3519         if (waited) {
3520                 vcpu->stat.generic.halt_wait_ns +=
3521                         ktime_to_ns(cur) - ktime_to_ns(poll_end);
3522                 KVM_STATS_LOG_HIST_UPDATE(vcpu->stat.generic.halt_wait_hist,
3523                                 ktime_to_ns(cur) - ktime_to_ns(poll_end));
3524         }
3525 out:
3526         /* The total time the vCPU was "halted", including polling time. */
3527         halt_ns = ktime_to_ns(cur) - ktime_to_ns(start);
3528
3529         /*
3530          * Note, halt-polling is considered successful so long as the vCPU was
3531          * never actually scheduled out, i.e. even if the wake event arrived
3532          * after the end of the halt-polling loop itself, but before the full wait.
3533          */
3534         if (do_halt_poll)
3535                 update_halt_poll_stats(vcpu, start, poll_end, !waited);
3536
3537         if (halt_poll_allowed) {
3538                 if (!vcpu_valid_wakeup(vcpu)) {
3539                         shrink_halt_poll_ns(vcpu);
3540                 } else if (vcpu->kvm->max_halt_poll_ns) {
3541                         if (halt_ns <= vcpu->halt_poll_ns)
3542                                 ;
3543                         /* we had a long block, shrink polling */
3544                         else if (vcpu->halt_poll_ns &&
3545                                  halt_ns > vcpu->kvm->max_halt_poll_ns)
3546                                 shrink_halt_poll_ns(vcpu);
3547                         /* we had a short halt and our poll time is too small */
3548                         else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
3549                                  halt_ns < vcpu->kvm->max_halt_poll_ns)
3550                                 grow_halt_poll_ns(vcpu);
3551                 } else {
3552                         vcpu->halt_poll_ns = 0;
3553                 }
3554         }
3555
3556         trace_kvm_vcpu_wakeup(halt_ns, waited, vcpu_valid_wakeup(vcpu));
3557 }
3558 EXPORT_SYMBOL_GPL(kvm_vcpu_halt);
3559
3560 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu)
3561 {
3562         if (__kvm_vcpu_wake_up(vcpu)) {
3563                 WRITE_ONCE(vcpu->ready, true);
3564                 ++vcpu->stat.generic.halt_wakeup;
3565                 return true;
3566         }
3567
3568         return false;
3569 }
3570 EXPORT_SYMBOL_GPL(kvm_vcpu_wake_up);
3571
3572 #ifndef CONFIG_S390
3573 /*
3574  * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
3575  */
3576 void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
3577 {
3578         int me, cpu;
3579
3580         if (kvm_vcpu_wake_up(vcpu))
3581                 return;
3582
3583         me = get_cpu();
3584         /*
3585          * The only state change done outside the vcpu mutex is IN_GUEST_MODE
3586          * to EXITING_GUEST_MODE.  Therefore the moderately expensive "should
3587          * kick" check does not need atomic operations if kvm_vcpu_kick is used
3588          * within the vCPU thread itself.
3589          */
3590         if (vcpu == __this_cpu_read(kvm_running_vcpu)) {
3591                 if (vcpu->mode == IN_GUEST_MODE)
3592                         WRITE_ONCE(vcpu->mode, EXITING_GUEST_MODE);
3593                 goto out;
3594         }
3595
3596         /*
3597          * Note, the vCPU could get migrated to a different pCPU at any point
3598          * after kvm_arch_vcpu_should_kick(), which could result in sending an
3599          * IPI to the previous pCPU.  But, that's ok because the purpose of the
3600          * IPI is to force the vCPU to leave IN_GUEST_MODE, and migrating the
3601          * vCPU also requires it to leave IN_GUEST_MODE.
3602          */
3603         if (kvm_arch_vcpu_should_kick(vcpu)) {
3604                 cpu = READ_ONCE(vcpu->cpu);
3605                 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
3606                         smp_send_reschedule(cpu);
3607         }
3608 out:
3609         put_cpu();
3610 }
3611 EXPORT_SYMBOL_GPL(kvm_vcpu_kick);
3612 #endif /* !CONFIG_S390 */
3613
3614 int kvm_vcpu_yield_to(struct kvm_vcpu *target)
3615 {
3616         struct pid *pid;
3617         struct task_struct *task = NULL;
3618         int ret = 0;
3619
3620         rcu_read_lock();
3621         pid = rcu_dereference(target->pid);
3622         if (pid)
3623                 task = get_pid_task(pid, PIDTYPE_PID);
3624         rcu_read_unlock();
3625         if (!task)
3626                 return ret;
3627         ret = yield_to(task, 1);
3628         put_task_struct(task);
3629
3630         return ret;
3631 }
3632 EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
3633
3634 /*
3635  * Helper that checks whether a VCPU is eligible for directed yield.
3636  * The most eligible candidate to yield to is chosen by the following heuristics:
3637  *
3638  *  (a) A VCPU which has not had a PLE exit or cpu relax intercepted recently
3639  *  (a preempted lock holder), indicated by @in_spin_loop.
3640  *  Set at the beginning and cleared at the end of the interception/PLE handler.
3641  *
3642  *  (b) A VCPU which has had a PLE exit/cpu relax intercepted but did not get a
3643  *  chance last time (it has mostly become eligible now, since we probably
3644  *  yielded to the lock holder in the last iteration). This is done by toggling
3645  *  @dy_eligible each time a VCPU is checked for eligibility.
3646  *
3647  *  Yielding to a recently PLE-exited/cpu-relax-intercepted VCPU before yielding
3648  *  to a preempted lock holder could result in wrong VCPU selection and CPU
3649  *  burning. Giving priority to a potential lock holder increases lock
3650  *  progress.
3651  *
3652  *  Since the algorithm is based on heuristics, accessing another VCPU's data
3653  *  without locking does no harm. It may result in trying to yield to the same
3654  *  VCPU, failing, and continuing with the next VCPU, and so on.
3655  */
3656 static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
3657 {
3658 #ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
3659         bool eligible;
3660
3661         eligible = !vcpu->spin_loop.in_spin_loop ||
3662                     vcpu->spin_loop.dy_eligible;
3663
3664         if (vcpu->spin_loop.in_spin_loop)
3665                 kvm_vcpu_set_dy_eligible(vcpu, !vcpu->spin_loop.dy_eligible);
3666
3667         return eligible;
3668 #else
3669         return true;
3670 #endif
3671 }
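/*
 * Example of the toggling above: a vCPU that is in a spin loop with
 * @dy_eligible false is skipped on this attempt but flipped to eligible,
 * so it will be picked on the next directed-yield pass; a vCPU that is not
 * in a spin loop at all is always considered eligible.
 */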
3672
3673 /*
3674  * Unlike kvm_arch_vcpu_runnable, this function is called outside
3675  * a vcpu_load/vcpu_put pair.  However, for most architectures
3676  * kvm_arch_vcpu_runnable does not require vcpu_load.
3677  */
3678 bool __weak kvm_arch_dy_runnable(struct kvm_vcpu *vcpu)
3679 {
3680         return kvm_arch_vcpu_runnable(vcpu);
3681 }
3682
3683 static bool vcpu_dy_runnable(struct kvm_vcpu *vcpu)
3684 {
3685         if (kvm_arch_dy_runnable(vcpu))
3686                 return true;
3687
3688 #ifdef CONFIG_KVM_ASYNC_PF
3689         if (!list_empty_careful(&vcpu->async_pf.done))
3690                 return true;
3691 #endif
3692
3693         return false;
3694 }
3695
3696 bool __weak kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu)
3697 {
3698         return false;
3699 }
3700
3701 void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
3702 {
3703         struct kvm *kvm = me->kvm;
3704         struct kvm_vcpu *vcpu;
3705         int last_boosted_vcpu = me->kvm->last_boosted_vcpu;
3706         unsigned long i;
3707         int yielded = 0;
3708         int try = 3;
3709         int pass;
3710
3711         kvm_vcpu_set_in_spin_loop(me, true);
3712         /*
3713          * We boost the priority of a VCPU that is runnable but not
3714          * currently running, because it got preempted by something
3715          * else and called schedule in __vcpu_run.  Hopefully that
3716          * VCPU is holding the lock that we need and will release it.
3717          * We approximate round-robin by starting at the last boosted VCPU.
3718          */
3719         for (pass = 0; pass < 2 && !yielded && try; pass++) {
3720                 kvm_for_each_vcpu(i, vcpu, kvm) {
3721                         if (!pass && i <= last_boosted_vcpu) {
3722                                 i = last_boosted_vcpu;
3723                                 continue;
3724                         } else if (pass && i > last_boosted_vcpu)
3725                                 break;
3726                         if (!READ_ONCE(vcpu->ready))
3727                                 continue;
3728                         if (vcpu == me)
3729                                 continue;
3730                         if (kvm_vcpu_is_blocking(vcpu) && !vcpu_dy_runnable(vcpu))
3731                                 continue;
3732                         if (READ_ONCE(vcpu->preempted) && yield_to_kernel_mode &&
3733                             !kvm_arch_dy_has_pending_interrupt(vcpu) &&
3734                             !kvm_arch_vcpu_in_kernel(vcpu))
3735                                 continue;
3736                         if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
3737                                 continue;
3738
3739                         yielded = kvm_vcpu_yield_to(vcpu);
3740                         if (yielded > 0) {
3741                                 kvm->last_boosted_vcpu = i;
3742                                 break;
3743                         } else if (yielded < 0) {
3744                                 try--;
3745                                 if (!try)
3746                                         break;
3747                         }
3748                 }
3749         }
3750         kvm_vcpu_set_in_spin_loop(me, false);
3751
3752         /* Ensure the vcpu is not eligible during the next spin loop */
3753         kvm_vcpu_set_dy_eligible(me, false);
3754 }
3755 EXPORT_SYMBOL_GPL(kvm_vcpu_on_spin);
3756
3757 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff)
3758 {
3759 #ifdef CONFIG_HAVE_KVM_DIRTY_RING
3760         return (pgoff >= KVM_DIRTY_LOG_PAGE_OFFSET) &&
3761             (pgoff < KVM_DIRTY_LOG_PAGE_OFFSET +
3762              kvm->dirty_ring_size / PAGE_SIZE);
3763 #else
3764         return false;
3765 #endif
3766 }
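/*
 * Worked example for the range check above, assuming PAGE_SIZE == 4096:
 * with dirty_ring_size == 64 KiB the ring covers 16 pages, so page offsets
 * KVM_DIRTY_LOG_PAGE_OFFSET through KVM_DIRTY_LOG_PAGE_OFFSET + 15 fall
 * inside the ring.
 */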
3767
3768 static vm_fault_t kvm_vcpu_fault(struct vm_fault *vmf)
3769 {
3770         struct kvm_vcpu *vcpu = vmf->vma->vm_file->private_data;
3771         struct page *page;
3772
3773         if (vmf->pgoff == 0)
3774                 page = virt_to_page(vcpu->run);
3775 #ifdef CONFIG_X86
3776         else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
3777                 page = virt_to_page(vcpu->arch.pio_data);
3778 #endif
3779 #ifdef CONFIG_KVM_MMIO
3780         else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
3781                 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
3782 #endif
3783         else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff))
3784                 page = kvm_dirty_ring_get_page(
3785                     &vcpu->dirty_ring,
3786                     vmf->pgoff - KVM_DIRTY_LOG_PAGE_OFFSET);
3787         else
3788                 return kvm_arch_vcpu_fault(vcpu, vmf);
3789         get_page(page);
3790         vmf->page = page;
3791         return 0;
3792 }
3793
3794 static const struct vm_operations_struct kvm_vcpu_vm_ops = {
3795         .fault = kvm_vcpu_fault,
3796 };
3797
3798 static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
3799 {
3800         struct kvm_vcpu *vcpu = file->private_data;
3801         unsigned long pages = vma_pages(vma);
3802
3803         if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) ||
3804              kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) &&
3805             ((vma->vm_flags & VM_EXEC) || !(vma->vm_flags & VM_SHARED)))
3806                 return -EINVAL;
3807
3808         vma->vm_ops = &kvm_vcpu_vm_ops;
3809         return 0;
3810 }
3811
3812 static int kvm_vcpu_release(struct inode *inode, struct file *filp)
3813 {
3814         struct kvm_vcpu *vcpu = filp->private_data;
3815
3816         kvm_put_kvm(vcpu->kvm);
3817         return 0;
3818 }
3819
3820 static const struct file_operations kvm_vcpu_fops = {
3821         .release        = kvm_vcpu_release,
3822         .unlocked_ioctl = kvm_vcpu_ioctl,
3823         .mmap           = kvm_vcpu_mmap,
3824         .llseek         = noop_llseek,
3825         KVM_COMPAT(kvm_vcpu_compat_ioctl),
3826 };
3827
3828 /*
3829  * Allocates an inode for the vcpu.
3830  */
3831 static int create_vcpu_fd(struct kvm_vcpu *vcpu)
3832 {
3833         char name[8 + 1 + ITOA_MAX_LEN + 1];
3834
3835         snprintf(name, sizeof(name), "kvm-vcpu:%d", vcpu->vcpu_id);
3836         return anon_inode_getfd(name, &kvm_vcpu_fops, vcpu, O_RDWR | O_CLOEXEC);
3837 }
3838
3839 #ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
3840 static int vcpu_get_pid(void *data, u64 *val)
3841 {
3842         struct kvm_vcpu *vcpu = (struct kvm_vcpu *) data;
3843         *val = pid_nr(rcu_access_pointer(vcpu->pid));
3844         return 0;
3845 }
3846
3847 DEFINE_SIMPLE_ATTRIBUTE(vcpu_get_pid_fops, vcpu_get_pid, NULL, "%llu\n");
3848
3849 static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
3850 {
3851         struct dentry *debugfs_dentry;
3852         char dir_name[ITOA_MAX_LEN * 2];
3853
3854         if (!debugfs_initialized())
3855                 return;
3856
3857         snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
3858         debugfs_dentry = debugfs_create_dir(dir_name,
3859                                             vcpu->kvm->debugfs_dentry);
3860         debugfs_create_file("pid", 0444, debugfs_dentry, vcpu,
3861                             &vcpu_get_pid_fops);
3862
3863         kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
3864 }
3865 #endif
3866
3867 /*
3868  * Creates some virtual cpus.  Good luck creating more than one.
3869  */
3870 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
3871 {
3872         int r;
3873         struct kvm_vcpu *vcpu;
3874         struct page *page;
3875
3876         if (id >= KVM_MAX_VCPU_IDS)
3877                 return -EINVAL;
3878
3879         mutex_lock(&kvm->lock);
3880         if (kvm->created_vcpus >= kvm->max_vcpus) {
3881                 mutex_unlock(&kvm->lock);
3882                 return -EINVAL;
3883         }
3884
3885         r = kvm_arch_vcpu_precreate(kvm, id);
3886         if (r) {
3887                 mutex_unlock(&kvm->lock);
3888                 return r;
3889         }
3890
3891         kvm->created_vcpus++;
3892         mutex_unlock(&kvm->lock);
3893
3894         vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT);
3895         if (!vcpu) {
3896                 r = -ENOMEM;
3897                 goto vcpu_decrement;
3898         }
3899
3900         BUILD_BUG_ON(sizeof(struct kvm_run) > PAGE_SIZE);
3901         page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
3902         if (!page) {
3903                 r = -ENOMEM;
3904                 goto vcpu_free;
3905         }
3906         vcpu->run = page_address(page);
3907
3908         kvm_vcpu_init(vcpu, kvm, id);
3909
3910         r = kvm_arch_vcpu_create(vcpu);
3911         if (r)
3912                 goto vcpu_free_run_page;
3913
3914         if (kvm->dirty_ring_size) {
3915                 r = kvm_dirty_ring_alloc(&vcpu->dirty_ring,
3916                                          id, kvm->dirty_ring_size);
3917                 if (r)
3918                         goto arch_vcpu_destroy;
3919         }
3920
3921         mutex_lock(&kvm->lock);
3922         if (kvm_get_vcpu_by_id(kvm, id)) {
3923                 r = -EEXIST;
3924                 goto unlock_vcpu_destroy;
3925         }
3926
3927         vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus);
3928         r = xa_insert(&kvm->vcpu_array, vcpu->vcpu_idx, vcpu, GFP_KERNEL_ACCOUNT);
3929         BUG_ON(r == -EBUSY);
3930         if (r)
3931                 goto unlock_vcpu_destroy;
3932
3933         /* Now it's all set up, let userspace reach it */
3934         kvm_get_kvm(kvm);
3935         r = create_vcpu_fd(vcpu);
3936         if (r < 0) {
3937                 xa_erase(&kvm->vcpu_array, vcpu->vcpu_idx);
3938                 kvm_put_kvm_no_destroy(kvm);
3939                 goto unlock_vcpu_destroy;
3940         }
3941
3942         /*
3943          * Pairs with smp_rmb() in kvm_get_vcpu.  Store the vcpu
3944          * pointer before the incremented value of kvm->online_vcpus.
3945          */
3946         smp_wmb();
3947         atomic_inc(&kvm->online_vcpus);
3948
3949         mutex_unlock(&kvm->lock);
3950         kvm_arch_vcpu_postcreate(vcpu);
3951         kvm_create_vcpu_debugfs(vcpu);
3952         return r;
3953
3954 unlock_vcpu_destroy:
3955         mutex_unlock(&kvm->lock);
3956         kvm_dirty_ring_free(&vcpu->dirty_ring);
3957 arch_vcpu_destroy:
3958         kvm_arch_vcpu_destroy(vcpu);
3959 vcpu_free_run_page:
3960         free_page((unsigned long)vcpu->run);
3961 vcpu_free:
3962         kmem_cache_free(kvm_vcpu_cache, vcpu);
3963 vcpu_decrement:
3964         mutex_lock(&kvm->lock);
3965         kvm->created_vcpus--;
3966         mutex_unlock(&kvm->lock);
3967         return r;
3968 }
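/*
 * Minimal userspace sketch of reaching this path (error handling omitted;
 * the fd variable names are hypothetical):
 *
 *	int kvm_fd  = open("/dev/kvm", O_RDWR);
 *	int vm_fd   = ioctl(kvm_fd, KVM_CREATE_VM, 0);
 *	int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);
 *
 * The returned vcpu_fd is the anonymous inode created by create_vcpu_fd()
 * and accepts the KVM_RUN, KVM_GET_REGS, etc. ioctls handled below.
 */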
3969
3970 static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
3971 {
3972         if (sigset) {
3973                 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
3974                 vcpu->sigset_active = 1;
3975                 vcpu->sigset = *sigset;
3976         } else
3977                 vcpu->sigset_active = 0;
3978         return 0;
3979 }
3980
3981 static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer,
3982                               size_t size, loff_t *offset)
3983 {
3984         struct kvm_vcpu *vcpu = file->private_data;
3985
3986         return kvm_stats_read(vcpu->stats_id, &kvm_vcpu_stats_header,
3987                         &kvm_vcpu_stats_desc[0], &vcpu->stat,
3988                         sizeof(vcpu->stat), user_buffer, size, offset);
3989 }
3990
3991 static const struct file_operations kvm_vcpu_stats_fops = {
3992         .read = kvm_vcpu_stats_read,
3993         .llseek = noop_llseek,
3994 };
3995
3996 static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu)
3997 {
3998         int fd;
3999         struct file *file;
4000         char name[15 + ITOA_MAX_LEN + 1];
4001
4002         snprintf(name, sizeof(name), "kvm-vcpu-stats:%d", vcpu->vcpu_id);
4003
4004         fd = get_unused_fd_flags(O_CLOEXEC);
4005         if (fd < 0)
4006                 return fd;
4007
4008         file = anon_inode_getfile(name, &kvm_vcpu_stats_fops, vcpu, O_RDONLY);
4009         if (IS_ERR(file)) {
4010                 put_unused_fd(fd);
4011                 return PTR_ERR(file);
4012         }
4013         file->f_mode |= FMODE_PREAD;
4014         fd_install(fd, file);
4015
4016         return fd;
4017 }
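/*
 * Illustrative userspace use of the vCPU stats fd (a sketch; the binary
 * layout is defined by the KVM binary stats ABI and is not shown here):
 *
 *	int stats_fd = ioctl(vcpu_fd, KVM_GET_STATS_FD, NULL);
 *	char buf[4096];
 *	ssize_t n = pread(stats_fd, buf, sizeof(buf), 0);
 *
 * FMODE_PREAD is set above precisely so that pread() at arbitrary offsets
 * works even though the file does not support llseek.
 */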
4018
4019 static long kvm_vcpu_ioctl(struct file *filp,
4020                            unsigned int ioctl, unsigned long arg)
4021 {
4022         struct kvm_vcpu *vcpu = filp->private_data;
4023         void __user *argp = (void __user *)arg;
4024         int r;
4025         struct kvm_fpu *fpu = NULL;
4026         struct kvm_sregs *kvm_sregs = NULL;
4027
4028         if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4029                 return -EIO;
4030
4031         if (unlikely(_IOC_TYPE(ioctl) != KVMIO))
4032                 return -EINVAL;
4033
4034         /*
4035          * Some architectures have vcpu ioctls that are asynchronous to vcpu
4036          * execution; mutex_lock() would break them.
4037          */
4038         r = kvm_arch_vcpu_async_ioctl(filp, ioctl, arg);
4039         if (r != -ENOIOCTLCMD)
4040                 return r;
4041
4042         if (mutex_lock_killable(&vcpu->mutex))
4043                 return -EINTR;
4044         switch (ioctl) {
4045         case KVM_RUN: {
4046                 struct pid *oldpid;
4047                 r = -EINVAL;
4048                 if (arg)
4049                         goto out;
4050                 oldpid = rcu_access_pointer(vcpu->pid);
4051                 if (unlikely(oldpid != task_pid(current))) {
4052                         /* The thread running this VCPU changed. */
4053                         struct pid *newpid;
4054
4055                         r = kvm_arch_vcpu_run_pid_change(vcpu);
4056                         if (r)
4057                                 break;
4058
4059                         newpid = get_task_pid(current, PIDTYPE_PID);
4060                         rcu_assign_pointer(vcpu->pid, newpid);
4061                         if (oldpid)
4062                                 synchronize_rcu();
4063                         put_pid(oldpid);
4064                 }
4065                 r = kvm_arch_vcpu_ioctl_run(vcpu);
4066                 trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
4067                 break;
4068         }
4069         case KVM_GET_REGS: {
4070                 struct kvm_regs *kvm_regs;
4071
4072                 r = -ENOMEM;
4073                 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL_ACCOUNT);
4074                 if (!kvm_regs)
4075                         goto out;
4076                 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
4077                 if (r)
4078                         goto out_free1;
4079                 r = -EFAULT;
4080                 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
4081                         goto out_free1;
4082                 r = 0;
4083 out_free1:
4084                 kfree(kvm_regs);
4085                 break;
4086         }
4087         case KVM_SET_REGS: {
4088                 struct kvm_regs *kvm_regs;
4089
4090                 kvm_regs = memdup_user(argp, sizeof(*kvm_regs));
4091                 if (IS_ERR(kvm_regs)) {
4092                         r = PTR_ERR(kvm_regs);
4093                         goto out;
4094                 }
4095                 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
4096                 kfree(kvm_regs);
4097                 break;
4098         }
4099         case KVM_GET_SREGS: {
4100                 kvm_sregs = kzalloc(sizeof(struct kvm_sregs),
4101                                     GFP_KERNEL_ACCOUNT);
4102                 r = -ENOMEM;
4103                 if (!kvm_sregs)
4104                         goto out;
4105                 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
4106                 if (r)
4107                         goto out;
4108                 r = -EFAULT;
4109                 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
4110                         goto out;
4111                 r = 0;
4112                 break;
4113         }
4114         case KVM_SET_SREGS: {
4115                 kvm_sregs = memdup_user(argp, sizeof(*kvm_sregs));
4116                 if (IS_ERR(kvm_sregs)) {
4117                         r = PTR_ERR(kvm_sregs);
4118                         kvm_sregs = NULL;
4119                         goto out;
4120                 }
4121                 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
4122                 break;
4123         }
4124         case KVM_GET_MP_STATE: {
4125                 struct kvm_mp_state mp_state;
4126
4127                 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
4128                 if (r)
4129                         goto out;
4130                 r = -EFAULT;
4131                 if (copy_to_user(argp, &mp_state, sizeof(mp_state)))
4132                         goto out;
4133                 r = 0;
4134                 break;
4135         }
4136         case KVM_SET_MP_STATE: {
4137                 struct kvm_mp_state mp_state;
4138
4139                 r = -EFAULT;
4140                 if (copy_from_user(&mp_state, argp, sizeof(mp_state)))
4141                         goto out;
4142                 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
4143                 break;
4144         }
4145         case KVM_TRANSLATE: {
4146                 struct kvm_translation tr;
4147
4148                 r = -EFAULT;
4149                 if (copy_from_user(&tr, argp, sizeof(tr)))
4150                         goto out;
4151                 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
4152                 if (r)
4153                         goto out;
4154                 r = -EFAULT;
4155                 if (copy_to_user(argp, &tr, sizeof(tr)))
4156                         goto out;
4157                 r = 0;
4158                 break;
4159         }
4160         case KVM_SET_GUEST_DEBUG: {
4161                 struct kvm_guest_debug dbg;
4162
4163                 r = -EFAULT;
4164                 if (copy_from_user(&dbg, argp, sizeof(dbg)))
4165                         goto out;
4166                 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
4167                 break;
4168         }
4169         case KVM_SET_SIGNAL_MASK: {
4170                 struct kvm_signal_mask __user *sigmask_arg = argp;
4171                 struct kvm_signal_mask kvm_sigmask;
4172                 sigset_t sigset, *p;
4173
4174                 p = NULL;
4175                 if (argp) {
4176                         r = -EFAULT;
4177                         if (copy_from_user(&kvm_sigmask, argp,
4178                                            sizeof(kvm_sigmask)))
4179                                 goto out;
4180                         r = -EINVAL;
4181                         if (kvm_sigmask.len != sizeof(sigset))
4182                                 goto out;
4183                         r = -EFAULT;
4184                         if (copy_from_user(&sigset, sigmask_arg->sigset,
4185                                            sizeof(sigset)))
4186                                 goto out;
4187                         p = &sigset;
4188                 }
4189                 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
4190                 break;
4191         }
4192         case KVM_GET_FPU: {
4193                 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL_ACCOUNT);
4194                 r = -ENOMEM;
4195                 if (!fpu)
4196                         goto out;
4197                 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
4198                 if (r)
4199                         goto out;
4200                 r = -EFAULT;
4201                 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
4202                         goto out;
4203                 r = 0;
4204                 break;
4205         }
4206         case KVM_SET_FPU: {
4207                 fpu = memdup_user(argp, sizeof(*fpu));
4208                 if (IS_ERR(fpu)) {
4209                         r = PTR_ERR(fpu);
4210                         fpu = NULL;
4211                         goto out;
4212                 }
4213                 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
4214                 break;
4215         }
4216         case KVM_GET_STATS_FD: {
4217                 r = kvm_vcpu_ioctl_get_stats_fd(vcpu);
4218                 break;
4219         }
4220         default:
4221                 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
4222         }
4223 out:
4224         mutex_unlock(&vcpu->mutex);
4225         kfree(fpu);
4226         kfree(kvm_sregs);
4227         return r;
4228 }
4229
4230 #ifdef CONFIG_KVM_COMPAT
4231 static long kvm_vcpu_compat_ioctl(struct file *filp,
4232                                   unsigned int ioctl, unsigned long arg)
4233 {
4234         struct kvm_vcpu *vcpu = filp->private_data;
4235         void __user *argp = compat_ptr(arg);
4236         int r;
4237
4238         if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_dead)
4239                 return -EIO;
4240
4241         switch (ioctl) {
4242         case KVM_SET_SIGNAL_MASK: {
4243                 struct kvm_signal_mask __user *sigmask_arg = argp;
4244                 struct kvm_signal_mask kvm_sigmask;
4245                 sigset_t sigset;
4246
4247                 if (argp) {
4248                         r = -EFAULT;
4249                         if (copy_from_user(&kvm_sigmask, argp,
4250                                            sizeof(kvm_sigmask)))
4251                                 goto out;
4252                         r = -EINVAL;
4253                         if (kvm_sigmask.len != sizeof(compat_sigset_t))
4254                                 goto out;
4255                         r = -EFAULT;
4256                         if (get_compat_sigset(&sigset,
4257                                               (compat_sigset_t __user *)sigmask_arg->sigset))
4258                                 goto out;
4259                         r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
4260                 } else
4261                         r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
4262                 break;
4263         }
4264         default:
4265                 r = kvm_vcpu_ioctl(filp, ioctl, arg);
4266         }
4267
4268 out:
4269         return r;
4270 }
4271 #endif
4272
4273 static int kvm_device_mmap(struct file *filp, struct vm_area_struct *vma)
4274 {
4275         struct kvm_device *dev = filp->private_data;
4276
4277         if (dev->ops->mmap)
4278                 return dev->ops->mmap(dev, vma);
4279
4280         return -ENODEV;
4281 }
4282
4283 static int kvm_device_ioctl_attr(struct kvm_device *dev,
4284                                  int (*accessor)(struct kvm_device *dev,
4285                                                  struct kvm_device_attr *attr),
4286                                  unsigned long arg)
4287 {
4288         struct kvm_device_attr attr;
4289
4290         if (!accessor)
4291                 return -EPERM;
4292
4293         if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
4294                 return -EFAULT;
4295
4296         return accessor(dev, &attr);
4297 }
4298
4299 static long kvm_device_ioctl(struct file *filp, unsigned int ioctl,
4300                              unsigned long arg)
4301 {
4302         struct kvm_device *dev = filp->private_data;
4303
4304         if (dev->kvm->mm != current->mm || dev->kvm->vm_dead)
4305                 return -EIO;
4306
4307         switch (ioctl) {
4308         case KVM_SET_DEVICE_ATTR:
4309                 return kvm_device_ioctl_attr(dev, dev->ops->set_attr, arg);
4310         case KVM_GET_DEVICE_ATTR:
4311                 return kvm_device_ioctl_attr(dev, dev->ops->get_attr, arg);
4312         case KVM_HAS_DEVICE_ATTR:
4313                 return kvm_device_ioctl_attr(dev, dev->ops->has_attr, arg);
4314         default:
4315                 if (dev->ops->ioctl)
4316                         return dev->ops->ioctl(dev, ioctl, arg);
4317
4318                 return -ENOTTY;
4319         }
4320 }
4321
4322 static int kvm_device_release(struct inode *inode, struct file *filp)
4323 {
4324         struct kvm_device *dev = filp->private_data;
4325         struct kvm *kvm = dev->kvm;
4326
4327         if (dev->ops->release) {
4328                 mutex_lock(&kvm->lock);
4329                 list_del(&dev->vm_node);
4330                 dev->ops->release(dev);
4331                 mutex_unlock(&kvm->lock);
4332         }
4333
4334         kvm_put_kvm(kvm);
4335         return 0;
4336 }
4337
4338 static const struct file_operations kvm_device_fops = {
4339         .unlocked_ioctl = kvm_device_ioctl,
4340         .release = kvm_device_release,
4341         KVM_COMPAT(kvm_device_ioctl),
4342         .mmap = kvm_device_mmap,
4343 };
4344
4345 struct kvm_device *kvm_device_from_filp(struct file *filp)
4346 {
4347         if (filp->f_op != &kvm_device_fops)
4348                 return NULL;
4349
4350         return filp->private_data;
4351 }
4352
4353 static const struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
4354 #ifdef CONFIG_KVM_MPIC
4355         [KVM_DEV_TYPE_FSL_MPIC_20]      = &kvm_mpic_ops,
4356         [KVM_DEV_TYPE_FSL_MPIC_42]      = &kvm_mpic_ops,
4357 #endif
4358 };
4359
4360 int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type)
4361 {
4362         if (type >= ARRAY_SIZE(kvm_device_ops_table))
4363                 return -ENOSPC;
4364
4365         if (kvm_device_ops_table[type] != NULL)
4366                 return -EEXIST;
4367
4368         kvm_device_ops_table[type] = ops;
4369         return 0;
4370 }
4371
4372 void kvm_unregister_device_ops(u32 type)
4373 {
4374         if (kvm_device_ops_table[type] != NULL)
4375                 kvm_device_ops_table[type] = NULL;
4376 }
4377
4378 static int kvm_ioctl_create_device(struct kvm *kvm,
4379                                    struct kvm_create_device *cd)
4380 {
4381         const struct kvm_device_ops *ops;
4382         struct kvm_device *dev;
4383         bool test = cd->flags & KVM_CREATE_DEVICE_TEST;
4384         int type;
4385         int ret;
4386
4387         if (cd->type >= ARRAY_SIZE(kvm_device_ops_table))
4388                 return -ENODEV;
4389
4390         type = array_index_nospec(cd->type, ARRAY_SIZE(kvm_device_ops_table));
4391         ops = kvm_device_ops_table[type];
4392         if (ops == NULL)
4393                 return -ENODEV;
4394
4395         if (test)
4396                 return 0;
4397
4398         dev = kzalloc(sizeof(*dev), GFP_KERNEL_ACCOUNT);
4399         if (!dev)
4400                 return -ENOMEM;
4401
4402         dev->ops = ops;
4403         dev->kvm = kvm;
4404
4405         mutex_lock(&kvm->lock);
4406         ret = ops->create(dev, type);
4407         if (ret < 0) {
4408                 mutex_unlock(&kvm->lock);
4409                 kfree(dev);
4410                 return ret;
4411         }
4412         list_add(&dev->vm_node, &kvm->devices);
4413         mutex_unlock(&kvm->lock);
4414
4415         if (ops->init)
4416                 ops->init(dev);
4417
4418         kvm_get_kvm(kvm);
4419         ret = anon_inode_getfd(ops->name, &kvm_device_fops, dev, O_RDWR | O_CLOEXEC);
4420         if (ret < 0) {
4421                 kvm_put_kvm_no_destroy(kvm);
4422                 mutex_lock(&kvm->lock);
4423                 list_del(&dev->vm_node);
4424                 if (ops->release)
4425                         ops->release(dev);
4426                 mutex_unlock(&kvm->lock);
4427                 if (ops->destroy)
4428                         ops->destroy(dev);
4429                 return ret;
4430         }
4431
4432         cd->fd = ret;
4433         return 0;
4434 }
4435
4436 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg)
4437 {
4438         switch (arg) {
4439         case KVM_CAP_USER_MEMORY:
4440         case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
4441         case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
4442         case KVM_CAP_INTERNAL_ERROR_DATA:
4443 #ifdef CONFIG_HAVE_KVM_MSI
4444         case KVM_CAP_SIGNAL_MSI:
4445 #endif
4446 #ifdef CONFIG_HAVE_KVM_IRQFD
4447         case KVM_CAP_IRQFD:
4448         case KVM_CAP_IRQFD_RESAMPLE:
4449 #endif
4450         case KVM_CAP_IOEVENTFD_ANY_LENGTH:
4451         case KVM_CAP_CHECK_EXTENSION_VM:
4452         case KVM_CAP_ENABLE_CAP_VM:
4453         case KVM_CAP_HALT_POLL:
4454                 return 1;
4455 #ifdef CONFIG_KVM_MMIO
4456         case KVM_CAP_COALESCED_MMIO:
4457                 return KVM_COALESCED_MMIO_PAGE_OFFSET;
4458         case KVM_CAP_COALESCED_PIO:
4459                 return 1;
4460 #endif
4461 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4462         case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2:
4463                 return KVM_DIRTY_LOG_MANUAL_CAPS;
4464 #endif
4465 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4466         case KVM_CAP_IRQ_ROUTING:
4467                 return KVM_MAX_IRQ_ROUTES;
4468 #endif
4469 #if KVM_ADDRESS_SPACE_NUM > 1
4470         case KVM_CAP_MULTI_ADDRESS_SPACE:
4471                 return KVM_ADDRESS_SPACE_NUM;
4472 #endif
4473         case KVM_CAP_NR_MEMSLOTS:
4474                 return KVM_USER_MEM_SLOTS;
4475         case KVM_CAP_DIRTY_LOG_RING:
4476 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_TSO
4477                 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4478 #else
4479                 return 0;
4480 #endif
4481         case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
4482 #ifdef CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL
4483                 return KVM_DIRTY_RING_MAX_ENTRIES * sizeof(struct kvm_dirty_gfn);
4484 #else
4485                 return 0;
4486 #endif
4487         case KVM_CAP_BINARY_STATS_FD:
4488         case KVM_CAP_SYSTEM_EVENT_DATA:
4489                 return 1;
4490         default:
4491                 break;
4492         }
4493         return kvm_vm_ioctl_check_extension(kvm, arg);
4494 }
4495
4496 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size)
4497 {
4498         int r;
4499
4500         if (!KVM_DIRTY_LOG_PAGE_OFFSET)
4501                 return -EINVAL;
4502
4503         /* The size must be a power of 2 */
4504         if (!size || (size & (size - 1)))
4505                 return -EINVAL;
4506
4507         /* Must be large enough to hold the reserved entries, and at least a page */
4508         if (size < kvm_dirty_ring_get_rsvd_entries() *
4509             sizeof(struct kvm_dirty_gfn) || size < PAGE_SIZE)
4510                 return -EINVAL;
4511
4512         if (size > KVM_DIRTY_RING_MAX_ENTRIES *
4513             sizeof(struct kvm_dirty_gfn))
4514                 return -E2BIG;
4515
4516         /* We only allow the size to be set once */
4517         if (kvm->dirty_ring_size)
4518                 return -EINVAL;
4519
4520         mutex_lock(&kvm->lock);
4521
4522         if (kvm->created_vcpus) {
4523                 /* The size cannot be changed after vCPUs have been created */
4524                 r = -EINVAL;
4525         } else {
4526                 kvm->dirty_ring_size = size;
4527                 r = 0;
4528         }
4529
4530         mutex_unlock(&kvm->lock);
4531         return r;
4532 }
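/*
 * Worked example of the size checks above, assuming PAGE_SIZE == 4096 and
 * sizeof(struct kvm_dirty_gfn) == 16: a valid size is a power of two that
 * is at least max(PAGE_SIZE, kvm_dirty_ring_get_rsvd_entries() * 16) and
 * at most KVM_DIRTY_RING_MAX_ENTRIES * 16 (1 MiB when
 * KVM_DIRTY_RING_MAX_ENTRIES == 65536); e.g. 64 KiB gives a 4096-entry
 * ring.
 */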
4533
4534 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm)
4535 {
4536         unsigned long i;
4537         struct kvm_vcpu *vcpu;
4538         int cleared = 0;
4539
4540         if (!kvm->dirty_ring_size)
4541                 return -EINVAL;
4542
4543         mutex_lock(&kvm->slots_lock);
4544
4545         kvm_for_each_vcpu(i, vcpu, kvm)
4546                 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring);
4547
4548         mutex_unlock(&kvm->slots_lock);
4549
4550         if (cleared)
4551                 kvm_flush_remote_tlbs(kvm);
4552
4553         return cleared;
4554 }
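
/*
 * Editor's sketch (not upstream code): the userspace half of the cycle.
 * After harvesting dirty GFNs from each vCPU's mmap()ed ring and marking
 * the entries as collected, userspace hands the slots back to the kernel
 * with KVM_RESET_DIRTY_RINGS; the return value is the number of entries
 * reset, mirroring 'cleared' above.  Compiled out with #if 0.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int reset_dirty_rings(int vm_fd)
{
        return ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
}
#endif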
4555
4556 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm,
4557                                                   struct kvm_enable_cap *cap)
4558 {
4559         return -EINVAL;
4560 }
4561
4562 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm,
4563                                            struct kvm_enable_cap *cap)
4564 {
4565         switch (cap->cap) {
4566 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4567         case KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2: {
4568                 u64 allowed_options = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE;
4569
4570                 if (cap->args[0] & KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE)
4571                         allowed_options = KVM_DIRTY_LOG_MANUAL_CAPS;
4572
4573                 if (cap->flags || (cap->args[0] & ~allowed_options))
4574                         return -EINVAL;
4575                 kvm->manual_dirty_log_protect = cap->args[0];
4576                 return 0;
4577         }
4578 #endif
4579         case KVM_CAP_HALT_POLL: {
4580                 if (cap->flags || cap->args[0] != (unsigned int)cap->args[0])
4581                         return -EINVAL;
4582
4583                 kvm->max_halt_poll_ns = cap->args[0];
4584                 return 0;
4585         }
4586         case KVM_CAP_DIRTY_LOG_RING:
4587         case KVM_CAP_DIRTY_LOG_RING_ACQ_REL:
4588                 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]);
4589         default:
4590                 return kvm_vm_ioctl_enable_cap(kvm, cap);
4591         }
4592 }
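
/*
 * Editor's sketch (not upstream code): opting in to manual dirty-log
 * reprotection, the first generic capability above.  Once enabled,
 * KVM_GET_DIRTY_LOG no longer write-protects pages; userspace reprotects
 * explicitly with KVM_CLEAR_DIRTY_LOG, which scales better for large VMs.
 * Compiled out with #if 0.
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_manual_protect(int vm_fd)
{
        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
                .args[0] = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE,
        };

        return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
#endif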
4593
4594 static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer,
4595                               size_t size, loff_t *offset)
4596 {
4597         struct kvm *kvm = file->private_data;
4598
4599         return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header,
4600                                 &kvm_vm_stats_desc[0], &kvm->stat,
4601                                 sizeof(kvm->stat), user_buffer, size, offset);
4602 }
4603
4604 static const struct file_operations kvm_vm_stats_fops = {
4605         .read = kvm_vm_stats_read,
4606         .llseek = noop_llseek,
4607 };
4608
4609 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm)
4610 {
4611         int fd;
4612         struct file *file;
4613
4614         fd = get_unused_fd_flags(O_CLOEXEC);
4615         if (fd < 0)
4616                 return fd;
4617
4618         file = anon_inode_getfile("kvm-vm-stats",
4619                         &kvm_vm_stats_fops, kvm, O_RDONLY);
4620         if (IS_ERR(file)) {
4621                 put_unused_fd(fd);
4622                 return PTR_ERR(file);
4623         }
4624         file->f_mode |= FMODE_PREAD;
4625         fd_install(fd, file);
4626
4627         return fd;
4628 }
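
/*
 * Editor's sketch (not upstream code): consuming the binary stats fd.
 * The fd starts with a struct kvm_stats_header that locates the id
 * string, the descriptors and the data; FMODE_PREAD (set above) allows
 * reading each region at its own offset.  Error handling is elided.
 * Compiled out with #if 0.
 */
#if 0
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_stats_header(int vm_fd, struct kvm_stats_header *hdr)
{
        int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);

        if (stats_fd < 0)
                return stats_fd;
        if (pread(stats_fd, hdr, sizeof(*hdr), 0) != sizeof(*hdr))
                return -1;
        /* hdr->num_desc descriptors follow at hdr->desc_offset. */
        return stats_fd;
}
#endif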
4629
4630 static long kvm_vm_ioctl(struct file *filp,
4631                            unsigned int ioctl, unsigned long arg)
4632 {
4633         struct kvm *kvm = filp->private_data;
4634         void __user *argp = (void __user *)arg;
4635         int r;
4636
4637         if (kvm->mm != current->mm || kvm->vm_dead)
4638                 return -EIO;
4639         switch (ioctl) {
4640         case KVM_CREATE_VCPU:
4641                 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
4642                 break;
4643         case KVM_ENABLE_CAP: {
4644                 struct kvm_enable_cap cap;
4645
4646                 r = -EFAULT;
4647                 if (copy_from_user(&cap, argp, sizeof(cap)))
4648                         goto out;
4649                 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap);
4650                 break;
4651         }
4652         case KVM_SET_USER_MEMORY_REGION: {
4653                 struct kvm_userspace_memory_region kvm_userspace_mem;
4654
4655                 r = -EFAULT;
4656                 if (copy_from_user(&kvm_userspace_mem, argp,
4657                                                 sizeof(kvm_userspace_mem)))
4658                         goto out;
4659
4660                 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem);
4661                 break;
4662         }
4663         case KVM_GET_DIRTY_LOG: {
4664                 struct kvm_dirty_log log;
4665
4666                 r = -EFAULT;
4667                 if (copy_from_user(&log, argp, sizeof(log)))
4668                         goto out;
4669                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
4670                 break;
4671         }
4672 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4673         case KVM_CLEAR_DIRTY_LOG: {
4674                 struct kvm_clear_dirty_log log;
4675
4676                 r = -EFAULT;
4677                 if (copy_from_user(&log, argp, sizeof(log)))
4678                         goto out;
4679                 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
4680                 break;
4681         }
4682 #endif
4683 #ifdef CONFIG_KVM_MMIO
4684         case KVM_REGISTER_COALESCED_MMIO: {
4685                 struct kvm_coalesced_mmio_zone zone;
4686
4687                 r = -EFAULT;
4688                 if (copy_from_user(&zone, argp, sizeof(zone)))
4689                         goto out;
4690                 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
4691                 break;
4692         }
4693         case KVM_UNREGISTER_COALESCED_MMIO: {
4694                 struct kvm_coalesced_mmio_zone zone;
4695
4696                 r = -EFAULT;
4697                 if (copy_from_user(&zone, argp, sizeof(zone)))
4698                         goto out;
4699                 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
4700                 break;
4701         }
4702 #endif
4703         case KVM_IRQFD: {
4704                 struct kvm_irqfd data;
4705
4706                 r = -EFAULT;
4707                 if (copy_from_user(&data, argp, sizeof(data)))
4708                         goto out;
4709                 r = kvm_irqfd(kvm, &data);
4710                 break;
4711         }
4712         case KVM_IOEVENTFD: {
4713                 struct kvm_ioeventfd data;
4714
4715                 r = -EFAULT;
4716                 if (copy_from_user(&data, argp, sizeof(data)))
4717                         goto out;
4718                 r = kvm_ioeventfd(kvm, &data);
4719                 break;
4720         }
4721 #ifdef CONFIG_HAVE_KVM_MSI
4722         case KVM_SIGNAL_MSI: {
4723                 struct kvm_msi msi;
4724
4725                 r = -EFAULT;
4726                 if (copy_from_user(&msi, argp, sizeof(msi)))
4727                         goto out;
4728                 r = kvm_send_userspace_msi(kvm, &msi);
4729                 break;
4730         }
4731 #endif
4732 #ifdef __KVM_HAVE_IRQ_LINE
4733         case KVM_IRQ_LINE_STATUS:
4734         case KVM_IRQ_LINE: {
4735                 struct kvm_irq_level irq_event;
4736
4737                 r = -EFAULT;
4738                 if (copy_from_user(&irq_event, argp, sizeof(irq_event)))
4739                         goto out;
4740
4741                 r = kvm_vm_ioctl_irq_line(kvm, &irq_event,
4742                                         ioctl == KVM_IRQ_LINE_STATUS);
4743                 if (r)
4744                         goto out;
4745
4746                 r = -EFAULT;
4747                 if (ioctl == KVM_IRQ_LINE_STATUS) {
4748                         if (copy_to_user(argp, &irq_event, sizeof(irq_event)))
4749                                 goto out;
4750                 }
4751
4752                 r = 0;
4753                 break;
4754         }
4755 #endif
4756 #ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
4757         case KVM_SET_GSI_ROUTING: {
4758                 struct kvm_irq_routing routing;
4759                 struct kvm_irq_routing __user *urouting;
4760                 struct kvm_irq_routing_entry *entries = NULL;
4761
4762                 r = -EFAULT;
4763                 if (copy_from_user(&routing, argp, sizeof(routing)))
4764                         goto out;
4765                 r = -EINVAL;
4766                 if (!kvm_arch_can_set_irq_routing(kvm))
4767                         goto out;
4768                 if (routing.nr > KVM_MAX_IRQ_ROUTES)
4769                         goto out;
4770                 if (routing.flags)
4771                         goto out;
4772                 if (routing.nr) {
4773                         urouting = argp;
4774                         entries = vmemdup_user(urouting->entries,
4775                                                array_size(sizeof(*entries),
4776                                                           routing.nr));
4777                         if (IS_ERR(entries)) {
4778                                 r = PTR_ERR(entries);
4779                                 goto out;
4780                         }
4781                 }
4782                 r = kvm_set_irq_routing(kvm, entries, routing.nr,
4783                                         routing.flags);
4784                 kvfree(entries);
4785                 break;
4786         }
4787 #endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
4788         case KVM_CREATE_DEVICE: {
4789                 struct kvm_create_device cd;
4790
4791                 r = -EFAULT;
4792                 if (copy_from_user(&cd, argp, sizeof(cd)))
4793                         goto out;
4794
4795                 r = kvm_ioctl_create_device(kvm, &cd);
4796                 if (r)
4797                         goto out;
4798
4799                 r = -EFAULT;
4800                 if (copy_to_user(argp, &cd, sizeof(cd)))
4801                         goto out;
4802
4803                 r = 0;
4804                 break;
4805         }
4806         case KVM_CHECK_EXTENSION:
4807                 r = kvm_vm_ioctl_check_extension_generic(kvm, arg);
4808                 break;
4809         case KVM_RESET_DIRTY_RINGS:
4810                 r = kvm_vm_ioctl_reset_dirty_pages(kvm);
4811                 break;
4812         case KVM_GET_STATS_FD:
4813                 r = kvm_vm_ioctl_get_stats_fd(kvm);
4814                 break;
4815         default:
4816                 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
4817         }
4818 out:
4819         return r;
4820 }
4821
4822 #ifdef CONFIG_KVM_COMPAT
4823 struct compat_kvm_dirty_log {
4824         __u32 slot;
4825         __u32 padding1;
4826         union {
4827                 compat_uptr_t dirty_bitmap; /* one bit per page */
4828                 __u64 padding2;
4829         };
4830 };
4831
4832 struct compat_kvm_clear_dirty_log {
4833         __u32 slot;
4834         __u32 num_pages;
4835         __u64 first_page;
4836         union {
4837                 compat_uptr_t dirty_bitmap; /* one bit per page */
4838                 __u64 padding2;
4839         };
4840 };
4841
4842 static long kvm_vm_compat_ioctl(struct file *filp,
4843                            unsigned int ioctl, unsigned long arg)
4844 {
4845         struct kvm *kvm = filp->private_data;
4846         int r;
4847
4848         if (kvm->mm != current->mm || kvm->vm_dead)
4849                 return -EIO;
4850         switch (ioctl) {
4851 #ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
4852         case KVM_CLEAR_DIRTY_LOG: {
4853                 struct compat_kvm_clear_dirty_log compat_log;
4854                 struct kvm_clear_dirty_log log;
4855
4856                 if (copy_from_user(&compat_log, (void __user *)arg,
4857                                    sizeof(compat_log)))
4858                         return -EFAULT;
4859                 log.slot         = compat_log.slot;
4860                 log.num_pages    = compat_log.num_pages;
4861                 log.first_page   = compat_log.first_page;
4862                 log.padding2     = compat_log.padding2;
4863                 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
4864
4865                 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log);
4866                 break;
4867         }
4868 #endif
4869         case KVM_GET_DIRTY_LOG: {
4870                 struct compat_kvm_dirty_log compat_log;
4871                 struct kvm_dirty_log log;
4872
4873                 if (copy_from_user(&compat_log, (void __user *)arg,
4874                                    sizeof(compat_log)))
4875                         return -EFAULT;
4876                 log.slot         = compat_log.slot;
4877                 log.padding1     = compat_log.padding1;
4878                 log.padding2     = compat_log.padding2;
4879                 log.dirty_bitmap = compat_ptr(compat_log.dirty_bitmap);
4880
4881                 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
4882                 break;
4883         }
4884         default:
4885                 r = kvm_vm_ioctl(filp, ioctl, arg);
4886         }
4887         return r;
4888 }
4889 #endif
4890
4891 static const struct file_operations kvm_vm_fops = {
4892         .release        = kvm_vm_release,
4893         .unlocked_ioctl = kvm_vm_ioctl,
4894         .llseek         = noop_llseek,
4895         KVM_COMPAT(kvm_vm_compat_ioctl),
4896 };
4897
4898 bool file_is_kvm(struct file *file)
4899 {
4900         return file && file->f_op == &kvm_vm_fops;
4901 }
4902 EXPORT_SYMBOL_GPL(file_is_kvm);
4903
4904 static int kvm_dev_ioctl_create_vm(unsigned long type)
4905 {
4906         char fdname[ITOA_MAX_LEN + 1];
4907         int r, fd;
4908         struct kvm *kvm;
4909         struct file *file;
4910
4911         fd = get_unused_fd_flags(O_CLOEXEC);
4912         if (fd < 0)
4913                 return fd;
4914
4915         snprintf(fdname, sizeof(fdname), "%d", fd);
4916
4917         kvm = kvm_create_vm(type, fdname);
4918         if (IS_ERR(kvm)) {
4919                 r = PTR_ERR(kvm);
4920                 goto put_fd;
4921         }
4922
4923         file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR);
4924         if (IS_ERR(file)) {
4925                 r = PTR_ERR(file);
4926                 goto put_kvm;
4927         }
4928
4929         /*
4930          * Don't call kvm_put_kvm anymore at this point; file->f_op is
4931          * already set, with ->release() being kvm_vm_release().  In error
4932          * cases it will be called by the final fput(file) and will take
4933          * care of doing kvm_put_kvm(kvm).
4934          */
4935         kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm);
4936
4937         fd_install(fd, file);
4938         return fd;
4939
4940 put_kvm:
4941         kvm_put_kvm(kvm);
4942 put_fd:
4943         put_unused_fd(fd);
4944         return r;
4945 }
4946
4947 static long kvm_dev_ioctl(struct file *filp,
4948                           unsigned int ioctl, unsigned long arg)
4949 {
4950         long r = -EINVAL;
4951
4952         switch (ioctl) {
4953         case KVM_GET_API_VERSION:
4954                 if (arg)
4955                         goto out;
4956                 r = KVM_API_VERSION;
4957                 break;
4958         case KVM_CREATE_VM:
4959                 r = kvm_dev_ioctl_create_vm(arg);
4960                 break;
4961         case KVM_CHECK_EXTENSION:
4962                 r = kvm_vm_ioctl_check_extension_generic(NULL, arg);
4963                 break;
4964         case KVM_GET_VCPU_MMAP_SIZE:
4965                 if (arg)
4966                         goto out;
4967                 r = PAGE_SIZE;     /* struct kvm_run */
4968 #ifdef CONFIG_X86
4969                 r += PAGE_SIZE;    /* pio data page */
4970 #endif
4971 #ifdef CONFIG_KVM_MMIO
4972                 r += PAGE_SIZE;    /* coalesced mmio ring page */
4973 #endif
4974                 break;
4975         case KVM_TRACE_ENABLE:
4976         case KVM_TRACE_PAUSE:
4977         case KVM_TRACE_DISABLE:
4978                 r = -EOPNOTSUPP;
4979                 break;
4980         default:
4981                 return kvm_arch_dev_ioctl(filp, ioctl, arg);
4982         }
4983 out:
4984         return r;
4985 }
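
/*
 * Editor's sketch (not upstream code): the canonical bootstrap sequence
 * served by this ioctl handler.  KVM_API_VERSION has been 12 since the
 * ABI was declared stable; anything else means an incompatible kernel.
 * Compiled out with #if 0.
 */
#if 0
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int create_vm(void)
{
        int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
        int vm_fd, mmap_size;

        if (kvm_fd < 0)
                return -1;
        if (ioctl(kvm_fd, KVM_GET_API_VERSION, 0) != KVM_API_VERSION)
                return -1;
        vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);       /* 0 = default VM type */
        mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
        return (vm_fd < 0 || mmap_size < 0) ? -1 : vm_fd;
}
#endif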
4986
4987 static struct file_operations kvm_chardev_ops = {
4988         .unlocked_ioctl = kvm_dev_ioctl,
4989         .llseek         = noop_llseek,
4990         KVM_COMPAT(kvm_dev_ioctl),
4991 };
4992
4993 static struct miscdevice kvm_dev = {
4994         KVM_MINOR,
4995         "kvm",
4996         &kvm_chardev_ops,
4997 };
4998
4999 static void hardware_enable_nolock(void *junk)
5000 {
5001         int cpu = raw_smp_processor_id();
5002         int r;
5003
5004         if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
5005                 return;
5006
5007         cpumask_set_cpu(cpu, cpus_hardware_enabled);
5008
5009         r = kvm_arch_hardware_enable();
5010
5011         if (r) {
5012                 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
5013                 atomic_inc(&hardware_enable_failed);
5014                 pr_info("kvm: enabling virtualization on CPU%d failed\n", cpu);
5015         }
5016 }
5017
5018 static int kvm_starting_cpu(unsigned int cpu)
5019 {
5020         raw_spin_lock(&kvm_count_lock);
5021         if (kvm_usage_count)
5022                 hardware_enable_nolock(NULL);
5023         raw_spin_unlock(&kvm_count_lock);
5024         return 0;
5025 }
5026
5027 static void hardware_disable_nolock(void *junk)
5028 {
5029         int cpu = raw_smp_processor_id();
5030
5031         if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
5032                 return;
5033         cpumask_clear_cpu(cpu, cpus_hardware_enabled);
5034         kvm_arch_hardware_disable();
5035 }
5036
5037 static int kvm_dying_cpu(unsigned int cpu)
5038 {
5039         raw_spin_lock(&kvm_count_lock);
5040         if (kvm_usage_count)
5041                 hardware_disable_nolock(NULL);
5042         raw_spin_unlock(&kvm_count_lock);
5043         return 0;
5044 }
5045
5046 static void hardware_disable_all_nolock(void)
5047 {
5048         BUG_ON(!kvm_usage_count);
5049
5050         kvm_usage_count--;
5051         if (!kvm_usage_count)
5052                 on_each_cpu(hardware_disable_nolock, NULL, 1);
5053 }
5054
5055 static void hardware_disable_all(void)
5056 {
5057         raw_spin_lock(&kvm_count_lock);
5058         hardware_disable_all_nolock();
5059         raw_spin_unlock(&kvm_count_lock);
5060 }
5061
5062 static int hardware_enable_all(void)
5063 {
5064         int r = 0;
5065
5066         raw_spin_lock(&kvm_count_lock);
5067
5068         kvm_usage_count++;
5069         if (kvm_usage_count == 1) {
5070                 atomic_set(&hardware_enable_failed, 0);
5071                 on_each_cpu(hardware_enable_nolock, NULL, 1);
5072
5073                 if (atomic_read(&hardware_enable_failed)) {
5074                         hardware_disable_all_nolock();
5075                         r = -EBUSY;
5076                 }
5077         }
5078
5079         raw_spin_unlock(&kvm_count_lock);
5080
5081         return r;
5082 }
5083
5084 static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
5085                       void *v)
5086 {
5087         /*
5088          * Some (well, at least mine) BIOSes hang on reboot if
5089          * in VMX root mode.
5090          *
5091          * And Intel TXT requires VMX to be off on all CPUs when the system shuts down.
5092          */
5093         pr_info("kvm: exiting hardware virtualization\n");
5094         kvm_rebooting = true;
5095         on_each_cpu(hardware_disable_nolock, NULL, 1);
5096         return NOTIFY_OK;
5097 }
5098
5099 static struct notifier_block kvm_reboot_notifier = {
5100         .notifier_call = kvm_reboot,
5101         .priority = 0,
5102 };
5103
5104 static void kvm_io_bus_destroy(struct kvm_io_bus *bus)
5105 {
5106         int i;
5107
5108         for (i = 0; i < bus->dev_count; i++) {
5109                 struct kvm_io_device *pos = bus->range[i].dev;
5110
5111                 kvm_iodevice_destructor(pos);
5112         }
5113         kfree(bus);
5114 }
5115
5116 static inline int kvm_io_bus_cmp(const struct kvm_io_range *r1,
5117                                  const struct kvm_io_range *r2)
5118 {
5119         gpa_t addr1 = r1->addr;
5120         gpa_t addr2 = r2->addr;
5121
5122         if (addr1 < addr2)
5123                 return -1;
5124
5125         /* If r2->len == 0, match the exact address.  If r2->len != 0,
5126          * accept any access that falls within the registered range.
5127          * Any order is acceptable for overlapping ranges, because
5128          * kvm_io_bus_get_first_dev ensures we process all of them.
5129          */
5130         if (r2->len) {
5131                 addr1 += r1->len;
5132                 addr2 += r2->len;
5133         }
5134
5135         if (addr1 > addr2)
5136                 return 1;
5137
5138         return 0;
5139 }
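
/*
 * Editor's worked example of the comparison above: with a registered
 * range { .addr = 0x1000, .len = 8 }, a key of { .addr = 0x1004,
 * .len = 4 } compares equal (the access lies within the range), a key
 * of { .addr = 0x0ffc, .len = 8 } returns -1 (it starts below the
 * range), and { .addr = 0x1004, .len = 8 } returns 1 (it runs past the
 * end).  With a zero-length registered range, only an exact address
 * match compares equal, whatever the key length.
 */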
5140
5141 static int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
5142 {
5143         return kvm_io_bus_cmp(p1, p2);
5144 }
5145
5146 static int kvm_io_bus_get_first_dev(struct kvm_io_bus *bus,
5147                              gpa_t addr, int len)
5148 {
5149         struct kvm_io_range *range, key;
5150         int off;
5151
5152         key = (struct kvm_io_range) {
5153                 .addr = addr,
5154                 .len = len,
5155         };
5156
5157         range = bsearch(&key, bus->range, bus->dev_count,
5158                         sizeof(struct kvm_io_range), kvm_io_bus_sort_cmp);
5159         if (range == NULL)
5160                 return -ENOENT;
5161
5162         off = range - bus->range;
5163
5164         while (off > 0 && kvm_io_bus_cmp(&key, &bus->range[off-1]) == 0)
5165                 off--;
5166
5167         return off;
5168 }
5169
5170 static int __kvm_io_bus_write(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5171                               struct kvm_io_range *range, const void *val)
5172 {
5173         int idx;
5174
5175         idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5176         if (idx < 0)
5177                 return -EOPNOTSUPP;
5178
5179         while (idx < bus->dev_count &&
5180                 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5181                 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr,
5182                                         range->len, val))
5183                         return idx;
5184                 idx++;
5185         }
5186
5187         return -EOPNOTSUPP;
5188 }
5189
5190 /* kvm_io_bus_write - called under kvm->slots_lock */
5191 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5192                      int len, const void *val)
5193 {
5194         struct kvm_io_bus *bus;
5195         struct kvm_io_range range;
5196         int r;
5197
5198         range = (struct kvm_io_range) {
5199                 .addr = addr,
5200                 .len = len,
5201         };
5202
5203         bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5204         if (!bus)
5205                 return -ENOMEM;
5206         r = __kvm_io_bus_write(vcpu, bus, &range, val);
5207         return r < 0 ? r : 0;
5208 }
5209 EXPORT_SYMBOL_GPL(kvm_io_bus_write);
5210
5211 /* kvm_io_bus_write_cookie - called under kvm->slots_lock */
5212 int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
5213                             gpa_t addr, int len, const void *val, long cookie)
5214 {
5215         struct kvm_io_bus *bus;
5216         struct kvm_io_range range;
5217
5218         range = (struct kvm_io_range) {
5219                 .addr = addr,
5220                 .len = len,
5221         };
5222
5223         bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5224         if (!bus)
5225                 return -ENOMEM;
5226
5227         /* First try the device referenced by cookie. */
5228         if ((cookie >= 0) && (cookie < bus->dev_count) &&
5229             (kvm_io_bus_cmp(&range, &bus->range[cookie]) == 0))
5230                 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len,
5231                                         val))
5232                         return cookie;
5233
5234         /*
5235          * cookie contained garbage; fall back to search and return the
5236          * correct cookie value.
5237          */
5238         return __kvm_io_bus_write(vcpu, bus, &range, val);
5239 }
5240
5241 static int __kvm_io_bus_read(struct kvm_vcpu *vcpu, struct kvm_io_bus *bus,
5242                              struct kvm_io_range *range, void *val)
5243 {
5244         int idx;
5245
5246         idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len);
5247         if (idx < 0)
5248                 return -EOPNOTSUPP;
5249
5250         while (idx < bus->dev_count &&
5251                 kvm_io_bus_cmp(range, &bus->range[idx]) == 0) {
5252                 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr,
5253                                        range->len, val))
5254                         return idx;
5255                 idx++;
5256         }
5257
5258         return -EOPNOTSUPP;
5259 }
5260
5261 /* kvm_io_bus_read - called under kvm->slots_lock */
5262 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
5263                     int len, void *val)
5264 {
5265         struct kvm_io_bus *bus;
5266         struct kvm_io_range range;
5267         int r;
5268
5269         range = (struct kvm_io_range) {
5270                 .addr = addr,
5271                 .len = len,
5272         };
5273
5274         bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
5275         if (!bus)
5276                 return -ENOMEM;
5277         r = __kvm_io_bus_read(vcpu, bus, &range, val);
5278         return r < 0 ? r : 0;
5279 }
5280
5281 /* Caller must hold slots_lock. */
5282 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
5283                             int len, struct kvm_io_device *dev)
5284 {
5285         int i;
5286         struct kvm_io_bus *new_bus, *bus;
5287         struct kvm_io_range range;
5288
5289         bus = kvm_get_bus(kvm, bus_idx);
5290         if (!bus)
5291                 return -ENOMEM;
5292
5293         /* Exclude ioeventfds, which are already bounded by the maximum fd count */
5294         if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
5295                 return -ENOSPC;
5296
5297         new_bus = kmalloc(struct_size(bus, range, bus->dev_count + 1),
5298                           GFP_KERNEL_ACCOUNT);
5299         if (!new_bus)
5300                 return -ENOMEM;
5301
5302         range = (struct kvm_io_range) {
5303                 .addr = addr,
5304                 .len = len,
5305                 .dev = dev,
5306         };
5307
5308         for (i = 0; i < bus->dev_count; i++)
5309                 if (kvm_io_bus_cmp(&bus->range[i], &range) > 0)
5310                         break;
5311
5312         memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
5313         new_bus->dev_count++;
5314         new_bus->range[i] = range;
5315         memcpy(new_bus->range + i + 1, bus->range + i,
5316                 (bus->dev_count - i) * sizeof(struct kvm_io_range));
5317         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5318         synchronize_srcu_expedited(&kvm->srcu);
5319         kfree(bus);
5320
5321         return 0;
5322 }
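
/*
 * Editor's sketch (not upstream code): the registration flow from a
 * device's point of view.  The ops structure and bus constants are the
 * real in-tree API; the demo device itself is invented.  Returning 0
 * from the callback means "handled"; -EOPNOTSUPP lets the bus try the
 * next overlapping device.  Compiled out with #if 0.
 */
#if 0
struct demo_dev {
        struct kvm_io_device dev;
};

static int demo_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                      gpa_t addr, int len, const void *val)
{
        /* ... latch the MMIO write somewhere ... */
        return 0;
}

static const struct kvm_io_device_ops demo_ops = {
        .write = demo_write,
};

static int demo_register(struct kvm *kvm, struct demo_dev *d, gpa_t addr)
{
        int ret;

        kvm_iodevice_init(&d->dev, &demo_ops);
        mutex_lock(&kvm->slots_lock);   /* required by the function above */
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, 8, &d->dev);
        mutex_unlock(&kvm->slots_lock);
        return ret;
}
#endif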
5323
5324 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5325                               struct kvm_io_device *dev)
5326 {
5327         int i, j;
5328         struct kvm_io_bus *new_bus, *bus;
5329
5330         lockdep_assert_held(&kvm->slots_lock);
5331
5332         bus = kvm_get_bus(kvm, bus_idx);
5333         if (!bus)
5334                 return 0;
5335
5336         for (i = 0; i < bus->dev_count; i++) {
5337                 if (bus->range[i].dev == dev) {
5338                         break;
5339                 }
5340         }
5341
5342         if (i == bus->dev_count)
5343                 return 0;
5344
5345         new_bus = kmalloc(struct_size(bus, range, bus->dev_count - 1),
5346                           GFP_KERNEL_ACCOUNT);
5347         if (new_bus) {
5348                 memcpy(new_bus, bus, struct_size(bus, range, i));
5349                 new_bus->dev_count--;
5350                 memcpy(new_bus->range + i, bus->range + i + 1,
5351                                 flex_array_size(new_bus, range, new_bus->dev_count - i));
5352         }
5353
5354         rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
5355         synchronize_srcu_expedited(&kvm->srcu);
5356
5357         /* Destroy the old bus _after_ installing the (null) bus. */
5358         if (!new_bus) {
5359                 pr_err("kvm: failed to shrink bus, removing it completely\n");
5360                 for (j = 0; j < bus->dev_count; j++) {
5361                         if (j == i)
5362                                 continue;
5363                         kvm_iodevice_destructor(bus->range[j].dev);
5364                 }
5365         }
5366
5367         kfree(bus);
5368         return new_bus ? 0 : -ENOMEM;
5369 }
5370
5371 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
5372                                          gpa_t addr)
5373 {
5374         struct kvm_io_bus *bus;
5375         int dev_idx, srcu_idx;
5376         struct kvm_io_device *iodev = NULL;
5377
5378         srcu_idx = srcu_read_lock(&kvm->srcu);
5379
5380         bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
5381         if (!bus)
5382                 goto out_unlock;
5383
5384         dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
5385         if (dev_idx < 0)
5386                 goto out_unlock;
5387
5388         iodev = bus->range[dev_idx].dev;
5389
5390 out_unlock:
5391         srcu_read_unlock(&kvm->srcu, srcu_idx);
5392
5393         return iodev;
5394 }
5395 EXPORT_SYMBOL_GPL(kvm_io_bus_get_dev);
5396
5397 static int kvm_debugfs_open(struct inode *inode, struct file *file,
5398                            int (*get)(void *, u64 *), int (*set)(void *, u64),
5399                            const char *fmt)
5400 {
5401         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
5402                                           inode->i_private;
5403
5404         /*
5405          * The debugfs files hold a reference to the kvm struct, which
5406          * is still valid when kvm_destroy_vm is called.  kvm_get_kvm_safe
5407          * avoids the race between open and the removal of the debugfs directory.
5408          */
5409         if (!kvm_get_kvm_safe(stat_data->kvm))
5410                 return -ENOENT;
5411
5412         if (simple_attr_open(inode, file, get,
5413                     kvm_stats_debugfs_mode(stat_data->desc) & 0222
5414                     ? set : NULL,
5415                     fmt)) {
5416                 kvm_put_kvm(stat_data->kvm);
5417                 return -ENOMEM;
5418         }
5419
5420         return 0;
5421 }
5422
5423 static int kvm_debugfs_release(struct inode *inode, struct file *file)
5424 {
5425         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)
5426                                           inode->i_private;
5427
5428         simple_attr_release(inode, file);
5429         kvm_put_kvm(stat_data->kvm);
5430
5431         return 0;
5432 }
5433
5434 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val)
5435 {
5436         *val = *(u64 *)((void *)(&kvm->stat) + offset);
5437
5438         return 0;
5439 }
5440
5441 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset)
5442 {
5443         *(u64 *)((void *)(&kvm->stat) + offset) = 0;
5444
5445         return 0;
5446 }
5447
5448 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val)
5449 {
5450         unsigned long i;
5451         struct kvm_vcpu *vcpu;
5452
5453         *val = 0;
5454
5455         kvm_for_each_vcpu(i, vcpu, kvm)
5456                 *val += *(u64 *)((void *)(&vcpu->stat) + offset);
5457
5458         return 0;
5459 }
5460
5461 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset)
5462 {
5463         unsigned long i;
5464         struct kvm_vcpu *vcpu;
5465
5466         kvm_for_each_vcpu(i, vcpu, kvm)
5467                 *(u64 *)((void *)(&vcpu->stat) + offset) = 0;
5468
5469         return 0;
5470 }
5471
5472 static int kvm_stat_data_get(void *data, u64 *val)
5473 {
5474         int r = -EFAULT;
5475         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
5476
5477         switch (stat_data->kind) {
5478         case KVM_STAT_VM:
5479                 r = kvm_get_stat_per_vm(stat_data->kvm,
5480                                         stat_data->desc->desc.offset, val);
5481                 break;
5482         case KVM_STAT_VCPU:
5483                 r = kvm_get_stat_per_vcpu(stat_data->kvm,
5484                                           stat_data->desc->desc.offset, val);
5485                 break;
5486         }
5487
5488         return r;
5489 }
5490
5491 static int kvm_stat_data_clear(void *data, u64 val)
5492 {
5493         int r = -EFAULT;
5494         struct kvm_stat_data *stat_data = (struct kvm_stat_data *)data;
5495
5496         if (val)
5497                 return -EINVAL;
5498
5499         switch (stat_data->kind) {
5500         case KVM_STAT_VM:
5501                 r = kvm_clear_stat_per_vm(stat_data->kvm,
5502                                           stat_data->desc->desc.offset);
5503                 break;
5504         case KVM_STAT_VCPU:
5505                 r = kvm_clear_stat_per_vcpu(stat_data->kvm,
5506                                             stat_data->desc->desc.offset);
5507                 break;
5508         }
5509
5510         return r;
5511 }
5512
5513 static int kvm_stat_data_open(struct inode *inode, struct file *file)
5514 {
5515         __simple_attr_check_format("%llu\n", 0ull);
5516         return kvm_debugfs_open(inode, file, kvm_stat_data_get,
5517                                 kvm_stat_data_clear, "%llu\n");
5518 }
5519
5520 static const struct file_operations stat_fops_per_vm = {
5521         .owner = THIS_MODULE,
5522         .open = kvm_stat_data_open,
5523         .release = kvm_debugfs_release,
5524         .read = simple_attr_read,
5525         .write = simple_attr_write,
5526         .llseek = no_llseek,
5527 };
5528
5529 static int vm_stat_get(void *_offset, u64 *val)
5530 {
5531         unsigned offset = (long)_offset;
5532         struct kvm *kvm;
5533         u64 tmp_val;
5534
5535         *val = 0;
5536         mutex_lock(&kvm_lock);
5537         list_for_each_entry(kvm, &vm_list, vm_list) {
5538                 kvm_get_stat_per_vm(kvm, offset, &tmp_val);
5539                 *val += tmp_val;
5540         }
5541         mutex_unlock(&kvm_lock);
5542         return 0;
5543 }
5544
5545 static int vm_stat_clear(void *_offset, u64 val)
5546 {
5547         unsigned offset = (long)_offset;
5548         struct kvm *kvm;
5549
5550         if (val)
5551                 return -EINVAL;
5552
5553         mutex_lock(&kvm_lock);
5554         list_for_each_entry(kvm, &vm_list, vm_list) {
5555                 kvm_clear_stat_per_vm(kvm, offset);
5556         }
5557         mutex_unlock(&kvm_lock);
5558
5559         return 0;
5560 }
5561
5562 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, vm_stat_clear, "%llu\n");
5563 DEFINE_SIMPLE_ATTRIBUTE(vm_stat_readonly_fops, vm_stat_get, NULL, "%llu\n");
5564
5565 static int vcpu_stat_get(void *_offset, u64 *val)
5566 {
5567         unsigned offset = (long)_offset;
5568         struct kvm *kvm;
5569         u64 tmp_val;
5570
5571         *val = 0;
5572         mutex_lock(&kvm_lock);
5573         list_for_each_entry(kvm, &vm_list, vm_list) {
5574                 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val);
5575                 *val += tmp_val;
5576         }
5577         mutex_unlock(&kvm_lock);
5578         return 0;
5579 }
5580
5581 static int vcpu_stat_clear(void *_offset, u64 val)
5582 {
5583         unsigned offset = (long)_offset;
5584         struct kvm *kvm;
5585
5586         if (val)
5587                 return -EINVAL;
5588
5589         mutex_lock(&kvm_lock);
5590         list_for_each_entry(kvm, &vm_list, vm_list) {
5591                 kvm_clear_stat_per_vcpu(kvm, offset);
5592         }
5593         mutex_unlock(&kvm_lock);
5594
5595         return 0;
5596 }
5597
5598 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
5599                         "%llu\n");
5600 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");
5601
5602 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
5603 {
5604         struct kobj_uevent_env *env;
5605         unsigned long long created, active;
5606
5607         if (!kvm_dev.this_device || !kvm)
5608                 return;
5609
5610         mutex_lock(&kvm_lock);
5611         if (type == KVM_EVENT_CREATE_VM) {
5612                 kvm_createvm_count++;
5613                 kvm_active_vms++;
5614         } else if (type == KVM_EVENT_DESTROY_VM) {
5615                 kvm_active_vms--;
5616         }
5617         created = kvm_createvm_count;
5618         active = kvm_active_vms;
5619         mutex_unlock(&kvm_lock);
5620
5621         env = kzalloc(sizeof(*env), GFP_KERNEL_ACCOUNT);
5622         if (!env)
5623                 return;
5624
5625         add_uevent_var(env, "CREATED=%llu", created);
5626         add_uevent_var(env, "COUNT=%llu", active);
5627
5628         if (type == KVM_EVENT_CREATE_VM) {
5629                 add_uevent_var(env, "EVENT=create");
5630                 kvm->userspace_pid = task_pid_nr(current);
5631         } else if (type == KVM_EVENT_DESTROY_VM) {
5632                 add_uevent_var(env, "EVENT=destroy");
5633         }
5634         add_uevent_var(env, "PID=%d", kvm->userspace_pid);
5635
5636         if (!IS_ERR(kvm->debugfs_dentry)) {
5637                 char *tmp, *p = kmalloc(PATH_MAX, GFP_KERNEL_ACCOUNT);
5638
5639                 if (p) {
5640                         tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX);
5641                         if (!IS_ERR(tmp))
5642                                 add_uevent_var(env, "STATS_PATH=%s", tmp);
5643                         kfree(p);
5644                 }
5645         }
5646         /* No overflow checks needed, since we add at most 5 keys plus the terminator */
5647         env->envp[env->envp_idx++] = NULL;
5648         kobject_uevent_env(&kvm_dev.this_device->kobj, KOBJ_CHANGE, env->envp);
5649         kfree(env);
5650 }
5651
5652 static void kvm_init_debug(void)
5653 {
5654         const struct file_operations *fops;
5655         const struct _kvm_stats_desc *pdesc;
5656         int i;
5657
5658         kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
5659
5660         for (i = 0; i < kvm_vm_stats_header.num_desc; ++i) {
5661                 pdesc = &kvm_vm_stats_desc[i];
5662                 if (kvm_stats_debugfs_mode(pdesc) & 0222)
5663                         fops = &vm_stat_fops;
5664                 else
5665                         fops = &vm_stat_readonly_fops;
5666                 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
5667                                 kvm_debugfs_dir,
5668                                 (void *)(long)pdesc->desc.offset, fops);
5669         }
5670
5671         for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
5672                 pdesc = &kvm_vcpu_stats_desc[i];
5673                 if (kvm_stats_debugfs_mode(pdesc) & 0222)
5674                         fops = &vcpu_stat_fops;
5675                 else
5676                         fops = &vcpu_stat_readonly_fops;
5677                 debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
5678                                 kvm_debugfs_dir,
5679                                 (void *)(long)pdesc->desc.offset, fops);
5680         }
5681 }
5682
5683 static int kvm_suspend(void)
5684 {
5685         if (kvm_usage_count)
5686                 hardware_disable_nolock(NULL);
5687         return 0;
5688 }
5689
5690 static void kvm_resume(void)
5691 {
5692         if (kvm_usage_count) {
5693                 lockdep_assert_not_held(&kvm_count_lock);
5694                 hardware_enable_nolock(NULL);
5695         }
5696 }
5697
5698 static struct syscore_ops kvm_syscore_ops = {
5699         .suspend = kvm_suspend,
5700         .resume = kvm_resume,
5701 };
5702
5703 static inline
5704 struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
5705 {
5706         return container_of(pn, struct kvm_vcpu, preempt_notifier);
5707 }
5708
5709 static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
5710 {
5711         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
5712
5713         WRITE_ONCE(vcpu->preempted, false);
5714         WRITE_ONCE(vcpu->ready, false);
5715
5716         __this_cpu_write(kvm_running_vcpu, vcpu);
5717         kvm_arch_sched_in(vcpu, cpu);
5718         kvm_arch_vcpu_load(vcpu, cpu);
5719 }
5720
5721 static void kvm_sched_out(struct preempt_notifier *pn,
5722                           struct task_struct *next)
5723 {
5724         struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
5725
5726         if (current->on_rq) {
5727                 WRITE_ONCE(vcpu->preempted, true);
5728                 WRITE_ONCE(vcpu->ready, true);
5729         }
5730         kvm_arch_vcpu_put(vcpu);
5731         __this_cpu_write(kvm_running_vcpu, NULL);
5732 }
5733
5734 /**
5735  * kvm_get_running_vcpu - get the vcpu running on the current CPU.
5736  *
5737  * We can disable preemption just around the access to the per-CPU variable
5738  * and use the resolved vcpu pointer after re-enabling preemption: even if
5739  * the current thread migrates to another CPU, a later read of the per-CPU
5740  * value yields the same vcpu, because the preempt notifier handlers keep
5741  * the variable up to date on every CPU the thread runs on.
5742  */
5743 struct kvm_vcpu *kvm_get_running_vcpu(void)
5744 {
5745         struct kvm_vcpu *vcpu;
5746
5747         preempt_disable();
5748         vcpu = __this_cpu_read(kvm_running_vcpu);
5749         preempt_enable();
5750
5751         return vcpu;
5752 }
5753 EXPORT_SYMBOL_GPL(kvm_get_running_vcpu);
5754
5755 /**
5756  * kvm_get_running_vcpus - get the per-CPU array of currently running vcpus.
5757  */
5758 struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void)
5759 {
5760         return &kvm_running_vcpu;
5761 }
5762
5763 #ifdef CONFIG_GUEST_PERF_EVENTS
5764 static unsigned int kvm_guest_state(void)
5765 {
5766         struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
5767         unsigned int state;
5768
5769         if (!kvm_arch_pmi_in_guest(vcpu))
5770                 return 0;
5771
5772         state = PERF_GUEST_ACTIVE;
5773         if (!kvm_arch_vcpu_in_kernel(vcpu))
5774                 state |= PERF_GUEST_USER;
5775
5776         return state;
5777 }
5778
5779 static unsigned long kvm_guest_get_ip(void)
5780 {
5781         struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
5782
5783         /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
5784         if (WARN_ON_ONCE(!kvm_arch_pmi_in_guest(vcpu)))
5785                 return 0;
5786
5787         return kvm_arch_vcpu_get_ip(vcpu);
5788 }
5789
5790 static struct perf_guest_info_callbacks kvm_guest_cbs = {
5791         .state                  = kvm_guest_state,
5792         .get_ip                 = kvm_guest_get_ip,
5793         .handle_intel_pt_intr   = NULL,
5794 };
5795
5796 void kvm_register_perf_callbacks(unsigned int (*pt_intr_handler)(void))
5797 {
5798         kvm_guest_cbs.handle_intel_pt_intr = pt_intr_handler;
5799         perf_register_guest_info_callbacks(&kvm_guest_cbs);
5800 }
5801 void kvm_unregister_perf_callbacks(void)
5802 {
5803         perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
5804 }
5805 #endif
5806
5807 struct kvm_cpu_compat_check {
5808         void *opaque;
5809         int *ret;
5810 };
5811
5812 static void check_processor_compat(void *data)
5813 {
5814         struct kvm_cpu_compat_check *c = data;
5815
5816         *c->ret = kvm_arch_check_processor_compat(c->opaque);
5817 }
5818
5819 int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
5820                   struct module *module)
5821 {
5822         struct kvm_cpu_compat_check c;
5823         int r;
5824         int cpu;
5825
5826         r = kvm_arch_init(opaque);
5827         if (r)
5828                 goto out_fail;
5829
5830         /*
5831          * kvm_arch_init makes sure there's at most one caller
5832          * for architectures that support multiple implementations,
5833          * like Intel and AMD on x86.
5834          * kvm_arch_init must be called before kvm_irqfd_init to avoid
5835          * conflicts in case kvm is already set up for another implementation.
5836          */
5837         r = kvm_irqfd_init();
5838         if (r)
5839                 goto out_irqfd;
5840
5841         if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
5842                 r = -ENOMEM;
5843                 goto out_free_0;
5844         }
5845
5846         r = kvm_arch_hardware_setup(opaque);
5847         if (r < 0)
5848                 goto out_free_1;
5849
5850         c.ret = &r;
5851         c.opaque = opaque;
5852         for_each_online_cpu(cpu) {
5853                 smp_call_function_single(cpu, check_processor_compat, &c, 1);
5854                 if (r < 0)
5855                         goto out_free_2;
5856         }
5857
5858         r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "kvm/cpu:starting",
5859                                       kvm_starting_cpu, kvm_dying_cpu);
5860         if (r)
5861                 goto out_free_2;
5862         register_reboot_notifier(&kvm_reboot_notifier);
5863
5864         /* A kmem cache lets us meet the alignment requirements of fx_save. */
5865         if (!vcpu_align)
5866                 vcpu_align = __alignof__(struct kvm_vcpu);
5867         kvm_vcpu_cache =
5868                 kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align,
5869                                            SLAB_ACCOUNT,
5870                                            offsetof(struct kvm_vcpu, arch),
5871                                            offsetofend(struct kvm_vcpu, stats_id)
5872                                            - offsetof(struct kvm_vcpu, arch),
5873                                            NULL);
5874         if (!kvm_vcpu_cache) {
5875                 r = -ENOMEM;
5876                 goto out_free_3;
5877         }
5878
5879         for_each_possible_cpu(cpu) {
5880                 if (!alloc_cpumask_var_node(&per_cpu(cpu_kick_mask, cpu),
5881                                             GFP_KERNEL, cpu_to_node(cpu))) {
5882                         r = -ENOMEM;
5883                         goto out_free_4;
5884                 }
5885         }
5886
5887         r = kvm_async_pf_init();
5888         if (r)
5889                 goto out_free_4;
5890
5891         kvm_chardev_ops.owner = module;
5892
5893         r = misc_register(&kvm_dev);
5894         if (r) {
5895                 pr_err("kvm: misc device register failed\n");
5896                 goto out_unreg;
5897         }
5898
5899         register_syscore_ops(&kvm_syscore_ops);
5900
5901         kvm_preempt_ops.sched_in = kvm_sched_in;
5902         kvm_preempt_ops.sched_out = kvm_sched_out;
5903
5904         kvm_init_debug();
5905
5906         r = kvm_vfio_ops_init();
5907         WARN_ON(r);
5908
5909         return 0;
5910
5911 out_unreg:
5912         kvm_async_pf_deinit();
5913 out_free_4:
5914         for_each_possible_cpu(cpu)
5915                 free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
5916         kmem_cache_destroy(kvm_vcpu_cache);
5917 out_free_3:
5918         unregister_reboot_notifier(&kvm_reboot_notifier);
5919         cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
5920 out_free_2:
5921         kvm_arch_hardware_unsetup();
5922 out_free_1:
5923         free_cpumask_var(cpus_hardware_enabled);
5924 out_free_0:
5925         kvm_irqfd_exit();
5926 out_irqfd:
5927         kvm_arch_exit();
5928 out_fail:
5929         return r;
5930 }
5931 EXPORT_SYMBOL_GPL(kvm_init);
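
/*
 * Editor's sketch (not upstream code): how an architecture module wires
 * this up from its module_init.  The demo_vcpu type is invented and the
 * opaque pointer is left NULL for brevity; on x86, for instance, the
 * vendor modules pass their own init data plus the size and alignment
 * of their vcpu structure.  Compiled out with #if 0.
 */
#if 0
struct demo_vcpu {
        struct kvm_vcpu vcpu;   /* common state first; arch state follows */
};

static int __init demo_arch_module_init(void)
{
        return kvm_init(NULL /* arch-specific opaque */,
                        sizeof(struct demo_vcpu),
                        __alignof__(struct demo_vcpu), THIS_MODULE);
}
module_init(demo_arch_module_init);
#endif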
5932
5933 void kvm_exit(void)
5934 {
5935         int cpu;
5936
5937         debugfs_remove_recursive(kvm_debugfs_dir);
5938         misc_deregister(&kvm_dev);
5939         for_each_possible_cpu(cpu)
5940                 free_cpumask_var(per_cpu(cpu_kick_mask, cpu));
5941         kmem_cache_destroy(kvm_vcpu_cache);
5942         kvm_async_pf_deinit();
5943         unregister_syscore_ops(&kvm_syscore_ops);
5944         unregister_reboot_notifier(&kvm_reboot_notifier);
5945         cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING);
5946         on_each_cpu(hardware_disable_nolock, NULL, 1);
5947         kvm_arch_hardware_unsetup();
5948         kvm_arch_exit();
5949         kvm_irqfd_exit();
5950         free_cpumask_var(cpus_hardware_enabled);
5951         kvm_vfio_ops_exit();
5952 }
5953 EXPORT_SYMBOL_GPL(kvm_exit);
5954
5955 struct kvm_vm_worker_thread_context {
5956         struct kvm *kvm;
5957         struct task_struct *parent;
5958         struct completion init_done;
5959         kvm_vm_thread_fn_t thread_fn;
5960         uintptr_t data;
5961         int err;
5962 };
5963
5964 static int kvm_vm_worker_thread(void *context)
5965 {
5966         /*
5967          * The init_context is allocated on the stack of the parent thread, so
5968          * we must make a local copy of anything needed beyond initialization.
5969          */
5970         struct kvm_vm_worker_thread_context *init_context = context;
5971         struct task_struct *parent;
5972         struct kvm *kvm = init_context->kvm;
5973         kvm_vm_thread_fn_t thread_fn = init_context->thread_fn;
5974         uintptr_t data = init_context->data;
5975         int err;
5976
5977         err = kthread_park(current);
5978         /* kthread_park(current) is never supposed to return an error */
5979         WARN_ON(err != 0);
5980         if (err)
5981                 goto init_complete;
5982
5983         err = cgroup_attach_task_all(init_context->parent, current);
5984         if (err) {
5985                 kvm_err("%s: cgroup_attach_task_all failed with err %d\n",
5986                         __func__, err);
5987                 goto init_complete;
5988         }
5989
5990         set_user_nice(current, task_nice(init_context->parent));
5991
5992 init_complete:
5993         init_context->err = err;
5994         complete(&init_context->init_done);
5995         init_context = NULL;
5996
5997         if (err)
5998                 goto out;
5999
6000         /* Wait to be woken up by the spawner before proceeding. */
6001         kthread_parkme();
6002
6003         if (!kthread_should_stop())
6004                 err = thread_fn(kvm, data);
6005
6006 out:
6007         /*
6008          * Move the kthread back to its original cgroup to prevent it from
6009          * lingering in the cgroup of the VM process after the latter
6010          * finishes its execution.
6011          *
6012          * kthread_stop() waits on the 'exited' completion condition, which is
6013          * set in exit_mm(), via mm_release(), in do_exit(). However, the
6014          * kthread is removed from the cgroup in cgroup_exit(), which is
6015          * called after exit_mm(). This causes kthread_stop() to return
6016          * before the kthread actually quits the cgroup.
6017          */
6018         rcu_read_lock();
6019         parent = rcu_dereference(current->real_parent);
6020         get_task_struct(parent);
6021         rcu_read_unlock();
6022         cgroup_attach_task_all(parent, current);
6023         put_task_struct(parent);
6024
6025         return err;
6026 }
6027
6028 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
6029                                 uintptr_t data, const char *name,
6030                                 struct task_struct **thread_ptr)
6031 {
6032         struct kvm_vm_worker_thread_context init_context = {};
6033         struct task_struct *thread;
6034
6035         *thread_ptr = NULL;
6036         init_context.kvm = kvm;
6037         init_context.parent = current;
6038         init_context.thread_fn = thread_fn;
6039         init_context.data = data;
6040         init_completion(&init_context.init_done);
6041
6042         thread = kthread_run(kvm_vm_worker_thread, &init_context,
6043                              "%s-%d", name, task_pid_nr(current));
6044         if (IS_ERR(thread))
6045                 return PTR_ERR(thread);
6046
6047         /* kthread_run is never supposed to return NULL */
6048         WARN_ON(thread == NULL);
6049
6050         wait_for_completion(&init_context.init_done);
6051
6052         if (!init_context.err)
6053                 *thread_ptr = thread;
6054
6055         return init_context.err;
6056 }
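
/*
 * Editor's sketch (not upstream code): spawning and later stopping a
 * per-VM housekeeping thread.  The worker parks itself after init, so
 * the caller kicks it with kthread_unpark(); this mirrors how the x86
 * NX-hugepage recovery thread uses the API.  The demo_* names are
 * invented.  Compiled out with #if 0.
 */
#if 0
static int demo_worker_fn(struct kvm *kvm, uintptr_t data)
{
        while (!kthread_should_stop()) {
                /* ... periodic per-VM work ... */
                schedule_timeout_interruptible(HZ);
        }
        return 0;
}

static int demo_start_worker(struct kvm *kvm, struct task_struct **thread)
{
        int err = kvm_vm_create_worker_thread(kvm, demo_worker_fn, 0,
                                              "kvm-demo-worker", thread);

        if (!err)
                kthread_unpark(*thread);        /* wake it past kthread_parkme() */
        return err;
}

/* Teardown: kthread_stop(*thread) both unparks and stops the worker. */
#endif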