/* arch/x86/kernel/kvm.c */

/*
 * KVM paravirt_ops implementation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright (C) 2007, Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright IBM Corporation, 2007
 *   Authors: Anthony Liguori <aliguori@us.ibm.com>
 */

#include <linux/context_tracking.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kvm_para.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hardirq.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/hash.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/kprobes.h>
#include <linux/debugfs.h>
#include <linux/nmi.h>
#include <linux/swait.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/apic.h>
#include <asm/apicdef.h>
#include <asm/hypervisor.h>
#include <asm/kvm_guest.h>

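/*
 * Boot-time opt-outs for the individual paravirt features: async page
 * faults, steal-time accounting and the kvmclock vsyscall fast path can
 * each be disabled on the kernel command line.
 */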
static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
{
        kvmapf = 0;
        return 0;
}

early_param("no-kvmapf", parse_no_kvmapf);

static int steal_acc = 1;
static int parse_no_stealacc(char *arg)
{
        steal_acc = 0;
        return 0;
}

early_param("no-steal-acc", parse_no_stealacc);

static int kvmclock_vsyscall = 1;
static int parse_no_kvmclock_vsyscall(char *arg)
{
        kvmclock_vsyscall = 0;
        return 0;
}

early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);

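/*
 * Per-CPU regions shared with the host: the host writes the async page
 * fault reason into apf_reason and steal-time accounting data into
 * steal_time once the guest has registered their physical addresses via
 * MSR_KVM_ASYNC_PF_EN and MSR_KVM_STEAL_TIME below.
 */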
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

/*
 * No need for any "IO delay" on KVM
 */
static void kvm_io_delay(void)
{
}

#define KVM_TASK_SLEEP_HASHBITS 8
#define KVM_TASK_SLEEP_HASHSIZE (1<<KVM_TASK_SLEEP_HASHBITS)

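/*
 * Tasks that went to sleep on a 'page not present' async PF are parked
 * in one of these hash buckets, keyed by the token the host delivered
 * with the fault.  The later 'page ready' notification carries the same
 * token and is used to find and wake the sleeper.
 */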
struct kvm_task_sleep_node {
        struct hlist_node link;
        struct swait_queue_head wq;
        u32 token;
        int cpu;
        bool halted;
};

static struct kvm_task_sleep_head {
        raw_spinlock_t lock;
        struct hlist_head list;
} async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE];

static struct kvm_task_sleep_node *_find_apf_task(struct kvm_task_sleep_head *b,
                                                  u32 token)
{
        struct hlist_node *p;

        hlist_for_each(p, &b->list) {
                struct kvm_task_sleep_node *n =
                        hlist_entry(p, typeof(*n), link);
                if (n->token == token)
                        return n;
        }

        return NULL;
}

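/*
 * Called from the 'page not present' half of an async page fault.  If
 * the 'page ready' wakeup was already delivered, a dummy node is found
 * and removed and we return immediately.  Otherwise we queue ourselves
 * and either schedule() or, when sleeping is not allowed (idle task,
 * nested preempt disable, or inside an RCU read-side critical section),
 * halt in place until the wakeup arrives.
 */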
void kvm_async_pf_task_wait(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node n, *e;
        DECLARE_SWAITQUEUE(wait);

        rcu_irq_enter();

        raw_spin_lock(&b->lock);
        e = _find_apf_task(b, token);
        if (e) {
                /* dummy entry exists -> wake up was delivered ahead of PF */
                hlist_del(&e->link);
                kfree(e);
                raw_spin_unlock(&b->lock);

                rcu_irq_exit();
                return;
        }

        n.token = token;
        n.cpu = smp_processor_id();
        n.halted = is_idle_task(current) || preempt_count() > 1 ||
                   rcu_preempt_depth();
        init_swait_queue_head(&n.wq);
        hlist_add_head(&n.link, &b->list);
        raw_spin_unlock(&b->lock);

        for (;;) {
                if (!n.halted)
                        prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
                if (hlist_unhashed(&n.link))
                        break;

                rcu_irq_exit();

                if (!n.halted) {
                        local_irq_enable();
                        schedule();
                        local_irq_disable();
                } else {
                        /*
                         * We cannot reschedule. So halt.
                         */
                        native_safe_halt();
                        local_irq_disable();
                }

                rcu_irq_enter();
        }
        if (!n.halted)
                finish_swait(&n.wq, &wait);

        rcu_irq_exit();
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);

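/*
 * Wake one waiter: halted vCPUs get a reschedule IPI, sleeping tasks
 * are woken via their swait queue.  The caller must hold the bucket
 * lock.
 */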
static void apf_task_wake_one(struct kvm_task_sleep_node *n)
{
        hlist_del_init(&n->link);
        if (n->halted)
                smp_send_reschedule(n->cpu);
        else if (swq_has_sleeper(&n->wq))
                swake_up(&n->wq);
}

static void apf_task_wake_all(void)
{
        int i;

        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) {
                struct hlist_node *p, *next;
                struct kvm_task_sleep_head *b = &async_pf_sleepers[i];
                raw_spin_lock(&b->lock);
                hlist_for_each_safe(p, next, &b->list) {
                        struct kvm_task_sleep_node *n =
                                hlist_entry(p, typeof(*n), link);
                        if (n->cpu == smp_processor_id())
                                apf_task_wake_one(n);
                }
                raw_spin_unlock(&b->lock);
        }
}

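/*
 * 'Page ready' notification.  A token of ~0 is a broadcast from the
 * host: wake every waiter that went to sleep on this CPU.
 */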
void kvm_async_pf_task_wake(u32 token)
{
        u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS);
        struct kvm_task_sleep_head *b = &async_pf_sleepers[key];
        struct kvm_task_sleep_node *n;

        if (token == ~0) {
                apf_task_wake_all();
                return;
        }

again:
        raw_spin_lock(&b->lock);
        n = _find_apf_task(b, token);
        if (!n) {
                /*
                 * async PF was not yet handled.
                 * Add a dummy entry for the token.
                 */
                n = kzalloc(sizeof(*n), GFP_ATOMIC);
                if (!n) {
                        /*
                         * Allocation failed! Busy wait while another CPU
                         * handles the async PF.
                         */
                        raw_spin_unlock(&b->lock);
                        cpu_relax();
                        goto again;
                }
                n->token = token;
                n->cpu = smp_processor_id();
                init_swait_queue_head(&n->wq);
                hlist_add_head(&n->link, &b->list);
        } else
                apf_task_wake_one(n);
        raw_spin_unlock(&b->lock);
        return;
}
EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);

u32 kvm_read_and_reset_pf_reason(void)
{
        u32 reason = 0;

        if (__this_cpu_read(apf_reason.enabled)) {
                reason = __this_cpu_read(apf_reason.reason);
                __this_cpu_write(apf_reason.reason, 0);
        }

        return reason;
}
EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason);
NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason);

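/*
 * Replacement #PF handler, installed via kvm_apf_trap_init() below.  A
 * zero reason means this was an ordinary page fault and is handed to
 * do_page_fault(); otherwise the host is telling us to either park the
 * current task or wake a previously parked one.  The faulting "address"
 * in CR2 is reused to carry the async PF token.
 */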
dotraplinkage void
do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
{
        enum ctx_state prev_state;

        switch (kvm_read_and_reset_pf_reason()) {
        default:
                do_page_fault(regs, error_code);
                break;
        case KVM_PV_REASON_PAGE_NOT_PRESENT:
                /* page is swapped out by the host. */
                prev_state = exception_enter();
                kvm_async_pf_task_wait((u32)read_cr2());
                exception_exit(prev_state);
                break;
        case KVM_PV_REASON_PAGE_READY:
                rcu_irq_enter();
                kvm_async_pf_task_wake((u32)read_cr2());
                rcu_irq_exit();
                break;
        }
}
NOKPROBE_SYMBOL(do_async_page_fault);

static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";

        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif
}

static void kvm_register_steal_time(void)
{
        int cpu = smp_processor_id();
        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);

        if (!has_steal_clock)
                return;

        wrmsrl(MSR_KVM_STEAL_TIME, (slow_virt_to_phys(st) | KVM_MSR_ENABLED));
        pr_info("kvm-stealtime: cpu %d, msr %llx\n",
                cpu, (unsigned long long) slow_virt_to_phys(st));
}

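/*
 * With KVM_FEATURE_PV_EOI the host can indicate, via a bit in this
 * shared word, that a pending EOI does not need an APIC register write;
 * the guest then completes the EOI by clearing the bit, avoiding the
 * exit a register write would cause.  The register write below is only
 * the fallback for when the host did not set the bit.
 */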
static DEFINE_PER_CPU(unsigned long, kvm_apic_eoi) = KVM_PV_EOI_DISABLED;

static notrace void kvm_guest_apic_eoi_write(u32 reg, u32 val)
{
        /*
         * This relies on __test_and_clear_bit to modify the memory
         * in a way that is atomic with respect to the local CPU.
         * The hypervisor only accesses this memory from the local CPU so
         * there's no need for lock or memory barriers.
         * An optimization barrier is implied in apic write.
         */
        if (__test_and_clear_bit(KVM_PV_EOI_BIT, this_cpu_ptr(&kvm_apic_eoi)))
                return;
        apic->native_eoi_write(APIC_EOI, APIC_EOI_ACK);
}

static void kvm_guest_cpu_init(void)
{
        if (!kvm_para_available())
                return;

        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF) && kvmapf) {
                u64 pa = slow_virt_to_phys(this_cpu_ptr(&apf_reason));

#ifdef CONFIG_PREEMPT
                pa |= KVM_ASYNC_PF_SEND_ALWAYS;
#endif
                pa |= KVM_ASYNC_PF_ENABLED;

                /* Async page fault support for L1 hypervisor is optional */
                if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN,
                        (pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0)
                        wrmsrl(MSR_KVM_ASYNC_PF_EN, pa);
                __this_cpu_write(apf_reason.enabled, 1);
                printk(KERN_INFO "KVM setup async PF for cpu %d\n",
                       smp_processor_id());
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) {
                unsigned long pa;
                /* Size alignment is implied but just to make it explicit. */
                BUILD_BUG_ON(__alignof__(kvm_apic_eoi) < 4);
                __this_cpu_write(kvm_apic_eoi, 0);
                pa = slow_virt_to_phys(this_cpu_ptr(&kvm_apic_eoi))
                        | KVM_MSR_ENABLED;
                wrmsrl(MSR_KVM_PV_EOI_EN, pa);
        }

        if (has_steal_clock)
                kvm_register_steal_time();
}

static void kvm_pv_disable_apf(void)
{
        if (!__this_cpu_read(apf_reason.enabled))
                return;

        wrmsrl(MSR_KVM_ASYNC_PF_EN, 0);
        __this_cpu_write(apf_reason.enabled, 0);

        printk(KERN_INFO "Unregister pv shared memory for cpu %d\n",
               smp_processor_id());
}

static void kvm_pv_guest_cpu_reboot(void *unused)
{
        /*
         * We disable PV EOI before we load a new kernel by kexec, since
         * MSR_KVM_PV_EOI_EN stores a pointer into the old kernel's memory.
         * The new kernel can re-enable it when it boots.
         */
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        kvm_disable_steal_time();
}

static int kvm_pv_reboot_notify(struct notifier_block *nb,
                                unsigned long code, void *unused)
{
        if (code == SYS_RESTART)
                on_each_cpu(kvm_pv_guest_cpu_reboot, NULL, 1);
        return NOTIFY_DONE;
}

static struct notifier_block kvm_pv_reboot_nb = {
        .notifier_call = kvm_pv_reboot_notify,
};

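/*
 * The host bumps steal_time.version before and after updating the
 * record, so an odd version, or a version that changed across the
 * reads, means we raced with an update and must retry (a seqlock-style
 * lockless read).
 */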
static u64 kvm_steal_clock(int cpu)
{
        u64 steal;
        struct kvm_steal_time *src;
        int version;

        src = &per_cpu(steal_time, cpu);
        do {
                version = src->version;
                virt_rmb();
                steal = src->steal;
                virt_rmb();
        } while ((version & 1) || (version != src->version));

        return steal;
}

void kvm_disable_steal_time(void)
{
        if (!has_steal_clock)
                return;

        wrmsr(MSR_KVM_STEAL_TIME, 0, 0);
}

#ifdef CONFIG_SMP
static void __init kvm_smp_prepare_boot_cpu(void)
{
        kvm_guest_cpu_init();
        native_smp_prepare_boot_cpu();
        kvm_spinlock_init();
}

static void kvm_guest_cpu_offline(void)
{
        kvm_disable_steal_time();
        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                wrmsrl(MSR_KVM_PV_EOI_EN, 0);
        kvm_pv_disable_apf();
        apf_task_wake_all();
}

static int kvm_cpu_online(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_init();
        local_irq_enable();
        return 0;
}

static int kvm_cpu_down_prepare(unsigned int cpu)
{
        local_irq_disable();
        kvm_guest_cpu_offline();
        local_irq_enable();
        return 0;
}
#endif

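/*
 * Point the #PF gate at the async_page_fault entry stub (which calls
 * do_async_page_fault() above) instead of the stock page_fault entry.
 */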
static void __init kvm_apf_trap_init(void)
{
        update_intr_gate(X86_TRAP_PF, async_page_fault);
}

void __init kvm_guest_init(void)
{
        int i;

        if (!kvm_para_available())
                return;

        paravirt_ops_setup();
        register_reboot_notifier(&kvm_pv_reboot_nb);
        for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++)
                raw_spin_lock_init(&async_pf_sleepers[i].lock);
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF))
                x86_init.irqs.trap_init = kvm_apf_trap_init;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                has_steal_clock = 1;
                pv_time_ops.steal_clock = kvm_steal_clock;
        }

        if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
                apic_set_eoi_write(kvm_guest_apic_eoi_write);

        if (kvmclock_vsyscall)
                kvm_setup_vsyscall_timeinfo();

#ifdef CONFIG_SMP
        smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
        if (cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/kvm:online",
                                      kvm_cpu_online, kvm_cpu_down_prepare) < 0)
                pr_err("kvm_guest: Failed to install cpu hotplug callbacks\n");
#else
        kvm_guest_cpu_init();
#endif

        /*
         * Hard lockup detection is enabled by default. Disable it, as guests
         * can get false positives too easily, for example if the host is
         * overcommitted.
         */
        hardlockup_detector_disable();
}

static noinline uint32_t __kvm_cpuid_base(void)
{
        if (boot_cpu_data.cpuid_level < 0)
                return 0;       /* So we don't blow up on old processors */

        if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
                return hypervisor_cpuid_base("KVMKVMKVM\0\0\0", 0);

        return 0;
}

static inline uint32_t kvm_cpuid_base(void)
{
        static int kvm_cpuid_base = -1;

        if (kvm_cpuid_base == -1)
                kvm_cpuid_base = __kvm_cpuid_base();

        return kvm_cpuid_base;
}

bool kvm_para_available(void)
{
        return kvm_cpuid_base() != 0;
}
EXPORT_SYMBOL_GPL(kvm_para_available);

unsigned int kvm_arch_para_features(void)
{
        return cpuid_eax(kvm_cpuid_base() | KVM_CPUID_FEATURES);
}

static uint32_t __init kvm_detect(void)
{
        return kvm_cpuid_base();
}

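/*
 * Registered with the generic x86 hypervisor detection code.
 * kvm_detect() returns the CPUID leaf at which the "KVMKVMKVM"
 * signature was found, or zero when not running on KVM.
 */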
const struct hypervisor_x86 x86_hyper_kvm __refconst = {
        .name                   = "KVM",
        .detect                 = kvm_detect,
        .x2apic_available       = kvm_para_available,
};
EXPORT_SYMBOL_GPL(x86_hyper_kvm);

static __init int activate_jump_labels(void)
{
        if (has_steal_clock) {
                static_key_slow_inc(&paravirt_steal_enabled);
                if (steal_acc)
                        static_key_slow_inc(&paravirt_steal_rq_enabled);
        }

        return 0;
}
arch_initcall(activate_jump_labels);

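/*
 * Paravirtualized spinlocks: with KVM_FEATURE_PV_UNHALT, a contended
 * waiter halts its vCPU (kvm_wait) instead of spinning, and the lock
 * holder wakes it with a KVM_HC_KICK_CPU hypercall (kvm_kick_cpu).
 */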
#ifdef CONFIG_PARAVIRT_SPINLOCKS

/* Kick a cpu by its apicid. Used to wake up a halted vcpu */
static void kvm_kick_cpu(int cpu)
{
        int apicid;
        unsigned long flags = 0;

        apicid = per_cpu(x86_cpu_to_apicid, cpu);
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
}

#include <asm/qspinlock.h>

static void kvm_wait(u8 *ptr, u8 val)
{
        unsigned long flags;

        if (in_nmi())
                return;

        local_irq_save(flags);

        if (READ_ONCE(*ptr) != val)
                goto out;

        /*
         * Halt until it's our turn and we get kicked. Note that we do a
         * safe halt for the IRQ-enabled case, to avoid hanging if the
         * lock info is overwritten in the IRQ spinlock slowpath and no
         * spurious interrupt occurs to save us.
         */
        if (arch_irqs_disabled_flags(flags))
                halt();
        else
                safe_halt();

out:
        local_irq_restore(flags);
}

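/*
 * vcpu_is_preempted() lets lock waiters and the scheduler avoid
 * spinning on a vCPU the host has scheduled out; the host keeps the
 * 'preempted' flag in the shared steal_time record up to date.
 */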
#ifdef CONFIG_X86_32
__visible bool __kvm_vcpu_is_preempted(long cpu)
{
        struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

        return !!src->preempted;
}
PV_CALLEE_SAVE_REGS_THUNK(__kvm_vcpu_is_preempted);

#else

#include <asm/asm-offsets.h>

extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);

/*
 * Hand-optimized version for x86-64 to avoid 8 64-bit register saving and
 * restoring to/from the stack.
 */
asm(
".pushsection .text;"
".global __raw_callee_save___kvm_vcpu_is_preempted;"
".type __raw_callee_save___kvm_vcpu_is_preempted, @function;"
"__raw_callee_save___kvm_vcpu_is_preempted:"
"movq   __per_cpu_offset(,%rdi,8), %rax;"
"cmpb   $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax);"
"setne  %al;"
"ret;"
".popsection");

#endif

/*
 * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
 */
void __init kvm_spinlock_init(void)
{
        if (!kvm_para_available())
                return;
        /* Does host kernel support KVM_FEATURE_PV_UNHALT? */
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;

        __pv_init_lock_hash();
        pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_lock_ops.wait = kvm_wait;
        pv_lock_ops.kick = kvm_kick_cpu;

        if (kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
                pv_lock_ops.vcpu_is_preempted =
                        PV_CALLEE_SAVE(__kvm_vcpu_is_preempted);
        }
}

#endif  /* CONFIG_PARAVIRT_SPINLOCKS */