// SPDX-License-Identifier: GPL-2.0-only

/*
 * Local APIC virtualization
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2007 Novell
 * Copyright (C) 2007 Intel
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Dor Laor <dor.laor@qumranet.com>
 *   Gregory Haskins <ghaskins@novell.com>
 *   Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *
 * Based on Xen 3.1 code, Copyright (c) 2004, Intel Corporation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/export.h>
#include <linux/math64.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/page.h>
#include <asm/current.h>
#include <asm/apicdef.h>
#include <asm/delay.h>
#include <linux/atomic.h>
#include <linux/jump_label.h>
#include "kvm_cache_regs.h"
#include "irq.h"
#include "ioapic.h"
#include "trace.h"
#include "x86.h"
#include "cpuid.h"
#include "hyperv.h"
#include "smm.h"

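/*
 * On 32-bit builds a 64-bit '%' would be compiled into a libgcc helper call
 * that the kernel does not provide, so open-code the modulo in terms of
 * div64_u64() from <linux/math64.h>; 64-bit builds can use '%' directly.
 */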
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif

/* 14 is the version for Xeon and Pentium 8.4.8 */
#define APIC_VERSION                    0x14UL
#define LAPIC_MMIO_LENGTH               (1 << 12)
/* The following defines are not in apicdef.h */
#define MAX_APIC_VECTOR                 256
#define APIC_VECTORS_PER_REG            32

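/*
 * To hide VM-entry latency from the guest, the hrtimer backing the APIC
 * timer is expired "timer_advance_ns" early and the vCPU busy-waits for the
 * remainder.  When dynamic adjustment is enabled, the advance is, roughly,
 * recalibrated from the observed early/late delta in steps of
 * 1/LAPIC_TIMER_ADVANCE_ADJUST_STEP to damp jitter, and is reset to
 * LAPIC_TIMER_ADVANCE_NS_INIT if it drifts past LAPIC_TIMER_ADVANCE_NS_MAX
 * (see adjust_lapic_timer_advance()).
 */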
static bool lapic_timer_advance_dynamic __read_mostly;
#define LAPIC_TIMER_ADVANCE_ADJUST_MIN  100     /* clock cycles */
#define LAPIC_TIMER_ADVANCE_ADJUST_MAX  10000   /* clock cycles */
#define LAPIC_TIMER_ADVANCE_NS_INIT     1000
#define LAPIC_TIMER_ADVANCE_NS_MAX      5000
/* step-by-step approximation to mitigate fluctuation */
#define LAPIC_TIMER_ADVANCE_ADJUST_STEP 8
static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data);
static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data);

static inline void __kvm_lapic_set_reg(char *regs, int reg_off, u32 val)
{
        *((u32 *) (regs + reg_off)) = val;
}

static inline void kvm_lapic_set_reg(struct kvm_lapic *apic, int reg_off, u32 val)
{
        __kvm_lapic_set_reg(apic->regs, reg_off, val);
}

static __always_inline u64 __kvm_lapic_get_reg64(char *regs, int reg)
{
        BUILD_BUG_ON(reg != APIC_ICR);
        return *((u64 *) (regs + reg));
}

static __always_inline u64 kvm_lapic_get_reg64(struct kvm_lapic *apic, int reg)
{
        return __kvm_lapic_get_reg64(apic->regs, reg);
}

static __always_inline void __kvm_lapic_set_reg64(char *regs, int reg, u64 val)
{
        BUILD_BUG_ON(reg != APIC_ICR);
        *((u64 *) (regs + reg)) = val;
}

static __always_inline void kvm_lapic_set_reg64(struct kvm_lapic *apic,
                                                int reg, u64 val)
{
        __kvm_lapic_set_reg64(apic->regs, reg, val);
}

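/*
 * The 256 possible vectors are spread across eight 32-bit registers, each
 * aligned on a 16-byte boundary in the virtual APIC page.  VEC_POS() is the
 * bit index within a register (vec & 31) and REG_POS() the byte offset of
 * the register ((vec >> 5) << 4), e.g. vector 0x31 lives at bit 17 of the
 * register at offset 0x10.
 */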
static inline int apic_test_vector(int vec, void *bitmap)
{
        return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

bool kvm_apic_pending_eoi(struct kvm_vcpu *vcpu, int vector)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return apic_test_vector(vector, apic->regs + APIC_ISR) ||
                apic_test_vector(vector, apic->regs + APIC_IRR);
}

static inline int __apic_test_and_set_vector(int vec, void *bitmap)
{
        return __test_and_set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

static inline int __apic_test_and_clear_vector(int vec, void *bitmap)
{
        return __test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
}

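/*
 * Deferred static keys that count APICs which are hardware- or software-
 * disabled.  While every APIC is enabled (the common case), the checks in
 * apic_enabled() patch down to no-ops; the HZ rate limit defers flipping a
 * key back so a guest rapidly toggling its APIC doesn't cause constant code
 * patching.
 */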
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_hw_disabled, HZ);
__read_mostly DEFINE_STATIC_KEY_DEFERRED_FALSE(apic_sw_disabled, HZ);

static inline int apic_enabled(struct kvm_lapic *apic)
{
        return kvm_apic_sw_enabled(apic) && kvm_apic_hw_enabled(apic);
}

#define LVT_MASK        \
        (APIC_LVT_MASKED | APIC_SEND_PENDING | APIC_VECTOR_MASK)

#define LINT_MASK       \
        (LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
         APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)

static inline u32 kvm_x2apic_id(struct kvm_lapic *apic)
{
        return apic->vcpu->vcpu_id;
}

static bool kvm_can_post_timer_interrupt(struct kvm_vcpu *vcpu)
{
        return pi_inject_timer && kvm_vcpu_apicv_active(vcpu) &&
                (kvm_mwait_in_guest(vcpu->kvm) || kvm_hlt_in_guest(vcpu->kvm));
}

bool kvm_can_use_hv_timer(struct kvm_vcpu *vcpu)
{
        return kvm_x86_ops.set_hv_timer
               && !(kvm_mwait_in_guest(vcpu->kvm) ||
                    kvm_can_post_timer_interrupt(vcpu));
}

static bool kvm_use_posted_timer_interrupt(struct kvm_vcpu *vcpu)
{
        return kvm_can_post_timer_interrupt(vcpu) && vcpu->mode == IN_GUEST_MODE;
}

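/*
 * In x2APIC mode the logical destination is derived from the 32-bit APIC
 * ID: bits 31:16 hold the cluster (ID >> 4) and bits 15:0 a one-hot member
 * bit (1 << (ID & 0xf)), so e.g. ID 0x23 yields an LDR of 0x00020008.
 */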
static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
{
        return ((id >> 4) << 16) | (1 << (id & 0xf));
}

static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
                u32 dest_id, struct kvm_lapic ***cluster, u16 *mask)
{
        switch (map->logical_mode) {
        case KVM_APIC_MODE_SW_DISABLED:
                /* Arbitrarily use the flat map so that @cluster isn't NULL. */
                *cluster = map->xapic_flat_map;
                *mask = 0;
                return true;
        case KVM_APIC_MODE_X2APIC: {
                u32 offset = (dest_id >> 16) * 16;
                u32 max_apic_id = map->max_apic_id;

                if (offset <= max_apic_id) {
                        u8 cluster_size = min(max_apic_id - offset + 1, 16U);

                        offset = array_index_nospec(offset, map->max_apic_id + 1);
                        *cluster = &map->phys_map[offset];
                        *mask = dest_id & (0xffff >> (16 - cluster_size));
                } else {
                        *mask = 0;
                }

                return true;
                }
        case KVM_APIC_MODE_XAPIC_FLAT:
                *cluster = map->xapic_flat_map;
                *mask = dest_id & 0xff;
                return true;
        case KVM_APIC_MODE_XAPIC_CLUSTER:
                *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
                *mask = dest_id & 0xf;
                return true;
        case KVM_APIC_MODE_MAP_DISABLED:
                return false;
        default:
                WARN_ON_ONCE(1);
                return false;
        }
}

static void kvm_apic_map_free(struct rcu_head *rcu)
{
        struct kvm_apic_map *map = container_of(rcu, struct kvm_apic_map, rcu);

        kvfree(map);
}

static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
                                    struct kvm_vcpu *vcpu,
                                    bool *xapic_id_mismatch)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 x2apic_id = kvm_x2apic_id(apic);
        u32 xapic_id = kvm_xapic_id(apic);
        u32 physical_id;

        /*
         * For simplicity, KVM always allocates enough space for all possible
         * xAPIC IDs.  Yell, but don't kill the VM, as KVM can continue on
         * without the optimized map.
         */
        if (WARN_ON_ONCE(xapic_id > new->max_apic_id))
                return -EINVAL;

        /*
         * Bail if a vCPU was added and/or enabled its APIC between allocating
         * the map and doing the actual calculations for the map.  Note, KVM
         * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if
         * the compiler decides to reload x2apic_id after this check.
         */
        if (x2apic_id > new->max_apic_id)
                return -E2BIG;

        /*
         * Deliberately truncate the vCPU ID when detecting a mismatched APIC
         * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a
         * 32-bit value.  Any unwanted aliasing due to truncation will be
         * detected below.
         */
        if (!apic_x2apic_mode(apic) && xapic_id != (u8)vcpu->vcpu_id)
                *xapic_id_mismatch = true;

        /*
         * Apply KVM's hotplug hack if userspace has enabled 32-bit APIC IDs.
         * Allow sending events to vCPUs by their x2APIC ID even if the target
         * vCPU is in legacy xAPIC mode, and silently ignore aliased xAPIC IDs
         * (the x2APIC ID is truncated to 8 bits, causing IDs > 0xff to wrap
         * and collide).
         *
         * Honor the architectural (and KVM's non-optimized) behavior if
         * userspace has not enabled 32-bit x2APIC IDs.  Each APIC is supposed
         * to process messages independently.  If multiple vCPUs have the same
         * effective APIC ID, e.g. due to the x2APIC wrap or because the guest
         * manually modified its xAPIC IDs, events targeting that ID are
         * supposed to be recognized by all vCPUs with said ID.
         */
        if (vcpu->kvm->arch.x2apic_format) {
                /* See also kvm_apic_match_physical_addr(). */
                if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
                        new->phys_map[x2apic_id] = apic;

                if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
                        new->phys_map[xapic_id] = apic;
        } else {
                /*
                 * Disable the optimized map if the physical APIC ID is already
                 * mapped, i.e. is aliased to multiple vCPUs.  The optimized
                 * map requires a strict 1:1 mapping between IDs and vCPUs.
                 */
                if (apic_x2apic_mode(apic))
                        physical_id = x2apic_id;
                else
                        physical_id = xapic_id;

                if (new->phys_map[physical_id])
                        return -EINVAL;

                new->phys_map[physical_id] = apic;
        }

        return 0;
}

static void kvm_recalculate_logical_map(struct kvm_apic_map *new,
                                        struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        enum kvm_apic_logical_mode logical_mode;
        struct kvm_lapic **cluster;
        u16 mask;
        u32 ldr;

        if (new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
                return;

        if (!kvm_apic_sw_enabled(apic))
                return;

        ldr = kvm_lapic_get_reg(apic, APIC_LDR);
        if (!ldr)
                return;

        if (apic_x2apic_mode(apic)) {
                logical_mode = KVM_APIC_MODE_X2APIC;
        } else {
                ldr = GET_APIC_LOGICAL_ID(ldr);
                if (kvm_lapic_get_reg(apic, APIC_DFR) == APIC_DFR_FLAT)
                        logical_mode = KVM_APIC_MODE_XAPIC_FLAT;
                else
                        logical_mode = KVM_APIC_MODE_XAPIC_CLUSTER;
        }

        /*
         * To optimize logical mode delivery, all software-enabled APICs must
         * be configured for the same mode.
         */
        if (new->logical_mode == KVM_APIC_MODE_SW_DISABLED) {
                new->logical_mode = logical_mode;
        } else if (new->logical_mode != logical_mode) {
                new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
                return;
        }

        /*
         * In x2APIC mode, the LDR is read-only and derived directly from the
         * x2APIC ID, thus is guaranteed to be addressable.  KVM reuses
         * kvm_apic_map.phys_map to optimize logical mode x2APIC interrupts by
         * reversing the LDR calculation to get the cluster of APICs, i.e. no
         * additional work is required.
         */
        if (apic_x2apic_mode(apic)) {
                WARN_ON_ONCE(ldr != kvm_apic_calc_x2apic_ldr(kvm_x2apic_id(apic)));
                return;
        }

        if (WARN_ON_ONCE(!kvm_apic_map_get_logical_dest(new, ldr,
                                                        &cluster, &mask))) {
                new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
                return;
        }

        if (!mask)
                return;

        ldr = ffs(mask) - 1;
        if (!is_power_of_2(mask) || cluster[ldr])
                new->logical_mode = KVM_APIC_MODE_MAP_DISABLED;
        else
                cluster[ldr] = apic;
}

/*
 * CLEAN -> DIRTY and UPDATE_IN_PROGRESS -> DIRTY changes happen without a
 * lock.
 *
 * DIRTY -> UPDATE_IN_PROGRESS and UPDATE_IN_PROGRESS -> CLEAN happen with
 * apic_map_lock held.
 */
enum {
        CLEAN,
        UPDATE_IN_PROGRESS,
        DIRTY
};

void kvm_recalculate_apic_map(struct kvm *kvm)
{
        struct kvm_apic_map *new, *old = NULL;
        struct kvm_vcpu *vcpu;
        unsigned long i;
        u32 max_id = 255; /* enough space for any xAPIC ID */
        bool xapic_id_mismatch;
        int r;

        /* Read kvm->arch.apic_map_dirty before kvm->arch.apic_map.  */
        if (atomic_read_acquire(&kvm->arch.apic_map_dirty) == CLEAN)
                return;

        WARN_ONCE(!irqchip_in_kernel(kvm),
                  "Dirty APIC map without an in-kernel local APIC");

        mutex_lock(&kvm->arch.apic_map_lock);

retry:
        /*
         * Read kvm->arch.apic_map_dirty before kvm->arch.apic_map (if clean)
         * or the APIC registers (if dirty).  Note, on retry the map may have
         * not yet been marked dirty by whatever task changed a vCPU's x2APIC
         * ID, i.e. the map may still show up as in-progress.  In that case
         * this task still needs to retry and complete its calculation.
         */
        if (atomic_cmpxchg_acquire(&kvm->arch.apic_map_dirty,
                                   DIRTY, UPDATE_IN_PROGRESS) == CLEAN) {
                /* Someone else has updated the map. */
                mutex_unlock(&kvm->arch.apic_map_lock);
                return;
        }

        /*
         * Reset the mismatch flag between attempts so that KVM does the right
         * thing if a vCPU changes its xAPIC ID, but do NOT reset max_id, i.e.
         * keep max_id strictly increasing.  Disallowing max_id from shrinking
         * ensures KVM won't get stuck in an infinite loop, e.g. if the vCPU
         * with the highest x2APIC ID is toggling its APIC on and off.
         */
        xapic_id_mismatch = false;

        kvm_for_each_vcpu(i, vcpu, kvm)
                if (kvm_apic_present(vcpu))
                        max_id = max(max_id, kvm_x2apic_id(vcpu->arch.apic));

        new = kvzalloc(sizeof(struct kvm_apic_map) +
                           sizeof(struct kvm_lapic *) * ((u64)max_id + 1),
                           GFP_KERNEL_ACCOUNT);

        if (!new)
                goto out;

        new->max_apic_id = max_id;
        new->logical_mode = KVM_APIC_MODE_SW_DISABLED;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_apic_present(vcpu))
                        continue;

                r = kvm_recalculate_phys_map(new, vcpu, &xapic_id_mismatch);
                if (r) {
                        kvfree(new);
                        new = NULL;
                        if (r == -E2BIG) {
                                cond_resched();
                                goto retry;
                        }

                        goto out;
                }

                kvm_recalculate_logical_map(new, vcpu);
        }
out:
        /*
         * The optimized map is effectively KVM's internal version of APICv,
         * and all unwanted aliasing that results in disabling the optimized
         * map also applies to APICv.
         */
        if (!new)
                kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);
        else
                kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_ALIASED);

        if (!new || new->logical_mode == KVM_APIC_MODE_MAP_DISABLED)
                kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);
        else
                kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_LOGICAL_ID_ALIASED);

        if (xapic_id_mismatch)
                kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);
        else
                kvm_clear_apicv_inhibit(kvm, APICV_INHIBIT_REASON_APIC_ID_MODIFIED);

        old = rcu_dereference_protected(kvm->arch.apic_map,
                        lockdep_is_held(&kvm->arch.apic_map_lock));
        rcu_assign_pointer(kvm->arch.apic_map, new);
        /*
         * Write kvm->arch.apic_map before clearing kvm->arch.apic_map_dirty.
         * If another update has come in, leave it DIRTY.
         */
        atomic_cmpxchg_release(&kvm->arch.apic_map_dirty,
                               UPDATE_IN_PROGRESS, CLEAN);
        mutex_unlock(&kvm->arch.apic_map_lock);

        if (old)
                call_rcu(&old->rcu, kvm_apic_map_free);

        kvm_make_scan_ioapic_request(kvm);
}

static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
{
        bool enabled = val & APIC_SPIV_APIC_ENABLED;

        kvm_lapic_set_reg(apic, APIC_SPIV, val);

        if (enabled != apic->sw_enabled) {
                apic->sw_enabled = enabled;
                if (enabled)
                        static_branch_slow_dec_deferred(&apic_sw_disabled);
                else
                        static_branch_inc(&apic_sw_disabled.key);

                atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
        }

        /* Check if there are APF page ready requests pending */
        if (enabled)
                kvm_make_request(KVM_REQ_APF_READY, apic->vcpu);
}

static inline void kvm_apic_set_xapic_id(struct kvm_lapic *apic, u8 id)
{
        kvm_lapic_set_reg(apic, APIC_ID, id << 24);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_ldr(struct kvm_lapic *apic, u32 id)
{
        kvm_lapic_set_reg(apic, APIC_LDR, id);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_dfr(struct kvm_lapic *apic, u32 val)
{
        kvm_lapic_set_reg(apic, APIC_DFR, val);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
        u32 ldr = kvm_apic_calc_x2apic_ldr(id);

        WARN_ON_ONCE(id != apic->vcpu->vcpu_id);

        kvm_lapic_set_reg(apic, APIC_ID, id);
        kvm_lapic_set_reg(apic, APIC_LDR, ldr);
        atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
}

static inline int apic_lvt_enabled(struct kvm_lapic *apic, int lvt_type)
{
        return !(kvm_lapic_get_reg(apic, lvt_type) & APIC_LVT_MASKED);
}

static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_ONESHOT;
}

static inline int apic_lvtt_period(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_PERIODIC;
}

static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
{
        return apic->lapic_timer.timer_mode == APIC_LVT_TIMER_TSCDEADLINE;
}

static inline int apic_lvt_nmi_mode(u32 lvt_val)
{
        return (lvt_val & (APIC_MODE_MASK | APIC_LVT_MASKED)) == APIC_DM_NMI;
}

static inline bool kvm_lapic_lvt_supported(struct kvm_lapic *apic, int lvt_index)
{
        return apic->nr_lvt_entries > lvt_index;
}

static inline int kvm_apic_calc_nr_lvt_entries(struct kvm_vcpu *vcpu)
{
        return KVM_APIC_MAX_NR_LVT_ENTRIES - !(vcpu->arch.mcg_cap & MCG_CMCI_P);
}

void kvm_apic_set_version(struct kvm_vcpu *vcpu)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        u32 v = 0;

        if (!lapic_in_kernel(vcpu))
                return;

        v = APIC_VERSION | ((apic->nr_lvt_entries - 1) << 16);

        /*
         * KVM's in-kernel IOAPIC emulates the 82093AA, which has no EOI
         * register.  Some buggy OSes (e.g. Windows with the Hyper-V role)
         * disable EOI broadcast in the local APIC without first checking the
         * IOAPIC version, in which case level-triggered interrupts would
         * never get EOIed in the IOAPIC, so advertise directed EOI only when
         * the IOAPIC is not emulated in the kernel.
         */
        if (guest_cpuid_has(vcpu, X86_FEATURE_X2APIC) &&
            !ioapic_in_kernel(vcpu->kvm))
                v |= APIC_LVR_DIRECTED_EOI;
        kvm_lapic_set_reg(apic, APIC_LVR, v);
}

void kvm_apic_after_set_mcg_cap(struct kvm_vcpu *vcpu)
{
        int nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
        struct kvm_lapic *apic = vcpu->arch.apic;
        int i;

        if (!lapic_in_kernel(vcpu) || nr_lvt_entries == apic->nr_lvt_entries)
                return;

        /* Initialize/mask any "new" LVT entries. */
        for (i = apic->nr_lvt_entries; i < nr_lvt_entries; i++)
                kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);

        apic->nr_lvt_entries = nr_lvt_entries;

        /* The number of LVT entries is reflected in the version register. */
        kvm_apic_set_version(vcpu);
}

static const unsigned int apic_lvt_mask[KVM_APIC_MAX_NR_LVT_ENTRIES] = {
        [LVT_TIMER] = LVT_MASK,      /* timer mode mask added at runtime */
        [LVT_THERMAL_MONITOR] = LVT_MASK | APIC_MODE_MASK,
        [LVT_PERFORMANCE_COUNTER] = LVT_MASK | APIC_MODE_MASK,
        [LVT_LINT0] = LINT_MASK,
        [LVT_LINT1] = LINT_MASK,
        [LVT_ERROR] = LVT_MASK,
        [LVT_CMCI] = LVT_MASK | APIC_MODE_MASK
};

static int find_highest_vector(void *bitmap)
{
        int vec;
        u32 *reg;

        for (vec = MAX_APIC_VECTOR - APIC_VECTORS_PER_REG;
             vec >= 0; vec -= APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                if (*reg)
                        return __fls(*reg) + vec;
        }

        return -1;
}

static u8 count_vectors(void *bitmap)
{
        int vec;
        u32 *reg;
        u8 count = 0;

        for (vec = 0; vec < MAX_APIC_VECTOR; vec += APIC_VECTORS_PER_REG) {
                reg = bitmap + REG_POS(vec);
                count += hweight32(*reg);
        }

        return count;
}

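/*
 * Merge the Posted-Interrupt Request bitmap into the vIRR.  Each non-zero
 * PIR word is atomically claimed with xchg() (hardware may post new bits
 * concurrently), then OR'ed into the IRR via a try_cmpxchg() loop because
 * hardware can also modify the IRR under APICv.  Returns true iff the
 * highest pending vector was freshly set by this merge.
 */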
bool __kvm_apic_update_irr(u32 *pir, void *regs, int *max_irr)
{
        u32 i, vec;
        u32 pir_val, irr_val, prev_irr_val;
        int max_updated_irr;

        max_updated_irr = -1;
        *max_irr = -1;

        for (i = vec = 0; i <= 7; i++, vec += 32) {
                u32 *p_irr = (u32 *)(regs + APIC_IRR + i * 0x10);

                irr_val = *p_irr;
                pir_val = READ_ONCE(pir[i]);

                if (pir_val) {
                        pir_val = xchg(&pir[i], 0);

                        prev_irr_val = irr_val;
                        do {
                                irr_val = prev_irr_val | pir_val;
                        } while (prev_irr_val != irr_val &&
                                 !try_cmpxchg(p_irr, &prev_irr_val, irr_val));

                        if (prev_irr_val != irr_val)
                                max_updated_irr = __fls(irr_val ^ prev_irr_val) + vec;
                }
                if (irr_val)
                        *max_irr = __fls(irr_val) + vec;
        }

        return ((max_updated_irr != -1) &&
                (max_updated_irr == *max_irr));
}
EXPORT_SYMBOL_GPL(__kvm_apic_update_irr);

bool kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir, int *max_irr)
{
        struct kvm_lapic *apic = vcpu->arch.apic;
        bool irr_updated = __kvm_apic_update_irr(pir, apic->regs, max_irr);

        if (unlikely(!apic->apicv_active && irr_updated))
                apic->irr_pending = true;
        return irr_updated;
}
EXPORT_SYMBOL_GPL(kvm_apic_update_irr);

static inline int apic_search_irr(struct kvm_lapic *apic)
{
        return find_highest_vector(apic->regs + APIC_IRR);
}

static inline int apic_find_highest_irr(struct kvm_lapic *apic)
{
        int result;

        /*
         * Note that irr_pending is just a hint. It will always be true
         * with virtual interrupt delivery enabled.
         */
        if (!apic->irr_pending)
                return -1;

        result = apic_search_irr(apic);
        ASSERT(result == -1 || result >= 16);

        return result;
}

static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
{
        if (unlikely(apic->apicv_active)) {
                /* need to update RVI */
                kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
                static_call_cond(kvm_x86_hwapic_irr_update)(apic->vcpu,
                                                            apic_find_highest_irr(apic));
        } else {
                apic->irr_pending = false;
                kvm_lapic_clear_vector(vec, apic->regs + APIC_IRR);
                if (apic_search_irr(apic) != -1)
                        apic->irr_pending = true;
        }
}

void kvm_apic_clear_irr(struct kvm_vcpu *vcpu, int vec)
{
        apic_clear_irr(vec, vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_clear_irr);

static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
{
        if (__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
                return;

        /*
         * With APIC virtualization enabled, all caching is disabled
         * because the processor can modify ISR under the hood.  Instead
         * just set SVI.
         */
        if (unlikely(apic->apicv_active))
                static_call_cond(kvm_x86_hwapic_isr_update)(vec);
        else {
                ++apic->isr_count;
                BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
                /*
                 * ISR (in service register) bit is set when injecting an interrupt.
                 * The highest vector is injected. Thus the latest bit set matches
                 * the highest bit in ISR.
                 */
                apic->highest_isr_cache = vec;
        }
}

static inline int apic_find_highest_isr(struct kvm_lapic *apic)
{
        int result;

        /*
         * Note that isr_count is always 1, and highest_isr_cache
         * is always -1, with APIC virtualization enabled.
         */
        if (!apic->isr_count)
                return -1;
        if (likely(apic->highest_isr_cache != -1))
                return apic->highest_isr_cache;

        result = find_highest_vector(apic->regs + APIC_ISR);
        ASSERT(result == -1 || result >= 16);

        return result;
}

static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
{
        if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
                return;

        /*
         * We do get here for APIC virtualization enabled if the guest
         * uses the Hyper-V APIC enlightenment.  In this case we may need
         * to trigger a new interrupt delivery by writing the SVI field;
         * on the other hand isr_count and highest_isr_cache are unused
         * and must be left alone.
         */
        if (unlikely(apic->apicv_active))
                static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
        else {
                --apic->isr_count;
                BUG_ON(apic->isr_count < 0);
                apic->highest_isr_cache = -1;
        }
}

int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
{
        /*
         * This may race with setting of irr in __apic_accept_irq() and the
         * value returned may be wrong, but kvm_vcpu_kick() in
         * __apic_accept_irq() will cause an immediate vmexit and the value
         * will be recalculated on the next vmentry.
         */
        return apic_find_highest_irr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);

static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode,
                             struct dest_map *dest_map);

int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
                     struct dest_map *dest_map)
{
        struct kvm_lapic *apic = vcpu->arch.apic;

        return __apic_accept_irq(apic, irq->delivery_mode, irq->vector,
                        irq->level, irq->trig_mode, dest_map);
}

static int __pv_send_ipi(unsigned long *ipi_bitmap, struct kvm_apic_map *map,
                         struct kvm_lapic_irq *irq, u32 min)
{
        int i, count = 0;
        struct kvm_vcpu *vcpu;

        if (min > map->max_apic_id)
                return 0;

        for_each_set_bit(i, ipi_bitmap,
                min((u32)BITS_PER_LONG, (map->max_apic_id - min + 1))) {
                if (map->phys_map[min + i]) {
                        vcpu = map->phys_map[min + i]->vcpu;
                        count += kvm_apic_set_irq(vcpu, irq, NULL);
                }
        }

        return count;
}

int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low,
                    unsigned long ipi_bitmap_high, u32 min,
                    unsigned long icr, int op_64_bit)
{
        struct kvm_apic_map *map;
        struct kvm_lapic_irq irq = {0};
        int cluster_size = op_64_bit ? 64 : 32;
        int count;

        if (icr & (APIC_DEST_MASK | APIC_SHORT_MASK))
                return -KVM_EINVAL;

        irq.vector = icr & APIC_VECTOR_MASK;
        irq.delivery_mode = icr & APIC_MODE_MASK;
        irq.level = (icr & APIC_INT_ASSERT) != 0;
        irq.trig_mode = icr & APIC_INT_LEVELTRIG;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        count = -EOPNOTSUPP;
        if (likely(map)) {
                count = __pv_send_ipi(&ipi_bitmap_low, map, &irq, min);
                min += cluster_size;
                count += __pv_send_ipi(&ipi_bitmap_high, map, &irq, min);
        }

        rcu_read_unlock();
        return count;
}

static int pv_eoi_put_user(struct kvm_vcpu *vcpu, u8 val)
{
        return kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, &val,
                                      sizeof(val));
}

static int pv_eoi_get_user(struct kvm_vcpu *vcpu, u8 *val)
{
        return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, val,
                                      sizeof(*val));
}

static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}

static void pv_eoi_set_pending(struct kvm_vcpu *vcpu)
{
        if (pv_eoi_put_user(vcpu, KVM_PV_EOI_ENABLED) < 0)
                return;

        __set_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
}

static bool pv_eoi_test_and_clr_pending(struct kvm_vcpu *vcpu)
{
        u8 val;

        if (pv_eoi_get_user(vcpu, &val) < 0)
                return false;

        val &= KVM_PV_EOI_ENABLED;

        if (val && pv_eoi_put_user(vcpu, KVM_PV_EOI_DISABLED) < 0)
                return false;

        /*
         * Clear the pending bit in any case: it will be set again on vmentry.
         * While this might not be ideal from a performance point of view,
         * it makes sure pv eoi is only enabled when we know it's safe.
         */
        __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);

        return val;
}

static int apic_has_interrupt_for_ppr(struct kvm_lapic *apic, u32 ppr)
{
        int highest_irr;

        if (kvm_x86_ops.sync_pir_to_irr)
                highest_irr = static_call(kvm_x86_sync_pir_to_irr)(apic->vcpu);
        else
                highest_irr = apic_find_highest_irr(apic);
        if (highest_irr == -1 || (highest_irr & 0xF0) <= ppr)
                return -1;
        return highest_irr;
}

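/*
 * The Processor Priority Register is the higher of the Task Priority
 * Register and the priority class of the highest in-service vector, i.e.
 * PPR = max(TPR, ISRV & 0xf0) when comparing bits 7:4.  E.g. with TPR =
 * 0x45 and ISRV = 0x61 the PPR is 0x60, so pending vectors up to 0x6f
 * remain blocked.
 */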
static bool __apic_update_ppr(struct kvm_lapic *apic, u32 *new_ppr)
{
        u32 tpr, isrv, ppr, old_ppr;
        int isr;

        old_ppr = kvm_lapic_get_reg(apic, APIC_PROCPRI);
        tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI);
        isr = apic_find_highest_isr(apic);
        isrv = (isr != -1) ? isr : 0;

        if ((tpr & 0xf0) >= (isrv & 0xf0))
                ppr = tpr & 0xff;
        else
                ppr = isrv & 0xf0;

        *new_ppr = ppr;
        if (old_ppr != ppr)
                kvm_lapic_set_reg(apic, APIC_PROCPRI, ppr);

        return ppr < old_ppr;
}

static void apic_update_ppr(struct kvm_lapic *apic)
{
        u32 ppr;

        if (__apic_update_ppr(apic, &ppr) &&
            apic_has_interrupt_for_ppr(apic, ppr) != -1)
                kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
}

void kvm_apic_update_ppr(struct kvm_vcpu *vcpu)
{
        apic_update_ppr(vcpu->arch.apic);
}
EXPORT_SYMBOL_GPL(kvm_apic_update_ppr);

static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
{
        kvm_lapic_set_reg(apic, APIC_TASKPRI, tpr);
        apic_update_ppr(apic);
}

static bool kvm_apic_broadcast(struct kvm_lapic *apic, u32 mda)
{
        return mda == (apic_x2apic_mode(apic) ?
                        X2APIC_BROADCAST : APIC_BROADCAST);
}

static bool kvm_apic_match_physical_addr(struct kvm_lapic *apic, u32 mda)
{
        if (kvm_apic_broadcast(apic, mda))
                return true;

        /*
         * Hotplug hack: Accept interrupts for vCPUs in xAPIC mode as if they
         * were in x2APIC mode if the target APIC ID can't be encoded as an
         * xAPIC ID.  This allows unique addressing of hotplugged vCPUs (which
         * start in xAPIC mode) with an APIC ID that is unaddressable in xAPIC
         * mode.  Match the x2APIC ID if and only if the target APIC ID can't
         * be encoded in xAPIC to avoid spurious matches against a vCPU that
         * changed its (addressable) xAPIC ID (which is writable).
         */
        if (apic_x2apic_mode(apic) || mda > 0xff)
                return mda == kvm_x2apic_id(apic);

        return mda == kvm_xapic_id(apic);
}

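/*
 * Logical destination matching: in x2APIC mode the cluster halves (bits
 * 31:16) must be equal and the member bitmaps (bits 15:0) must intersect.
 * In xAPIC flat mode the 8-bit MDA is a plain bitmask of targets; in
 * cluster mode bits 7:4 select the cluster and bits 3:0 are the member
 * mask, e.g. LDR 0x23 (cluster 2, members 0 and 1) matches MDA 0x21.
 */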
static bool kvm_apic_match_logical_addr(struct kvm_lapic *apic, u32 mda)
{
        u32 logical_id;

        if (kvm_apic_broadcast(apic, mda))
                return true;

        logical_id = kvm_lapic_get_reg(apic, APIC_LDR);

        if (apic_x2apic_mode(apic))
                return ((logical_id >> 16) == (mda >> 16))
                       && (logical_id & mda & 0xffff) != 0;

        logical_id = GET_APIC_LOGICAL_ID(logical_id);

        switch (kvm_lapic_get_reg(apic, APIC_DFR)) {
        case APIC_DFR_FLAT:
                return (logical_id & mda) != 0;
        case APIC_DFR_CLUSTER:
                return ((logical_id >> 4) == (mda >> 4))
                       && (logical_id & mda & 0xf) != 0;
        default:
                return false;
        }
}

/*
 * The KVM local APIC implementation has two quirks:
 *
 *  - Real hardware delivers interrupts destined to x2APIC ID > 0xff to
 *    LAPICs in xAPIC mode if the "destination & 0xff" matches its xAPIC ID.
 *    KVM doesn't do that aliasing.
 *
 *  - in-kernel IOAPIC messages have to be delivered directly to
 *    x2APIC, because the kernel does not support interrupt remapping.
 *    In order to support broadcast without interrupt remapping, x2APIC
 *    rewrites the destination of non-IPI messages from APIC_BROADCAST
 *    to X2APIC_BROADCAST.
 *
 * The broadcast quirk can be disabled with KVM_CAP_X2APIC_API.  This is
 * important when userspace wants to use x2APIC-format MSIs, because
 * APIC_BROADCAST (0xff) is a legal route for "cluster 0, CPUs 0-7".
 */
static u32 kvm_apic_mda(struct kvm_vcpu *vcpu, unsigned int dest_id,
                struct kvm_lapic *source, struct kvm_lapic *target)
{
        bool ipi = source != NULL;

        if (!vcpu->kvm->arch.x2apic_broadcast_quirk_disabled &&
            !ipi && dest_id == APIC_BROADCAST && apic_x2apic_mode(target))
                return X2APIC_BROADCAST;

        return dest_id;
}

bool kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                           int shorthand, unsigned int dest, int dest_mode)
{
        struct kvm_lapic *target = vcpu->arch.apic;
        u32 mda = kvm_apic_mda(vcpu, dest, source, target);

        ASSERT(target);
        switch (shorthand) {
        case APIC_DEST_NOSHORT:
                if (dest_mode == APIC_DEST_PHYSICAL)
                        return kvm_apic_match_physical_addr(target, mda);
                else
                        return kvm_apic_match_logical_addr(target, mda);
        case APIC_DEST_SELF:
                return target == source;
        case APIC_DEST_ALLINC:
                return true;
        case APIC_DEST_ALLBUT:
                return target != source;
        default:
                return false;
        }
}
EXPORT_SYMBOL_GPL(kvm_apic_match_dest);

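/*
 * Vector hashing: deterministically pick the (vector % dest_vcpus)-th set
 * bit in @bitmap so that a lowest-priority interrupt always lands on the
 * same destination for a given vector, e.g. vector 0x26 with three
 * candidates (0x26 % 3 == 2) selects the third set bit.
 */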
int kvm_vector_to_index(u32 vector, u32 dest_vcpus,
                       const unsigned long *bitmap, u32 bitmap_size)
{
        u32 mod;
        int i, idx = -1;

        mod = vector % dest_vcpus;

        for (i = 0; i <= mod; i++) {
                idx = find_next_bit(bitmap, bitmap_size, idx + 1);
                BUG_ON(idx == bitmap_size);
        }

        return idx;
}

static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
{
        if (!kvm->arch.disabled_lapic_found) {
                kvm->arch.disabled_lapic_found = true;
                pr_info("Disabled LAPIC found during irq injection\n");
        }
}

static bool kvm_apic_is_broadcast_dest(struct kvm *kvm, struct kvm_lapic **src,
                struct kvm_lapic_irq *irq, struct kvm_apic_map *map)
{
        if (kvm->arch.x2apic_broadcast_quirk_disabled) {
                if ((irq->dest_id == APIC_BROADCAST &&
                     map->logical_mode != KVM_APIC_MODE_X2APIC))
                        return true;
                if (irq->dest_id == X2APIC_BROADCAST)
                        return true;
        } else {
                bool x2apic_ipi = src && *src && apic_x2apic_mode(*src);

                if (irq->dest_id == (x2apic_ipi ?
                                     X2APIC_BROADCAST : APIC_BROADCAST))
                        return true;
        }

        return false;
}

/*
 * Return true if the interrupt can be handled by using *bitmap as index mask
 * for valid destinations in *dst array.
 * Return false if kvm_apic_map_get_dest_lapic did nothing useful.
 * Note: we may have zero kvm_lapic destinations when we return true, which
 * means that the interrupt should be dropped.  In this case, *bitmap would be
 * zero and *dst undefined.
 */
static inline bool kvm_apic_map_get_dest_lapic(struct kvm *kvm,
                struct kvm_lapic **src, struct kvm_lapic_irq *irq,
                struct kvm_apic_map *map, struct kvm_lapic ***dst,
                unsigned long *bitmap)
{
        int i, lowest;

        if (irq->shorthand == APIC_DEST_SELF && src) {
                *dst = src;
                *bitmap = 1;
                return true;
        } else if (irq->shorthand)
                return false;

        if (!map || kvm_apic_is_broadcast_dest(kvm, src, irq, map))
                return false;

        if (irq->dest_mode == APIC_DEST_PHYSICAL) {
                if (irq->dest_id > map->max_apic_id) {
                        *bitmap = 0;
                } else {
                        u32 dest_id = array_index_nospec(irq->dest_id, map->max_apic_id + 1);

                        *dst = &map->phys_map[dest_id];
                        *bitmap = 1;
                }
                return true;
        }

        *bitmap = 0;
        if (!kvm_apic_map_get_logical_dest(map, irq->dest_id, dst,
                                (u16 *)bitmap))
                return false;

        if (!kvm_lowest_prio_delivery(irq))
                return true;

        if (!kvm_vector_hashing_enabled()) {
                lowest = -1;
                for_each_set_bit(i, bitmap, 16) {
                        if (!(*dst)[i])
                                continue;
                        if (lowest < 0)
                                lowest = i;
                        else if (kvm_apic_compare_prio((*dst)[i]->vcpu,
                                                (*dst)[lowest]->vcpu) < 0)
                                lowest = i;
                }
        } else {
                if (!*bitmap)
                        return true;

                lowest = kvm_vector_to_index(irq->vector, hweight16(*bitmap),
                                bitmap, 16);

                if (!(*dst)[lowest]) {
                        kvm_apic_disabled_lapic_found(kvm);
                        *bitmap = 0;
                        return true;
                }
        }

        *bitmap = (lowest >= 0) ? 1 << lowest : 0;

        return true;
}

bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
                struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        int i;
        bool ret;

        *r = -1;

        if (irq->shorthand == APIC_DEST_SELF) {
                if (KVM_BUG_ON(!src, kvm)) {
                        *r = 0;
                        return true;
                }
                *r = kvm_apic_set_irq(src->vcpu, irq, dest_map);
                return true;
        }

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dst, &bitmap);
        if (ret) {
                *r = 0;
                for_each_set_bit(i, &bitmap, 16) {
                        if (!dst[i])
                                continue;
                        *r += kvm_apic_set_irq(dst[i]->vcpu, irq, dest_map);
                }
        }

        rcu_read_unlock();
        return ret;
}

/*
 * This routine tries to handle interrupts in posted mode; here is how
 * it deals with different cases:
 * - For single-destination interrupts, handle it in posted mode
 * - Else if vector hashing is enabled and it is a lowest-priority
 *   interrupt, handle it in posted mode and use the following mechanism
 *   to find the destination vCPU.
 *      1. For lowest-priority interrupts, store all the possible
 *         destination vCPUs in an array.
 *      2. Use "guest vector % max number of destination vCPUs" to find
 *         the right destination vCPU in the array for the lowest-priority
 *         interrupt.
 * - Otherwise, use remapped mode to inject the interrupt.
 */
bool kvm_intr_is_single_vcpu_fast(struct kvm *kvm, struct kvm_lapic_irq *irq,
                        struct kvm_vcpu **dest_vcpu)
{
        struct kvm_apic_map *map;
        unsigned long bitmap;
        struct kvm_lapic **dst = NULL;
        bool ret = false;

        if (irq->shorthand)
                return false;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        if (kvm_apic_map_get_dest_lapic(kvm, NULL, irq, map, &dst, &bitmap) &&
                        hweight16(bitmap) == 1) {
                unsigned long i = find_first_bit(&bitmap, 16);

                if (dst[i]) {
                        *dest_vcpu = dst[i]->vcpu;
                        ret = true;
                }
        }

        rcu_read_unlock();
        return ret;
}

/*
 * Add a pending IRQ into lapic.
 * Return 1 if successfully added and 0 if discarded.
 */
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
                             int vector, int level, int trig_mode,
                             struct dest_map *dest_map)
{
        int result = 0;
        struct kvm_vcpu *vcpu = apic->vcpu;

        trace_kvm_apic_accept_irq(vcpu->vcpu_id, delivery_mode,
                                  trig_mode, vector);
        switch (delivery_mode) {
        case APIC_DM_LOWEST:
                vcpu->arch.apic_arb_prio++;
                fallthrough;
        case APIC_DM_FIXED:
                if (unlikely(trig_mode && !level))
                        break;

                /* FIXME add logic for vcpu on reset */
                if (unlikely(!apic_enabled(apic)))
                        break;

                result = 1;

                if (dest_map) {
                        __set_bit(vcpu->vcpu_id, dest_map->map);
                        dest_map->vectors[vcpu->vcpu_id] = vector;
                }

                if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
                        if (trig_mode)
                                kvm_lapic_set_vector(vector,
                                                     apic->regs + APIC_TMR);
                        else
                                kvm_lapic_clear_vector(vector,
                                                       apic->regs + APIC_TMR);
                }

                static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode,
                                                       trig_mode, vector);
                break;

        case APIC_DM_REMRD:
                result = 1;
                vcpu->arch.pv.pv_unhalted = 1;
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_SMI:
                if (!kvm_inject_smi(vcpu)) {
                        kvm_vcpu_kick(vcpu);
                        result = 1;
                }
                break;

        case APIC_DM_NMI:
                result = 1;
                kvm_inject_nmi(vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_INIT:
                if (!trig_mode || level) {
                        result = 1;
                        /* assumes that there are only KVM_APIC_INIT/SIPI */
                        apic->pending_events = (1UL << KVM_APIC_INIT);
                        kvm_make_request(KVM_REQ_EVENT, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                break;

        case APIC_DM_STARTUP:
                result = 1;
                apic->sipi_vector = vector;
                /* make sure sipi_vector is visible for the receiver */
                smp_wmb();
                set_bit(KVM_APIC_SIPI, &apic->pending_events);
                kvm_make_request(KVM_REQ_EVENT, vcpu);
                kvm_vcpu_kick(vcpu);
                break;

        case APIC_DM_EXTINT:
                /*
                 * Should only be called by kvm_apic_local_deliver() with LVT0,
                 * before NMI watchdog was enabled. Already handled by
                 * kvm_apic_accept_pic_intr().
                 */
                break;

        default:
                printk(KERN_ERR "TODO: unsupported delivery mode %x\n",
                       delivery_mode);
                break;
        }
        return result;
}

/*
 * This routine identifies the destination vcpus mask meant to receive the
 * IOAPIC interrupts. It either uses kvm_apic_map_get_dest_lapic() to find
 * the destination vcpus array and set the bitmap, or it walks each
 * available vcpu and matches the destination individually.
 */
void kvm_bitmap_or_dest_vcpus(struct kvm *kvm, struct kvm_lapic_irq *irq,
                              unsigned long *vcpu_bitmap)
{
        struct kvm_lapic **dest_vcpu = NULL;
        struct kvm_lapic *src = NULL;
        struct kvm_apic_map *map;
        struct kvm_vcpu *vcpu;
        unsigned long bitmap, i;
        int vcpu_idx;
        bool ret;

        rcu_read_lock();
        map = rcu_dereference(kvm->arch.apic_map);

        ret = kvm_apic_map_get_dest_lapic(kvm, &src, irq, map, &dest_vcpu,
                                          &bitmap);
        if (ret) {
                for_each_set_bit(i, &bitmap, 16) {
                        if (!dest_vcpu[i])
                                continue;
                        vcpu_idx = dest_vcpu[i]->vcpu->vcpu_idx;
                        __set_bit(vcpu_idx, vcpu_bitmap);
                }
        } else {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (!kvm_apic_present(vcpu))
                                continue;
                        if (!kvm_apic_match_dest(vcpu, NULL,
                                                 irq->shorthand,
                                                 irq->dest_id,
                                                 irq->dest_mode))
                                continue;
                        __set_bit(i, vcpu_bitmap);
                }
        }
        rcu_read_unlock();
}

int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
{
        return vcpu1->arch.apic_arb_prio - vcpu2->arch.apic_arb_prio;
}

static bool kvm_ioapic_handles_vector(struct kvm_lapic *apic, int vector)
{
        return test_bit(vector, apic->vcpu->arch.ioapic_handled_vectors);
}

static void kvm_ioapic_send_eoi(struct kvm_lapic *apic, int vector)
{
        int trigger_mode;

        /* EOI the IOAPIC only if the IOAPIC actually handles the vector. */
        if (!kvm_ioapic_handles_vector(apic, vector))
                return;

        /* Request a KVM exit to inform the userspace IOAPIC. */
        if (irqchip_split(apic->vcpu->kvm)) {
                apic->vcpu->arch.pending_ioapic_eoi = vector;
                kvm_make_request(KVM_REQ_IOAPIC_EOI_EXIT, apic->vcpu);
                return;
        }

        if (apic_test_vector(vector, apic->regs + APIC_TMR))
                trigger_mode = IOAPIC_LEVEL_TRIG;
        else
                trigger_mode = IOAPIC_EDGE_TRIG;

        kvm_ioapic_update_eoi(apic->vcpu, vector, trigger_mode);
}

static int apic_set_eoi(struct kvm_lapic *apic)
{
        int vector = apic_find_highest_isr(apic);

        trace_kvm_eoi(apic, vector);

        /*
         * Not every EOI write has a corresponding bit set in the ISR; one
         * example is the kernel checking the timer in setup_IO_APIC().
         */
        if (vector == -1)
                return vector;

        apic_clear_isr(vector, apic);
        apic_update_ppr(apic);

        if (kvm_hv_synic_has_vector(apic->vcpu, vector))
                kvm_hv_synic_send_eoi(apic->vcpu, vector);

        kvm_ioapic_send_eoi(apic, vector);
        kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
        return vector;
}

/*
 * This interface assumes a trap-like EOI exit: the desired side effects,
 * including the vISR and vPPR updates, have already been performed by
 * hardware.
 */
1490 void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector)
1491 {
1492         struct kvm_lapic *apic = vcpu->arch.apic;
1493
1494         trace_kvm_eoi(apic, vector);
1495
1496         kvm_ioapic_send_eoi(apic, vector);
1497         kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
1498 }
1499 EXPORT_SYMBOL_GPL(kvm_apic_set_eoi_accelerated);
1500
1501 void kvm_apic_send_ipi(struct kvm_lapic *apic, u32 icr_low, u32 icr_high)
1502 {
1503         struct kvm_lapic_irq irq;
1504
1505         /* KVM has no delay and should always clear the BUSY/PENDING flag. */
1506         WARN_ON_ONCE(icr_low & APIC_ICR_BUSY);
1507
1508         irq.vector = icr_low & APIC_VECTOR_MASK;
1509         irq.delivery_mode = icr_low & APIC_MODE_MASK;
1510         irq.dest_mode = icr_low & APIC_DEST_MASK;
1511         irq.level = (icr_low & APIC_INT_ASSERT) != 0;
1512         irq.trig_mode = icr_low & APIC_INT_LEVELTRIG;
1513         irq.shorthand = icr_low & APIC_SHORT_MASK;
1514         irq.msi_redir_hint = false;
1515         if (apic_x2apic_mode(apic))
1516                 irq.dest_id = icr_high;
1517         else
1518                 irq.dest_id = GET_XAPIC_DEST_FIELD(icr_high);
1519
1520         trace_kvm_apic_ipi(icr_low, irq.dest_id);
1521
1522         kvm_irq_delivery_to_apic(apic->vcpu->kvm, apic, &irq, NULL);
1523 }
1524 EXPORT_SYMBOL_GPL(kvm_apic_send_ipi);
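
/*
 * Editorial sketch (standalone, not part of the kernel build): how an
 * xAPIC ICR write decodes into the fields that kvm_apic_send_ipi()
 * consumes above.  The EX_* constants mirror the usual apicdef.h bit
 * layout and the sample ICR value is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_VECTOR_MASK  0x000ffu        /* bits 7:0   - vector         */
#define EX_MODE_MASK    0x00700u        /* bits 10:8  - delivery mode  */
#define EX_SHORT_MASK   0xc0000u        /* bits 19:18 - shorthand      */

int main(void)
{
        uint32_t icr_low  = 0x000c00f0u; /* fixed IPI, vector 0xf0, all-but-self */
        uint32_t icr_high = 0x01000000u; /* xAPIC: destination in bits 31:24     */

        printf("vector=0x%x mode=%u shorthand=%u dest=%u\n",
               icr_low & EX_VECTOR_MASK,
               (icr_low & EX_MODE_MASK) >> 8,
               (icr_low & EX_SHORT_MASK) >> 18,
               (icr_high >> 24) & 0xffu);  /* GET_XAPIC_DEST_FIELD() */
        return 0;
}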
1525
1526 static u32 apic_get_tmcct(struct kvm_lapic *apic)
1527 {
1528         ktime_t remaining, now;
1529         s64 ns;
1530
1531         ASSERT(apic != NULL);
1532
1533         /* if initial count is 0, current count should also be 0 */
1534         if (kvm_lapic_get_reg(apic, APIC_TMICT) == 0 ||
1535                 apic->lapic_timer.period == 0)
1536                 return 0;
1537
1538         now = ktime_get();
1539         remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1540         if (ktime_to_ns(remaining) < 0)
1541                 remaining = 0;
1542
1543         ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
1544         return div64_u64(ns, (APIC_BUS_CYCLE_NS * apic->divide_count));
1545 }
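
/*
 * Editorial sketch (standalone, not part of the kernel build): the
 * arithmetic of apic_get_tmcct() above in isolation.  The bus cycle
 * length is passed as a parameter here; in KVM it is APIC_BUS_CYCLE_NS.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t tmcct(uint64_t remaining_ns, uint64_t period_ns,
                      uint64_t bus_cycle_ns, uint32_t divide_count)
{
        /* Periodic timers wrap, hence the reduction modulo the period. */
        uint64_t ns = remaining_ns % period_ns;

        return (uint32_t)(ns / (bus_cycle_ns * divide_count));
}

int main(void)
{
        /* 1500000 % 1000000 = 500000 ns; 500000 / (1 * 2) = 250000 ticks. */
        printf("%u\n", tmcct(1500000, 1000000, 1, 2));
        return 0;
}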
1546
1547 static void __report_tpr_access(struct kvm_lapic *apic, bool write)
1548 {
1549         struct kvm_vcpu *vcpu = apic->vcpu;
1550         struct kvm_run *run = vcpu->run;
1551
1552         kvm_make_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu);
1553         run->tpr_access.rip = kvm_rip_read(vcpu);
1554         run->tpr_access.is_write = write;
1555 }
1556
1557 static inline void report_tpr_access(struct kvm_lapic *apic, bool write)
1558 {
1559         if (apic->vcpu->arch.tpr_access_reporting)
1560                 __report_tpr_access(apic, write);
1561 }
1562
1563 static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
1564 {
1565         u32 val = 0;
1566
1567         if (offset >= LAPIC_MMIO_LENGTH)
1568                 return 0;
1569
1570         switch (offset) {
1571         case APIC_ARBPRI:
1572                 break;
1573
1574         case APIC_TMCCT:        /* Timer CCR */
1575                 if (apic_lvtt_tscdeadline(apic))
1576                         return 0;
1577
1578                 val = apic_get_tmcct(apic);
1579                 break;
1580         case APIC_PROCPRI:
1581                 apic_update_ppr(apic);
1582                 val = kvm_lapic_get_reg(apic, offset);
1583                 break;
1584         case APIC_TASKPRI:
1585                 report_tpr_access(apic, false);
1586                 fallthrough;
1587         default:
1588                 val = kvm_lapic_get_reg(apic, offset);
1589                 break;
1590         }
1591
1592         return val;
1593 }
1594
1595 static inline struct kvm_lapic *to_lapic(struct kvm_io_device *dev)
1596 {
1597         return container_of(dev, struct kvm_lapic, dev);
1598 }
1599
1600 #define APIC_REG_MASK(reg)      (1ull << ((reg) >> 4))
1601 #define APIC_REGS_MASK(first, count) \
1602         (APIC_REG_MASK(first) * ((1ull << (count)) - 1))
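
/*
 * Editorial sketch (standalone, not part of the kernel build): the bit
 * mapping behind the two macros above.  APIC registers sit at 16-byte
 * strides, so (reg >> 4) is a unique bit index, and multiplying that
 * single bit by (2^count - 1) replicates it into 'count' consecutive
 * bits.  Offsets follow apicdef.h: APIC_ID = 0x20, APIC_ISR = 0x100.
 */
#include <stdio.h>

#define EX_REG_MASK(reg)          (1ull << ((reg) >> 4))
#define EX_REGS_MASK(first, cnt)  (EX_REG_MASK(first) * ((1ull << (cnt)) - 1))

int main(void)
{
        printf("0x%llx\n", EX_REG_MASK(0x20));      /* bit 2: 0x4            */
        printf("0x%llx\n", EX_REGS_MASK(0x100, 8)); /* bits 16..23: 0xff0000 */
        return 0;
}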
1603
1604 u64 kvm_lapic_readable_reg_mask(struct kvm_lapic *apic)
1605 {
1606         /* Leave bits '0' for reserved and write-only registers. */
1607         u64 valid_reg_mask =
1608                 APIC_REG_MASK(APIC_ID) |
1609                 APIC_REG_MASK(APIC_LVR) |
1610                 APIC_REG_MASK(APIC_TASKPRI) |
1611                 APIC_REG_MASK(APIC_PROCPRI) |
1612                 APIC_REG_MASK(APIC_LDR) |
1613                 APIC_REG_MASK(APIC_SPIV) |
1614                 APIC_REGS_MASK(APIC_ISR, APIC_ISR_NR) |
1615                 APIC_REGS_MASK(APIC_TMR, APIC_ISR_NR) |
1616                 APIC_REGS_MASK(APIC_IRR, APIC_ISR_NR) |
1617                 APIC_REG_MASK(APIC_ESR) |
1618                 APIC_REG_MASK(APIC_ICR) |
1619                 APIC_REG_MASK(APIC_LVTT) |
1620                 APIC_REG_MASK(APIC_LVTTHMR) |
1621                 APIC_REG_MASK(APIC_LVTPC) |
1622                 APIC_REG_MASK(APIC_LVT0) |
1623                 APIC_REG_MASK(APIC_LVT1) |
1624                 APIC_REG_MASK(APIC_LVTERR) |
1625                 APIC_REG_MASK(APIC_TMICT) |
1626                 APIC_REG_MASK(APIC_TMCCT) |
1627                 APIC_REG_MASK(APIC_TDCR);
1628
1629         if (kvm_lapic_lvt_supported(apic, LVT_CMCI))
1630                 valid_reg_mask |= APIC_REG_MASK(APIC_LVTCMCI);
1631
1632         /* ARBPRI, DFR, and ICR2 are not valid in x2APIC mode. */
1633         if (!apic_x2apic_mode(apic))
1634                 valid_reg_mask |= APIC_REG_MASK(APIC_ARBPRI) |
1635                                   APIC_REG_MASK(APIC_DFR) |
1636                                   APIC_REG_MASK(APIC_ICR2);
1637
1638         return valid_reg_mask;
1639 }
1640 EXPORT_SYMBOL_GPL(kvm_lapic_readable_reg_mask);
1641
1642 static int kvm_lapic_reg_read(struct kvm_lapic *apic, u32 offset, int len,
1643                               void *data)
1644 {
1645         unsigned char alignment = offset & 0xf;
1646         u32 result;
1647
1648         /*
1649          * WARN if KVM reads ICR in x2APIC mode, as it's an 8-byte register in
1650          * x2APIC and needs to be manually handled by the caller.
1651          */
1652         WARN_ON_ONCE(apic_x2apic_mode(apic) && offset == APIC_ICR);
1653
1654         if (alignment + len > 4)
1655                 return 1;
1656
1657         if (offset > 0x3f0 ||
1658             !(kvm_lapic_readable_reg_mask(apic) & APIC_REG_MASK(offset)))
1659                 return 1;
1660
1661         result = __apic_read(apic, offset & ~0xf);
1662
1663         trace_kvm_apic_read(offset, result);
1664
1665         switch (len) {
1666         case 1:
1667         case 2:
1668         case 4:
1669                 memcpy(data, (char *)&result + alignment, len);
1670                 break;
1671         default:
1672                 printk(KERN_ERR "Local APIC read with len = %x, "
1673                        "should be 1, 2, or 4 instead\n", len);
1674                 break;
1675         }
1676         return 0;
1677 }
1678
1679 static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
1680 {
1681         return addr >= apic->base_address &&
1682                 addr < apic->base_address + LAPIC_MMIO_LENGTH;
1683 }
1684
1685 static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
1686                            gpa_t address, int len, void *data)
1687 {
1688         struct kvm_lapic *apic = to_lapic(this);
1689         u32 offset = address - apic->base_address;
1690
1691         if (!apic_mmio_in_range(apic, address))
1692                 return -EOPNOTSUPP;
1693
1694         if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
1695                 if (!kvm_check_has_quirk(vcpu->kvm,
1696                                          KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
1697                         return -EOPNOTSUPP;
1698
1699                 memset(data, 0xff, len);
1700                 return 0;
1701         }
1702
1703         kvm_lapic_reg_read(apic, offset, len, data);
1704
1705         return 0;
1706 }
1707
1708 static void update_divide_count(struct kvm_lapic *apic)
1709 {
1710         u32 tmp1, tmp2, tdcr;
1711
1712         tdcr = kvm_lapic_get_reg(apic, APIC_TDCR);
1713         tmp1 = tdcr & 0xf;
1714         tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;
1715         apic->divide_count = 0x1 << (tmp2 & 0x7);
1716 }
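
/*
 * Editorial sketch (standalone, not part of the kernel build): the TDCR
 * decode performed by update_divide_count() above.  Bits 0, 1 and 3 of
 * the TDCR select the divisor, and the encoding wraps so the all-ones
 * pattern means divide-by-1, matching the SDM divide-configuration table.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ex_divide_count(uint32_t tdcr)
{
        uint32_t tmp1 = tdcr & 0xf;
        uint32_t tmp2 = ((tmp1 & 0x3) | ((tmp1 & 0x8) >> 1)) + 1;

        return 1u << (tmp2 & 0x7);
}

int main(void)
{
        printf("%u %u %u\n",
               ex_divide_count(0x0),   /* 0b0000 -> divide by 2  */
               ex_divide_count(0x8),   /* 0b1000 -> divide by 32 */
               ex_divide_count(0xb));  /* 0b1011 -> divide by 1  */
        return 0;
}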
1717
1718 static void limit_periodic_timer_frequency(struct kvm_lapic *apic)
1719 {
1720         /*
1721          * Do not allow the guest to program periodic timers with small
1722          * interval, since the hrtimers are not throttled by the host
1723          * scheduler.
1724          */
1725         if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
1726                 s64 min_period = min_timer_period_us * 1000LL;
1727
1728                 if (apic->lapic_timer.period < min_period) {
1729                         pr_info_ratelimited(
1730                             "vcpu %i: requested %lld ns "
1731                             "lapic timer period limited to %lld ns\n",
1732                             apic->vcpu->vcpu_id,
1733                             apic->lapic_timer.period, min_period);
1734                         apic->lapic_timer.period = min_period;
1735                 }
1736         }
1737 }
1738
1739 static void cancel_hv_timer(struct kvm_lapic *apic);
1740
1741 static void cancel_apic_timer(struct kvm_lapic *apic)
1742 {
1743         hrtimer_cancel(&apic->lapic_timer.timer);
1744         preempt_disable();
1745         if (apic->lapic_timer.hv_timer_in_use)
1746                 cancel_hv_timer(apic);
1747         preempt_enable();
1748         atomic_set(&apic->lapic_timer.pending, 0);
1749 }
1750
1751 static void apic_update_lvtt(struct kvm_lapic *apic)
1752 {
1753         u32 timer_mode = kvm_lapic_get_reg(apic, APIC_LVTT) &
1754                         apic->lapic_timer.timer_mode_mask;
1755
1756         if (apic->lapic_timer.timer_mode != timer_mode) {
1757                 if (apic_lvtt_tscdeadline(apic) != (timer_mode ==
1758                                 APIC_LVT_TIMER_TSCDEADLINE)) {
1759                         cancel_apic_timer(apic);
1760                         kvm_lapic_set_reg(apic, APIC_TMICT, 0);
1761                         apic->lapic_timer.period = 0;
1762                         apic->lapic_timer.tscdeadline = 0;
1763                 }
1764                 apic->lapic_timer.timer_mode = timer_mode;
1765                 limit_periodic_timer_frequency(apic);
1766         }
1767 }
1768
1769 /*
1770  * On APICv, this test can cause a busy wait
1771  * while a higher-priority task is running.
1772  */
1773
1774 static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
1775 {
1776         struct kvm_lapic *apic = vcpu->arch.apic;
1777         u32 reg = kvm_lapic_get_reg(apic, APIC_LVTT);
1778
1779         if (kvm_apic_hw_enabled(apic)) {
1780                 int vec = reg & APIC_VECTOR_MASK;
1781                 void *bitmap = apic->regs + APIC_ISR;
1782
1783                 if (apic->apicv_active)
1784                         bitmap = apic->regs + APIC_IRR;
1785
1786                 if (apic_test_vector(vec, bitmap))
1787                         return true;
1788         }
1789         return false;
1790 }
1791
1792 static inline void __wait_lapic_expire(struct kvm_vcpu *vcpu, u64 guest_cycles)
1793 {
1794         u64 timer_advance_ns = vcpu->arch.apic->lapic_timer.timer_advance_ns;
1795
1796         /*
1797          * If the guest TSC is running at a different ratio than the host, then
1798          * convert the delay to nanoseconds to achieve an accurate delay.  Note
1799          * that __delay() uses delay_tsc whenever the hardware has TSC, thus
1800          * always for VMX-enabled hardware.
1801          */
1802         if (vcpu->arch.tsc_scaling_ratio == kvm_caps.default_tsc_scaling_ratio) {
1803                 __delay(min(guest_cycles,
1804                         nsec_to_cycles(vcpu, timer_advance_ns)));
1805         } else {
1806                 u64 delay_ns = guest_cycles * 1000000ULL;
1807                 do_div(delay_ns, vcpu->arch.virtual_tsc_khz);
1808                 ndelay(min_t(u32, delay_ns, timer_advance_ns));
1809         }
1810 }
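
/*
 * Editorial sketch (standalone, not part of the kernel build): the
 * guest-cycles-to-nanoseconds conversion in the slow path above.
 * virtual_tsc_khz is cycles per millisecond, so cycles / tsc_khz is
 * milliseconds, and scaling by 10^6 yields nanoseconds.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t cycles_to_ns(uint64_t guest_cycles, uint64_t virtual_tsc_khz)
{
        return guest_cycles * 1000000ull / virtual_tsc_khz;
}

int main(void)
{
        /* 3000 cycles on a 3 GHz (3000000 kHz) guest TSC = 1000 ns. */
        printf("%llu\n", (unsigned long long)cycles_to_ns(3000, 3000000));
        return 0;
}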
1811
1812 static inline void adjust_lapic_timer_advance(struct kvm_vcpu *vcpu,
1813                                               s64 advance_expire_delta)
1814 {
1815         struct kvm_lapic *apic = vcpu->arch.apic;
1816         u32 timer_advance_ns = apic->lapic_timer.timer_advance_ns;
1817         u64 ns;
1818
1819         /* Do not adjust for tiny fluctuations or large random spikes. */
1820         if (abs(advance_expire_delta) > LAPIC_TIMER_ADVANCE_ADJUST_MAX ||
1821             abs(advance_expire_delta) < LAPIC_TIMER_ADVANCE_ADJUST_MIN)
1822                 return;
1823
1824         /* too early */
1825         if (advance_expire_delta < 0) {
1826                 ns = -advance_expire_delta * 1000000ULL;
1827                 do_div(ns, vcpu->arch.virtual_tsc_khz);
1828                 timer_advance_ns -= ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1829         } else {
1830         /* too late */
1831                 ns = advance_expire_delta * 1000000ULL;
1832                 do_div(ns, vcpu->arch.virtual_tsc_khz);
1833                 timer_advance_ns += ns/LAPIC_TIMER_ADVANCE_ADJUST_STEP;
1834         }
1835
1836         if (unlikely(timer_advance_ns > LAPIC_TIMER_ADVANCE_NS_MAX))
1837                 timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
1838         apic->lapic_timer.timer_advance_ns = timer_advance_ns;
1839 }
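
/*
 * Editorial sketch (standalone, not part of the kernel build): the
 * error-feedback loop of adjust_lapic_timer_advance() above, reduced to
 * its core.  Each pass folds 1/8th of the measured early/late error
 * (taken here as already converted to ns) into the advance value, so
 * repeated samples converge gradually instead of oscillating.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_ADJUST_STEP  8
#define EX_NS_MAX       5000
#define EX_NS_INIT      1000

static uint32_t ex_adjust(uint32_t advance_ns, int64_t error_ns)
{
        advance_ns += error_ns / EX_ADJUST_STEP; /* late: grow, early: shrink */
        if (advance_ns > EX_NS_MAX)
                advance_ns = EX_NS_INIT;         /* reset runaway values */
        return advance_ns;
}

int main(void)
{
        uint32_t adv = 1000;
        int i;

        for (i = 0; i < 5; i++) {
                adv = ex_adjust(adv, 800);       /* timer firing 800 ns late */
                printf("pass %d: advance = %u ns\n", i, adv);
        }
        return 0;
}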
1840
1841 static void __kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1842 {
1843         struct kvm_lapic *apic = vcpu->arch.apic;
1844         u64 guest_tsc, tsc_deadline;
1845
1846         tsc_deadline = apic->lapic_timer.expired_tscdeadline;
1847         apic->lapic_timer.expired_tscdeadline = 0;
1848         guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1849         trace_kvm_wait_lapic_expire(vcpu->vcpu_id, guest_tsc - tsc_deadline);
1850
1851         if (lapic_timer_advance_dynamic) {
1852                 adjust_lapic_timer_advance(vcpu, guest_tsc - tsc_deadline);
1853                 /*
1854                  * If the timer fired early, reread the TSC to account for the
1855                  * overhead of the above adjustment to avoid waiting longer
1856                  * than is necessary.
1857                  */
1858                 if (guest_tsc < tsc_deadline)
1859                         guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1860         }
1861
1862         if (guest_tsc < tsc_deadline)
1863                 __wait_lapic_expire(vcpu, tsc_deadline - guest_tsc);
1864 }
1865
1866 void kvm_wait_lapic_expire(struct kvm_vcpu *vcpu)
1867 {
1868         if (lapic_in_kernel(vcpu) &&
1869             vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1870             vcpu->arch.apic->lapic_timer.timer_advance_ns &&
1871             lapic_timer_int_injected(vcpu))
1872                 __kvm_wait_lapic_expire(vcpu);
1873 }
1874 EXPORT_SYMBOL_GPL(kvm_wait_lapic_expire);
1875
1876 static void kvm_apic_inject_pending_timer_irqs(struct kvm_lapic *apic)
1877 {
1878         struct kvm_timer *ktimer = &apic->lapic_timer;
1879
1880         kvm_apic_local_deliver(apic, APIC_LVTT);
1881         if (apic_lvtt_tscdeadline(apic)) {
1882                 ktimer->tscdeadline = 0;
1883         } else if (apic_lvtt_oneshot(apic)) {
1884                 ktimer->tscdeadline = 0;
1885                 ktimer->target_expiration = 0;
1886         }
1887 }
1888
1889 static void apic_timer_expired(struct kvm_lapic *apic, bool from_timer_fn)
1890 {
1891         struct kvm_vcpu *vcpu = apic->vcpu;
1892         struct kvm_timer *ktimer = &apic->lapic_timer;
1893
1894         if (atomic_read(&apic->lapic_timer.pending))
1895                 return;
1896
1897         if (apic_lvtt_tscdeadline(apic) || ktimer->hv_timer_in_use)
1898                 ktimer->expired_tscdeadline = ktimer->tscdeadline;
1899
1900         if (!from_timer_fn && apic->apicv_active) {
1901                 WARN_ON(kvm_get_running_vcpu() != vcpu);
1902                 kvm_apic_inject_pending_timer_irqs(apic);
1903                 return;
1904         }
1905
1906         if (kvm_use_posted_timer_interrupt(apic->vcpu)) {
1907                 /*
1908                  * Ensure the guest's timer has truly expired before posting an
1909                  * interrupt.  Open code the relevant checks to avoid querying
1910                  * lapic_timer_int_injected(), which will be false since the
1911                  * interrupt isn't yet injected.  Waiting until after injecting
1912                  * is not an option since that won't help a posted interrupt.
1913                  */
1914                 if (vcpu->arch.apic->lapic_timer.expired_tscdeadline &&
1915                     vcpu->arch.apic->lapic_timer.timer_advance_ns)
1916                         __kvm_wait_lapic_expire(vcpu);
1917                 kvm_apic_inject_pending_timer_irqs(apic);
1918                 return;
1919         }
1920
1921         atomic_inc(&apic->lapic_timer.pending);
1922         kvm_make_request(KVM_REQ_UNBLOCK, vcpu);
1923         if (from_timer_fn)
1924                 kvm_vcpu_kick(vcpu);
1925 }
1926
1927 static void start_sw_tscdeadline(struct kvm_lapic *apic)
1928 {
1929         struct kvm_timer *ktimer = &apic->lapic_timer;
1930         u64 guest_tsc, tscdeadline = ktimer->tscdeadline;
1931         u64 ns = 0;
1932         ktime_t expire;
1933         struct kvm_vcpu *vcpu = apic->vcpu;
1934         unsigned long this_tsc_khz = vcpu->arch.virtual_tsc_khz;
1935         unsigned long flags;
1936         ktime_t now;
1937
1938         if (unlikely(!tscdeadline || !this_tsc_khz))
1939                 return;
1940
1941         local_irq_save(flags);
1942
1943         now = ktime_get();
1944         guest_tsc = kvm_read_l1_tsc(vcpu, rdtsc());
1945
1946         ns = (tscdeadline - guest_tsc) * 1000000ULL;
1947         do_div(ns, this_tsc_khz);
1948
1949         if (likely(tscdeadline > guest_tsc) &&
1950             likely(ns > apic->lapic_timer.timer_advance_ns)) {
1951                 expire = ktime_add_ns(now, ns);
1952                 expire = ktime_sub_ns(expire, ktimer->timer_advance_ns);
1953                 hrtimer_start(&ktimer->timer, expire, HRTIMER_MODE_ABS_HARD);
1954         } else
1955                 apic_timer_expired(apic, false);
1956
1957         local_irq_restore(flags);
1958 }
1959
1960 static inline u64 tmict_to_ns(struct kvm_lapic *apic, u32 tmict)
1961 {
1962         return (u64)tmict * APIC_BUS_CYCLE_NS * (u64)apic->divide_count;
1963 }
1964
1965 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
1966 {
1967         ktime_t now, remaining;
1968         u64 ns_remaining_old, ns_remaining_new;
1969
1970         apic->lapic_timer.period =
1971                         tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1972         limit_periodic_timer_frequency(apic);
1973
1974         now = ktime_get();
1975         remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
1976         if (ktime_to_ns(remaining) < 0)
1977                 remaining = 0;
1978
1979         ns_remaining_old = ktime_to_ns(remaining);
1980         ns_remaining_new = mul_u64_u32_div(ns_remaining_old,
1981                                            apic->divide_count, old_divisor);
1982
1983         apic->lapic_timer.tscdeadline +=
1984                 nsec_to_cycles(apic->vcpu, ns_remaining_new) -
1985                 nsec_to_cycles(apic->vcpu, ns_remaining_old);
1986         apic->lapic_timer.target_expiration = ktime_add_ns(now, ns_remaining_new);
1987 }
1988
1989 static bool set_target_expiration(struct kvm_lapic *apic, u32 count_reg)
1990 {
1991         ktime_t now;
1992         u64 tscl = rdtsc();
1993         s64 deadline;
1994
1995         now = ktime_get();
1996         apic->lapic_timer.period =
1997                         tmict_to_ns(apic, kvm_lapic_get_reg(apic, APIC_TMICT));
1998
1999         if (!apic->lapic_timer.period) {
2000                 apic->lapic_timer.tscdeadline = 0;
2001                 return false;
2002         }
2003
2004         limit_periodic_timer_frequency(apic);
2005         deadline = apic->lapic_timer.period;
2006
2007         if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
2008                 if (unlikely(count_reg != APIC_TMICT)) {
2009                         deadline = tmict_to_ns(apic,
2010                                      kvm_lapic_get_reg(apic, count_reg));
2011                         if (unlikely(deadline <= 0)) {
2012                                 if (apic_lvtt_period(apic))
2013                                         deadline = apic->lapic_timer.period;
2014                                 else
2015                                         deadline = 0;
2016                         }
2017                         else if (unlikely(deadline > apic->lapic_timer.period)) {
2018                                 pr_info_ratelimited(
2019                                     "vcpu %i: requested lapic timer restore with "
2020                                     "starting count register %#x=%u (%lld ns) > initial count (%lld ns). "
2021                                     "Using initial count to start timer.\n",
2022                                     apic->vcpu->vcpu_id,
2023                                     count_reg,
2024                                     kvm_lapic_get_reg(apic, count_reg),
2025                                     deadline, apic->lapic_timer.period);
2026                                 kvm_lapic_set_reg(apic, count_reg, 0);
2027                                 deadline = apic->lapic_timer.period;
2028                         }
2029                 }
2030         }
2031
2032         apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2033                 nsec_to_cycles(apic->vcpu, deadline);
2034         apic->lapic_timer.target_expiration = ktime_add_ns(now, deadline);
2035
2036         return true;
2037 }
2038
2039 static void advance_periodic_target_expiration(struct kvm_lapic *apic)
2040 {
2041         ktime_t now = ktime_get();
2042         u64 tscl = rdtsc();
2043         ktime_t delta;
2044
2045         /*
2046          * Synchronize both deadlines to the same time source or
2047          * differences in the periods (caused by differences in the
2048          * underlying clocks or numerical approximation errors) will
2049          * cause the two to drift apart over time as the errors
2050          * accumulate.
2051          */
2052         apic->lapic_timer.target_expiration =
2053                 ktime_add_ns(apic->lapic_timer.target_expiration,
2054                                 apic->lapic_timer.period);
2055         delta = ktime_sub(apic->lapic_timer.target_expiration, now);
2056         apic->lapic_timer.tscdeadline = kvm_read_l1_tsc(apic->vcpu, tscl) +
2057                 nsec_to_cycles(apic->vcpu, delta);
2058 }
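
/*
 * Editorial sketch (standalone, not part of the kernel build): why the
 * function above advances from the previous target_expiration rather
 * than from "now".  Re-anchoring each period at the (slightly late)
 * expiry time accumulates the handling latency; anchoring at the old
 * target keeps the long-run rate exact.  Numbers are made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint64_t period = 1000000;  /* 1 ms */
        const uint64_t late   = 500;      /* each expiry handled 500 ns late */
        uint64_t anchored = 0, rearmed = 0;
        int i;

        for (i = 0; i < 1000; i++) {
                anchored += period;               /* target += period */
                rearmed  += late + period;        /* now + period     */
        }
        printf("drift after 1000 periods: %llu ns\n",
               (unsigned long long)(rearmed - anchored)); /* 500000 */
        return 0;
}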
2059
2060 static void start_sw_period(struct kvm_lapic *apic)
2061 {
2062         if (!apic->lapic_timer.period)
2063                 return;
2064
2065         if (ktime_after(ktime_get(),
2066                         apic->lapic_timer.target_expiration)) {
2067                 apic_timer_expired(apic, false);
2068
2069                 if (apic_lvtt_oneshot(apic))
2070                         return;
2071
2072                 advance_periodic_target_expiration(apic);
2073         }
2074
2075         hrtimer_start(&apic->lapic_timer.timer,
2076                 apic->lapic_timer.target_expiration,
2077                 HRTIMER_MODE_ABS_HARD);
2078 }
2079
2080 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
2081 {
2082         if (!lapic_in_kernel(vcpu))
2083                 return false;
2084
2085         return vcpu->arch.apic->lapic_timer.hv_timer_in_use;
2086 }
2087
2088 static void cancel_hv_timer(struct kvm_lapic *apic)
2089 {
2090         WARN_ON(preemptible());
2091         WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2092         static_call(kvm_x86_cancel_hv_timer)(apic->vcpu);
2093         apic->lapic_timer.hv_timer_in_use = false;
2094 }
2095
2096 static bool start_hv_timer(struct kvm_lapic *apic)
2097 {
2098         struct kvm_timer *ktimer = &apic->lapic_timer;
2099         struct kvm_vcpu *vcpu = apic->vcpu;
2100         bool expired;
2101
2102         WARN_ON(preemptible());
2103         if (!kvm_can_use_hv_timer(vcpu))
2104                 return false;
2105
2106         if (!ktimer->tscdeadline)
2107                 return false;
2108
2109         if (static_call(kvm_x86_set_hv_timer)(vcpu, ktimer->tscdeadline, &expired))
2110                 return false;
2111
2112         ktimer->hv_timer_in_use = true;
2113         hrtimer_cancel(&ktimer->timer);
2114
2115         /*
2116          * To simplify handling the periodic timer, leave the hv timer running
2117          * even if the deadline timer has expired, i.e. rely on the resulting
2118          * VM-Exit to recompute the periodic timer's target expiration.
2119          */
2120         if (!apic_lvtt_period(apic)) {
2121                 /*
2122                  * Cancel the hv timer if the sw timer fired while the hv timer
2123                  * was being programmed, or if the hv timer itself expired.
2124                  */
2125                 if (atomic_read(&ktimer->pending)) {
2126                         cancel_hv_timer(apic);
2127                 } else if (expired) {
2128                         apic_timer_expired(apic, false);
2129                         cancel_hv_timer(apic);
2130                 }
2131         }
2132
2133         trace_kvm_hv_timer_state(vcpu->vcpu_id, ktimer->hv_timer_in_use);
2134
2135         return true;
2136 }
2137
2138 static void start_sw_timer(struct kvm_lapic *apic)
2139 {
2140         struct kvm_timer *ktimer = &apic->lapic_timer;
2141
2142         WARN_ON(preemptible());
2143         if (apic->lapic_timer.hv_timer_in_use)
2144                 cancel_hv_timer(apic);
2145         if (!apic_lvtt_period(apic) && atomic_read(&ktimer->pending))
2146                 return;
2147
2148         if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2149                 start_sw_period(apic);
2150         else if (apic_lvtt_tscdeadline(apic))
2151                 start_sw_tscdeadline(apic);
2152         trace_kvm_hv_timer_state(apic->vcpu->vcpu_id, false);
2153 }
2154
2155 static void restart_apic_timer(struct kvm_lapic *apic)
2156 {
2157         preempt_disable();
2158
2159         if (!apic_lvtt_period(apic) && atomic_read(&apic->lapic_timer.pending))
2160                 goto out;
2161
2162         if (!start_hv_timer(apic))
2163                 start_sw_timer(apic);
2164 out:
2165         preempt_enable();
2166 }
2167
2168 void kvm_lapic_expired_hv_timer(struct kvm_vcpu *vcpu)
2169 {
2170         struct kvm_lapic *apic = vcpu->arch.apic;
2171
2172         preempt_disable();
2173         /* If the preempt notifier has already run, it also called apic_timer_expired */
2174         if (!apic->lapic_timer.hv_timer_in_use)
2175                 goto out;
2176         WARN_ON(kvm_vcpu_is_blocking(vcpu));
2177         apic_timer_expired(apic, false);
2178         cancel_hv_timer(apic);
2179
2180         if (apic_lvtt_period(apic) && apic->lapic_timer.period) {
2181                 advance_periodic_target_expiration(apic);
2182                 restart_apic_timer(apic);
2183         }
2184 out:
2185         preempt_enable();
2186 }
2187 EXPORT_SYMBOL_GPL(kvm_lapic_expired_hv_timer);
2188
2189 void kvm_lapic_switch_to_hv_timer(struct kvm_vcpu *vcpu)
2190 {
2191         restart_apic_timer(vcpu->arch.apic);
2192 }
2193
2194 void kvm_lapic_switch_to_sw_timer(struct kvm_vcpu *vcpu)
2195 {
2196         struct kvm_lapic *apic = vcpu->arch.apic;
2197
2198         preempt_disable();
2199         /* Possibly the TSC deadline timer is not enabled yet */
2200         if (apic->lapic_timer.hv_timer_in_use)
2201                 start_sw_timer(apic);
2202         preempt_enable();
2203 }
2204
2205 void kvm_lapic_restart_hv_timer(struct kvm_vcpu *vcpu)
2206 {
2207         struct kvm_lapic *apic = vcpu->arch.apic;
2208
2209         WARN_ON(!apic->lapic_timer.hv_timer_in_use);
2210         restart_apic_timer(apic);
2211 }
2212
2213 static void __start_apic_timer(struct kvm_lapic *apic, u32 count_reg)
2214 {
2215         atomic_set(&apic->lapic_timer.pending, 0);
2216
2217         if ((apic_lvtt_period(apic) || apic_lvtt_oneshot(apic))
2218             && !set_target_expiration(apic, count_reg))
2219                 return;
2220
2221         restart_apic_timer(apic);
2222 }
2223
2224 static void start_apic_timer(struct kvm_lapic *apic)
2225 {
2226         __start_apic_timer(apic, APIC_TMICT);
2227 }
2228
2229 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
2230 {
2231         bool lvt0_in_nmi_mode = apic_lvt_nmi_mode(lvt0_val);
2232
2233         if (apic->lvt0_in_nmi_mode != lvt0_in_nmi_mode) {
2234                 apic->lvt0_in_nmi_mode = lvt0_in_nmi_mode;
2235                 if (lvt0_in_nmi_mode) {
2236                         atomic_inc(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2237                 } else
2238                         atomic_dec(&apic->vcpu->kvm->arch.vapics_in_nmi_mode);
2239         }
2240 }
2241
2242 static int get_lvt_index(u32 reg)
2243 {
2244         if (reg == APIC_LVTCMCI)
2245                 return LVT_CMCI;
2246         if (reg < APIC_LVTT || reg > APIC_LVTERR)
2247                 return -1;
2248         return array_index_nospec(
2249                         (reg - APIC_LVTT) >> 4, KVM_APIC_MAX_NR_LVT_ENTRIES);
2250 }
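
/*
 * Editorial sketch (standalone, not part of the kernel build): the
 * offset-to-index mapping in get_lvt_index() above.  The LVT registers
 * sit at consecutive 16-byte strides starting at APIC_LVTT (0x320 in
 * apicdef.h), so subtracting the base and shifting by 4 gives a dense
 * index; 0x350 (APIC_LVT0) lands on the fourth entry.
 */
#include <stdio.h>

int main(void)
{
        printf("%d\n", (0x350 - 0x320) >> 4);   /* 3 */
        return 0;
}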
2251
2252 static int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
2253 {
2254         int ret = 0;
2255
2256         trace_kvm_apic_write(reg, val);
2257
2258         switch (reg) {
2259         case APIC_ID:           /* Local APIC ID */
2260                 if (!apic_x2apic_mode(apic)) {
2261                         kvm_apic_set_xapic_id(apic, val >> 24);
2262                 } else {
2263                         ret = 1;
2264                 }
2265                 break;
2266
2267         case APIC_TASKPRI:
2268                 report_tpr_access(apic, true);
2269                 apic_set_tpr(apic, val & 0xff);
2270                 break;
2271
2272         case APIC_EOI:
2273                 apic_set_eoi(apic);
2274                 break;
2275
2276         case APIC_LDR:
2277                 if (!apic_x2apic_mode(apic))
2278                         kvm_apic_set_ldr(apic, val & APIC_LDR_MASK);
2279                 else
2280                         ret = 1;
2281                 break;
2282
2283         case APIC_DFR:
2284                 if (!apic_x2apic_mode(apic))
2285                         kvm_apic_set_dfr(apic, val | 0x0FFFFFFF);
2286                 else
2287                         ret = 1;
2288                 break;
2289
2290         case APIC_SPIV: {
2291                 u32 mask = 0x3ff;
2292                 if (kvm_lapic_get_reg(apic, APIC_LVR) & APIC_LVR_DIRECTED_EOI)
2293                         mask |= APIC_SPIV_DIRECTED_EOI;
2294                 apic_set_spiv(apic, val & mask);
2295                 if (!(val & APIC_SPIV_APIC_ENABLED)) {
2296                         int i;
2297
2298                         for (i = 0; i < apic->nr_lvt_entries; i++) {
2299                                 kvm_lapic_set_reg(apic, APIC_LVTx(i),
2300                                         kvm_lapic_get_reg(apic, APIC_LVTx(i)) | APIC_LVT_MASKED);
2301                         }
2302                         apic_update_lvtt(apic);
2303                         atomic_set(&apic->lapic_timer.pending, 0);
2304
2305                 }
2306                 break;
2307         }
2308         case APIC_ICR:
2309                 WARN_ON_ONCE(apic_x2apic_mode(apic));
2310
2311                 /* No delay here, so we always clear the pending bit */
2312                 val &= ~APIC_ICR_BUSY;
2313                 kvm_apic_send_ipi(apic, val, kvm_lapic_get_reg(apic, APIC_ICR2));
2314                 kvm_lapic_set_reg(apic, APIC_ICR, val);
2315                 break;
2316         case APIC_ICR2:
2317                 if (apic_x2apic_mode(apic))
2318                         ret = 1;
2319                 else
2320                         kvm_lapic_set_reg(apic, APIC_ICR2, val & 0xff000000);
2321                 break;
2322
2323         case APIC_LVT0:
2324                 apic_manage_nmi_watchdog(apic, val);
2325                 fallthrough;
2326         case APIC_LVTTHMR:
2327         case APIC_LVTPC:
2328         case APIC_LVT1:
2329         case APIC_LVTERR:
2330         case APIC_LVTCMCI: {
2331                 u32 index = get_lvt_index(reg);
2332                 if (!kvm_lapic_lvt_supported(apic, index)) {
2333                         ret = 1;
2334                         break;
2335                 }
2336                 if (!kvm_apic_sw_enabled(apic))
2337                         val |= APIC_LVT_MASKED;
2338                 val &= apic_lvt_mask[index];
2339                 kvm_lapic_set_reg(apic, reg, val);
2340                 break;
2341         }
2342
2343         case APIC_LVTT:
2344                 if (!kvm_apic_sw_enabled(apic))
2345                         val |= APIC_LVT_MASKED;
2346                 val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
2347                 kvm_lapic_set_reg(apic, APIC_LVTT, val);
2348                 apic_update_lvtt(apic);
2349                 break;
2350
2351         case APIC_TMICT:
2352                 if (apic_lvtt_tscdeadline(apic))
2353                         break;
2354
2355                 cancel_apic_timer(apic);
2356                 kvm_lapic_set_reg(apic, APIC_TMICT, val);
2357                 start_apic_timer(apic);
2358                 break;
2359
2360         case APIC_TDCR: {
2361                 uint32_t old_divisor = apic->divide_count;
2362
2363                 kvm_lapic_set_reg(apic, APIC_TDCR, val & 0xb);
2364                 update_divide_count(apic);
2365                 if (apic->divide_count != old_divisor &&
2366                                 apic->lapic_timer.period) {
2367                         hrtimer_cancel(&apic->lapic_timer.timer);
2368                         update_target_expiration(apic, old_divisor);
2369                         restart_apic_timer(apic);
2370                 }
2371                 break;
2372         }
2373         case APIC_ESR:
2374                 if (apic_x2apic_mode(apic) && val != 0)
2375                         ret = 1;
2376                 break;
2377
2378         case APIC_SELF_IPI:
2379                 /*
2380                  * Self-IPI exists only when x2APIC is enabled.  Bits 7:0 hold
2381                  * the vector, everything else is reserved.
2382                  */
2383                 if (!apic_x2apic_mode(apic) || (val & ~APIC_VECTOR_MASK))
2384                         ret = 1;
2385                 else
2386                         kvm_apic_send_ipi(apic, APIC_DEST_SELF | val, 0);
2387                 break;
2388         default:
2389                 ret = 1;
2390                 break;
2391         }
2392
2393         /*
2394          * Recalculate APIC maps if necessary, e.g. if the software enable bit
2395          * was toggled, the APIC ID changed, etc...   The maps are marked dirty
2396          * on relevant changes, i.e. this is a nop for most writes.
2397          */
2398         kvm_recalculate_apic_map(apic->vcpu->kvm);
2399
2400         return ret;
2401 }
2402
2403 static int apic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
2404                             gpa_t address, int len, const void *data)
2405 {
2406         struct kvm_lapic *apic = to_lapic(this);
2407         unsigned int offset = address - apic->base_address;
2408         u32 val;
2409
2410         if (!apic_mmio_in_range(apic, address))
2411                 return -EOPNOTSUPP;
2412
2413         if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
2414                 if (!kvm_check_has_quirk(vcpu->kvm,
2415                                          KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
2416                         return -EOPNOTSUPP;
2417
2418                 return 0;
2419         }
2420
2421         /*
2422          * APIC registers must be aligned on a 128-bit boundary, and
2423          * 32/64/128-bit registers must be accessed via 32-bit accesses.
2424          * See Intel SDM Section 8.4.1.
2425          */
2426         if (len != 4 || (offset & 0xf))
2427                 return 0;
2428
2429         val = *(u32*)data;
2430
2431         kvm_lapic_reg_write(apic, offset & 0xff0, val);
2432
2433         return 0;
2434 }
2435
2436 void kvm_lapic_set_eoi(struct kvm_vcpu *vcpu)
2437 {
2438         kvm_lapic_reg_write(vcpu->arch.apic, APIC_EOI, 0);
2439 }
2440 EXPORT_SYMBOL_GPL(kvm_lapic_set_eoi);
2441
2442 /* emulate APIC access in a trap manner */
2443 void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset)
2444 {
2445         struct kvm_lapic *apic = vcpu->arch.apic;
2446
2447         /*
2448          * ICR is a single 64-bit register when x2APIC is enabled; all other
2449          * registers hold 32-bit values.  For legacy xAPIC, ICR writes need to
2450          * go down the common path to get the upper half from ICR2.
2451          *
2452          * Note, using the write helpers may incur an unnecessary write to the
2453          * virtual APIC state, but KVM needs to conditionally modify the value
2454          * in certain cases, e.g. to clear the ICR busy bit.  The cost of extra
2455          * conditional branches is likely a wash relative to the cost of the
2456          * maybe-unnecessary write, and both are in the noise anyway.
2457          */
2458         if (apic_x2apic_mode(apic) && offset == APIC_ICR)
2459                 kvm_x2apic_icr_write(apic, kvm_lapic_get_reg64(apic, APIC_ICR));
2460         else
2461                 kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));
2462 }
2463 EXPORT_SYMBOL_GPL(kvm_apic_write_nodecode);
2464
2465 void kvm_free_lapic(struct kvm_vcpu *vcpu)
2466 {
2467         struct kvm_lapic *apic = vcpu->arch.apic;
2468
2469         if (!vcpu->arch.apic)
2470                 return;
2471
2472         hrtimer_cancel(&apic->lapic_timer.timer);
2473
2474         if (!(vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE))
2475                 static_branch_slow_dec_deferred(&apic_hw_disabled);
2476
2477         if (!apic->sw_enabled)
2478                 static_branch_slow_dec_deferred(&apic_sw_disabled);
2479
2480         if (apic->regs)
2481                 free_page((unsigned long)apic->regs);
2482
2483         kfree(apic);
2484 }
2485
2486 /*
2487  *----------------------------------------------------------------------
2488  * LAPIC interface
2489  *----------------------------------------------------------------------
2490  */
2491 u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
2492 {
2493         struct kvm_lapic *apic = vcpu->arch.apic;
2494
2495         if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2496                 return 0;
2497
2498         return apic->lapic_timer.tscdeadline;
2499 }
2500
2501 void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
2502 {
2503         struct kvm_lapic *apic = vcpu->arch.apic;
2504
2505         if (!kvm_apic_present(vcpu) || !apic_lvtt_tscdeadline(apic))
2506                 return;
2507
2508         hrtimer_cancel(&apic->lapic_timer.timer);
2509         apic->lapic_timer.tscdeadline = data;
2510         start_apic_timer(apic);
2511 }
2512
2513 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
2514 {
2515         apic_set_tpr(vcpu->arch.apic, (cr8 & 0x0f) << 4);
2516 }
2517
2518 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
2519 {
2520         u64 tpr;
2521
2522         tpr = (u64) kvm_lapic_get_reg(vcpu->arch.apic, APIC_TASKPRI);
2523
2524         return (tpr & 0xf0) >> 4;
2525 }
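
/*
 * Editorial sketch (standalone, not part of the kernel build): the
 * TPR <-> CR8 mapping used by the two helpers above.  CR8 carries only
 * the priority class, i.e. the upper nibble of the 8-bit TPR, so the
 * round trip discards TPR[3:0].
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t cr8_to_tpr(uint8_t cr8) { return (cr8 & 0x0f) << 4; }
static uint8_t tpr_to_cr8(uint8_t tpr) { return (tpr & 0xf0) >> 4; }

int main(void)
{
        printf("cr8=0x%x -> tpr=0x%x -> cr8=0x%x\n",
               0x5, cr8_to_tpr(0x5), tpr_to_cr8(cr8_to_tpr(0x5)));
        return 0;
}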
2526
2527 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
2528 {
2529         u64 old_value = vcpu->arch.apic_base;
2530         struct kvm_lapic *apic = vcpu->arch.apic;
2531
2532         vcpu->arch.apic_base = value;
2533
2534         if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
2535                 kvm_update_cpuid_runtime(vcpu);
2536
2537         if (!apic)
2538                 return;
2539
2540         /* update jump label if enable bit changes */
2541         if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE) {
2542                 if (value & MSR_IA32_APICBASE_ENABLE) {
2543                         kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2544                         static_branch_slow_dec_deferred(&apic_hw_disabled);
2545                         /* Check if there are APF page ready requests pending */
2546                         kvm_make_request(KVM_REQ_APF_READY, vcpu);
2547                 } else {
2548                         static_branch_inc(&apic_hw_disabled.key);
2549                         atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
2550                 }
2551         }
2552
2553         if ((old_value ^ value) & X2APIC_ENABLE) {
2554                 if (value & X2APIC_ENABLE)
2555                         kvm_apic_set_x2apic_id(apic, vcpu->vcpu_id);
2556                 else if (value & MSR_IA32_APICBASE_ENABLE)
2557                         kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2558         }
2559
2560         if ((old_value ^ value) & (MSR_IA32_APICBASE_ENABLE | X2APIC_ENABLE)) {
2561                 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
2562                 static_call_cond(kvm_x86_set_virtual_apic_mode)(vcpu);
2563         }
2564
2565         apic->base_address = apic->vcpu->arch.apic_base &
2566                              MSR_IA32_APICBASE_BASE;
2567
2568         if ((value & MSR_IA32_APICBASE_ENABLE) &&
2569              apic->base_address != APIC_DEFAULT_PHYS_BASE) {
2570                 kvm_set_apicv_inhibit(apic->vcpu->kvm,
2571                                       APICV_INHIBIT_REASON_APIC_BASE_MODIFIED);
2572         }
2573 }
2574
2575 void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
2576 {
2577         struct kvm_lapic *apic = vcpu->arch.apic;
2578
2579         if (apic->apicv_active) {
2580                 /* irr_pending is always true when apicv is activated. */
2581                 apic->irr_pending = true;
2582                 apic->isr_count = 1;
2583         } else {
2584                 /*
2585                  * Don't clear irr_pending, searching the IRR can race with
2586                  * updates from the CPU as APICv is still active from hardware's
2587                  * perspective.  The flag will be cleared as appropriate when
2588                  * KVM injects the interrupt.
2589                  */
2590                 apic->isr_count = count_vectors(apic->regs + APIC_ISR);
2591         }
2592         apic->highest_isr_cache = -1;
2593 }
2594
2595 int kvm_alloc_apic_access_page(struct kvm *kvm)
2596 {
2597         struct page *page;
2598         void __user *hva;
2599         int ret = 0;
2600
2601         mutex_lock(&kvm->slots_lock);
2602         if (kvm->arch.apic_access_memslot_enabled ||
2603             kvm->arch.apic_access_memslot_inhibited)
2604                 goto out;
2605
2606         hva = __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
2607                                       APIC_DEFAULT_PHYS_BASE, PAGE_SIZE);
2608         if (IS_ERR(hva)) {
2609                 ret = PTR_ERR(hva);
2610                 goto out;
2611         }
2612
2613         page = gfn_to_page(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
2614         if (is_error_page(page)) {
2615                 ret = -EFAULT;
2616                 goto out;
2617         }
2618
2619         /*
2620          * Do not pin the page in memory, so that memory hot-unplug
2621          * is able to migrate it.
2622          */
2623         put_page(page);
2624         kvm->arch.apic_access_memslot_enabled = true;
2625 out:
2626         mutex_unlock(&kvm->slots_lock);
2627         return ret;
2628 }
2629 EXPORT_SYMBOL_GPL(kvm_alloc_apic_access_page);
2630
2631 void kvm_inhibit_apic_access_page(struct kvm_vcpu *vcpu)
2632 {
2633         struct kvm *kvm = vcpu->kvm;
2634
2635         if (!kvm->arch.apic_access_memslot_enabled)
2636                 return;
2637
2638         kvm_vcpu_srcu_read_unlock(vcpu);
2639
2640         mutex_lock(&kvm->slots_lock);
2641
2642         if (kvm->arch.apic_access_memslot_enabled) {
2643                 __x86_set_memory_region(kvm, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, 0, 0);
2644                 /*
2645                  * Clear "enabled" after the memslot is deleted so that a
2646                  * different vCPU doesn't get a false negative when checking
2647                  * the flag out of slots_lock.  No additional memory barrier is
2648          * needed as modifying memslots requires waiting for other vCPUs to
2649                  * drop SRCU (see above), and false positives are ok as the
2650                  * flag is rechecked after acquiring slots_lock.
2651                  */
2652                 kvm->arch.apic_access_memslot_enabled = false;
2653
2654                 /*
2655                  * Mark the memslot as inhibited to prevent reallocating the
2656                  * memslot during vCPU creation, e.g. if a vCPU is hotplugged.
2657                  */
2658                 kvm->arch.apic_access_memslot_inhibited = true;
2659         }
2660
2661         mutex_unlock(&kvm->slots_lock);
2662
2663         kvm_vcpu_srcu_read_lock(vcpu);
2664 }
2665
2666 void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
2667 {
2668         struct kvm_lapic *apic = vcpu->arch.apic;
2669         u64 msr_val;
2670         int i;
2671
2672         static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);
2673
2674         if (!init_event) {
2675                 msr_val = APIC_DEFAULT_PHYS_BASE | MSR_IA32_APICBASE_ENABLE;
2676                 if (kvm_vcpu_is_reset_bsp(vcpu))
2677                         msr_val |= MSR_IA32_APICBASE_BSP;
2678                 kvm_lapic_set_base(vcpu, msr_val);
2679         }
2680
2681         if (!apic)
2682                 return;
2683
2684         /* Stop the timer in case it's a reset to an active apic */
2685         hrtimer_cancel(&apic->lapic_timer.timer);
2686
2687         /* The xAPIC ID is set at RESET even if the APIC was already enabled. */
2688         if (!init_event)
2689                 kvm_apic_set_xapic_id(apic, vcpu->vcpu_id);
2690         kvm_apic_set_version(apic->vcpu);
2691
2692         for (i = 0; i < apic->nr_lvt_entries; i++)
2693                 kvm_lapic_set_reg(apic, APIC_LVTx(i), APIC_LVT_MASKED);
2694         apic_update_lvtt(apic);
2695         if (kvm_vcpu_is_reset_bsp(vcpu) &&
2696             kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
2697                 kvm_lapic_set_reg(apic, APIC_LVT0,
2698                              SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
2699         apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
2700
2701         kvm_apic_set_dfr(apic, 0xffffffffU);
2702         apic_set_spiv(apic, 0xff);
2703         kvm_lapic_set_reg(apic, APIC_TASKPRI, 0);
2704         if (!apic_x2apic_mode(apic))
2705                 kvm_apic_set_ldr(apic, 0);
2706         kvm_lapic_set_reg(apic, APIC_ESR, 0);
2707         if (!apic_x2apic_mode(apic)) {
2708                 kvm_lapic_set_reg(apic, APIC_ICR, 0);
2709                 kvm_lapic_set_reg(apic, APIC_ICR2, 0);
2710         } else {
2711                 kvm_lapic_set_reg64(apic, APIC_ICR, 0);
2712         }
2713         kvm_lapic_set_reg(apic, APIC_TDCR, 0);
2714         kvm_lapic_set_reg(apic, APIC_TMICT, 0);
2715         for (i = 0; i < 8; i++) {
2716                 kvm_lapic_set_reg(apic, APIC_IRR + 0x10 * i, 0);
2717                 kvm_lapic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
2718                 kvm_lapic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
2719         }
2720         kvm_apic_update_apicv(vcpu);
2721         update_divide_count(apic);
2722         atomic_set(&apic->lapic_timer.pending, 0);
2723
2724         vcpu->arch.pv_eoi.msr_val = 0;
2725         apic_update_ppr(apic);
2726         if (apic->apicv_active) {
2727                 static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
2728                 static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, -1);
2729                 static_call_cond(kvm_x86_hwapic_isr_update)(-1);
2730         }
2731
2732         vcpu->arch.apic_arb_prio = 0;
2733         vcpu->arch.apic_attention = 0;
2734
2735         kvm_recalculate_apic_map(vcpu->kvm);
2736 }
2737
2738 /*
2739  *----------------------------------------------------------------------
2740  * timer interface
2741  *----------------------------------------------------------------------
2742  */
2743
2744 static bool lapic_is_periodic(struct kvm_lapic *apic)
2745 {
2746         return apic_lvtt_period(apic);
2747 }
2748
2749 int apic_has_pending_timer(struct kvm_vcpu *vcpu)
2750 {
2751         struct kvm_lapic *apic = vcpu->arch.apic;
2752
2753         if (apic_enabled(apic) && apic_lvt_enabled(apic, APIC_LVTT))
2754                 return atomic_read(&apic->lapic_timer.pending);
2755
2756         return 0;
2757 }
2758
2759 int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type)
2760 {
2761         u32 reg = kvm_lapic_get_reg(apic, lvt_type);
2762         int vector, mode, trig_mode;
2763         int r;
2764
2765         if (kvm_apic_hw_enabled(apic) && !(reg & APIC_LVT_MASKED)) {
2766                 vector = reg & APIC_VECTOR_MASK;
2767                 mode = reg & APIC_MODE_MASK;
2768                 trig_mode = reg & APIC_LVT_LEVEL_TRIGGER;
2769
2770                 r = __apic_accept_irq(apic, mode, vector, 1, trig_mode, NULL);
2771                 if (r && lvt_type == APIC_LVTPC)
2772                         kvm_lapic_set_reg(apic, APIC_LVTPC, reg | APIC_LVT_MASKED);
2773                 return r;
2774         }
2775         return 0;
2776 }
2777
2778 void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu)
2779 {
2780         struct kvm_lapic *apic = vcpu->arch.apic;
2781
2782         if (apic)
2783                 kvm_apic_local_deliver(apic, APIC_LVT0);
2784 }
2785
2786 static const struct kvm_io_device_ops apic_mmio_ops = {
2787         .read     = apic_mmio_read,
2788         .write    = apic_mmio_write,
2789 };
2790
2791 static enum hrtimer_restart apic_timer_fn(struct hrtimer *data)
2792 {
2793         struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
2794         struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
2795
2796         apic_timer_expired(apic, true);
2797
2798         if (lapic_is_periodic(apic)) {
2799                 advance_periodic_target_expiration(apic);
2800                 hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
2801                 return HRTIMER_RESTART;
2802         } else
2803                 return HRTIMER_NORESTART;
2804 }
2805
2806 int kvm_create_lapic(struct kvm_vcpu *vcpu, int timer_advance_ns)
2807 {
2808         struct kvm_lapic *apic;
2809
2810         ASSERT(vcpu != NULL);
2811
2812         apic = kzalloc(sizeof(*apic), GFP_KERNEL_ACCOUNT);
2813         if (!apic)
2814                 goto nomem;
2815
2816         vcpu->arch.apic = apic;
2817
2818         apic->regs = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
2819         if (!apic->regs) {
2820                 printk(KERN_ERR "Failed to allocate APIC register page for vcpu %x\n",
2821                        vcpu->vcpu_id);
2822                 goto nomem_free_apic;
2823         }
2824         apic->vcpu = vcpu;
2825
2826         apic->nr_lvt_entries = kvm_apic_calc_nr_lvt_entries(vcpu);
2827
2828         hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
2829                      HRTIMER_MODE_ABS_HARD);
2830         apic->lapic_timer.timer.function = apic_timer_fn;
2831         if (timer_advance_ns == -1) {
2832                 apic->lapic_timer.timer_advance_ns = LAPIC_TIMER_ADVANCE_NS_INIT;
2833                 lapic_timer_advance_dynamic = true;
2834         } else {
2835                 apic->lapic_timer.timer_advance_ns = timer_advance_ns;
2836                 lapic_timer_advance_dynamic = false;
2837         }
2838
2839         /*
2840          * Stuff the APIC ENABLE bit in lieu of temporarily incrementing
2841          * apic_hw_disabled; the full RESET value is set by kvm_lapic_reset().
2842          */
2843         vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE;
2844         static_branch_inc(&apic_sw_disabled.key); /* sw disabled at reset */
2845         kvm_iodevice_init(&apic->dev, &apic_mmio_ops);
2846
2847         return 0;
2848 nomem_free_apic:
2849         kfree(apic);
2850         vcpu->arch.apic = NULL;
2851 nomem:
2852         return -ENOMEM;
2853 }
2854
2855 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
2856 {
2857         struct kvm_lapic *apic = vcpu->arch.apic;
2858         u32 ppr;
2859
2860         if (!kvm_apic_present(vcpu))
2861                 return -1;
2862
2863         __apic_update_ppr(apic, &ppr);
2864         return apic_has_interrupt_for_ppr(apic, ppr);
2865 }
2866 EXPORT_SYMBOL_GPL(kvm_apic_has_interrupt);
2867
2868 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
2869 {
2870         u32 lvt0 = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LVT0);
2871
2872         if (!kvm_apic_hw_enabled(vcpu->arch.apic))
2873                 return 1;
2874         if ((lvt0 & APIC_LVT_MASKED) == 0 &&
2875             GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
2876                 return 1;
2877         return 0;
2878 }
2879
2880 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
2881 {
2882         struct kvm_lapic *apic = vcpu->arch.apic;
2883
2884         if (atomic_read(&apic->lapic_timer.pending) > 0) {
2885                 kvm_apic_inject_pending_timer_irqs(apic);
2886                 atomic_set(&apic->lapic_timer.pending, 0);
2887         }
2888 }
2889
2890 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
2891 {
2892         int vector = kvm_apic_has_interrupt(vcpu);
2893         struct kvm_lapic *apic = vcpu->arch.apic;
2894         u32 ppr;
2895
2896         if (vector == -1)
2897                 return -1;
2898
2899         /*
2900          * We get here even with APIC virtualization enabled, if doing
2901          * nested virtualization and L1 runs with the "acknowledge interrupt
2902          * on exit" mode.  In that case we cannot inject the interrupt via
2903          * RVI, because doing so would deliver it through the IDT.
2904          */
2905
2906         apic_clear_irr(vector, apic);
2907         if (kvm_hv_synic_auto_eoi_set(vcpu, vector)) {
2908                 /*
2909                  * For auto-EOI interrupts, there might be another pending
2910                  * interrupt above PPR, so check whether to raise another
2911                  * KVM_REQ_EVENT.
2912                  */
2913                 apic_update_ppr(apic);
2914         } else {
2915                 /*
2916                  * For normal interrupts, PPR has been raised and there cannot
2917                  * be a higher-priority pending interrupt---except if there was
2918                  * a concurrent interrupt injection, but that would have
2919                  * triggered KVM_REQ_EVENT already.
2920                  */
2921                 apic_set_isr(vector, apic);
2922                 __apic_update_ppr(apic, &ppr);
2923         }
2924
2925         return vector;
2926 }
2927
2928 static int kvm_apic_state_fixup(struct kvm_vcpu *vcpu,
2929                 struct kvm_lapic_state *s, bool set)
2930 {
2931         if (apic_x2apic_mode(vcpu->arch.apic)) {
2932                 u32 *id = (u32 *)(s->regs + APIC_ID);
2933                 u32 *ldr = (u32 *)(s->regs + APIC_LDR);
2934                 u64 icr;
2935
2936                 if (vcpu->kvm->arch.x2apic_format) {
2937                         if (*id != vcpu->vcpu_id)
2938                                 return -EINVAL;
2939                 } else {
2940                         if (set)
2941                                 *id >>= 24;
2942                         else
2943                                 *id <<= 24;
2944                 }
2945
2946                 /*
2947                  * In x2APIC mode, the LDR is fixed and derived from the APIC ID,
2948                  * and the ICR is internally a single 64-bit register, but needs to be
2949                  * split to ICR+ICR2 in userspace for backwards compatibility.
2950                  */
2951                 if (set) {
2952                         *ldr = kvm_apic_calc_x2apic_ldr(*id);
2953
2954                         icr = __kvm_lapic_get_reg(s->regs, APIC_ICR) |
2955                               (u64)__kvm_lapic_get_reg(s->regs, APIC_ICR2) << 32;
2956                         __kvm_lapic_set_reg64(s->regs, APIC_ICR, icr);
2957                 } else {
2958                         icr = __kvm_lapic_get_reg64(s->regs, APIC_ICR);
2959                         __kvm_lapic_set_reg(s->regs, APIC_ICR2, icr >> 32);
2960                 }
2961         }
2962
2963         return 0;
2964 }
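
/*
 * Editorial sketch (standalone, not part of the kernel build): the ICR
 * split/join done by kvm_apic_state_fixup() above.  x2APIC treats ICR
 * as one 64-bit register, while the saved register layout keeps the
 * legacy ICR/ICR2 pair, so the halves are merged on restore (set) and
 * split again on save.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t icr = 0x000000f0, icr2 = 0x00000001;   /* legacy pair   */
        uint64_t icr64 = (uint64_t)icr2 << 32 | icr;    /* restore path  */

        printf("icr64=0x%llx low=0x%x high=0x%x\n",     /* save path     */
               (unsigned long long)icr64,
               (uint32_t)icr64, (uint32_t)(icr64 >> 32));
        return 0;
}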

int kvm_apic_get_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	memcpy(s->regs, vcpu->arch.apic->regs, sizeof(*s));

	/*
	 * Get calculated timer current count for remaining timer period (if
	 * any) and store it in the returned register set.
	 */
	__kvm_lapic_set_reg(s->regs, APIC_TMCCT,
			    __apic_read(vcpu->arch.apic, APIC_TMCCT));

	return kvm_apic_state_fixup(vcpu, s, false);
}

int kvm_apic_set_state(struct kvm_vcpu *vcpu, struct kvm_lapic_state *s)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	int r;

	static_call_cond(kvm_x86_apicv_pre_state_restore)(vcpu);

	kvm_lapic_set_base(vcpu, vcpu->arch.apic_base);
	/* set SPIV separately to get count of SW disabled APICs right */
	apic_set_spiv(apic, *((u32 *)(s->regs + APIC_SPIV)));

	r = kvm_apic_state_fixup(vcpu, s, true);
	if (r) {
		kvm_recalculate_apic_map(vcpu->kvm);
		return r;
	}
	memcpy(vcpu->arch.apic->regs, s->regs, sizeof(*s));

	atomic_set_release(&apic->vcpu->kvm->arch.apic_map_dirty, DIRTY);
	kvm_recalculate_apic_map(vcpu->kvm);
	kvm_apic_set_version(vcpu);

	apic_update_ppr(apic);
	cancel_apic_timer(apic);
	apic->lapic_timer.expired_tscdeadline = 0;
	apic_update_lvtt(apic);
	apic_manage_nmi_watchdog(apic, kvm_lapic_get_reg(apic, APIC_LVT0));
	update_divide_count(apic);
	__start_apic_timer(apic, APIC_TMCCT);
	kvm_lapic_set_reg(apic, APIC_TMCCT, 0);
	kvm_apic_update_apicv(vcpu);
	if (apic->apicv_active) {
		static_call_cond(kvm_x86_apicv_post_state_restore)(vcpu);
		static_call_cond(kvm_x86_hwapic_irr_update)(vcpu, apic_find_highest_irr(apic));
		static_call_cond(kvm_x86_hwapic_isr_update)(apic_find_highest_isr(apic));
	}
	kvm_make_request(KVM_REQ_EVENT, vcpu);
	if (ioapic_in_kernel(vcpu->kvm))
		kvm_rtc_eoi_tracking_restore_one(vcpu);

	vcpu->arch.apic_arb_prio = 0;

	return 0;
}
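
/*
 * Userspace reaches the two functions above through the KVM_GET_LAPIC and
 * KVM_SET_LAPIC vcpu ioctls.  A minimal round-trip sketch, assuming
 * vcpu_fd is an open vCPU file descriptor (error handling elided):
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_lapic_roundtrip(int vcpu_fd)
{
	struct kvm_lapic_state state;

	if (ioctl(vcpu_fd, KVM_GET_LAPIC, &state) < 0)
		return -1;
	/* ... inspect or migrate state.regs here ... */
	return ioctl(vcpu_fd, KVM_SET_LAPIC, &state);
}
#endif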

void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
{
	struct hrtimer *timer;

	if (!lapic_in_kernel(vcpu) ||
		kvm_can_post_timer_interrupt(vcpu))
		return;

	timer = &vcpu->arch.apic->lapic_timer.timer;
	if (hrtimer_cancel(timer))
		hrtimer_start_expires(timer, HRTIMER_MODE_ABS_HARD);
}

/*
 * apic_sync_pv_eoi_from_guest - called on vmexit or cancel interrupt
 *
 * Detect whether guest triggered PV EOI since the
 * last entry. If yes, set EOI on guest's behalf.
 * Clear PV EOI in guest memory in any case.
 */
static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	int vector;
	/*
	 * PV EOI state is derived from KVM_APIC_PV_EOI_PENDING in host
	 * and KVM_PV_EOI_ENABLED in guest memory as follows:
	 *
	 * KVM_APIC_PV_EOI_PENDING is unset:
	 *	-> host disabled PV EOI.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is set:
	 *	-> host enabled PV EOI, guest did not execute EOI yet.
	 * KVM_APIC_PV_EOI_PENDING is set, KVM_PV_EOI_ENABLED is unset:
	 *	-> host enabled PV EOI, guest executed EOI.
	 */
	BUG_ON(!pv_eoi_enabled(vcpu));

	if (pv_eoi_test_and_clr_pending(vcpu))
		return;
	vector = apic_set_eoi(apic);
	trace_kvm_pv_eoi(apic, vector);
}
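
/*
 * The guest half of the protocol above replaces the EOI register write
 * with a flag clear in the word registered via MSR_KVM_PV_EOI_EN.  A
 * simplified sketch of a guest EOI hook, modeled on (but not identical
 * to) the PV EOI code in arch/x86/kernel/kvm.c:
 */
#if 0
static void example_guest_pv_eoi(unsigned long *pv_eoi_word)
{
	/* If KVM set the pending hint, clearing it is the entire EOI. */
	if (__test_and_clear_bit(KVM_PV_EOI_BIT, pv_eoi_word))
		return;
	/* Otherwise fall back to a real APIC EOI write. */
	native_apic_mem_write(APIC_EOI, APIC_EOI_ACK);
}
#endif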

void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
	u32 data;

	if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
		apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	if (kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				  sizeof(u32)))
		return;

	apic_set_tpr(vcpu->arch.apic, data & 0xff);
}

/*
 * apic_sync_pv_eoi_to_guest - called before vmentry
 *
 * Detect whether it's safe to enable PV EOI and
 * if yes do so.
 */
static void apic_sync_pv_eoi_to_guest(struct kvm_vcpu *vcpu,
					struct kvm_lapic *apic)
{
	if (!pv_eoi_enabled(vcpu) ||
	    /* IRR set or many bits in ISR: could be nested. */
	    apic->irr_pending ||
	    /* Cache not set: could be safe but we don't bother. */
	    apic->highest_isr_cache == -1 ||
	    /* Need EOI to update ioapic. */
	    kvm_ioapic_handles_vector(apic, apic->highest_isr_cache)) {
		/*
		 * PV EOI was disabled by apic_sync_pv_eoi_from_guest
		 * so we need not do anything here.
		 */
		return;
	}

	pv_eoi_set_pending(apic->vcpu);
}

void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
{
	u32 data, tpr;
	int max_irr, max_isr;
	struct kvm_lapic *apic = vcpu->arch.apic;

	apic_sync_pv_eoi_to_guest(vcpu, apic);

	if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
		return;

	tpr = kvm_lapic_get_reg(apic, APIC_TASKPRI) & 0xff;
	max_irr = apic_find_highest_irr(apic);
	if (max_irr < 0)
		max_irr = 0;
	max_isr = apic_find_highest_isr(apic);
	if (max_isr < 0)
		max_isr = 0;
	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);

	kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
				sizeof(u32));
}
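
/*
 * Example of the encoding above: TPR = 0x30, highest in-service vector
 * 0x51 and highest pending vector 0xec pack to
 * 0x30 | (0x50 << 8) | (0xec << 24) = 0xec005030.
 */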

int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
	if (vapic_addr) {
		if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
					&vcpu->arch.apic->vapic_cache,
					vapic_addr, sizeof(u32)))
			return -EINVAL;
		__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	} else {
		__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
	}

	vcpu->arch.apic->vapic_addr = vapic_addr;
	return 0;
}
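
/*
 * This setter is reached via the KVM_SET_VAPIC_ADDR vcpu ioctl.  A minimal
 * userspace sketch, where gpa is assumed to be a guest physical address
 * backing the 4-byte vAPIC word (or 0 to disable):
 */
#if 0
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int example_set_vapic_addr(int vcpu_fd, __u64 gpa)
{
	struct kvm_vapic_addr va = { .vapic_addr = gpa };

	return ioctl(vcpu_fd, KVM_SET_VAPIC_ADDR, &va);
}
#endif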

int kvm_x2apic_icr_write(struct kvm_lapic *apic, u64 data)
{
	data &= ~APIC_ICR_BUSY;

	kvm_apic_send_ipi(apic, (u32)data, (u32)(data >> 32));
	kvm_lapic_set_reg64(apic, APIC_ICR, data);
	trace_kvm_apic_write(APIC_ICR, data);
	return 0;
}
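
/*
 * In x2APIC mode the ICR is the single 64-bit MSR 0x830: bits 31:0 carry
 * the command (vector, delivery/trigger mode) and bits 63:32 the
 * destination APIC ID, matching the (u32)data / (u32)(data >> 32) split
 * above.  A sketch of composing a fixed-vector IPI (helper name
 * illustrative; the field macros are from apicdef.h):
 */
#if 0
static u64 example_x2apic_fixed_ipi(u32 dest_apic_id, u8 vector)
{
	return ((u64)dest_apic_id << 32) | APIC_DM_FIXED |
	       APIC_DEST_PHYSICAL | vector;
}
#endif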

static int kvm_lapic_msr_read(struct kvm_lapic *apic, u32 reg, u64 *data)
{
	u32 low;

	if (reg == APIC_ICR) {
		*data = kvm_lapic_get_reg64(apic, APIC_ICR);
		return 0;
	}

	if (kvm_lapic_reg_read(apic, reg, 4, &low))
		return 1;

	*data = low;

	return 0;
}

static int kvm_lapic_msr_write(struct kvm_lapic *apic, u32 reg, u64 data)
{
	/*
	 * ICR is a 64-bit register in x2APIC mode (and Hyper-V PV vAPIC) and
	 * can be written as such; all other registers remain accessible only
	 * through 32-bit reads/writes.
	 */
	if (reg == APIC_ICR)
		return kvm_x2apic_icr_write(apic, data);

	/* Bits 63:32 are reserved in all other registers. */
	if (data >> 32)
		return 1;

	return kvm_lapic_reg_write(apic, reg, (u32)data);
}

int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	return kvm_lapic_msr_write(apic, reg, data);
}

int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u32 reg = (msr - APIC_BASE_MSR) << 4;

	if (!lapic_in_kernel(vcpu) || !apic_x2apic_mode(apic))
		return 1;

	return kvm_lapic_msr_read(apic, reg, data);
}
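
/*
 * The (msr - APIC_BASE_MSR) << 4 mapping above mirrors the architectural
 * x2APIC layout: MSR 0x800 + n addresses the xAPIC register at MMIO
 * offset n * 16.  For example, MSR 0x808 maps to offset 0x80
 * (APIC_TASKPRI) and MSR 0x830 to offset 0x300 (APIC_ICR).
 */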

int kvm_hv_vapic_msr_write(struct kvm_vcpu *vcpu, u32 reg, u64 data)
{
	if (!lapic_in_kernel(vcpu))
		return 1;

	return kvm_lapic_msr_write(vcpu->arch.apic, reg, data);
}

int kvm_hv_vapic_msr_read(struct kvm_vcpu *vcpu, u32 reg, u64 *data)
{
	if (!lapic_in_kernel(vcpu))
		return 1;

	return kvm_lapic_msr_read(vcpu->arch.apic, reg, data);
}

int kvm_lapic_set_pv_eoi(struct kvm_vcpu *vcpu, u64 data, unsigned long len)
{
	u64 addr = data & ~KVM_MSR_ENABLED;
	struct gfn_to_hva_cache *ghc = &vcpu->arch.pv_eoi.data;
	unsigned long new_len;
	int ret;

	if (!IS_ALIGNED(addr, 4))
		return 1;

	if (data & KVM_MSR_ENABLED) {
		if (addr == ghc->gpa && len <= ghc->len)
			new_len = ghc->len;
		else
			new_len = len;

		ret = kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, addr, new_len);
		if (ret)
			return ret;
	}

	vcpu->arch.pv_eoi.msr_val = data;

	return 0;
}
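
/*
 * The data value parsed above comes from a guest write to
 * MSR_KVM_PV_EOI_EN: the physical address of the flag word, 4-byte
 * aligned, with KVM_MSR_ENABLED (bit 0) set.  A simplified guest-side
 * enable sketch, modeled on the setup in arch/x86/kernel/kvm.c:
 */
#if 0
static DEFINE_PER_CPU(unsigned long, example_apic_eoi);

static void example_enable_pv_eoi(void)
{
	unsigned long pa = __pa(this_cpu_ptr(&example_apic_eoi));

	wrmsrl(MSR_KVM_PV_EOI_EN, pa | KVM_MSR_ENABLED);
}
#endif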

int kvm_apic_accept_events(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	u8 sipi_vector;
	int r;

	if (!kvm_apic_has_pending_init_or_sipi(vcpu))
		return 0;

	if (is_guest_mode(vcpu)) {
		r = kvm_check_nested_events(vcpu);
		if (r < 0)
			return r == -EBUSY ? 0 : r;
		/*
		 * Continue processing INIT/SIPI even if a nested VM-Exit
		 * occurred, e.g. pending SIPIs should be dropped if INIT+SIPI
		 * are blocked as a result of transitioning to VMX root mode.
		 */
	}

	/*
	 * INITs are blocked while CPU is in specific states (SMM, VMX root
	 * mode, SVM with GIF=0), while SIPIs are dropped if the CPU isn't in
	 * wait-for-SIPI (WFS).
	 */
	if (!kvm_apic_init_sipi_allowed(vcpu)) {
		WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED);
		clear_bit(KVM_APIC_SIPI, &apic->pending_events);
		return 0;
	}

	if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) {
		kvm_vcpu_reset(vcpu, true);
		if (kvm_vcpu_is_bsp(apic->vcpu))
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		else
			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	}
	if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events)) {
		if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
			/* evaluate pending_events before reading the vector */
			smp_rmb();
			sipi_vector = apic->sipi_vector;
			static_call(kvm_x86_vcpu_deliver_sipi_vector)(vcpu, sipi_vector);
			vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
		}
	}
	return 0;
}
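
/*
 * The INIT/SIPI events consumed above originate from the standard x86 AP
 * bring-up sequence: the BSP sends INIT, then STARTUP IPIs whose vector
 * encodes the 4 KiB-aligned real-mode entry point.  A sketch of the
 * sender side in terms of this file's IPI helper (wrapper name
 * illustrative; dest uses the ICR2/x2APIC destination format):
 */
#if 0
static void example_send_init_sipi(struct kvm_lapic *src, u32 dest,
				   u32 entry_gpa)
{
	kvm_apic_send_ipi(src, APIC_DM_INIT | APIC_DEST_PHYSICAL, dest);
	kvm_apic_send_ipi(src, APIC_DM_STARTUP | APIC_DEST_PHYSICAL |
				(entry_gpa >> 12), dest);
}
#endif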

/*
 * Flush the deferred static keys on module exit; the WARNs catch any
 * leaked APIC hardware/software disable counts.
 */
void kvm_lapic_exit(void)
{
	static_key_deferred_flush(&apic_hw_disabled);
	WARN_ON(static_branch_unlikely(&apic_hw_disabled.key));
	static_key_deferred_flush(&apic_sw_disabled);
	WARN_ON(static_branch_unlikely(&apic_sw_disabled.key));
}