/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/nmi.h>
#include <asm/io.h>
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>

#define VP_INVAL	U32_MAX

struct ms_hyperv_info {
	u32 features;
	u32 misc_features;
	u32 hints;
	u32 nested_features;
	u32 max_vp_index;
	u32 max_lp_index;
};

extern struct ms_hyperv_info ms_hyperv;

/*
 * Generate the guest ID.
 */
static inline __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
				      __u64 d_info2)
{
	__u64 guest_id;

	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
	guest_id |= (d_info1 << 48);
	guest_id |= (kernel_version << 16);
	guest_id |= d_info2;

	return guest_id;
}
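
/*
 * Note: the guest ID built above is written to the
 * HV_X64_MSR_GUEST_OS_ID MSR by hyperv_init(); the TLFS requires the
 * guest OS ID to be set before the hypercall page can be enabled.
 */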

/* Free the message slot and signal end-of-message if required */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may already have cleared the header
	 * and the host may already have delivered some other message there.
	 * In case we blindly write msg->header.message_type we're going
	 * to lose it. We can still lose a message of the same type but
	 * we count on the fact that there can only be one
	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
	 * on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * Make sure the write to MessageType (ie set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	mb();

	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * bounced effort.
		 */
		wrmsrl(HV_X64_MSR_EOM, 0);
	}
}
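
/*
 * Accessor macros for the SynIC setup/teardown and stimer MSRs, so the
 * architecture-independent VMBus code in drivers/hv can program the
 * synthetic interrupt controller without open-coding x86 MSR access.
 */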
#define hv_init_timer(timer, tick) wrmsrl(timer, tick)
#define hv_init_timer_config(config, val) wrmsrl(config, val)

#define hv_get_simp(val) rdmsrl(HV_X64_MSR_SIMP, val)
#define hv_set_simp(val) wrmsrl(HV_X64_MSR_SIMP, val)

#define hv_get_siefp(val) rdmsrl(HV_X64_MSR_SIEFP, val)
#define hv_set_siefp(val) wrmsrl(HV_X64_MSR_SIEFP, val)

#define hv_get_synic_state(val) rdmsrl(HV_X64_MSR_SCONTROL, val)
#define hv_set_synic_state(val) wrmsrl(HV_X64_MSR_SCONTROL, val)

#define hv_get_vp_index(index) rdmsrl(HV_X64_MSR_VP_INDEX, index)

#define hv_get_synint_state(int_num, val) rdmsrl(int_num, val)
#define hv_set_synint_state(int_num, val) wrmsrl(int_num, val)

void hyperv_callback_vector(void);
void hyperv_reenlightenment_vector(void);
#ifdef CONFIG_TRACING
#define trace_hyperv_callback_vector hyperv_callback_vector
#endif
void hyperv_vector_handler(struct pt_regs *regs);
void hv_setup_vmbus_irq(void (*handler)(void));
void hv_remove_vmbus_irq(void);

void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);

/*
 * Routines for stimer0 Direct Mode handling.
 * On x86/x64, there are no percpu actions to take.
 */
void hv_stimer0_vector_handler(struct pt_regs *regs);
void hv_stimer0_callback_vector(void);
int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
void hv_remove_stimer0_irq(int irq);

static inline void hv_enable_stimer0_percpu_irq(int irq) {}
static inline void hv_disable_stimer0_percpu_irq(int irq) {}

#if IS_ENABLED(CONFIG_HYPERV)
extern struct clocksource *hyperv_cs;
extern void *hv_hypercall_pg;
extern void __percpu **hyperv_pcpu_input_arg;
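
/*
 * Issue a hypercall through the hypercall page. Per the TLFS calling
 * convention, on x86_64 the control word is passed in RCX, the input
 * and output GPAs in RDX and R8, and the status is returned in RAX;
 * the 32-bit path below passes the same values in register pairs.
 */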
static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
	u64 input_address = input ? virt_to_phys(input) : 0;
	u64 output_address = output ? virt_to_phys(output) : 0;
	u64 hv_status;

#ifdef CONFIG_X86_64
	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__("mov %4, %%r8\n"
			     CALL_NOSPEC
			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
			       "+c" (control), "+d" (input_address)
			     : "r" (output_address),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory", "r8", "r9", "r10", "r11");
#else
	u32 input_address_hi = upper_32_bits(input_address);
	u32 input_address_lo = lower_32_bits(input_address);
	u32 output_address_hi = upper_32_bits(output_address);
	u32 output_address_lo = lower_32_bits(output_address);

	if (!hv_hypercall_pg)
		return U64_MAX;

	__asm__ __volatile__(CALL_NOSPEC
			     : "=A" (hv_status),
			       "+c" (input_address_lo), ASM_CALL_CONSTRAINT
			     : "A" (control),
			       "b" (input_address_hi),
			       "D" (output_address_hi), "S" (output_address_lo),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "memory");
#endif /* !x86_64 */
	return hv_status;
}
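
/*
 * "Fast" hypercalls set HV_HYPERCALL_FAST_BIT in the control word and
 * pass their input directly in registers, so no input page (and no
 * virt_to_phys() translation) is needed.
 */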

/* Fast hypercall with 8 bytes of input and no output */
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
	__asm__ __volatile__(CALL_NOSPEC
			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
			       "+c" (control), "+d" (input1)
			     : THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "r8", "r9", "r10", "r11");
#else
	u32 input1_hi = upper_32_bits(input1);
	u32 input1_lo = lower_32_bits(input1);

	__asm__ __volatile__ (CALL_NOSPEC
			      : "=A" (hv_status),
				"+c" (input1_lo), ASM_CALL_CONSTRAINT
			      : "A" (control),
				"b" (input1_hi),
				THUNK_TARGET(hv_hypercall_pg)
			      : "cc", "edi", "esi");
#endif
	return hv_status;
}

/* Fast hypercall with 16 bytes of input */
static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{
	u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
	__asm__ __volatile__("mov %4, %%r8\n"
			     CALL_NOSPEC
			     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
			       "+c" (control), "+d" (input1)
			     : "r" (input2),
			       THUNK_TARGET(hv_hypercall_pg)
			     : "cc", "r8", "r9", "r10", "r11");
#else
	u32 input1_hi = upper_32_bits(input1);
	u32 input1_lo = lower_32_bits(input1);
	u32 input2_hi = upper_32_bits(input2);
	u32 input2_lo = lower_32_bits(input2);

	__asm__ __volatile__ (CALL_NOSPEC
			      : "=A" (hv_status),
				"+c" (input1_lo), ASM_CALL_CONSTRAINT
			      : "A" (control), "b" (input1_hi),
				"D" (input2_hi), "S" (input2_lo),
				THUNK_TARGET(hv_hypercall_pg)
			      : "cc");
#endif
	return hv_status;
}

/*
 * Rep hypercalls. Callers of this function are supposed to ensure that
 * rep_count and varhead_size comply with Hyper-V hypercall definition.
 */
static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
				      void *input, void *output)
{
	u64 control = code;
	u64 status;
	u16 rep_comp;

	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;

	do {
		status = hv_do_hypercall(control, input, output);
		if ((status & HV_HYPERCALL_RESULT_MASK) != HV_STATUS_SUCCESS)
			return status;

		/* Bits 32-43 of status have 'Reps completed' data. */
		rep_comp = (status & HV_HYPERCALL_REP_COMP_MASK) >>
			HV_HYPERCALL_REP_COMP_OFFSET;

		control &= ~HV_HYPERCALL_REP_START_MASK;
		control |= (u64)rep_comp << HV_HYPERCALL_REP_START_OFFSET;

		touch_nmi_watchdog();
	} while (rep_comp < rep_count);

	return status;
}

/*
 * The hypervisor's notion of virtual processor ID is different from
 * Linux's notion of CPU ID. This information can only be retrieved
 * in the context of the calling CPU. Set up a map for easy access
 * to this information.
 */
extern u32 *hv_vp_index;
extern u32 hv_max_vp_index;
extern struct hv_vp_assist_page **hv_vp_assist_page;

static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	if (!hv_vp_assist_page)
		return NULL;

	return hv_vp_assist_page[cpu];
}

/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * in making hypercalls and such that talk about specific
 * processors.
 *
 * Return: Virtual processor number in Hyper-V terms
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
	return hv_vp_index[cpu_number];
}
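
/*
 * Convert a Linux cpumask into the sparse VP set format used by
 * hypercalls such as HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX: VPs are
 * grouped into 64-bit banks and valid_bank_mask records which banks
 * are present in bank_contents[]. Returns the number of banks
 * populated, 0 if hv_max_vp_index cannot be represented in the set,
 * or -1 if some CPU's VP number is not (yet) known.
 */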
static inline int cpumask_to_vpset(struct hv_vpset *vpset,
				   const struct cpumask *cpus)
{
	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;

	/* valid_bank_mask can represent up to 64 banks */
	if (hv_max_vp_index / 64 >= 64)
		return 0;

	/*
	 * Clear all banks up to the maximum possible bank as hv_tlb_flush_ex
	 * structs are not cleared between calls, we risk flushing unneeded
	 * vCPUs otherwise.
	 */
	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
		vpset->bank_contents[vcpu_bank] = 0;

	/*
	 * Some banks may end up being empty but this is acceptable.
	 */
	for_each_cpu(cpu, cpus) {
		vcpu = hv_cpu_number_to_vp_number(cpu);
		if (vcpu == VP_INVAL)
			return -1;
		vcpu_bank = vcpu / 64;
		vcpu_offset = vcpu % 64;
		__set_bit(vcpu_offset, (unsigned long *)
			  &vpset->bank_contents[vcpu_bank]);
		if (vcpu_bank >= nr_bank)
			nr_bank = vcpu_bank + 1;
	}
	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
	return nr_bank;
}

void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void hyperv_report_panic(struct pt_regs *regs, long err);
bool hv_is_hyperv_initialized(void);
void hyperv_cleanup(void);

void hyperv_reenlightenment_intr(struct pt_regs *regs);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);

#ifdef CONFIG_X86_64
void hv_apic_init(void);
#else
static inline void hv_apic_init(void) {}
#endif

#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline void hyperv_cleanup(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {}
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
	return NULL;
}
#endif /* CONFIG_HYPERV */

#ifdef CONFIG_HYPERV_TSCPAGE
struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
				       u64 *cur_tsc)
{
	u64 scale, offset;
	u32 sequence;

	/*
	 * The protocol for reading Hyper-V TSC page is specified in Hypervisor
	 * Top-Level Functional Specification ver. 3.0 and above. To get the
	 * reference time we must do the following:
	 * - READ ReferenceTscSequence
	 *   A special '0' value indicates the time source is unreliable and we
	 *   need to use something else. The currently published specification
	 *   versions (up to 4.0b) contain a mistake and wrongly claim '-1'
	 *   instead of '0' as the special value, see commit c35b82ef0294.
	 * - ReferenceTime =
	 *       ((RDTSC() * ReferenceTscScale) >> 64) + ReferenceTscOffset
	 * - READ ReferenceTscSequence again. In case its value has changed
	 *   since our first reading we need to discard ReferenceTime and repeat
	 *   the whole sequence as the hypervisor was updating the page in
	 *   between.
	 */
	do {
		sequence = READ_ONCE(tsc_pg->tsc_sequence);
		if (!sequence)
			return U64_MAX;
		/*
		 * Make sure we read sequence before we read other values from
		 * TSC page.
		 */
		smp_rmb();

		scale = READ_ONCE(tsc_pg->tsc_scale);
		offset = READ_ONCE(tsc_pg->tsc_offset);
		*cur_tsc = rdtsc_ordered();

		/*
		 * Make sure we read sequence after we read all other values
		 * from TSC page.
		 */
		smp_rmb();

	} while (READ_ONCE(tsc_pg->tsc_sequence) != sequence);

	return mul_u64_u64_shr(*cur_tsc, scale, 64) + offset;
}
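
/*
 * Convenience wrapper for callers that only want the reference time and
 * have no use for the raw TSC value read along the way.
 */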
static inline u64 hv_read_tsc_page(const struct ms_hyperv_tsc_page *tsc_pg)
{
	u64 cur_tsc;

	return hv_read_tsc_page_tsc(tsc_pg, &cur_tsc);
}

#else
static inline struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
{
	return NULL;
}

static inline u64 hv_read_tsc_page_tsc(const struct ms_hyperv_tsc_page *tsc_pg,
				       u64 *cur_tsc)
{
	BUG();
	return U64_MAX;
}
#endif

#endif