/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSHYPER_H
#define _ASM_X86_MSHYPER_H

#include <linux/types.h>
#include <linux/nmi.h>
#include <linux/msi.h>
#include <asm/io.h>
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>

union hv_ghcb;

DECLARE_STATIC_KEY_FALSE(isolation_type_snp);

typedef int (*hyperv_fill_flush_list_func)(
                struct hv_guest_mapping_flush_list *flush,
                void *data);

#define hv_get_raw_timer() rdtsc_ordered()

void hyperv_vector_handler(struct pt_regs *regs);

#if IS_ENABLED(CONFIG_HYPERV)
extern int hyperv_init_cpuhp;

extern void *hv_hypercall_pg;

extern u64 hv_current_partition_id;

extern union hv_ghcb * __percpu *hv_ghcb_pg;

int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
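
/*
 * The three hv_call_*() declarations above are root-partition-only helpers
 * (implemented outside this header) that wrap the memory-deposit,
 * add-logical-processor and create-VP hypercalls; they are only exercised
 * when Linux runs as the Hyper-V root partition.
 */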

static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
{
        u64 input_address = input ? virt_to_phys(input) : 0;
        u64 output_address = output ? virt_to_phys(output) : 0;
        u64 hv_status;

#ifdef CONFIG_X86_64
        if (!hv_hypercall_pg)
                return U64_MAX;

        __asm__ __volatile__("mov %4, %%r8\n"
                             CALL_NOSPEC
                             : "=a" (hv_status), ASM_CALL_CONSTRAINT,
                               "+c" (control), "+d" (input_address)
                             :  "r" (output_address),
                                THUNK_TARGET(hv_hypercall_pg)
                             : "cc", "memory", "r8", "r9", "r10", "r11");
#else
        u32 input_address_hi = upper_32_bits(input_address);
        u32 input_address_lo = lower_32_bits(input_address);
        u32 output_address_hi = upper_32_bits(output_address);
        u32 output_address_lo = lower_32_bits(output_address);

        if (!hv_hypercall_pg)
                return U64_MAX;

        __asm__ __volatile__(CALL_NOSPEC
                             : "=A" (hv_status),
                               "+c" (input_address_lo), ASM_CALL_CONSTRAINT
                             : "A" (control),
                               "b" (input_address_hi),
                               "D"(output_address_hi), "S"(output_address_lo),
                               THUNK_TARGET(hv_hypercall_pg)
                             : "cc", "memory");
#endif /* !x86_64 */
        return hv_status;
}
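
/*
 * Calling convention of hv_do_hypercall() above, as encoded in the asm
 * constraints: on x86_64 the control word goes in RCX, the guest physical
 * address of the input page in RDX and that of the output page in R8, with
 * the status returned in RAX. On 32-bit, control is passed in EDX:EAX, the
 * input GPA in EBX:ECX, the output GPA in EDI:ESI, and the status comes
 * back in EDX:EAX.
 */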

/* Fast hypercall with 8 bytes of input and no output */
static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
{
        u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
        {
                __asm__ __volatile__(CALL_NOSPEC
                                     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
                                       "+c" (control), "+d" (input1)
                                     : THUNK_TARGET(hv_hypercall_pg)
                                     : "cc", "r8", "r9", "r10", "r11");
        }
#else
        {
                u32 input1_hi = upper_32_bits(input1);
                u32 input1_lo = lower_32_bits(input1);

                __asm__ __volatile__ (CALL_NOSPEC
                                      : "=A"(hv_status),
                                        "+c"(input1_lo),
                                        ASM_CALL_CONSTRAINT
                                      : "A" (control),
                                        "b" (input1_hi),
                                        THUNK_TARGET(hv_hypercall_pg)
                                      : "cc", "edi", "esi");
        }
#endif
        return hv_status;
}
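
/*
 * Illustrative sketch only (not part of this header): a caller passes an
 * HVCALL_* code from hyperv-tlfs.h plus one 64-bit operand and checks the
 * result status, e.g. with hv_result_success() from asm-generic/mshyperv.h;
 * "arg" below is a placeholder operand:
 *
 *        u64 status = hv_do_fast_hypercall8(HVCALL_NOTIFY_LONG_SPIN_WAIT, arg);
 *        if (!hv_result_success(status))
 *                pr_warn("hypercall failed: 0x%llx\n", status);
 */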

/* Fast hypercall with 16 bytes of input */
static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
{
        u64 hv_status, control = (u64)code | HV_HYPERCALL_FAST_BIT;

#ifdef CONFIG_X86_64
        {
                __asm__ __volatile__("mov %4, %%r8\n"
                                     CALL_NOSPEC
                                     : "=a" (hv_status), ASM_CALL_CONSTRAINT,
                                       "+c" (control), "+d" (input1)
                                     : "r" (input2),
                                       THUNK_TARGET(hv_hypercall_pg)
                                     : "cc", "r8", "r9", "r10", "r11");
        }
#else
        {
                u32 input1_hi = upper_32_bits(input1);
                u32 input1_lo = lower_32_bits(input1);
                u32 input2_hi = upper_32_bits(input2);
                u32 input2_lo = lower_32_bits(input2);

                __asm__ __volatile__ (CALL_NOSPEC
                                      : "=A"(hv_status),
                                        "+c"(input1_lo), ASM_CALL_CONSTRAINT
                                      : "A" (control), "b" (input1_hi),
                                        "D"(input2_hi), "S"(input2_lo),
                                        THUNK_TARGET(hv_hypercall_pg)
                                      : "cc");
        }
#endif
        return hv_status;
}
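
/*
 * As the constraints above show, the "fast" variants pass their payload
 * entirely in registers (HV_HYPERCALL_FAST_BIT set in the control word):
 * input1 travels in RDX and input2 in R8 on x86_64, or in EBX:ECX and
 * EDI:ESI respectively on 32-bit, so no hypercall input page is needed.
 */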

extern struct hv_vp_assist_page **hv_vp_assist_page;

static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
        if (!hv_vp_assist_page)
                return NULL;

        return hv_vp_assist_page[cpu];
}

void __init hyperv_init(void);
void hyperv_setup_mmu_ops(void);
void set_hv_tscchange_cb(void (*cb)(void));
void clear_hv_tscchange_cb(void);
void hyperv_stop_tsc_emulation(void);
int hyperv_flush_guest_mapping(u64 as);
int hyperv_flush_guest_mapping_range(u64 as,
                hyperv_fill_flush_list_func fill_func, void *data);
int hyperv_fill_flush_guest_mapping_list(
                struct hv_guest_mapping_flush_list *flush,
                u64 start_gfn, u64 end_gfn);

#ifdef CONFIG_X86_64
void hv_apic_init(void);
void __init hv_init_spinlocks(void);
bool hv_vcpu_is_preempted(int vcpu);
#else
static inline void hv_apic_init(void) {}
#endif

struct irq_domain *hv_create_pci_msi_domain(void);

int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
                struct hv_interrupt_entry *entry);
int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
int hv_set_mem_host_visibility(unsigned long addr, int numpages, bool visible);

#ifdef CONFIG_AMD_MEM_ENCRYPT
void hv_ghcb_msr_write(u64 msr, u64 value);
void hv_ghcb_msr_read(u64 msr, u64 *value);
bool hv_ghcb_negotiate_protocol(void);
void hv_ghcb_terminate(unsigned int set, unsigned int reason);
#else
static inline void hv_ghcb_msr_write(u64 msr, u64 value) {}
static inline void hv_ghcb_msr_read(u64 msr, u64 *value) {}
static inline bool hv_ghcb_negotiate_protocol(void) { return false; }
static inline void hv_ghcb_terminate(unsigned int set, unsigned int reason) {}
#endif

extern bool hv_isolation_type_snp(void);

static inline bool hv_is_synic_reg(unsigned int reg)
{
        if ((reg >= HV_REGISTER_SCONTROL) &&
            (reg <= HV_REGISTER_SINT15))
                return true;
        return false;
}

static inline u64 hv_get_register(unsigned int reg)
{
        u64 value;

        if (hv_is_synic_reg(reg) && hv_isolation_type_snp())
                hv_ghcb_msr_read(reg, &value);
        else
                rdmsrl(reg, value);
        return value;
}

static inline void hv_set_register(unsigned int reg, u64 value)
{
        if (hv_is_synic_reg(reg) && hv_isolation_type_snp()) {
                hv_ghcb_msr_write(reg, value);

                /* Write proxy bit via wrmsrl instruction */
                if (reg >= HV_REGISTER_SINT0 &&
                    reg <= HV_REGISTER_SINT15)
                        wrmsrl(reg, value | 1 << 20);
        } else {
                wrmsrl(reg, value);
        }
}
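
/*
 * Illustrative sketch only (not part of this header): callers use these
 * wrappers instead of raw rdmsrl()/wrmsrl() so that SNP isolation guests
 * transparently take the GHCB path for SynIC registers. Register names come
 * from hyperv-tlfs.h; "guest_id" is a placeholder value:
 *
 *        hv_set_register(HV_REGISTER_GUEST_OS_ID, guest_id);
 *        u64 now = hv_get_register(HV_REGISTER_TIME_REF_COUNT);
 */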

#else /* CONFIG_HYPERV */
static inline void hyperv_init(void) {}
static inline void hyperv_setup_mmu_ops(void) {}
static inline void set_hv_tscchange_cb(void (*cb)(void)) {}
static inline void clear_hv_tscchange_cb(void) {}
static inline void hyperv_stop_tsc_emulation(void) {}
static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
{
        return NULL;
}
static inline int hyperv_flush_guest_mapping(u64 as) { return -1; }
static inline int hyperv_flush_guest_mapping_range(u64 as,
                hyperv_fill_flush_list_func fill_func, void *data)
{
        return -1;
}
static inline void hv_set_register(unsigned int reg, u64 value) { }
static inline u64 hv_get_register(unsigned int reg) { return 0; }
static inline int hv_set_mem_host_visibility(unsigned long addr, int numpages,
                                             bool visible)
{
        return -1;
}
#endif /* CONFIG_HYPERV */

#include <asm-generic/mshyperv.h>

#endif /* _ASM_X86_MSHYPER_H */