/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/kprobes.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/edd.h>
#include <linux/frame.h>

#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
#endif

#include <xen/xen.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
#include <xen/interface/nmi.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/hvm.h>
#include <xen/hvc-console.h>
#include <xen/acpi.h>

#include <asm/paravirt.h>
#include <asm/apic.h>
#include <asm/page.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/msr-index.h>
#include <asm/traps.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
#include <asm/mach_traps.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/cpu.h>

#ifdef CONFIG_ACPI
#include <linux/acpi.h>
#include <asm/acpi.h>
#include <acpi/pdc_intel.h>
#include <acpi/processor.h>
#include <xen/interface/platform.h>
#endif

#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "multicalls.h"
#include "pmu.h"

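/*
 * hypercall_page itself is defined in assembly (xen-head.S); only the
 * export lives here.
 */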
EXPORT_SYMBOL_GPL(hypercall_page);

/*
 * Pointer to the xen_vcpu_info structure or
 * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info
 * and xen_vcpu_setup for details. By default it points to
 * shared_info->vcpu_info, but if the hypervisor supports
 * VCPUOP_register_vcpu_info then it can point to xen_vcpu_info. The
 * pointer is used in __xen_evtchn_do_upcall to acknowledge pending events.
 * It is also, more subtly, used by the patched versions of the irq
 * enable/disable ops, e.g. xen_irq_enable_direct and xen_iret in PV mode.
 *
 * The desire to be able to do those mask/unmask operations as a single
 * instruction by using the per-cpu offset held in %gs is the real reason
 * vcpu info is in a per-cpu pointer and the original reason for this
 * hypercall.
 */
DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);

/*
 * Per CPU pages used if hypervisor supports VCPUOP_register_vcpu_info
 * hypercall. This can be used both in PV and PVHVM mode. The structure
 * overrides the default per_cpu(xen_vcpu, cpu) value.
 */
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
EXPORT_SYMBOL(machine_to_phys_mapping);
unsigned long  machine_to_phys_nr;
EXPORT_SYMBOL(machine_to_phys_nr);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

struct shared_info xen_dummy_shared_info;

void *xen_initial_gdt;

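/*
 * One page of brk space, reserved up front: AFAICT this is what
 * xen_hvm_init_shared_info() uses to map the shared info page before
 * the normal allocators are available.
 */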
RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

/*
 * Point at some empty memory to start with. We map the real shared_info
 * page as soon as fixmap is up and running.
 */
struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info;

/*
 * Flag to determine whether vcpu info placement is available on all
 * VCPUs.  We assume it is to start with, and then set it to zero on
 * the first failure.  This is because it can succeed on some VCPUs
 * and not others, since it can involve hypervisor memory allocation,
 * or because the guest failed to guarantee all the appropriate
 * constraints on all VCPUs (ie buffer can't cross a page boundary).
 *
 * Note that any particular CPU may be using a placed vcpu structure,
 * but we can only optimise if they all are.
 *
 * 0: not available, 1: available
 */
static int have_vcpu_info_placement = 1;

struct tls_descs {
        struct desc_struct desc[3];
};

/*
 * Updating the 3 TLS descriptors in the GDT on every task switch is
 * surprisingly expensive so we avoid updating them if they haven't
 * changed.  Since Xen writes different descriptors than the one
 * passed in the update_descriptor hypercall we keep shadow copies to
 * compare against.
 */
static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc);

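/*
 * Without VCPUOP_register_vcpu_info we can only reach the vcpu_info
 * slots embedded in the shared info page, so cap the number of CPUs
 * we try to bring up at MAX_VIRT_CPUS.
 */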
static void clamp_max_cpus(void)
{
#ifdef CONFIG_SMP
        if (setup_max_cpus > MAX_VIRT_CPUS)
                setup_max_cpus = MAX_VIRT_CPUS;
#endif
}

static void xen_vcpu_setup(int cpu)
{
        struct vcpu_register_vcpu_info info;
        int err;
        struct vcpu_info *vcpup;

        BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

        /*
         * This path is called twice on PVHVM - first during bootup via
         * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
         * hotplugged: cpu_up -> xen_hvm_cpu_notify.
         * As we can only do the VCPUOP_register_vcpu_info once let's
         * not overwrite its result.
         *
         * For PV it is called during restore (xen_vcpu_restore) and bootup
         * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
         * use this function.
         */
        if (xen_hvm_domain()) {
                if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
                        return;
        }
        if (cpu < MAX_VIRT_CPUS)
                per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];

        if (!have_vcpu_info_placement) {
                if (cpu >= MAX_VIRT_CPUS)
                        clamp_max_cpus();
                return;
        }

        vcpup = &per_cpu(xen_vcpu_info, cpu);
        info.mfn = arbitrary_virt_to_mfn(vcpup);
        info.offset = offset_in_page(vcpup);

        /* Check to see if the hypervisor will put the vcpu_info
           structure where we want it, which allows direct access via
           a percpu variable.
           N.B. This hypercall can _only_ be called once per CPU.
           Subsequent calls will error out with -EINVAL. This is due to
           the fact that the hypervisor has no unregister variant and
           this hypercall does not allow overwriting info.mfn and
           info.offset.
         */
        err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

        if (err) {
                printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
                have_vcpu_info_placement = 0;
                clamp_max_cpus();
        } else {
                /* This cpu is using the registered vcpu info, even if
                   later ones fail to. */
                per_cpu(xen_vcpu, cpu) = vcpup;
        }
}

/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a bad state, since
 * we can't back out from using it...
 */
void xen_vcpu_restore(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                bool other_cpu = (cpu != smp_processor_id());
                bool is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);

                if (other_cpu && is_up &&
                    HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
                        BUG();

                xen_setup_runstate_info(cpu);

                if (have_vcpu_info_placement)
                        xen_vcpu_setup(cpu);

                if (other_cpu && is_up &&
                    HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
                        BUG();
        }
}

static void __init xen_banner(void)
{
        unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL);
        struct xen_extraversion extra;

        HYPERVISOR_xen_version(XENVER_extraversion, &extra);

        pr_info("Booting paravirtualized kernel %son %s\n",
                xen_feature(XENFEAT_auto_translated_physmap) ?
                        "with PVH extensions " : "", pv_info.name);
        pr_info("Xen version: %d.%d%s%s\n",
                version >> 16, version & 0xffff, extra.extraversion,
                xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
}

/* Check if running on Xen version (major, minor) or later */
bool
xen_running_on_version_or_later(unsigned int major, unsigned int minor)
{
        unsigned int version;

        if (!xen_domain())
                return false;

        version = HYPERVISOR_xen_version(XENVER_version, NULL);
        if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) ||
                ((version >> 16) > major))
                return true;
        return false;
}

#define CPUID_THERM_POWER_LEAF 6
#define APERFMPERF_PRESENT 0

static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;

static __read_mostly unsigned int cpuid_leaf1_ecx_set_mask;
static __read_mostly unsigned int cpuid_leaf5_ecx_val;
static __read_mostly unsigned int cpuid_leaf5_edx_val;

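/*
 * cpuid is never executed natively here: XEN_EMULATE_PREFIX is a magic
 * instruction sequence (ud2a plus a "xen" signature) that traps into
 * the hypervisor, which recognizes it and emulates the cpuid that
 * follows.  The masks below then filter what the guest gets to see.
 */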
static void xen_cpuid(unsigned int *ax, unsigned int *bx,
                      unsigned int *cx, unsigned int *dx)
{
        unsigned maskebx = ~0;
        unsigned maskecx = ~0;
        unsigned maskedx = ~0;
        unsigned setecx = 0;
        /*
         * Mask out inconvenient features, to try and disable as many
         * unsupported kernel subsystems as possible.
         */
        switch (*ax) {
        case 1:
                maskecx = cpuid_leaf1_ecx_mask;
                setecx = cpuid_leaf1_ecx_set_mask;
                maskedx = cpuid_leaf1_edx_mask;
                break;

        case CPUID_MWAIT_LEAF:
                /* Synthesize the values. */
                *ax = 0;
                *bx = 0;
                *cx = cpuid_leaf5_ecx_val;
                *dx = cpuid_leaf5_edx_val;
                return;

        case CPUID_THERM_POWER_LEAF:
                /* Disabling APERFMPERF for kernel usage */
                maskecx = ~(1 << APERFMPERF_PRESENT);
                break;

        case 0xb:
                /* Suppress extended topology stuff */
                maskebx = 0;
                break;
        }

        asm(XEN_EMULATE_PREFIX "cpuid"
                : "=a" (*ax),
                  "=b" (*bx),
                  "=c" (*cx),
                  "=d" (*dx)
                : "0" (*ax), "2" (*cx));

        *bx &= maskebx;
        *cx &= maskecx;
        *cx |= setecx;
        *dx &= maskedx;
}
STACK_FRAME_NON_STANDARD(xen_cpuid); /* XEN_EMULATE_PREFIX */

static bool __init xen_check_mwait(void)
{
#ifdef CONFIG_ACPI
        struct xen_platform_op op = {
                .cmd                    = XENPF_set_processor_pminfo,
                .u.set_pminfo.id        = -1,
                .u.set_pminfo.type      = XEN_PM_PDC,
        };
        uint32_t buf[3];
        unsigned int ax, bx, cx, dx;
        unsigned int mwait_mask;

        /* We need to determine whether it is OK to expose the MWAIT
         * capability to the kernel to harvest deeper than C3 states from ACPI
         * _CST using the processor_harvest_xen.c module. For this to work, we
         * need to gather the MWAIT_LEAF values (which the cstate.c code
         * checks against). The hypervisor won't expose the MWAIT flag because
         * it would break backwards compatibility; so we will find out directly
         * from the hardware and hypercall.
         */
        if (!xen_initial_domain())
                return false;

        /*
         * When running under a platform earlier than Xen 4.2, do not expose
         * mwait, to avoid the risk of loading the native acpi pad driver.
         */
        if (!xen_running_on_version_or_later(4, 2))
                return false;

        ax = 1;
        cx = 0;

        native_cpuid(&ax, &bx, &cx, &dx);

        mwait_mask = (1 << (X86_FEATURE_EST % 32)) |
                     (1 << (X86_FEATURE_MWAIT % 32));

        if ((cx & mwait_mask) != mwait_mask)
                return false;

        /* We need to emulate the MWAIT_LEAF and for that we need both
         * ecx and edx. The hypercall provides only partial information.
         */

        ax = CPUID_MWAIT_LEAF;
        bx = 0;
        cx = 0;
        dx = 0;

        native_cpuid(&ax, &bx, &cx, &dx);

        /* Ask the Hypervisor whether to clear ACPI_PDC_C_C2C3_FFH. If so,
         * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3.
         */
        buf[0] = ACPI_PDC_REVISION_ID;
        buf[1] = 1;
        buf[2] = (ACPI_PDC_C_CAPABILITY_SMP | ACPI_PDC_EST_CAPABILITY_SWSMP);

        set_xen_guest_handle(op.u.set_pminfo.pdc, buf);

        if ((HYPERVISOR_platform_op(&op) == 0) &&
            (buf[2] & (ACPI_PDC_C_C1_FFH | ACPI_PDC_C_C2C3_FFH))) {
                cpuid_leaf5_ecx_val = cx;
                cpuid_leaf5_edx_val = dx;
        }
        return true;
#else
        return false;
#endif
}

static void __init xen_init_cpuid_mask(void)
{
        unsigned int ax, bx, cx, dx;
        unsigned int xsave_mask;

        cpuid_leaf1_edx_mask =
                ~((1 << X86_FEATURE_MTRR) |  /* disable MTRR */
                  (1 << X86_FEATURE_ACC));   /* thermal monitoring */

        if (!xen_initial_domain())
                cpuid_leaf1_edx_mask &=
                        ~((1 << X86_FEATURE_ACPI));  /* disable ACPI */

        cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32));

        ax = 1;
        cx = 0;
        cpuid(1, &ax, &bx, &cx, &dx);

        xsave_mask =
                (1 << (X86_FEATURE_XSAVE % 32)) |
                (1 << (X86_FEATURE_OSXSAVE % 32));

        /* Xen will set CR4.OSXSAVE if supported and not disabled by force */
        if ((cx & xsave_mask) != xsave_mask)
                cpuid_leaf1_ecx_mask &= ~xsave_mask; /* disable XSAVE & OSXSAVE */
        if (xen_check_mwait())
                cpuid_leaf1_ecx_set_mask = (1 << (X86_FEATURE_MWAIT % 32));
}

static void xen_set_debugreg(int reg, unsigned long val)
{
        HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
        return HYPERVISOR_get_debugreg(reg);
}

static void xen_end_context_switch(struct task_struct *next)
{
        xen_mc_flush();
        paravirt_end_context_switch(next);
}

static unsigned long xen_store_tr(void)
{
        return 0;
}

/*
 * Set the page permissions for a particular virtual address.  If the
 * address is a vmalloc mapping (or other non-linear mapping), then
 * find the linear mapping of the page and also set its protections to
 * match.
 */
static void set_aliased_prot(void *v, pgprot_t prot)
{
        int level;
        pte_t *ptep;
        pte_t pte;
        unsigned long pfn;
        struct page *page;
        unsigned char dummy;

        ptep = lookup_address((unsigned long)v, &level);
        BUG_ON(ptep == NULL);

        pfn = pte_pfn(*ptep);
        page = pfn_to_page(pfn);

        pte = pfn_pte(pfn, prot);

        /*
         * Careful: update_va_mapping() will fail if the virtual address
         * we're poking isn't populated in the page tables.  We don't
         * need to worry about the direct map (that's always in the page
         * tables), but we need to be careful about vmap space.  In
         * particular, the top level page table can lazily propagate
         * entries between processes, so if we've switched mms since we
         * vmapped the target in the first place, we might not have the
         * top-level page table entry populated.
         *
         * We disable preemption because we want the same mm active when
         * we probe the target and when we issue the hypercall.  We'll
         * have the same nominal mm, but if we're a kernel thread, lazy
         * mm dropping could change our pgd.
         *
         * Out of an abundance of caution, this uses probe_kernel_read()
         * to fault in the target address just in case there's some
         * obscure case in which the target address isn't readable.
         */

        preempt_disable();

        probe_kernel_read(&dummy, v, 1);

        if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
                BUG();

        if (!PageHighMem(page)) {
                void *av = __va(PFN_PHYS(pfn));

                if (av != v)
                        if (HYPERVISOR_update_va_mapping((unsigned long)av, pte, 0))
                                BUG();
        } else
                kmap_flush_unused();

        preempt_enable();
}

static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
        const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
        int i;

        /*
         * We need to mark all aliases of the LDT pages RO.  We
         * don't need to call vm_flush_aliases(), though, since that's
         * only responsible for flushing aliases out of the TLBs, not
         * the page tables, and Xen will flush the TLB for us if needed.
         *
         * To avoid confusing future readers: none of this is necessary
         * to load the LDT.  The hypervisor only checks this when the
         * LDT is faulted in due to subsequent descriptor access.
         */

        for (i = 0; i < entries; i += entries_per_page)
                set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
}

static void xen_free_ldt(struct desc_struct *ldt, unsigned entries)
{
        const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
        int i;

        for (i = 0; i < entries; i += entries_per_page)
                set_aliased_prot(ldt + i, PAGE_KERNEL);
}

static void xen_set_ldt(const void *addr, unsigned entries)
{
        struct mmuext_op *op;
        struct multicall_space mcs = xen_mc_entry(sizeof(*op));

        trace_xen_cpu_set_ldt(addr, entries);

        op = mcs.args;
        op->cmd = MMUEXT_SET_LDT;
        op->arg1.linear_addr = (unsigned long)addr;
        op->arg2.nr_ents = entries;

        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_load_gdt(const struct desc_ptr *dtr)
{
        unsigned long va = dtr->address;
        unsigned int size = dtr->size + 1;
        unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        unsigned long frames[pages];
        int f;

        /*
         * A GDT can be up to 64k in size, which corresponds to 8192
         * 8-byte entries, or 16 4k pages.
         */

        BUG_ON(size > 65536);
        BUG_ON(va & ~PAGE_MASK);

        for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
                int level;
                pte_t *ptep;
                unsigned long pfn, mfn;
                void *virt;

                /*
                 * The GDT is per-cpu and is in the percpu data area.
                 * That can be virtually mapped, so we need to do a
                 * page-walk to get the underlying MFN for the
                 * hypercall.  The page can also be in the kernel's
                 * linear range, so we need to RO that mapping too.
                 */
                ptep = lookup_address(va, &level);
                BUG_ON(ptep == NULL);

                pfn = pte_pfn(*ptep);
                mfn = pfn_to_mfn(pfn);
                virt = __va(PFN_PHYS(pfn));

                frames[f] = mfn;

                make_lowmem_page_readonly((void *)va);
                make_lowmem_page_readonly(virt);
        }

        if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
                BUG();
}

/*
 * load_gdt for early boot, when the gdt is only mapped once
 */
static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
{
        unsigned long va = dtr->address;
        unsigned int size = dtr->size + 1;
        unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
        unsigned long frames[pages];
        int f;

        /*
         * A GDT can be up to 64k in size, which corresponds to 8192
         * 8-byte entries, or 16 4k pages.
         */

        BUG_ON(size > 65536);
        BUG_ON(va & ~PAGE_MASK);

        for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
                pte_t pte;
                unsigned long pfn, mfn;

                pfn = virt_to_pfn(va);
                mfn = pfn_to_mfn(pfn);

                pte = pfn_pte(pfn, PAGE_KERNEL_RO);

                if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
                        BUG();

                frames[f] = mfn;
        }

        if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
                BUG();
}

static inline bool desc_equal(const struct desc_struct *d1,
                              const struct desc_struct *d2)
{
        return d1->a == d2->a && d1->b == d2->b;
}

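/*
 * Queue an update for a single TLS slot in the GDT, skipping it if the
 * shadow copy shows it hasn't changed.  The actual hypercall is
 * batched; the caller (xen_load_tls) issues it via xen_mc_issue().
 */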
static void load_TLS_descriptor(struct thread_struct *t,
                                unsigned int cpu, unsigned int i)
{
        struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
        struct desc_struct *gdt;
        xmaddr_t maddr;
        struct multicall_space mc;

        if (desc_equal(shadow, &t->tls_array[i]))
                return;

        *shadow = t->tls_array[i];

        gdt = get_cpu_gdt_table(cpu);
        maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
        mc = __xen_mc_entry(0);

        MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
        /*
         * XXX sleazy hack: If we're being called in a lazy-cpu zone
         * and lazy gs handling is enabled, it means we're in a
         * context switch, and %gs has just been saved.  This means we
         * can zero it out to prevent faults on exit from the
         * hypervisor if the next process has no %gs.  Either way, it
         * has been saved, and the new value will get loaded properly.
         * This will go away as soon as Xen has been modified to not
         * save/restore %gs for normal hypercalls.
         *
         * On x86_64, this hack is not used for %gs, because gs points
         * to KERNEL_GS_BASE (and uses it for PDA references), so we
         * must not zero %gs on x86_64.
         *
         * For x86_64, we need to zero %fs, otherwise we may get an
         * exception between the new %fs descriptor being loaded and
         * %fs being effectively cleared at __switch_to().
         */
        if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
                lazy_load_gs(0);
#else
                loadsegment(fs, 0);
#endif
        }

        xen_mc_batch();

        load_TLS_descriptor(t, cpu, 0);
        load_TLS_descriptor(t, cpu, 1);
        load_TLS_descriptor(t, cpu, 2);

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

#ifdef CONFIG_X86_64
static void xen_load_gs_index(unsigned int idx)
{
        if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx))
                BUG();
}
#endif

static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
                                const void *ptr)
{
        xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]);
        u64 entry = *(u64 *)ptr;

        trace_xen_cpu_write_ldt_entry(dt, entrynum, entry);

        preempt_disable();

        xen_mc_flush();
        if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
                BUG();

        preempt_enable();
}

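/*
 * Convert an IDT gate into Xen's trap_info representation.  Returns 0
 * for gate types Xen won't take (anything but trap and interrupt
 * gates), 1 if *info was filled in.
 */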
static int cvt_gate_to_trap(int vector, const gate_desc *val,
                            struct trap_info *info)
{
        unsigned long addr;

        if (val->type != GATE_TRAP && val->type != GATE_INTERRUPT)
                return 0;

        info->vector = vector;

        addr = gate_offset(*val);
#ifdef CONFIG_X86_64
        /*
         * Look for known traps using IST, and substitute them
         * appropriately.  The debugger ones are the only ones we care
         * about.  Xen will handle faults like double_fault,
         * so we should never see them.  Warn if
         * there's an unexpected IST-using fault handler.
         */
        if (addr == (unsigned long)debug)
                addr = (unsigned long)xen_debug;
        else if (addr == (unsigned long)int3)
                addr = (unsigned long)xen_int3;
        else if (addr == (unsigned long)stack_segment)
                addr = (unsigned long)xen_stack_segment;
        else if (addr == (unsigned long)double_fault) {
                /* Don't need to handle these */
                return 0;
#ifdef CONFIG_X86_MCE
        } else if (addr == (unsigned long)machine_check) {
                /*
                 * When the Xen hypervisor injects a vMCE into the guest,
                 * use the native mce handler to handle it.
                 */
                ;
#endif
        } else if (addr == (unsigned long)nmi)
                /*
                 * Use the native version as well.
                 */
                ;
        else {
                /* Some other trap using IST? */
                if (WARN_ON(val->ist != 0))
                        return 0;
        }
#endif  /* CONFIG_X86_64 */
        info->address = addr;

        info->cs = gate_segment(*val);
        info->flags = val->dpl;
        /* interrupt gates clear IF */
        if (val->type == GATE_INTERRUPT)
                info->flags |= 1 << 2;

        return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct desc_ptr, idt_desc);

/* Set an IDT entry.  If the entry is part of the current IDT, then
   also update Xen. */
static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
{
        unsigned long p = (unsigned long)&dt[entrynum];
        unsigned long start, end;

        trace_xen_cpu_write_idt_entry(dt, entrynum, g);

        preempt_disable();

        start = __this_cpu_read(idt_desc.address);
        end = start + __this_cpu_read(idt_desc.size) + 1;

        xen_mc_flush();

        native_write_idt_entry(dt, entrynum, g);

        if (p >= start && (p + 8) <= end) {
                struct trap_info info[2];

                info[1].address = 0;

                if (cvt_gate_to_trap(entrynum, g, &info[0]))
                        if (HYPERVISOR_set_trap_table(info))
                                BUG();
        }

        preempt_enable();
}

static void xen_convert_trap_info(const struct desc_ptr *desc,
                                  struct trap_info *traps)
{
        unsigned in, out, count;

        count = (desc->size+1) / sizeof(gate_desc);
        BUG_ON(count > 256);

        for (in = out = 0; in < count; in++) {
                gate_desc *entry = (gate_desc *)(desc->address) + in;

                if (cvt_gate_to_trap(in, entry, &traps[out]))
                        out++;
        }
        traps[out].address = 0;
}

void xen_copy_trap_info(struct trap_info *traps)
{
        const struct desc_ptr *desc = this_cpu_ptr(&idt_desc);

        xen_convert_trap_info(desc, traps);
}

/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
   hold a spinlock to protect the static traps[] array (static because
   it avoids allocation, and saves stack space). */
static void xen_load_idt(const struct desc_ptr *desc)
{
        static DEFINE_SPINLOCK(lock);
        static struct trap_info traps[257];

        trace_xen_cpu_load_idt(desc);

        spin_lock(&lock);

        memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc));

        xen_convert_trap_info(desc, traps);

        xen_mc_flush();
        if (HYPERVISOR_set_trap_table(traps))
                BUG();

        spin_unlock(&lock);
}

/* Write a GDT descriptor entry.  Ignore LDT descriptors, since
   they're handled differently. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
                                const void *desc, int type)
{
        trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

        preempt_disable();

        switch (type) {
        case DESC_LDT:
        case DESC_TSS:
                /* ignore */
                break;

        default: {
                xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]);

                xen_mc_flush();
                if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
                        BUG();
        }

        }

        preempt_enable();
}

/*
 * Version of write_gdt_entry for use at early boot-time needed to
 * update an entry as simply as possible.
 */
static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry,
                                            const void *desc, int type)
{
        trace_xen_cpu_write_gdt_entry(dt, entry, desc, type);

        switch (type) {
        case DESC_LDT:
        case DESC_TSS:
                /* ignore */
                break;

        default: {
                xmaddr_t maddr = virt_to_machine(&dt[entry]);

                if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc))
                        dt[entry] = *(struct desc_struct *)desc;
        }

        }
}

static void xen_load_sp0(struct tss_struct *tss,
                         struct thread_struct *thread)
{
        struct multicall_space mcs;

        mcs = xen_mc_entry(0);
        MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->sp0);
        xen_mc_issue(PARAVIRT_LAZY_CPU);
        tss->x86_tss.sp0 = thread->sp0;
}

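/*
 * 'mask' is the raw X86_EFLAGS_IOPL field (bits 12-13), so 'mask >> 12'
 * recovers the requested privilege level.  An IOPL of 0 is translated
 * to ring 1, which is (AFAICT) where a 32-bit PV guest kernel runs.
 */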
void xen_set_iopl_mask(unsigned mask)
{
        struct physdev_set_iopl set_iopl;

        /* Force the change at ring 0. */
        set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
        HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

static void xen_io_delay(void)
{
}

static void xen_clts(void)
{
        struct multicall_space mcs;

        mcs = xen_mc_entry(0);

        MULTI_fpu_taskswitch(mcs.mc, 0);

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static DEFINE_PER_CPU(unsigned long, xen_cr0_value);

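/*
 * Reading %cr0 from a PV guest traps to the hypervisor, and the kernel
 * reads it frequently (e.g. on the FPU task-switch paths), so keep a
 * per-cpu software copy.  A live CR0 can never be 0 (CR0.PE is always
 * set), so 0 doubles as the "not yet cached" marker.
 */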
static unsigned long xen_read_cr0(void)
{
        unsigned long cr0 = this_cpu_read(xen_cr0_value);

        if (unlikely(cr0 == 0)) {
                cr0 = native_read_cr0();
                this_cpu_write(xen_cr0_value, cr0);
        }

        return cr0;
}

static void xen_write_cr0(unsigned long cr0)
{
        struct multicall_space mcs;

        this_cpu_write(xen_cr0_value, cr0);

        /* Only pay attention to cr0.TS; everything else is
           ignored. */
        mcs = xen_mc_entry(0);

        MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0);

        xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_write_cr4(unsigned long cr4)
{
        cr4 &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PCE);

        native_write_cr4(cr4);
}

#ifdef CONFIG_X86_64
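/*
 * %cr8 is the task priority register; a PV guest has no real local
 * APIC TPR to manage, so reads are hardwired to 0 and writing any
 * non-zero value is treated as a bug.
 */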
static inline unsigned long xen_read_cr8(void)
{
        return 0;
}

static inline void xen_write_cr8(unsigned long val)
{
        BUG_ON(val);
}
#endif

static u64 xen_read_msr_safe(unsigned int msr, int *err)
{
        u64 val;

        if (pmu_msr_read(msr, &val, err))
                return val;

        val = native_read_msr_safe(msr, err);
        switch (msr) {
        case MSR_IA32_APICBASE:
#ifdef CONFIG_X86_X2APIC
                if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31))))
#endif
                        val &= ~X2APIC_ENABLE;
                break;
        }
        return val;
}

static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
{
        int ret;

        ret = 0;

        switch (msr) {
#ifdef CONFIG_X86_64
                unsigned which;
                u64 base;

        case MSR_FS_BASE:               which = SEGBASE_FS; goto set;
        case MSR_KERNEL_GS_BASE:        which = SEGBASE_GS_USER; goto set;
        case MSR_GS_BASE:               which = SEGBASE_GS_KERNEL; goto set;

        set:
                base = ((u64)high << 32) | low;
                if (HYPERVISOR_set_segment_base(which, base) != 0)
                        ret = -EIO;
                break;
#endif

        case MSR_STAR:
        case MSR_CSTAR:
        case MSR_LSTAR:
        case MSR_SYSCALL_MASK:
        case MSR_IA32_SYSENTER_CS:
        case MSR_IA32_SYSENTER_ESP:
        case MSR_IA32_SYSENTER_EIP:
                /* Fast syscall setup is all done in hypercalls, so
                   these are all ignored.  Stub them out here to stop
                   Xen console noise. */
                break;

        default:
                if (!pmu_msr_write(msr, low, high, &ret))
                        ret = native_write_msr_safe(msr, low, high);
        }

        return ret;
}

static u64 xen_read_msr(unsigned int msr)
{
        /*
         * This will silently swallow a #GP from RDMSR.  It may be worth
         * changing that.
         */
        int err;

        return xen_read_msr_safe(msr, &err);
}

static void xen_write_msr(unsigned int msr, unsigned low, unsigned high)
{
        /*
         * This will silently swallow a #GP from WRMSR.  It may be worth
         * changing that.
         */
        xen_write_msr_safe(msr, low, high);
}

void xen_setup_shared_info(void)
{
        if (!xen_feature(XENFEAT_auto_translated_physmap)) {
                set_fixmap(FIX_PARAVIRT_BOOTMAP,
                           xen_start_info->shared_info);

                HYPERVISOR_shared_info =
                        (struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);
        } else
                HYPERVISOR_shared_info =
                        (struct shared_info *)__va(xen_start_info->shared_info);

#ifndef CONFIG_SMP
        /* In UP this is as good a place as any to set up shared info */
        xen_setup_vcpu_info_placement();
#endif

        xen_setup_mfn_list_list();
}

/* This is called once we have the cpu_possible_mask */
void xen_setup_vcpu_info_placement(void)
{
        int cpu;

        for_each_possible_cpu(cpu)
                xen_vcpu_setup(cpu);

        /*
         * xen_vcpu_setup managed to place the vcpu_info within the
         * percpu area for all cpus, so make use of it. Note that for
         * PVH we want to use the native IRQ mechanism.
         */
        if (have_vcpu_info_placement && !xen_pvh_domain()) {
                pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
                pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
                pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
                pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
                pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
        }
}

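/*
 * Inline-patch the irq pvops: when vcpu info placement is in effect,
 * the "direct" variants (which poke the per-cpu vcpu_info with a single
 * instruction) are copied straight into the call sites.  Anything that
 * doesn't fit falls back to the default patching machinery.
 */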
static unsigned xen_patch(u8 type, u16 clobbers, void *insnbuf,
                          unsigned long addr, unsigned len)
{
        char *start, *end, *reloc;
        unsigned ret;

        start = end = reloc = NULL;

#define SITE(op, x)                                                     \
        case PARAVIRT_PATCH(op.x):                                      \
        if (have_vcpu_info_placement) {                                 \
                start = (char *)xen_##x##_direct;                       \
                end = xen_##x##_direct_end;                             \
                reloc = xen_##x##_direct_reloc;                         \
        }                                                               \
        goto patch_site

        switch (type) {
                SITE(pv_irq_ops, irq_enable);
                SITE(pv_irq_ops, irq_disable);
                SITE(pv_irq_ops, save_fl);
                SITE(pv_irq_ops, restore_fl);
#undef SITE

        patch_site:
                if (start == NULL || (end-start) > len)
                        goto default_patch;

                ret = paravirt_patch_insns(insnbuf, len, start, end);

                /* Note: because reloc is assigned from something that
                   appears to be an array, gcc assumes it's non-null,
                   but doesn't know its relationship with start and
                   end. */
                if (reloc > start && reloc < end) {
                        int reloc_off = reloc - start;
                        long *relocp = (long *)(insnbuf + reloc_off);
                        long delta = start - (char *)addr;

                        *relocp += delta;
                }
                break;

        default_patch:
        default:
                ret = paravirt_patch_default(type, clobbers, insnbuf,
                                             addr, len);
                break;
        }

        return ret;
}

static const struct pv_info xen_info __initconst = {
        .shared_kernel_pmd = 0,

#ifdef CONFIG_X86_64
        .extra_user_64bit_cs = FLAT_USER_CS64,
#endif
        .name = "Xen",
};

static const struct pv_init_ops xen_init_ops __initconst = {
        .patch = xen_patch,
};

static const struct pv_cpu_ops xen_cpu_ops __initconst = {
        .cpuid = xen_cpuid,

        .set_debugreg = xen_set_debugreg,
        .get_debugreg = xen_get_debugreg,

        .clts = xen_clts,

        .read_cr0 = xen_read_cr0,
        .write_cr0 = xen_write_cr0,

        .read_cr4 = native_read_cr4,
        .read_cr4_safe = native_read_cr4_safe,
        .write_cr4 = xen_write_cr4,

#ifdef CONFIG_X86_64
        .read_cr8 = xen_read_cr8,
        .write_cr8 = xen_write_cr8,
#endif

        .wbinvd = native_wbinvd,

        .read_msr = xen_read_msr,
        .write_msr = xen_write_msr,

        .read_msr_safe = xen_read_msr_safe,
        .write_msr_safe = xen_write_msr_safe,

        .read_pmc = xen_read_pmc,

        .iret = xen_iret,
#ifdef CONFIG_X86_64
        .usergs_sysret64 = xen_sysret64,
#endif

        .load_tr_desc = paravirt_nop,
        .set_ldt = xen_set_ldt,
        .load_gdt = xen_load_gdt,
        .load_idt = xen_load_idt,
        .load_tls = xen_load_tls,
#ifdef CONFIG_X86_64
        .load_gs_index = xen_load_gs_index,
#endif

        .alloc_ldt = xen_alloc_ldt,
        .free_ldt = xen_free_ldt,

        .store_idt = native_store_idt,
        .store_tr = xen_store_tr,

        .write_ldt_entry = xen_write_ldt_entry,
        .write_gdt_entry = xen_write_gdt_entry,
        .write_idt_entry = xen_write_idt_entry,
        .load_sp0 = xen_load_sp0,

        .set_iopl_mask = xen_set_iopl_mask,
        .io_delay = xen_io_delay,

        /* Xen takes care of %gs when switching to usermode for us */
        .swapgs = paravirt_nop,

        .start_context_switch = paravirt_start_context_switch,
        .end_context_switch = xen_end_context_switch,
};

static void xen_reboot(int reason)
{
        struct sched_shutdown r = { .reason = reason };
        int cpu;

        for_each_online_cpu(cpu)
                xen_pmu_finish(cpu);

        if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
                BUG();
}

static void xen_restart(char *msg)
{
        xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
        xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
        xen_reboot(SHUTDOWN_poweroff);
}

static void xen_machine_power_off(void)
{
        if (pm_power_off)
                pm_power_off();
        xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
        xen_reboot(SHUTDOWN_crash);
}

static int
xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
        xen_reboot(SHUTDOWN_crash);
        return NOTIFY_DONE;
}

static struct notifier_block xen_panic_block = {
        .notifier_call = xen_panic_event,
        .priority = INT_MIN
};

int xen_panic_handler_init(void)
{
        atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
        return 0;
}

static const struct machine_ops xen_machine_ops __initconst = {
        .restart = xen_restart,
        .halt = xen_machine_halt,
        .power_off = xen_machine_power_off,
        .shutdown = xen_machine_halt,
        .crash_shutdown = xen_crash_shutdown,
        .emergency_restart = xen_emergency_restart,
};

static unsigned char xen_get_nmi_reason(void)
{
        unsigned char reason = 0;

        /* Construct a value which looks like it came from port 0x61. */
        if (test_bit(_XEN_NMIREASON_io_error,
                     &HYPERVISOR_shared_info->arch.nmi_reason))
                reason |= NMI_REASON_IOCHK;
        if (test_bit(_XEN_NMIREASON_pci_serr,
                     &HYPERVISOR_shared_info->arch.nmi_reason))
                reason |= NMI_REASON_SERR;

        return reason;
}

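/*
 * A PV dom0 can't make the real-mode BIOS EDD calls itself, so pull
 * the disk info and MBR signatures the hypervisor gathered at boot
 * into boot_params, where the generic EDD code expects them.
 */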
1365 static void __init xen_boot_params_init_edd(void)
1366 {
1367 #if IS_ENABLED(CONFIG_EDD)
1368         struct xen_platform_op op;
1369         struct edd_info *edd_info;
1370         u32 *mbr_signature;
1371         unsigned nr;
1372         int ret;
1373
1374         edd_info = boot_params.eddbuf;
1375         mbr_signature = boot_params.edd_mbr_sig_buffer;
1376
1377         op.cmd = XENPF_firmware_info;
1378
1379         op.u.firmware_info.type = XEN_FW_DISK_INFO;
1380         for (nr = 0; nr < EDDMAXNR; nr++) {
1381                 struct edd_info *info = edd_info + nr;
1382
1383                 op.u.firmware_info.index = nr;
1384                 info->params.length = sizeof(info->params);
1385                 set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
1386                                      &info->params);
1387                 ret = HYPERVISOR_platform_op(&op);
1388                 if (ret)
1389                         break;
1390
1391 #define C(x) info->x = op.u.firmware_info.u.disk_info.x
1392                 C(device);
1393                 C(version);
1394                 C(interface_support);
1395                 C(legacy_max_cylinder);
1396                 C(legacy_max_head);
1397                 C(legacy_sectors_per_track);
1398 #undef C
1399         }
1400         boot_params.eddbuf_entries = nr;
1401
1402         op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
1403         for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) {
1404                 op.u.firmware_info.index = nr;
1405                 ret = HYPERVISOR_platform_op(&op);
1406                 if (ret)
1407                         break;
1408                 mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
1409         }
1410         boot_params.edd_mbr_sig_buf_entries = nr;
1411 #endif
1412 }
1413
1414 /*
1415  * Set up the GDT and segment registers for -fstack-protector.  Until
1416  * we do this, we have to be careful not to call any stack-protected
1417  * function, which is most of the kernel.
1418  *
1419  * Note, that it is __ref because the only caller of this after init
1420  * is PVH which is not going to use xen_load_gdt_boot or other
1421  * __init functions.
1422  */
1423 static void __ref xen_setup_gdt(int cpu)
1424 {
1425         if (xen_feature(XENFEAT_auto_translated_physmap)) {
1426 #ifdef CONFIG_X86_64
1427                 unsigned long dummy;
1428
1429                 load_percpu_segment(cpu); /* We need to access per-cpu area */
1430                 switch_to_new_gdt(cpu); /* GDT and GS set */
1431
1432                 /* We are switching of the Xen provided GDT to our HVM mode
1433                  * GDT. The new GDT has  __KERNEL_CS with CS.L = 1
1434                  * and we are jumping to reload it.
1435                  */
1436                 asm volatile ("pushq %0\n"
1437                               "leaq 1f(%%rip),%0\n"
1438                               "pushq %0\n"
1439                               "lretq\n"
1440                               "1:\n"
1441                               : "=&r" (dummy) : "0" (__KERNEL_CS));
1442
1443                 /*
1444                  * While not needed, we also set the %es, %ds, and %fs
1445                  * to zero. We don't care about %ss as it is NULL.
1446                  * Strictly speaking this is not needed as Xen zeros those
1447                  * out (and also MSR_FS_BASE, MSR_GS_BASE, MSR_KERNEL_GS_BASE)
1448                  *
1449                  * Linux zeros them in cpu_init() and in secondary_startup_64
1450                  * (for BSP).
1451                  */
1452                 loadsegment(es, 0);
1453                 loadsegment(ds, 0);
1454                 loadsegment(fs, 0);
1455 #else
1456                 /* PVH: TODO Implement. */
1457                 BUG();
1458 #endif
1459                 return; /* PVH does not need any PV GDT ops. */
1460         }
1461         pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry_boot;
1462         pv_cpu_ops.load_gdt = xen_load_gdt_boot;
1463
1464         setup_stack_canary_segment(0);
1465         switch_to_new_gdt(0);
1466
1467         pv_cpu_ops.write_gdt_entry = xen_write_gdt_entry;
1468         pv_cpu_ops.load_gdt = xen_load_gdt;
1469 }
1470
1471 #ifdef CONFIG_XEN_PVH
1472 /*
1473  * A PV guest starts with default flags that are not set for PVH, set them
1474  * here asap.
1475  */
1476 static void xen_pvh_set_cr_flags(int cpu)
1477 {
1478
1479         /* Some of these are setup in 'secondary_startup_64'. The others:
1480          * X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests
1481          * (which PVH shared codepaths), while X86_CR0_PG is for PVH. */
1482         write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);
1483
1484         if (!cpu)
1485                 return;
1486         /*
1487          * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
1488          * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu__init_cpu().
1489         */
1490         if (boot_cpu_has(X86_FEATURE_PSE))
1491                 cr4_set_bits_and_update_boot(X86_CR4_PSE);
1492
1493         if (boot_cpu_has(X86_FEATURE_PGE))
1494                 cr4_set_bits_and_update_boot(X86_CR4_PGE);
1495 }
1496
1497 /*
1498  * Note, that it is ref - because the only caller of this after init
1499  * is PVH which is not going to use xen_load_gdt_boot or other
1500  * __init functions.
1501  */
1502 void __ref xen_pvh_secondary_vcpu_init(int cpu)
1503 {
1504         xen_setup_gdt(cpu);
1505         xen_pvh_set_cr_flags(cpu);
1506 }
1507
1508 static void __init xen_pvh_early_guest_init(void)
1509 {
1510         if (!xen_feature(XENFEAT_auto_translated_physmap))
1511                 return;
1512
1513         if (!xen_feature(XENFEAT_hvm_callback_vector))
1514                 return;
1515
1516         xen_have_vector_callback = 1;
1517
1518         xen_pvh_early_cpu_init(0, false);
1519         xen_pvh_set_cr_flags(0);
1520
1521 #ifdef CONFIG_X86_32
1522         BUG(); /* PVH: Implement proper support. */
1523 #endif
1524 }
1525 #endif    /* CONFIG_XEN_PVH */
1526
1527 static void __init xen_dom0_set_legacy_features(void)
1528 {
1529         x86_platform.legacy.rtc = 1;
1530 }
1531
1532 /* First C function to be called on Xen boot */
1533 asmlinkage __visible void __init xen_start_kernel(void)
1534 {
1535         struct physdev_set_iopl set_iopl;
1536         unsigned long initrd_start = 0;
1537         int rc;
1538
1539         if (!xen_start_info)
1540                 return;
1541
1542         xen_domain_type = XEN_PV_DOMAIN;
1543
1544         xen_setup_features();
1545 #ifdef CONFIG_XEN_PVH
1546         xen_pvh_early_guest_init();
1547 #endif
1548         xen_setup_machphys_mapping();
1549
1550         /* Install Xen paravirt ops */
1551         pv_info = xen_info;
1552         pv_init_ops = xen_init_ops;
1553         if (!xen_pvh_domain()) {
1554                 pv_cpu_ops = xen_cpu_ops;
1555
1556                 x86_platform.get_nmi_reason = xen_get_nmi_reason;
1557         }
1558
1559         if (xen_feature(XENFEAT_auto_translated_physmap))
1560                 x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
1561         else
1562                 x86_init.resources.memory_setup = xen_memory_setup;
1563         x86_init.oem.arch_setup = xen_arch_setup;
1564         x86_init.oem.banner = xen_banner;
1565
1566         xen_init_time_ops();
1567
1568         /*
1569          * Set up some pagetable state before starting to set any ptes.
1570          */
1571
1572         xen_init_mmu_ops();
1573
1574         /* Prevent unwanted bits from being set in PTEs. */
1575         __supported_pte_mask &= ~_PAGE_GLOBAL;
1576
1577         /*
1578          * Prevent page tables from being allocated in highmem, even
1579          * if CONFIG_HIGHPTE is enabled.
1580          */
1581         __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
1582
1583         /* Work out if we support NX */
1584         x86_configure_nx();
1585
1586         /* Get mfn list */
1587         xen_build_dynamic_phys_to_machine();
1588
1589         /*
1590          * Set up kernel GDT and segment registers, mainly so that
1591          * -fstack-protector code can be executed.
1592          */
1593         xen_setup_gdt(0);
1594
1595         xen_init_irq_ops();
1596         xen_init_cpuid_mask();
1597
1598 #ifdef CONFIG_X86_LOCAL_APIC
1599         /*
1600          * set up the basic apic ops.
1601          */
1602         xen_init_apic();
1603 #endif
1604
1605         if (xen_feature(XENFEAT_mmu_pt_update_preserve_ad)) {
1606                 pv_mmu_ops.ptep_modify_prot_start = xen_ptep_modify_prot_start;
1607                 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
1608         }
1609
1610         machine_ops = xen_machine_ops;
1611
1612         /*
1613          * The only reliable way to retain the initial address of the
1614          * percpu gdt_page is to remember it here, so we can go and
1615          * mark it RW later, when the initial percpu area is freed.
1616          */
1617         xen_initial_gdt = &per_cpu(gdt_page, 0);
1618
1619         xen_smp_init();
1620
1621 #ifdef CONFIG_ACPI_NUMA
1622         /*
1623          * The pages we from Xen are not related to machine pages, so
1624          * any NUMA information the kernel tries to get from ACPI will
1625          * be meaningless.  Prevent it from trying.
1626          */
1627         acpi_numa = -1;
1628 #endif
1629         /* Don't do the full vcpu_info placement stuff until we have a
1630            possible map and a non-dummy shared_info. */
1631         per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
1632
1633         local_irq_disable();
1634         early_boot_irqs_disabled = true;
1635
1636         xen_raw_console_write("mapping kernel into physical memory\n");
1637         xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base,
1638                                    xen_start_info->nr_pages);
1639         xen_reserve_special_pages();
1640
1641         /* keep using Xen gdt for now; no urgent need to change it */
1642
1643 #ifdef CONFIG_X86_32
1644         pv_info.kernel_rpl = 1;
1645         if (xen_feature(XENFEAT_supervisor_mode_kernel))
1646                 pv_info.kernel_rpl = 0;
1647 #else
1648         pv_info.kernel_rpl = 0;
1649 #endif
1650         /* set the limit of our address space */
        xen_reserve_top();

        /* PVH: runs at default kernel iopl of 0 */
        if (!xen_pvh_domain()) {
                /*
                 * We used to do this in xen_arch_setup, but that is too late
                 * on AMD, where early_cpu_init (run before ->arch_setup())
                 * calls early_amd_init, which pokes the 0xcf8 port.
                 */
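                /*
                 * iopl 1 is what lets the PV kernel, which does not run
                 * in ring 0, keep issuing port I/O such as those config
                 * cycles.
                 */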
                set_iopl.iopl = 1;
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
                if (rc != 0)
                        xen_raw_printk("physdev_op failed %d\n", rc);
        }

#ifdef CONFIG_X86_32
        /* set up basic CPUID stuff */
        cpu_detect(&new_cpu_data);
        set_cpu_cap(&new_cpu_data, X86_FEATURE_FPU);
        new_cpu_data.wp_works_ok = 1;
        new_cpu_data.x86_capability[CPUID_1_EDX] = cpuid_edx(1);
#endif

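        /*
         * SIF_MOD_START_PFN signals that mod_start holds the initrd's
         * frame number rather than a virtual address, hence the two
         * different conversions to a physical address.
         */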
        if (xen_start_info->mod_start) {
                if (xen_start_info->flags & SIF_MOD_START_PFN)
                        initrd_start = PFN_PHYS(xen_start_info->mod_start);
                else
                        initrd_start = __pa(xen_start_info->mod_start);
        }

        /* Poke various useful things into boot_params */
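        /*
         * Loader type 9 is the boot-protocol ID assigned to Xen; the
         * low nibble carries the loader version, here 0.
         */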
        boot_params.hdr.type_of_loader = (9 << 4) | 0;
        boot_params.hdr.ramdisk_image = initrd_start;
        boot_params.hdr.ramdisk_size = xen_start_info->mod_len;
        boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line);
        boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN;

        if (!xen_initial_domain()) {
                add_preferred_console("xenboot", 0, NULL);
                add_preferred_console("tty", 0, NULL);
                add_preferred_console("hvc", 0, NULL);
                if (pci_xen)
                        x86_init.pci.arch_init = pci_xen_init;
        } else {
                const struct dom0_vga_console_info *info =
                        (void *)((char *)xen_start_info +
                                 xen_start_info->console.dom0.info_off);
                struct xen_platform_op op = {
                        .cmd = XENPF_firmware_info,
                        .interface_version = XENPF_INTERFACE_VERSION,
                        .u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS,
                };

                x86_platform.set_legacy_features =
                                xen_dom0_set_legacy_features;
                xen_init_vga(info, xen_start_info->console.dom0.info_size);
                xen_start_info->console.domU.mfn = 0;
                xen_start_info->console.domU.evtchn = 0;

                if (HYPERVISOR_platform_op(&op) == 0)
                        boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags;

                /* Make sure ACS will be enabled */
                pci_request_acs();

                xen_acpi_sleep_register();

                /* Avoid searching for BIOS MP tables */
                x86_init.mpparse.find_smp_config = x86_init_noop;
                x86_init.mpparse.get_smp_config = x86_init_uint_noop;

                xen_boot_params_init_edd();
        }
#ifdef CONFIG_PCI
        /* PCI BIOS service won't work from a PV guest. */
        pci_probe &= ~PCI_PROBE_BIOS;
#endif
        xen_raw_console_write("about to get started...\n");

        xen_setup_runstate_info(0);

        xen_efi_init();

        /* Start the world */
#ifdef CONFIG_X86_32
        i386_start_kernel();
#else
        cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */
        x86_64_start_reservations((char *)__pa_symbol(&boot_params));
#endif
}

void __ref xen_hvm_init_shared_info(void)
{
        int cpu;
        struct xen_add_to_physmap xatp;
        static struct shared_info *shared_info_page;

        if (!shared_info_page)
                shared_info_page = (struct shared_info *)
                        extend_brk(PAGE_SIZE, PAGE_SIZE);
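        /*
         * Ask the hypervisor to put its shared_info frame where the brk
         * page reserved above sits; the mapping request is keyed by that
         * page's guest pfn.
         */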
        xatp.domid = DOMID_SELF;
        xatp.idx = 0;
        xatp.space = XENMAPSPACE_shared_info;
        xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
        if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
                BUG();

        HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

        /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
         * page; we use it in the event channel upcall and in some pvclock
         * related functions. We don't need the vcpu_info placement
         * optimizations because we don't use any pv_mmu or pv_irq op on
         * HVM.
         * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is
         * online, but xen_hvm_init_shared_info is run at resume time too,
         * and in that case multiple vcpus might be online. */
        for_each_online_cpu(cpu) {
                /* Leave the pointer NULL; shared_info only covers the
                 * first MAX_VIRT_CPUS vcpu_info slots. */
                if (cpu >= MAX_VIRT_CPUS)
                        continue;
                per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
        }
}

#ifdef CONFIG_XEN_PVHVM
static void __init init_hvm_pv_info(void)
{
        int major, minor;
        uint32_t eax, ebx, ecx, edx, pages, msr, base;
        u64 pfn;

        base = xen_cpuid_base();
        cpuid(base + 1, &eax, &ebx, &ecx, &edx);

        major = eax >> 16;
        minor = eax & 0xffff;
        printk(KERN_INFO "Xen version %d.%d.\n", major, minor);

        cpuid(base + 2, &pages, &msr, &ecx, &edx);

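        /*
         * Writing the hypercall page's physical address to the MSR
         * advertised by cpuid leaf base + 2 makes the hypervisor fill
         * that page with its hypercall stubs.
         */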
        pfn = __pa(hypercall_page);
        wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));

        xen_setup_features();

        pv_info.name = "Xen HVM";

        xen_domain_type = XEN_HVM_DOMAIN;
}

static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
                              void *hcpu)
{
        int cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
                xen_vcpu_setup(cpu);
                if (xen_have_vector_callback) {
                        if (xen_feature(XENFEAT_hvm_safe_pvclock))
                                xen_setup_timer(cpu);
                }
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block xen_hvm_cpu_notifier = {
        .notifier_call  = xen_hvm_cpu_notify,
};

#ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void)
{
        native_machine_shutdown();
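        /*
         * The soft reset is meant to return the domain to a clean state
         * so the kexec'd kernel can re-register event channels and
         * shared pages with the hypervisor.
         */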
        if (kexec_in_progress)
                xen_reboot(SHUTDOWN_soft_reset);
}

static void xen_hvm_crash_shutdown(struct pt_regs *regs)
{
        native_machine_crash_shutdown(regs);
        xen_reboot(SHUTDOWN_soft_reset);
}
#endif

static void __init xen_hvm_guest_init(void)
{
        if (xen_pv_domain())
                return;

        init_hvm_pv_info();

        xen_hvm_init_shared_info();

        xen_panic_handler_init();

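        /*
         * With the callback vector, event channel notifications arrive
         * as a dedicated vector on the target vcpu instead of being
         * bounced through the emulated platform PCI interrupt.
         */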
        if (xen_feature(XENFEAT_hvm_callback_vector))
                xen_have_vector_callback = 1;
        xen_hvm_smp_init();
        register_cpu_notifier(&xen_hvm_cpu_notifier);
        xen_unplug_emulated_devices();
        x86_init.irqs.intr_init = xen_init_IRQ;
        xen_hvm_init_time_ops();
        xen_hvm_init_mmu_ops();
#ifdef CONFIG_KEXEC_CORE
        machine_ops.shutdown = xen_hvm_shutdown;
        machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
#endif
}
#endif

static bool xen_nopv;
static __init int xen_parse_nopv(char *arg)
{
        xen_nopv = true;
        return 0;
}
early_param("xen_nopv", xen_parse_nopv);

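/*
 * Booting with "xen_nopv" makes detection report no hypervisor, so an
 * HVM guest runs without the PV extensions set up in this file.
 */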
static uint32_t __init xen_platform(void)
{
        if (xen_nopv)
                return 0;

        return xen_cpuid_base();
}

bool xen_hvm_need_lapic(void)
{
        if (xen_nopv)
                return false;
        if (xen_pv_domain())
                return false;
        if (!xen_hvm_domain())
                return false;
        if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
                return false;
        return true;
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);

static void xen_set_cpu_features(struct cpuinfo_x86 *c)
{
        if (xen_pv_domain()) {
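                /*
                 * Returns to user mode go through the hypervisor on PV,
                 * which appears to be why the SYSRET SS-attribute
                 * erratum workaround can be dropped here.
                 */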
                clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
                set_cpu_cap(c, X86_FEATURE_XENPV);
        }
}

const struct hypervisor_x86 x86_hyper_xen = {
        .name                   = "Xen",
        .detect                 = xen_platform,
#ifdef CONFIG_XEN_PVHVM
        .init_platform          = xen_hvm_guest_init,
#endif
        .x2apic_available       = xen_x2apic_para_available,
        .set_cpu_features       = xen_set_cpu_features,
};
EXPORT_SYMBOL(x86_hyper_xen);

#ifdef CONFIG_HOTPLUG_CPU
void xen_arch_register_cpu(int num)
{
        arch_register_cpu(num);
}
EXPORT_SYMBOL(xen_arch_register_cpu);

void xen_arch_unregister_cpu(int num)
{
        arch_unregister_cpu(num);
}
EXPORT_SYMBOL(xen_arch_unregister_cpu);
#endif