// SPDX-License-Identifier: GPL-2.0
#include <asm/paravirt.h>
#include <asm/asm-offsets.h>
#include <linux/stringify.h>

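/*
 * Native instruction snippets for the common paravirt operations.
 * DEF_NATIVE() assembles the given code between start_<ops>_<name> and
 * end_<ops>_<name> markers; native_patch() below copies a snippet over
 * the corresponding paravirt call site when running on bare metal.
 */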
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq");
DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax");
DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax");
DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3");
DEF_NATIVE(pv_mmu_ops, flush_tlb_single, "invlpg (%rdi)");
DEF_NATIVE(pv_cpu_ops, wbinvd, "wbinvd");

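/* Native return to user space: swap back to the user GS base, then sysretq. */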
DEF_NATIVE(pv_cpu_ops, usergs_sysret64, "swapgs; sysretq");
DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");

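/*
 * Identity snippets: return the first argument unchanged. The 32-bit
 * variant moves %edi, which zero-extends into the full %rax.
 */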
DEF_NATIVE(, mov32, "mov %edi, %eax");
DEF_NATIVE(, mov64, "mov %rdi, %rax");

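/*
 * On bare metal a queued spinlock is released with a single byte store,
 * and a native CPU is never preempted, so vcpu_is_preempted() is
 * simply "return false".
 */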
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
DEF_NATIVE(pv_lock_ops, vcpu_is_preempted, "xor %rax, %rax");
#endif

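/*
 * Patch call sites whose target is one of the paravirt identity ops
 * with the corresponding inline mov from above.
 */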
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov32, end__mov32);
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	return paravirt_patch_insns(insnbuf, len,
				    start__mov64, end__mov64);
}

extern bool pv_is_native_spin_unlock(void);
extern bool pv_is_native_vcpu_is_preempted(void);

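/*
 * Replace the indirect paravirt call at a patch site with the matching
 * native instruction sequence; any op without a native snippet is
 * handed to paravirt_patch_default().
 */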
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

#define PATCH_SITE(ops, x)					\
		case PARAVIRT_PATCH(ops.x):			\
			start = start_##ops##_##x;		\
			end = end_##ops##_##x;			\
			goto patch_site
	switch (type) {
		PATCH_SITE(pv_irq_ops, restore_fl);
		PATCH_SITE(pv_irq_ops, save_fl);
		PATCH_SITE(pv_irq_ops, irq_enable);
		PATCH_SITE(pv_irq_ops, irq_disable);
		PATCH_SITE(pv_cpu_ops, usergs_sysret64);
		PATCH_SITE(pv_cpu_ops, swapgs);
		PATCH_SITE(pv_mmu_ops, read_cr2);
		PATCH_SITE(pv_mmu_ops, read_cr3);
		PATCH_SITE(pv_mmu_ops, write_cr3);
		PATCH_SITE(pv_mmu_ops, flush_tlb_single);
		PATCH_SITE(pv_cpu_ops, wbinvd);
#if defined(CONFIG_PARAVIRT_SPINLOCKS)
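		/*
		 * Only patch in the native sequence if the lock op has
		 * not been replaced by a paravirt-aware implementation
		 * (e.g. when running as a KVM or Xen guest); otherwise
		 * fall through to default patching.
		 */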
		case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
			if (pv_is_native_spin_unlock()) {
				start = start_pv_lock_ops_queued_spin_unlock;
				end   = end_pv_lock_ops_queued_spin_unlock;
				goto patch_site;
			}
			goto patch_default;

		case PARAVIRT_PATCH(pv_lock_ops.vcpu_is_preempted):
			if (pv_is_native_vcpu_is_preempted()) {
				start = start_pv_lock_ops_vcpu_is_preempted;
				end   = end_pv_lock_ops_vcpu_is_preempted;
				goto patch_site;
			}
			goto patch_default;
#endif

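	/*
	 * The patch_default label is only jumped to when
	 * CONFIG_PARAVIRT_SPINLOCKS is enabled, hence the
	 * __maybe_unused annotation below.
	 */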
	default:
patch_default: __maybe_unused
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;

patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;
	}
#undef PATCH_SITE
	return ret;
}