arch/x86/xen/irq.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/hardirq.h>

#include <asm/x86_init.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <xen/features.h>
#include <xen/events.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
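	/* XENVER_version is a cheap no-op hypercall; the trap into Xen is what matters. */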
	(void)HYPERVISOR_xen_version(0, NULL);
}

asmlinkage __visible unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = this_cpu_read(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
	return (-flags) & X86_EFLAGS_IF;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);

__visible void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* See xen_irq_enable() for why preemption must be disabled. */
	preempt_disable();
	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;

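	/* flags was inverted above, so 0 means events are being unmasked. */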
	if (flags == 0) {
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
		preempt_enable();
	} else
		preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);

asmlinkage __visible void xen_irq_disable(void)
{
	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);

asmlinkage __visible void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/*
	 * We may be preempted as soon as vcpu->evtchn_upcall_mask is
	 * cleared, so disable preemption to ensure we check for
	 * events on the VCPU we are still running on.
	 */
	preempt_disable();

	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();

	preempt_enable();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);

static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}

static void xen_halt(void)
{
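	/* With interrupts disabled nothing can wake a blocked VCPU, so take it
	   down instead (the PV analogue of parking the CPU with hlt). */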
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down,
				   xen_vcpu_nr(smp_processor_id()), NULL);
	else
		xen_safe_halt();
}

static const struct pv_irq_ops xen_irq_ops __initconst = {
	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),

	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
};

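/* Install the Xen PV irq ops and hook the interrupt init path. */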
void __init xen_init_irq_ops(void)
{
	pv_irq_ops = xen_irq_ops;
	x86_init.irqs.intr_init = xen_init_IRQ;
}