arch/arm64/include/asm/daifflags.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 ARM Ltd.
 */
#ifndef __ASM_DAIFFLAGS_H
#define __ASM_DAIFFLAGS_H

#include <linux/irqflags.h>

#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>

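/*
 * Common PSTATE.DAIF configurations, typically passed to
 * local_daif_restore():
 *  - DAIF_PROCCTX:	  nothing masked (normal process context)
 *  - DAIF_PROCCTX_NOIRQ: IRQs masked
 *  - DAIF_ERRCTX:	  IRQs and SError masked (error handling context)
 *  - DAIF_MASK:	  debug, SError, IRQ and FIQ all masked
 */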
#define DAIF_PROCCTX		0
#define DAIF_PROCCTX_NOIRQ	PSR_I_BIT
#define DAIF_ERRCTX		(PSR_I_BIT | PSR_A_BIT)
#define DAIF_MASK		(PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT)


/* mask/save/unmask/restore all exceptions, including interrupts. */
static inline void local_daif_mask(void)
{
	WARN_ON(system_has_prio_mask_debugging() &&
		(read_sysreg_s(SYS_ICC_PMR_EL1) == (GIC_PRIO_IRQOFF |
						    GIC_PRIO_PSR_I_SET)));

	asm volatile(
		"msr	daifset, #0xf		// local_daif_mask\n"
		:
		:
		: "memory");

	/* Don't really care for a dsb here, we don't intend to enable IRQs */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	trace_hardirqs_off();
}

static inline unsigned long local_daif_save(void)
{
	unsigned long flags;

	flags = read_sysreg(daif);

	if (system_uses_irq_prio_masking()) {
		/* If IRQs are masked with PMR, reflect it in the flags */
		if (read_sysreg_s(SYS_ICC_PMR_EL1) != GIC_PRIO_IRQON)
			flags |= PSR_I_BIT;
	}

	local_daif_mask();

	return flags;
}
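
/*
 * Note on the PMR check above: with GIC priority masking in use, IRQs can
 * be masked purely through ICC_PMR_EL1 while PSTATE.I stays clear.
 * local_daif_save() folds that state into the returned flags as PSR_I_BIT,
 * so a later local_daif_restore() sees a consistent "IRQs disabled" view
 * regardless of how the masking was done.
 */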

static inline void local_daif_restore(unsigned long flags)
{
	bool irq_disabled = flags & PSR_I_BIT;

	WARN_ON(system_has_prio_mask_debugging() &&
		!(read_sysreg(daif) & PSR_I_BIT));

	if (!irq_disabled) {
		trace_hardirqs_on();

		if (system_uses_irq_prio_masking()) {
			gic_write_pmr(GIC_PRIO_IRQON);
			dsb(sy);
		}
	} else if (system_uses_irq_prio_masking()) {
		u64 pmr;

		if (!(flags & PSR_A_BIT)) {
			/*
			 * If interrupts are disabled but we can take
			 * asynchronous errors, we can take NMIs
			 */
			flags &= ~PSR_I_BIT;
			pmr = GIC_PRIO_IRQOFF;
		} else {
			pmr = GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET;
		}

		/*
		 * There has been concern that the write to daif
		 * might be reordered before this write to PMR.
		 * From the ARM ARM DDI 0487D.a, section D1.7.1
		 * "Accessing PSTATE fields":
		 *   Writes to the PSTATE fields have side-effects on
		 *   various aspects of the PE operation. All of these
		 *   side-effects are guaranteed:
		 *     - Not to be visible to earlier instructions in
		 *       the execution stream.
		 *     - To be visible to later instructions in the
		 *       execution stream
		 *
		 * Also, writes to PMR are self-synchronizing, so no
		 * interrupts with a lower priority than PMR are signaled
		 * to the PE after the write.
		 *
		 * So we don't need additional synchronization here.
		 */
		gic_write_pmr(pmr);
	}

	write_sysreg(flags, daif);

	if (irq_disabled)
		trace_hardirqs_off();
}
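
/*
 * Illustrative usage sketch (hypothetical caller, not taken from this
 * file): the save/restore helpers above are meant to be paired around a
 * region that must not be interrupted by any exception class:
 *
 *	unsigned long flags;
 *
 *	flags = local_daif_save();
 *	// ... region runs with debug, SError, IRQ and FIQ masked ...
 *	local_daif_restore(flags);
 *
 * The DAIF_* values defined above are also valid arguments to
 * local_daif_restore(), e.g. local_daif_restore(DAIF_PROCCTX) to move to
 * the fully unmasked process-context state.
 */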

#endif