x86: convert pda ops to wrappers around x86 percpu accessors
arch/x86/include/asm/percpu.h
#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg            gs
#define __percpu_mov_op         movq
#else
#define __percpu_seg            fs
#define __percpu_mov_op         movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU finds the address of a per-cpu variable.
 *
 * Args:
 *    var - variable name
 *    reg - 32bit register (a 64bit register on x86_64, to match movq)
 *
 * The resulting address is stored in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)                                               \
        __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg;      \
        lea per_cpu__##var(reg), reg
#define PER_CPU_VAR(var)        %__percpu_seg:per_cpu__##var
#else /* ! SMP */
#define PER_CPU(var, reg)                                               \
        __percpu_mov_op $per_cpu__##var, reg
#define PER_CPU_VAR(var)        per_cpu__##var
#endif  /* SMP */

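/*
 * Illustrative usage from assembly (not part of this header); any
 * DEFINE_PER_CPU variable works in place of this_cpu_off:
 *
 *      PER_CPU(this_cpu_off, %ebx)             # %ebx = &this_cpu_off
 *      movl PER_CPU_VAR(this_cpu_off), %eax    # %eax = this CPU's offset
 */
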
#else /* ...!ASSEMBLY */

#include <linux/stringify.h>

#ifdef CONFIG_SMP
#define __percpu_seg_str        "%%"__stringify(__percpu_seg)":"
#define __my_cpu_offset         x86_read_percpu(this_cpu_off)
#else
#define __percpu_seg_str
#endif

#include <asm-generic/percpu.h>

/* We can use this directly for the local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

/*
 * The dead "if (0)" arm below never generates code; it exists only to
 * type-check "val" against the per-cpu variable's type at compile time.
 * The 8-byte case allows only "r" (no "i"): most instructions cannot
 * take a full 64-bit immediate.
 */
#define percpu_to_op(op, var, val)                      \
do {                                                    \
        typedef typeof(var) T__;                        \
        if (0) {                                        \
                T__ tmp__;                              \
                tmp__ = (val);                          \
        }                                               \
        switch (sizeof(var)) {                          \
        case 1:                                         \
                asm(op "b %1,"__percpu_seg_str"%0"      \
                    : "+m" (var)                        \
                    : "ri" ((T__)val));                 \
                break;                                  \
        case 2:                                         \
                asm(op "w %1,"__percpu_seg_str"%0"      \
                    : "+m" (var)                        \
                    : "ri" ((T__)val));                 \
                break;                                  \
        case 4:                                         \
                asm(op "l %1,"__percpu_seg_str"%0"      \
                    : "+m" (var)                        \
                    : "ri" ((T__)val));                 \
                break;                                  \
        case 8:                                         \
                asm(op "q %1,"__percpu_seg_str"%0"      \
                    : "+m" (var)                        \
                    : "r" ((T__)val));                  \
                break;                                  \
        default: __bad_percpu_size();                   \
        }                                               \
} while (0)

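/*
 * For illustration (not part of this header): with CONFIG_SMP on
 * 32-bit, a 4-byte "foo" makes percpu_to_op("mov", per_cpu__foo, 5)
 * expand to roughly
 *
 *      asm("movl %1,%%fs:%0" : "+m" (per_cpu__foo) : "ri" (5));
 *
 * i.e. one store through the %fs-based per-cpu segment.
 */
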
#define percpu_from_op(op, var)                         \
({                                                      \
        typeof(var) ret__;                              \
        switch (sizeof(var)) {                          \
        case 1:                                         \
                asm(op "b "__percpu_seg_str"%1,%0"      \
                    : "=r" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
        case 2:                                         \
                asm(op "w "__percpu_seg_str"%1,%0"      \
                    : "=r" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
        case 4:                                         \
                asm(op "l "__percpu_seg_str"%1,%0"      \
                    : "=r" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
        case 8:                                         \
                asm(op "q "__percpu_seg_str"%1,%0"      \
                    : "=r" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
        default: __bad_percpu_size();                   \
        }                                               \
        ret__;                                          \
})

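/*
 * Likewise (illustrative), percpu_from_op("mov", per_cpu__foo) on a
 * 4-byte "foo" becomes roughly
 *
 *      asm("movl %%fs:%1,%0" : "=r" (ret__) : "m" (per_cpu__foo));
 *
 * reading the local CPU's copy in a single instruction.
 */
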
#define x86_read_percpu(var) percpu_from_op("mov", per_cpu__##var)
#define x86_write_percpu(var, val) percpu_to_op("mov", per_cpu__##var, val)
#define x86_add_percpu(var, val) percpu_to_op("add", per_cpu__##var, val)
#define x86_sub_percpu(var, val) percpu_to_op("sub", per_cpu__##var, val)
#define x86_or_percpu(var, val) percpu_to_op("or", per_cpu__##var, val)

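/*
 * Example usage (illustrative; "irq_count_local" and its helpers are
 * hypothetical, not defined anywhere in the tree):
 *
 *      DEFINE_PER_CPU(unsigned long, irq_count_local);
 *
 *      static inline void note_irq(void)
 *      {
 *              x86_add_percpu(irq_count_local, 1);
 *      }
 *
 *      static inline unsigned long local_irq_total(void)
 *      {
 *              return x86_read_percpu(irq_count_local);
 *      }
 *
 * Each accessor compiles to a single %fs/%gs-relative instruction, but
 * the result is a value, not an lvalue.
 */
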
/* This is not atomic against other CPUs -- CPU preemption needs to be off.
 *
 * btr leaves the bit's old value in CF; "sbbl %0,%0" then turns CF into
 * 0 or -1, so old__ is nonzero iff the bit was previously set.
 */
#define x86_test_and_clear_bit_percpu(bit, var)                         \
({                                                                      \
        int old__;                                                      \
        asm volatile("btr %1,"__percpu_seg_str"%c2\n\tsbbl %0,%0"       \
                     : "=r" (old__)                                     \
                     : "dIr" (bit), "i" (&per_cpu__##var) : "memory");  \
        old__;                                                          \
})

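/*
 * Illustrative ("pending_mask" is a hypothetical per-cpu bitmask),
 * with preemption already disabled by the caller:
 *
 *      DEFINE_PER_CPU(unsigned long, pending_mask);
 *
 *      if (x86_test_and_clear_bit_percpu(0, pending_mask))
 *              handle_pending_work();  (bit 0 was set for this CPU)
 */
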
#ifdef CONFIG_X86_64
/* Point %gs at the given CPU's PDA (and thus its per-cpu area). */
extern void load_pda_offset(int cpu);
#else
static inline void load_pda_offset(int cpu) { }
#endif

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before the per_cpu
 * areas are allocated.
 */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)                  \
        DEFINE_PER_CPU(_type, _name) = _initvalue;                      \
        __typeof__(_type) _name##_early_map[NR_CPUS] __initdata =       \
                                { [0 ... NR_CPUS-1] = _initvalue };     \
        __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)                      \
        EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)                     \
        DECLARE_PER_CPU(_type, _name);                          \
        extern __typeof__(_type) *_name##_early_ptr;            \
        extern __typeof__(_type)  _name##_early_map[]

#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu)                              \
        *(early_per_cpu_ptr(_name) ?                            \
                &early_per_cpu_ptr(_name)[_cpu] :               \
                &per_cpu(_name, _cpu))

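/*
 * For example (this mirrors how the kernel uses these macros elsewhere,
 * shown here only for illustration):
 *
 *      DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
 *
 * Early boot code reads early_per_cpu(x86_cpu_to_apicid, cpu), which
 * uses the static __initdata map until setup_per_cpu_areas() copies it
 * into the real per-cpu area and clears x86_cpu_to_apicid_early_ptr.
 */
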
#else   /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)          \
        DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)                      \
        EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)                     \
        DECLARE_PER_CPU(_type, _name)

#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif  /* !CONFIG_SMP */

#endif /* _ASM_X86_PERCPU_H */