#ifndef _ASM_X86_PERCPU_H
#define _ASM_X86_PERCPU_H

#ifdef CONFIG_X86_64
#define __percpu_seg            gs
#define __percpu_mov_op         movq
#else
#define __percpu_seg            fs
#define __percpu_mov_op         movl
#endif

#ifdef __ASSEMBLY__

/*
 * PER_CPU computes the address of the current CPU's copy of a per-cpu
 * variable.
 *
 * Args:
 *    var - variable name
 *    reg - register to use (32-bit on x86_32, 64-bit on x86_64)
 *
 * The resulting address is left in the "reg" argument.
 *
 * Example:
 *    PER_CPU(cpu_gdt_descr, %ebx)
 */
#ifdef CONFIG_SMP
#define PER_CPU(var, reg)                                               \
        __percpu_mov_op %__percpu_seg:per_cpu__this_cpu_off, reg;       \
        lea per_cpu__##var(reg), reg
#define PER_CPU_VAR(var)        %__percpu_seg:per_cpu__##var
#else /* ! SMP */
#define PER_CPU(var, reg)                                               \
        __percpu_mov_op $per_cpu__##var, reg
#define PER_CPU_VAR(var)        per_cpu__##var
#endif  /* SMP */
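
/*
 * Illustrative sketch (hypothetical per-cpu variable "foo", not defined in
 * this header): in a .S file, loading the current CPU's copy might look like
 *
 *	movq PER_CPU_VAR(foo), %rax
 *
 * which on x86_64 SMP expands to "movq %gs:per_cpu__foo, %rax", while
 * PER_CPU(foo, %rax) would instead leave the variable's address in %rax.
 */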

#ifdef CONFIG_X86_64_SMP
#define INIT_PER_CPU_VAR(var)  init_per_cpu__##var
#else
#define INIT_PER_CPU_VAR(var)  per_cpu__##var
#endif

#else /* ...!ASSEMBLY */

#include <linux/stringify.h>
#include <asm/sections.h>

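/*
 * The two helpers below are used by the dynamic per-cpu allocator: they
 * convert between an address inside the allocator's area (based at
 * pcpu_base_addr) and the canonical per-cpu "pointer", which is kept as an
 * offset relative to __per_cpu_start so the usual per_cpu_ptr() arithmetic
 * works for dynamically allocated objects as well as static ones.
 */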
#define __addr_to_pcpu_ptr(addr)                                        \
        (void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr  \
                 + (unsigned long)__per_cpu_start)
#define __pcpu_ptr_to_addr(ptr)                                         \
        (void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr   \
                 - (unsigned long)__per_cpu_start)

#ifdef CONFIG_SMP
#define __percpu_arg(x)         "%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset         percpu_read(this_cpu_off)
#else
#define __percpu_arg(x)         "%" #x
#endif

/*
 * Initialized pointers to per-cpu variables that are needed by the boot
 * processor must use these macros so that, on SMP, they get the proper
 * address offset from __per_cpu_load.
 *
 * A matching entry must also be added to vmlinux_64.lds.S.
 */
#define DECLARE_INIT_PER_CPU(var) \
       extern typeof(per_cpu_var(var)) init_per_cpu_var(var)

#ifdef CONFIG_X86_64_SMP
#define init_per_cpu_var(var)  init_per_cpu__##var
#else
#define init_per_cpu_var(var)  per_cpu_var(var)
#endif
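
/*
 * Illustrative sketch (hypothetical per-cpu variable "foo", not defined in
 * this header): boot code that needs the load-time copy of foo would pair
 *
 *	DECLARE_PER_CPU(struct foo_struct, foo);
 *	DECLARE_INIT_PER_CPU(foo);
 *
 * with the corresponding entry in vmlinux_64.lds.S (as the comment above
 * notes), and then reference init_per_cpu_var(foo), which resolves to
 * init_per_cpu__foo on 64-bit SMP and to the plain per-cpu symbol otherwise.
 */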

/* For arch-specific code, we can use direct single-insn ops (they
 * don't give an lvalue though). */
extern void __bad_percpu_size(void);

#define percpu_to_op(op, var, val)                      \
do {                                                    \
        typedef typeof(var) T__;                        \
        /* compile-time check that "val" is assignable to the variable's type */ \
        if (0) {                                        \
                T__ tmp__;                              \
                tmp__ = (val);                          \
        }                                               \
        switch (sizeof(var)) {                          \
        case 1:                                         \
                asm(op "b %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "ri" ((T__)val));                 \
                break;                                  \
        case 2:                                         \
                asm(op "w %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "ri" ((T__)val));                 \
                break;                                  \
        case 4:                                         \
                asm(op "l %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "ri" ((T__)val));                 \
                break;                                  \
        case 8:                                         \
                asm(op "q %1,"__percpu_arg(0)           \
                    : "+m" (var)                        \
                    : "re" ((T__)val));                 \
                break;                                  \
        default: __bad_percpu_size();                   \
        }                                               \
} while (0)

#define percpu_from_op(op, var)                         \
({                                                      \
        typeof(var) ret__;                              \
        switch (sizeof(var)) {                          \
        case 1:                                         \
                asm(op "b "__percpu_arg(1)",%0"         \
                    : "=r" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
        case 2:                                         \
                asm(op "w "__percpu_arg(1)",%0"         \
                    : "=r" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
        case 4:                                         \
                asm(op "l "__percpu_arg(1)",%0"         \
                    : "=r" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
        case 8:                                         \
                asm(op "q "__percpu_arg(1)",%0"         \
                    : "=r" (ret__)                      \
                    : "m" (var));                       \
                break;                                  \
        default: __bad_percpu_size();                   \
        }                                               \
        ret__;                                          \
})

#define percpu_read(var)        percpu_from_op("mov", per_cpu__##var)
#define percpu_write(var, val)  percpu_to_op("mov", per_cpu__##var, val)
#define percpu_add(var, val)    percpu_to_op("add", per_cpu__##var, val)
#define percpu_sub(var, val)    percpu_to_op("sub", per_cpu__##var, val)
#define percpu_and(var, val)    percpu_to_op("and", per_cpu__##var, val)
#define percpu_or(var, val)     percpu_to_op("or", per_cpu__##var, val)
#define percpu_xor(var, val)    percpu_to_op("xor", per_cpu__##var, val)
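
/*
 * Illustrative sketch (hypothetical per-cpu counter, not defined in this
 * header): each wrapper above compiles to a single segment-relative
 * instruction on the local CPU's copy, roughly (x86_64 SMP case):
 *
 *	DEFINE_PER_CPU(unsigned long, my_counter);
 *
 *	percpu_write(my_counter, 0);	   mov to   %gs:per_cpu__my_counter
 *	percpu_add(my_counter, 3);	   add to   %gs:per_cpu__my_counter
 *	n = percpu_read(my_counter);	   mov from %gs:per_cpu__my_counter
 *
 * Unlike per_cpu(var, cpu), these always act on the local CPU's copy and,
 * as noted above, do not yield an lvalue.
 */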

/* This is not atomic against other CPUs -- CPU preemption needs to be off */
#define x86_test_and_clear_bit_percpu(bit, var)                         \
({                                                                      \
        int old__;                                                      \
        /* btr puts the old bit value in CF; sbb turns CF into 0 or -1 */ \
        asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0"           \
                     : "=r" (old__), "+m" (per_cpu__##var)              \
                     : "dIr" (bit));                                    \
        old__;                                                          \
})
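
/*
 * Illustrative sketch (hypothetical per-cpu bitmask, not defined in this
 * header): callers are expected to keep preemption off around the op, e.g.
 *
 *	DEFINE_PER_CPU(unsigned long, my_pending);
 *
 *	preempt_disable();
 *	if (x86_test_and_clear_bit_percpu(0, my_pending))
 *		... handle the event that set bit 0 ...
 *	preempt_enable();
 */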

#include <asm-generic/percpu.h>

/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_SMP

/*
 * Define the "EARLY_PER_CPU" macros.  These are used for some per_cpu
 * variables that are initialized and accessed before the per_cpu areas
 * are allocated.
 */

#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)                  \
        DEFINE_PER_CPU(_type, _name) = _initvalue;                      \
        __typeof__(_type) _name##_early_map[NR_CPUS] __initdata =       \
                                { [0 ... NR_CPUS-1] = _initvalue };     \
        __typeof__(_type) *_name##_early_ptr __refdata = _name##_early_map

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)                      \
        EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)                     \
        DECLARE_PER_CPU(_type, _name);                          \
        extern __typeof__(_type) *_name##_early_ptr;            \
        extern __typeof__(_type)  _name##_early_map[]

#define early_per_cpu_ptr(_name) (_name##_early_ptr)
#define early_per_cpu_map(_name, _idx) (_name##_early_map[_idx])
#define early_per_cpu(_name, _cpu)                              \
        *(early_per_cpu_ptr(_name) ?                            \
                &early_per_cpu_ptr(_name)[_cpu] :               \
                &per_cpu(_name, _cpu))

#else   /* !CONFIG_SMP */
#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)          \
        DEFINE_PER_CPU(_type, _name) = _initvalue

#define EXPORT_EARLY_PER_CPU_SYMBOL(_name)                      \
        EXPORT_PER_CPU_SYMBOL(_name)

#define DECLARE_EARLY_PER_CPU(_type, _name)                     \
        DECLARE_PER_CPU(_type, _name)

#define early_per_cpu(_name, _cpu) per_cpu(_name, _cpu)
#define early_per_cpu_ptr(_name) NULL
/* no early_per_cpu_map() */

#endif  /* !CONFIG_SMP */
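
/*
 * Illustrative sketch (hypothetical variable, not defined in this header):
 * a per-cpu value that must be readable before the per_cpu areas are set up
 * could be declared as
 *
 *	DEFINE_EARLY_PER_CPU(u16, my_early_id, 0xffff);	(in a .c file)
 *	DECLARE_EARLY_PER_CPU(u16, my_early_id);	(in a header)
 *
 * and always accessed through early_per_cpu(my_early_id, cpu), which uses
 * the __initdata early map until the early pointer is typically cleared to
 * NULL once the real per_cpu areas exist, after which it falls through to
 * the normal per_cpu() access.
 */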

#endif /* _ASM_X86_PERCPU_H */