#define PER_CPU_VAR(var) per_cpu__##var
#endif /* SMP */
+#ifdef CONFIG_X86_64_SMP
+#define INIT_PER_CPU_VAR(var) init_per_cpu__##var
+#else
+#define INIT_PER_CPU_VAR(var) per_cpu__##var
+#endif
+
#else /* ...!ASSEMBLY */
#include <linux/stringify.h>
#ifdef CONFIG_SMP
-#define __percpu_seg_str "%%"__stringify(__percpu_seg)":"
+#define __percpu_arg(x) "%%"__stringify(__percpu_seg)":%P" #x
#define __my_cpu_offset percpu_read(this_cpu_off)
#else
-#define __percpu_seg_str
+#define __percpu_arg(x) "%" #x
+#endif
+
+/*
+ * Initialized pointers to per-cpu variables needed by the boot
+ * processor must use these macros so that they pick up the proper
+ * address offset from __per_cpu_load on SMP.
+ *
+ * A matching entry must also be added to vmlinux_64.lds.S.
+ */
+#define DECLARE_INIT_PER_CPU(var) \
+ extern typeof(per_cpu_var(var)) init_per_cpu_var(var)
+
+#ifdef CONFIG_X86_64_SMP
+#define init_per_cpu_var(var) init_per_cpu__##var
+#else
+#define init_per_cpu_var(var) per_cpu_var(var)
#endif
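(Illustrative sketch, not part of this patch: a per-cpu variable whose address
must be taken before the per-cpu areas are set up would be wired up roughly as
below.  "boot_state" and its struct type are made-up names, and the linker
script line is only a sketch of the required vmlinux_64.lds.S entry.)

	/* definition plus the boot-time alias declaration */
	DEFINE_PER_CPU(struct boot_state, boot_state);
	DECLARE_INIT_PER_CPU(boot_state);	/* extern struct boot_state init_per_cpu__boot_state */

	/* early boot code references the load-address copy */
	struct boot_state *p = &init_per_cpu_var(boot_state);

	/* vmlinux_64.lds.S then needs something along the lines of:
	 *	init_per_cpu__boot_state = per_cpu__boot_state + __per_cpu_load;
	 */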
/* For arch-specific code, we can use direct single-insn ops (they
} \
switch (sizeof(var)) { \
case 1: \
- asm(op "b %1,"__percpu_seg_str"%0" \
+ asm(op "b %1,"__percpu_arg(0) \
: "+m" (var) \
- : "ri" ((T__)val)); \
+ : "qi" ((T__)(val))); \
break; \
case 2: \
- asm(op "w %1,"__percpu_seg_str"%0" \
+ asm(op "w %1,"__percpu_arg(0) \
: "+m" (var) \
- : "ri" ((T__)val)); \
+ : "ri" ((T__)(val))); \
break; \
case 4: \
- asm(op "l %1,"__percpu_seg_str"%0" \
+ asm(op "l %1,"__percpu_arg(0) \
: "+m" (var) \
- : "ri" ((T__)val)); \
+ : "ri" ((T__)(val))); \
break; \
case 8: \
- asm(op "q %1,"__percpu_seg_str"%0" \
+ asm(op "q %1,"__percpu_arg(0) \
: "+m" (var) \
- : "r" ((T__)val)); \
+ : "re" ((T__)(val))); \
break; \
default: __bad_percpu_size(); \
} \
typeof(var) ret__; \
switch (sizeof(var)) { \
case 1: \
- asm(op "b "__percpu_seg_str"%1,%0" \
- : "=r" (ret__) \
+ asm(op "b "__percpu_arg(1)",%0" \
+ : "=q" (ret__) \
: "m" (var)); \
break; \
case 2: \
- asm(op "w "__percpu_seg_str"%1,%0" \
+ asm(op "w "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 4: \
- asm(op "l "__percpu_seg_str"%1,%0" \
+ asm(op "l "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
case 8: \
- asm(op "q "__percpu_seg_str"%1,%0" \
+ asm(op "q "__percpu_arg(1)",%0" \
: "=r" (ret__) \
: "m" (var)); \
break; \
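(Illustrative only: via __percpu_arg() the accessors emit a single
segment-relative instruction.  For a hypothetical per-cpu int "foo",
percpu_write(foo, 1) and percpu_read(foo) compile to roughly the following on
an x86-64 SMP build, where __percpu_seg is gs; a UP build drops the segment
prefix and uses an ordinary memory operand.)

	movl $1,%gs:per_cpu__foo	/* percpu_to_op("mov", per_cpu__foo, 1) */
	movl %gs:per_cpu__foo,%eax	/* percpu_from_op("mov", per_cpu__foo) */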
#define x86_test_and_clear_bit_percpu(bit, var) \
({ \
int old__; \
- asm volatile("btr %1,"__percpu_seg_str"%c2\n\tsbbl %0,%0" \
- : "=r" (old__) \
- : "dIr" (bit), "i" (&per_cpu__##var) : "memory"); \
+ asm volatile("btr %2,"__percpu_arg(1)"\n\tsbbl %0,%0" \
+ : "=r" (old__), "+m" (per_cpu__##var) \
+ : "dIr" (bit)); \
old__; \
})
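(Hypothetical usage sketch: the macro tests and clears a bit in this CPU's
copy of a per-cpu word in one instruction and returns the old bit value.
"nmi_touch_pending" and the handler are made-up names, not from this patch.)

	DEFINE_PER_CPU(unsigned long, nmi_touch_pending);	/* made-up flag word */

	if (x86_test_and_clear_bit_percpu(0, nmi_touch_pending))
		handle_pending_touch();		/* hypothetical handler */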
/* We can use this directly for local CPU (faster). */
DECLARE_PER_CPU(unsigned long, this_cpu_off);
-#ifdef CONFIG_X86_64
-extern void load_pda_offset(int cpu);
-#else
-static inline void load_pda_offset(int cpu) { }
-#endif
-
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_SMP