#ifndef _ASM_X86_SYSTEM_H_
#define _ASM_X86_SYSTEM_H_

#ifdef CONFIG_X86_32
# include "system_32.h"
#else
# include "system_64.h"
#endif

#ifdef __KERNEL__

/*
 * Write "base" into the scattered base-address bytes of the 8-byte
 * segment descriptor at "addr" (bytes 2, 3, 4 and 7).
 */
#define _set_base(addr, base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) :"m" (*((addr)+2)), "m" (*((addr)+4)), "m" (*((addr)+7)), "0" (base)); } while (0)

/*
 * Write "limit" into the scattered limit bytes of the descriptor at
 * "addr" (bytes 0-1 plus the low nibble of byte 6), preserving the
 * flag bits in the high nibble of byte 6.
 */
#define _set_limit(addr, limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) :"m" (*(addr)), "m" (*((addr)+6)), "0" (limit)); } while (0)

#define set_base(ldt, base) _set_base(((char *)&(ldt)) , (base))
#define set_limit(ldt, limit) _set_limit(((char *)&(ldt)) , ((limit)-1))
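
/*
 * Illustrative sketch, not part of the original header: patch the base
 * and limit of one raw 8-byte descriptor. "__example_set_desc", "desc"
 * and "size" are hypothetical names; "size" is in bytes and must fit
 * the descriptor's granularity.
 */
static inline void __example_set_desc(char *desc, unsigned long base,
				      unsigned long size)
{
	_set_base(desc, base);		/* bytes 2-4 and 7 of the descriptor */
	_set_limit(desc, size - 1);	/* same as set_limit(*desc, size) */
}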

/*
 * Load a segment register. Falls back on loading the null selector if
 * the given selector faults.
 */
#define loadsegment(seg, value)			\
	asm volatile("\n"			\
		"1:\t" "movl %k0,%%" #seg "\n"	\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t" "movl %k1, %%" #seg "\n\t" \
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		_ASM_ALIGN "\n\t" _ASM_PTR " 1b,3b\n" \
		".previous"			\
		: :"r" (value), "r" (0))
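
/*
 * Illustrative sketch, not part of the original header: reload %gs with
 * a caller-supplied selector. A bad selector takes the .fixup path above
 * and leaves %gs holding the null selector instead of oopsing.
 * "__example_reload_gs" is a hypothetical name.
 */
static inline void __example_reload_gs(unsigned int sel)
{
	loadsegment(gs, sel);
}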

/*
 * Save a segment register.
 */
#define savesegment(seg, value) \
	asm volatile("mov %%" #seg ",%0":"=rm" (value))
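
/*
 * Illustrative sketch, not part of the original header: snapshot the
 * current %fs selector, as context-switch code does before switching
 * user segment state. "__example_read_fs" is a hypothetical name.
 */
static inline unsigned int __example_read_fs(void)
{
	unsigned int sel;

	savesegment(fs, sel);
	return sel;
}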

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0" :"=r" (__limit):"r" (segment));
	return __limit + 1;
}

#endif /* __KERNEL__ */

static inline void clflush(void *__p)
{
	asm volatile("clflush %0" : "+m" (*(char __force *)__p));
}
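
/*
 * Illustrative sketch, not part of the original header: flush a buffer
 * from the cache hierarchy line by line. The 64-byte stride is an
 * assumption; real callers should use the cache line size reported by
 * the CPU, and typically need a memory fence to order the flushes
 * against surrounding stores. "__example_clflush_range" is hypothetical.
 */
static inline void __example_clflush_range(void *addr, unsigned long size)
{
	char *p;

	for (p = addr; p < (char *)addr + size; p += 64)
		clflush(p);
}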

#define nop() __asm__ __volatile__ ("nop")

void disable_hlt(void);
void enable_hlt(void);
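
/*
 * Illustrative sketch, not part of the original header: bracket a
 * section during which the idle loop must not execute HLT. Assumes the
 * two calls nest like a counter and must always be paired;
 * "__example_no_hlt_section" and "fn" are hypothetical.
 */
static inline void __example_no_hlt_section(void (*fn)(void))
{
	disable_hlt();		/* keep the idle loop out of HLT */
	fn();
	enable_hlt();
}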

extern int es7000_plat;
void cpu_idle_wait(void);

extern unsigned long arch_align_stack(unsigned long sp);
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);

void default_idle(void);

#endif