#ifndef _ASM_X86_DESC_H
#define _ASM_X86_DESC_H

#include <asm/desc_defs.h>
#include <asm/ldt.h>
#include <asm/mmu.h>
#include <asm/fixmap.h>
#include <asm/irq_vectors.h>

#include <linux/smp.h>
#include <linux/percpu.h>

static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info)
{
	desc->limit0		= info->limit & 0x0ffff;

	desc->base0		= (info->base_addr & 0x0000ffff);
	desc->base1		= (info->base_addr & 0x00ff0000) >> 16;

	desc->type		= (info->read_exec_only ^ 1) << 1;
	desc->type	       |= info->contents << 2;

	desc->s			= 1;
	desc->dpl		= 0x3;
	desc->p			= info->seg_not_present ^ 1;
	desc->limit1		= (info->limit & 0xf0000) >> 16;
	desc->avl		= info->useable;
	desc->d			= info->seg_32bit;
	desc->g			= info->limit_in_pages;

	desc->base2		= (info->base_addr & 0xff000000) >> 24;
	/*
	 * Don't allow setting of the lm bit. It would confuse
	 * user_64bit_mode and would get overridden by sysret anyway.
	 */
	desc->l			= 0;
}
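
/*
 * Illustrative example (not part of this header's API): a flat 32-bit
 * data segment as it might arrive from set_thread_area(2) or
 * modify_ldt(2). fill_ldt() packs it into an 8-byte descriptor,
 * forcing DPL to 3 and clearing the lm bit:
 *
 *	struct user_desc info = {
 *		.base_addr	= 0,
 *		.limit		= 0xfffff,	(2^20 pages == 4 GiB)
 *		.seg_32bit	= 1,
 *		.limit_in_pages	= 1,
 *		.useable	= 1,
 *	};
 *	struct desc_struct d;
 *	fill_ldt(&d, &info);
 */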

extern struct desc_ptr idt_descr;
extern gate_desc idt_table[];
extern const struct desc_ptr debug_idt_descr;
extern gate_desc debug_idt_table[];

struct gdt_page {
	struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(PAGE_SIZE)));

DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);

/* Provide the original GDT */
static inline struct desc_struct *get_cpu_gdt_rw(unsigned int cpu)
{
	return per_cpu(gdt_page, cpu).gdt;
}

/* Provide the current original GDT */
static inline struct desc_struct *get_current_gdt_rw(void)
{
	return this_cpu_ptr(&gdt_page)->gdt;
}

/* Get the fixmap index for a specific processor */
static inline unsigned int get_cpu_gdt_ro_index(int cpu)
{
	return FIX_GDT_REMAP_BEGIN + cpu;
}

/* Provide the fixmap address of the remapped GDT */
static inline struct desc_struct *get_cpu_gdt_ro(int cpu)
{
	unsigned int idx = get_cpu_gdt_ro_index(cpu);
	return (struct desc_struct *)__fix_to_virt(idx);
}

/* Provide the current read-only GDT */
static inline struct desc_struct *get_current_gdt_ro(void)
{
	return get_cpu_gdt_ro(smp_processor_id());
}

/* Provide the physical address of the GDT page. */
static inline phys_addr_t get_cpu_gdt_paddr(unsigned int cpu)
{
	return per_cpu_ptr_to_phys(get_cpu_gdt_rw(cpu));
}
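
/*
 * Illustrative note: the _rw accessors return the writeable per-CPU GDT
 * that the kernel updates (TLS, LDT and TSS slots); the _ro accessors
 * return the read-only fixmap alias of the same page, which is what is
 * normally loaded into GDTR. A sketch of the split, assuming a prepared
 * ldt_desc:
 *
 *	write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_LDT, &ldt, DESC_LDT);
 *	load_fixmap_gdt(cpu);	(GDTR now points at get_cpu_gdt_ro(cpu))
 */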

static inline void pack_gate(gate_desc *gate, unsigned type, unsigned long func,
			     unsigned dpl, unsigned ist, unsigned seg)
{
	gate->offset_low	= (u16) func;
	gate->bits.p		= 1;
	gate->bits.dpl		= dpl;
	gate->bits.zero		= 0;
	gate->bits.type		= type;
	gate->offset_middle	= (u16) (func >> 16);
#ifdef CONFIG_X86_64
	gate->segment		= __KERNEL_CS;
	gate->bits.ist		= ist;
	gate->reserved		= 0;
	gate->offset_high	= (u32) (func >> 32);
#else
	gate->segment		= seg;
	gate->bits.ist		= 0;
#endif
}

static inline int desc_empty(const void *ptr)
{
	const u32 *desc = ptr;

	return !(desc[0] | desc[1]);
}
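
/*
 * Example: desc_empty() views a descriptor as two 32-bit words, so any
 * zero-initialized entry counts as empty regardless of descriptor type:
 *
 *	struct desc_struct d = { };
 *	desc_empty(&d);		(returns 1)
 */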

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define load_TR_desc()				native_load_tr_desc()
#define load_gdt(dtr)				native_load_gdt(dtr)
#define load_idt(dtr)				native_load_idt(dtr)
#define load_tr(tr)				asm volatile("ltr %0"::"m" (tr))
#define load_ldt(ldt)				asm volatile("lldt %0"::"m" (ldt))

#define store_gdt(dtr)				native_store_gdt(dtr)
#define store_idt(dtr)				native_store_idt(dtr)
#define store_tr(tr)				(tr = native_store_tr())

#define load_TLS(t, cpu)			native_load_tls(t, cpu)
#define set_ldt					native_set_ldt

#define write_ldt_entry(dt, entry, desc)	native_write_ldt_entry(dt, entry, desc)
#define write_gdt_entry(dt, entry, desc, type)	native_write_gdt_entry(dt, entry, desc, type)
#define write_idt_entry(dt, entry, g)		native_write_idt_entry(dt, entry, g)

static inline void paravirt_alloc_ldt(struct desc_struct *ldt, unsigned entries)
{
}

static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
{
}
#endif	/* CONFIG_PARAVIRT */

#define store_ldt(ldt) asm("sldt %0" : "=m"(ldt))

static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
{
	memcpy(&idt[entry], gate, sizeof(*gate));
}

static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
{
	memcpy(&ldt[entry], desc, 8);
}

static inline void
native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type)
{
	unsigned int size;

	switch (type) {
	case DESC_TSS:	size = sizeof(tss_desc);	break;
	case DESC_LDT:	size = sizeof(ldt_desc);	break;
	default:	size = sizeof(*gdt);		break;
	}

	memcpy(&gdt[entry], desc, size);
}
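
/*
 * Note on the size switch above: on 64-bit, TSS and LDT descriptors are
 * 16 bytes and occupy two consecutive GDT slots, so sizeof(tss_desc)
 * and sizeof(ldt_desc) differ from the plain 8-byte desc_struct that
 * every other type falls back to. E.g. __set_tss_desc() below therefore
 * copies 16 bytes on x86-64 but 8 on 32-bit.
 */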

static inline void set_tssldt_descriptor(void *d, unsigned long addr,
					 unsigned type, unsigned size)
{
	struct ldttss_desc *desc = d;

	memset(desc, 0, sizeof(*desc));

	desc->limit0		= (u16) size;
	desc->base0		= (u16) addr;
	desc->base1		= (addr >> 16) & 0xFF;
	desc->type		= type;
	desc->p			= 1;
	desc->limit1		= (size >> 16) & 0xF;
	desc->base2		= (addr >> 24) & 0xFF;
#ifdef CONFIG_X86_64
	desc->base3		= (u32) (addr >> 32);
#endif
}
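
/*
 * Limit encoding example: the 20-bit limit is split across two fields,
 * so size == 0x12345 is stored as limit0 == 0x2345 and limit1 == 0x1.
 * Callers pass the limit in bytes minus one, e.g. native_set_ldt()
 * below passes entries * LDT_ENTRY_SIZE - 1.
 */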

static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
{
	struct desc_struct *d = get_cpu_gdt_rw(cpu);
	tss_desc tss;

	set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
			      __KERNEL_TSS_LIMIT);
	write_gdt_entry(d, entry, &tss, DESC_TSS);
}

#define set_tss_desc(cpu, addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)

static inline void native_set_ldt(const void *addr, unsigned int entries)
{
	if (likely(entries == 0))
		asm volatile("lldt %w0"::"q" (0));
	else {
		unsigned cpu = smp_processor_id();
		ldt_desc ldt;

		set_tssldt_descriptor(&ldt, (unsigned long)addr, DESC_LDT,
				      entries * LDT_ENTRY_SIZE - 1);
		write_gdt_entry(get_cpu_gdt_rw(cpu), GDT_ENTRY_LDT,
				&ldt, DESC_LDT);
		asm volatile("lldt %w0"::"q" (GDT_ENTRY_LDT*8));
	}
}
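
/*
 * Usage sketch: installing a one-entry LDT and tearing it down again.
 * (my_ldt is an illustrative name, not a real symbol.)
 *
 *	set_ldt(my_ldt, 1);	(write ldt_desc into the GDT, lldt its selector)
 *	...
 *	set_ldt(NULL, 0);	(load a null selector; see clear_LDT() below)
 */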

static inline void native_load_gdt(const struct desc_ptr *dtr)
{
	asm volatile("lgdt %0"::"m" (*dtr));
}

static inline void native_load_idt(const struct desc_ptr *dtr)
{
	asm volatile("lidt %0"::"m" (*dtr));
}

static inline void native_store_gdt(struct desc_ptr *dtr)
{
	asm volatile("sgdt %0":"=m" (*dtr));
}

static inline void native_store_idt(struct desc_ptr *dtr)
{
	asm volatile("sidt %0":"=m" (*dtr));
}
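
/*
 * Example: the store variants read the descriptor-table registers back,
 * e.g. to check which GDT is currently live (native_load_tr_desc()
 * below does exactly this):
 *
 *	struct desc_ptr dtr;
 *	native_store_gdt(&dtr);	(dtr.address = base, dtr.size = limit)
 */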

/*
 * The LTR instruction marks the TSS GDT entry as busy. On 64-bit, the GDT is
 * a read-only remapping. To prevent a page fault, the GDT is switched to the
 * original writeable version when needed.
 */
#ifdef CONFIG_X86_64
static inline void native_load_tr_desc(void)
{
	struct desc_ptr gdt;
	int cpu = raw_smp_processor_id();
	bool restore = false;
	struct desc_struct *fixmap_gdt;

	native_store_gdt(&gdt);
	fixmap_gdt = get_cpu_gdt_ro(cpu);

	/*
	 * If the current GDT is the read-only fixmap, swap to the original
	 * writeable version. Swap back at the end.
	 */
	if (gdt.address == (unsigned long)fixmap_gdt) {
		load_direct_gdt(cpu);
		restore = true;
	}
	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
	if (restore)
		load_fixmap_gdt(cpu);
}
#else
static inline void native_load_tr_desc(void)
{
	asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
}
#endif

static inline unsigned long native_store_tr(void)
{
	unsigned long tr;

	asm volatile("str %0":"=r" (tr));

	return tr;
}

static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_rw(cpu);
	unsigned int i;

	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}
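
/*
 * Context sketch: native_load_tls() is what makes the TLS GDT slots
 * per-thread. The context-switch path invokes it (via the load_TLS
 * macro) before user mode can touch segment-relative TLS again;
 * assuming next is the incoming task's thread_struct:
 *
 *	load_TLS(next, cpu);	(copy next->tls_array into this CPU's GDT)
 */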

DECLARE_PER_CPU(bool, __tss_limit_invalid);

static inline void force_reload_TR(void)
{
	struct desc_struct *d = get_current_gdt_rw();
	tss_desc tss;

	memcpy(&tss, &d[GDT_ENTRY_TSS], sizeof(tss_desc));

	/*
	 * LTR requires an available TSS, and the TSS is currently
	 * busy. Mark it available so that LTR will work.
	 */
	tss.type = DESC_TSS;
	write_gdt_entry(d, GDT_ENTRY_TSS, &tss, DESC_TSS);

	load_TR_desc();
	this_cpu_write(__tss_limit_invalid, false);
}

/*
 * Call this if you need the TSS limit to be correct, which should be the case
 * if and only if you have TIF_IO_BITMAP set or you're switching to a task
 * with TIF_IO_BITMAP set.
 */
static inline void refresh_tss_limit(void)
{
	DEBUG_LOCKS_WARN_ON(preemptible());

	if (unlikely(this_cpu_read(__tss_limit_invalid)))
		force_reload_TR();
}

/*
 * If you do something evil that corrupts the cached TSS limit (I'm looking
 * at you, VMX exits), call this function.
 *
 * The optimization here is that the TSS limit only matters for Linux if the
 * IO bitmap is in use. If the TSS limit gets forced to its minimum value,
 * everything works except that the IO bitmap will be ignored and all CPL 3
 * IO instructions will #GP, which is exactly what we want for normal tasks.
 */
static inline void invalidate_tss_limit(void)
{
	DEBUG_LOCKS_WARN_ON(preemptible());

	if (unlikely(test_thread_flag(TIF_IO_BITMAP)))
		force_reload_TR();
	else
		this_cpu_write(__tss_limit_invalid, true);
}
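
/*
 * Usage sketch for the pair above: code that lets hardware clobber the
 * TSS limit (e.g. KVM around VMX transitions) invalidates it, and any
 * path that needs a precise limit refreshes it, both with preemption
 * disabled:
 *
 *	preempt_disable();
 *	invalidate_tss_limit();		(cached limit may now be stale)
 *	...
 *	refresh_tss_limit();		(reloads TR only if actually stale)
 *	preempt_enable();
 */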

/* This intentionally ignores lm, since 32-bit apps don't have that field. */
#define LDT_empty(info)					\
	((info)->base_addr		== 0	&&	\
	 (info)->limit			== 0	&&	\
	 (info)->contents		== 0	&&	\
	 (info)->read_exec_only		== 1	&&	\
	 (info)->seg_32bit		== 0	&&	\
	 (info)->limit_in_pages		== 0	&&	\
	 (info)->seg_not_present	== 1	&&	\
	 (info)->useable		== 0)

/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
static inline bool LDT_zero(const struct user_desc *info)
{
	return (info->base_addr		== 0 &&
		info->limit		== 0 &&
		info->contents		== 0 &&
		info->read_exec_only	== 0 &&
		info->seg_32bit		== 0 &&
		info->limit_in_pages	== 0 &&
		info->seg_not_present	== 0 &&
		info->useable		== 0);
}

static inline void clear_LDT(void)
{
	set_ldt(NULL, 0);
}

static inline unsigned long get_desc_base(const struct desc_struct *desc)
{
	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
}

static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
{
	desc->base0 = base & 0xffff;
	desc->base1 = (base >> 16) & 0xff;
	desc->base2 = (base >> 24) & 0xff;
}

static inline unsigned long get_desc_limit(const struct desc_struct *desc)
{
	return desc->limit0 | (desc->limit1 << 16);
}

static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
{
	desc->limit0 = limit & 0xffff;
	desc->limit1 = (limit >> 16) & 0xf;
}
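
/*
 * Round-trip example for the accessors above (note the inherent 32-bit
 * base and 20-bit limit truncation of the descriptor format):
 *
 *	struct desc_struct d = { };
 *	set_desc_base(&d, 0x12345678);
 *	set_desc_limit(&d, 0xabcde);
 *	get_desc_base(&d);	(returns 0x12345678)
 *	get_desc_limit(&d);	(returns 0xabcde)
 */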

void update_intr_gate(unsigned int n, const void *addr);
void alloc_intr_gate(unsigned int n, const void *addr);

extern unsigned long used_vectors[];

#ifdef CONFIG_X86_64
DECLARE_PER_CPU(u32, debug_idt_ctr);
static inline bool is_debug_idt_enabled(void)
{
	if (this_cpu_read(debug_idt_ctr))
		return true;

	return false;
}

static inline void load_debug_idt(void)
{
	load_idt((const struct desc_ptr *)&debug_idt_descr);
}
#else
static inline bool is_debug_idt_enabled(void)
{
	return false;
}

static inline void load_debug_idt(void)
{
}
#endif

/*
 * load_current_idt() must be called with interrupts disabled to avoid
 * races; that way the IDT will always be set back to the expected
 * descriptor. It's also called when a CPU is being initialized, which
 * doesn't need to disable interrupts, as nothing should be bothering
 * the CPU then.
 */
static inline void load_current_idt(void)
{
	if (is_debug_idt_enabled())
		load_debug_idt();
	else
		load_idt((const struct desc_ptr *)&idt_descr);
}

extern void idt_setup_early_handler(void);
extern void idt_setup_early_traps(void);
extern void idt_setup_traps(void);
extern void idt_setup_apic_and_irq_gates(void);

#ifdef CONFIG_X86_64
extern void idt_setup_early_pf(void);
extern void idt_setup_ist_traps(void);
extern void idt_setup_debugidt_traps(void);
#else
static inline void idt_setup_early_pf(void) { }
static inline void idt_setup_ist_traps(void) { }
static inline void idt_setup_debugidt_traps(void) { }
#endif

extern void idt_invalidate(void *addr);

#endif /* _ASM_X86_DESC_H */