/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MMU_CONTEXT_H
#define _ASM_X86_MMU_CONTEXT_H

#include <asm/desc.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/pkeys.h>

#include <trace/events/tlb.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/paravirt.h>
#include <asm/mpx.h>

extern atomic64_t last_mm_ctx_id;

#ifndef CONFIG_PARAVIRT
static inline void paravirt_activate_mm(struct mm_struct *prev,
					struct mm_struct *next)
{
}
#endif	/* !CONFIG_PARAVIRT */
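
/*
 * load_mm_cr4() is called on context switch to update CR4.PCE, which
 * gates user-space RDPMC.  The bit is set if RDPMC is always allowed
 * (the static key) or if perf has granted this mm RDPMC access;
 * otherwise it is cleared.  Without CONFIG_PERF_EVENTS there is
 * nothing to manage, so the helper is a no-op.
 */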
#ifdef CONFIG_PERF_EVENTS

DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key);

static inline void load_mm_cr4(struct mm_struct *mm)
{
	if (static_branch_unlikely(&rdpmc_always_available_key) ||
	    atomic_read(&mm->context.perf_rdpmc_allowed))
		cr4_set_bits(X86_CR4_PCE);
	else
		cr4_clear_bits(X86_CR4_PCE);
}
#else
static inline void load_mm_cr4(struct mm_struct *mm) {}
#endif

#ifdef CONFIG_MODIFY_LDT_SYSCALL
/*
 * ldt_structs can be allocated, used, and freed, but they are never
 * modified while live.
 */
struct ldt_struct {
	/*
	 * Xen requires page-aligned LDTs with special permissions.  This is
	 * needed to prevent us from installing evil descriptors such as
	 * call gates.  On native, we could merge the ldt_struct and LDT
	 * allocations, but it's not worth trying to optimize.
	 */
	struct desc_struct	*entries;
	unsigned int		nr_entries;

	/*
	 * If PTI is in use, then the entries array is not mapped while we're
	 * in user mode.  The whole array will be aliased at the address
	 * given by ldt_slot_va(slot).  We use two slots so that we can
	 * allocate and map, and enable a new LDT without invalidating the
	 * mapping of an older, still-in-use LDT.
	 *
	 * slot will be -1 if this LDT doesn't have an alias mapping.
	 */
	int			slot;
};

/* This is a multiple of PAGE_SIZE. */
#define LDT_SLOT_STRIDE (LDT_ENTRIES * LDT_ENTRY_SIZE)
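
/*
 * Each alias slot is LDT_SLOT_STRIDE bytes: slot 0 starts at
 * LDT_BASE_ADDR and slot 1 at LDT_BASE_ADDR + LDT_SLOT_STRIDE.
 * ldt_slot_va() turns a slot number into that alias address
 * (64-bit only; 32-bit kernels never use the LDT alias mapping).
 */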
static inline void *ldt_slot_va(int slot)
{
#ifdef CONFIG_X86_64
	return (void *)(LDT_BASE_ADDR + LDT_SLOT_STRIDE * slot);
#else
	BUG();
	return (void *)fix_to_virt(FIX_HOLE);
#endif
}

/*
 * Used for LDT copy/destruction.
 */
static inline void init_new_context_ldt(struct mm_struct *mm)
{
	mm->context.ldt = NULL;
	init_rwsem(&mm->context.ldt_usr_sem);
}
int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
void destroy_context_ldt(struct mm_struct *mm);
void ldt_arch_exit_mmap(struct mm_struct *mm);
#else	/* CONFIG_MODIFY_LDT_SYSCALL */
static inline void init_new_context_ldt(struct mm_struct *mm) { }
static inline int ldt_dup_context(struct mm_struct *oldmm,
				  struct mm_struct *mm)
{
	return 0;
}
static inline void destroy_context_ldt(struct mm_struct *mm) { }
static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
#endif
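
/*
 * Install the mm's LDT on the current CPU: point LDTR at the PTI alias
 * mapping (or at ldt->entries when PTI is off), or clear LDTR entirely
 * if the mm has no LDT.
 */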
static inline void load_mm_ldt(struct mm_struct *mm)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	struct ldt_struct *ldt;

	/* READ_ONCE synchronizes with smp_store_release */
	ldt = READ_ONCE(mm->context.ldt);

	/*
	 * Any change to mm->context.ldt is followed by an IPI to all
	 * CPUs with the mm active.  The LDT will not be freed until
	 * after the IPI is handled by all such CPUs.  This means that,
	 * if the ldt_struct changes before we return, the values we see
	 * will be safe, and the new values will be loaded before we run
	 * any user code.
	 *
	 * NB: don't try to convert this to use RCU without extreme care.
	 * We would still need IRQs off, because we don't want to change
	 * the local LDT after an IPI loaded a newer value than the one
	 * that we can see.
	 */

	if (unlikely(ldt)) {
		if (static_cpu_has(X86_FEATURE_PTI)) {
			if (WARN_ON_ONCE((unsigned long)ldt->slot > 1)) {
				/*
				 * Whoops -- either the new LDT isn't mapped
				 * (if slot == -1) or is mapped into a bogus
				 * slot (if slot > 1).
				 */
				clear_LDT();
				return;
			}

			/*
			 * If page table isolation is enabled, ldt->entries
			 * will not be mapped in the userspace pagetables.
			 * Tell the CPU to access the LDT through the alias
			 * at ldt_slot_va(ldt->slot).
			 */
			set_ldt(ldt_slot_va(ldt->slot), ldt->nr_entries);
		} else {
			set_ldt(ldt->entries, ldt->nr_entries);
		}
	} else {
		clear_LDT();
	}
#else
	clear_LDT();
#endif
}

static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)
{
#ifdef CONFIG_MODIFY_LDT_SYSCALL
	/*
	 * Load the LDT if either the old or new mm had an LDT.
	 *
	 * An mm will never go from having an LDT to not having an LDT.  Two
	 * mms never share an LDT, so we don't gain anything by checking to
	 * see whether the LDT changed.  There's also no guarantee that
	 * prev->context.ldt actually matches LDTR, but, if LDTR is non-NULL,
	 * then prev->context.ldt will also be non-NULL.
	 *
	 * If we really cared, we could optimize the case where prev == next
	 * and we're exiting lazy mode.  Most of the time, if this happens,
	 * we don't actually need to reload LDTR, but modify_ldt() is mostly
	 * used by legacy code and emulators where we don't need this level of
	 * performance.
	 *
	 * This uses | instead of || because it generates better code.
	 */
	if (unlikely((unsigned long)prev->context.ldt |
		     (unsigned long)next->context.ldt))
		load_mm_ldt(next);
#endif

	DEBUG_LOCKS_WARN_ON(preemptible());
}

void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
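
/*
 * Called when a new mm is created (fork, exec): set up the mutex, hand
 * out a fresh ctx_id, reset the TLB generation counter, mark only
 * pkey 0 as allocated when protection keys are available, and
 * initialize the (empty) LDT state.
 */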
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	mutex_init(&mm->context.lock);

	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
	atomic64_set(&mm->context.tlb_gen, 0);

#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	if (cpu_feature_enabled(X86_FEATURE_OSPKE)) {
		/* pkey 0 is the default and allocated implicitly */
		mm->context.pkey_allocation_map = 0x1;
		/* -1 means unallocated or invalid */
		mm->context.execute_only_pkey = -1;
	}
#endif
	init_new_context_ldt(mm);
	return 0;
}
static inline void destroy_context(struct mm_struct *mm)
{
	destroy_context_ldt(mm);
}

extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		      struct task_struct *tsk);

extern void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
			       struct task_struct *tsk);
#define switch_mm_irqs_off switch_mm_irqs_off
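
/*
 * activate_mm() switches to a new mm where there is no previous task to
 * hand to switch_mm() (e.g. at exec time).  deactivate_mm() zaps the
 * user segment registers (GS on 32-bit, FS and GS on 64-bit) so that
 * stale selectors referencing the old image cannot leak into the new one.
 */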
#define activate_mm(prev, next)			\
do {						\
	paravirt_activate_mm((prev), (next));	\
	switch_mm((prev), (next), NULL);	\
} while (0);

#ifdef CONFIG_X86_32
#define deactivate_mm(tsk, mm)			\
do {						\
	lazy_load_gs(0);			\
} while (0)
#else
#define deactivate_mm(tsk, mm)			\
do {						\
	load_gs_index(0);			\
	loadsegment(fs, 0);			\
} while (0)
#endif

static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	paravirt_arch_dup_mmap(oldmm, mm);
	return ldt_dup_context(oldmm, mm);
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
	paravirt_arch_exit_mmap(mm);
	ldt_arch_exit_mmap(mm);
}
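
/*
 * True if the mm runs with a 64-bit address space.  With IA-32
 * emulation enabled, a process that did a 32-bit exec is flagged via
 * context.ia32_compat == TIF_IA32; without it, every mm on a 64-bit
 * kernel is 64-bit, and on 32-bit kernels none are.
 */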
#ifdef CONFIG_X86_64
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return	!IS_ENABLED(CONFIG_IA32_EMULATION) ||
		!(mm->context.ia32_compat == TIF_IA32);
}
#else
static inline bool is_64bit_mm(struct mm_struct *mm)
{
	return false;
}
#endif
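
/*
 * Hook run while a new binary's mm is being set up; on x86 its only
 * job is to initialize the MPX bounds-table bookkeeping for the mm.
 */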
static inline void arch_bprm_mm_init(struct mm_struct *mm,
		struct vm_area_struct *vma)
{
	mpx_mm_init(mm);
}

static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			      unsigned long start, unsigned long end)
{
	/*
	 * mpx_notify_unmap() goes and reads a rarely-hot
	 * cacheline in the mm_struct.  That can be expensive
	 * enough to be seen in profiles.
	 *
	 * The mpx_notify_unmap() call and its contents have been
	 * observed to affect munmap() performance on hardware
	 * where MPX is not present.
	 *
	 * The unlikely() optimizes for the fast case: no MPX
	 * in the CPU, or no MPX use in the process.  Even if
	 * we get this wrong (in the unlikely event that MPX
	 * is widely enabled on some system) the overhead of
	 * MPX itself (reading bounds tables) is expected to
	 * overwhelm the overhead of getting this unlikely()
	 * consistently wrong.
	 */
	if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
		mpx_notify_unmap(mm, vma, start, end);
}

/*
 * We only want to enforce protection keys on the current process
 * because we effectively have no access to PKRU for other
 * processes or any way to tell *which* PKRU in a threaded
 * process we could use.
 *
 * So do not enforce things if the VMA is not from the current
 * mm, or if we are in a kernel thread.
 */
static inline bool vma_is_foreign(struct vm_area_struct *vma)
{
	if (!current->mm)
		return true;
	/*
	 * Should PKRU be enforced on the access to this VMA?  If
	 * the VMA is from another process, then PKRU has no
	 * relevance and should not be enforced.
	 */
	if (current->mm != vma->vm_mm)
		return true;

	return false;
}

static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
		bool write, bool execute, bool foreign)
{
	/* pkeys never affect instruction fetches */
	if (execute)
		return true;
	/* allow access if the VMA is not one from this process */
	if (foreign || vma_is_foreign(vma))
		return true;
	return __pkru_allows_pkey(vma_pkey(vma), write);
}

/*
 * This can be used from process context to figure out what the value of
 * CR3 is without needing to do a (slow) __read_cr3().
 *
 * It's intended to be used for code like KVM that sneakily changes CR3
 * and needs to restore it.  It needs to be used very carefully.
 */
static inline unsigned long __get_current_cr3_fast(void)
{
	unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
		this_cpu_read(cpu_tlbstate.loaded_mm_asid));

	/* For now, be very restrictive about when this can be called. */
	VM_WARN_ON(in_nmi() || preemptible());

	VM_BUG_ON(cr3 != __read_cr3());
	return cr3;
}

#endif /* _ASM_X86_MMU_CONTEXT_H */