/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/smp.h>

#include <asm/spitfire.h>
#include <asm-generic/mm_hooks.h>

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
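
/* A rough sketch of how the state above is used (based on the sparc64
 * context allocator, which lives outside this header): ctx_alloc_lock
 * guards the allocator; tlb_context_cache holds the most recently handed
 * out context value, whose upper bits carry the version/generation; and
 * mmu_context_bmap is the bitmap of hardware context numbers in use.
 */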

DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
void get_new_mmu_context(struct mm_struct *mm);
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
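
/* Lifecycle sketch (inferred from the API above, not a spec):
 * init_new_context() starts a fresh mm with an invalid context and an
 * initial TSB, so the first switch_mm() onto it triggers
 * get_new_mmu_context(); destroy_context() tears the TSB down and
 * returns the context number to mmu_context_bmap.
 */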

void __tsb_context_switch(unsigned long pgd_pa,
                          struct tsb_config *tsb_base,
                          struct tsb_config *tsb_huge,
                          unsigned long tsb_descr_pa,
                          unsigned long secondary_ctx);

static inline void tsb_context_switch_ctx(struct mm_struct *mm,
                                          unsigned long ctx)
{
        __tsb_context_switch(__pa(mm->pgd),
                             &mm->context.tsb_block[MM_TSB_BASE],
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
                             (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
                              &mm->context.tsb_block[MM_TSB_HUGE] :
                              NULL)
#else
                             NULL
#endif
                             , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
                             ctx);
}

#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
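
/* Illustrative expansion: tsb_context_switch(mm) becomes
 * tsb_context_switch_ctx(mm, 0), i.e. only the TSB registers are
 * reprogrammed and no secondary context value is passed down.
 * switch_mm() below instead passes CTX_HWBITS(mm->context) so the
 * low-level code can load the secondary context together with the
 * TSB update.
 */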

void tsb_grow(struct mm_struct *mm,
              unsigned long tsb_index,
              unsigned long mm_rss);
#ifdef CONFIG_SMP
void smp_tsb_sync(struct mm_struct *mm);
#else
#define smp_tsb_sync(__mm) do { } while (0)
#endif

/* Set MMU context in the actual hardware. */
#define load_secondary_context(__mm) \
        __asm__ __volatile__( \
        "\n661: stxa            %0, [%1] %2\n" \
        "       .section        .sun4v_1insn_patch, \"ax\"\n" \
        "       .word           661b\n" \
        "       stxa            %0, [%1] %3\n" \
        "       .previous\n" \
        "       flush           %%g6\n" \
        : /* No outputs */ \
        : "r" (CTX_HWBITS((__mm)->context)), \
          "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))

void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
        unsigned long ctx_valid, flags;
        int cpu = smp_processor_id();

        per_cpu(per_cpu_secondary_mm, cpu) = mm;
        if (unlikely(mm == &init_mm))
                return;

        spin_lock_irqsave(&mm->context.lock, flags);
        ctx_valid = CTX_VALID(mm->context);
        if (!ctx_valid)
                get_new_mmu_context(mm);

        /* We have to be extremely careful here or else we will miss
         * a TSB grow if we switch back and forth between a kernel
         * thread and an address space which has its TSB size increased
         * on another processor.
         *
         * It is possible to play some games in order to optimize the
         * switch, but the safest thing to do is to unconditionally
         * perform the secondary context load and the TSB context switch.
         *
         * For reference the bad case is, for address space "A":
         *
         *              CPU 0                   CPU 1
         *      run address space A
         *      set cpu0's bits in cpu_vm_mask
         *      switch to kernel thread, borrow
         *      address space A via enter_lazy_tlb
         *                              run address space A
         *                              set cpu1's bit in cpu_vm_mask
         *                              flush_tlb_pending()
         *                              reset cpu_vm_mask to just cpu1
         *      run address space A
         *      context was valid, so skip
         *      TSB context switch
         *
         * At that point cpu0 continues to use a stale TSB, the one from
         * before the TSB grow performed on cpu1.  cpu1 did not cross-call
         * cpu0 to update its TSB because at that point the cpu_vm_mask
         * only had cpu1 set in it.
         */
        tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));

        /* Any time a processor runs a context on an address space
         * for the first time, we must flush that context out of the
         * local TLB.
         */
        if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
                cpumask_set_cpu(cpu, mm_cpumask(mm));
                __flush_tlb_mm(CTX_HWBITS(mm->context),
                               SECONDARY_CONTEXT);
        }
        spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define deactivate_mm(tsk,mm)   do { } while (0)
#define activate_mm(active_mm, mm)      switch_mm(active_mm, mm, NULL)
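
/* activate_mm() can presumably reuse switch_mm() directly because the
 * secondary context load and TSB switch above are unconditional, so
 * adopting a brand-new mm (e.g. at exec time) needs nothing beyond an
 * ordinary context switch.
 */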

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */