/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H
#define _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H

#define MMU_NO_CONTEXT	~0UL

#include <linux/mm_types.h>
#include <asm/book3s/64/tlbflush-hash.h>
#include <asm/book3s/64/tlbflush-radix.h>

/* TLB flush actions. Used as argument to tlbiel_all() */
enum {
	TLB_INVAL_SCOPE_GLOBAL = 0,	/* invalidate all TLBs */
	TLB_INVAL_SCOPE_LPID = 1,	/* invalidate TLBs for current LPID */
};
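
/*
 * Note: tlbiel_all() and tlbiel_all_lpid() below use the local invalidate
 * instruction (tlbiel), so they only clean out the TLB of the CPU that
 * executes them; that is what makes them usable from the machine check
 * and early boot paths described in their comments.
 */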
static inline void tlbiel_all(void)
{
	/*
	 * This is used for host machine check and bootup.
	 *
	 * This uses early_radix_enabled and implementations use
	 * early_cpu_has_feature etc because that works early in boot
	 * and this is the machine check path which is not performance
	 * critical.
	 */
	if (early_radix_enabled())
		radix__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_GLOBAL);
}

static inline void tlbiel_all_lpid(bool radix)
{
	/*
	 * This is used for guest machine check.
	 */
	if (radix)
		radix__tlbiel_all(TLB_INVAL_SCOPE_LPID);
	else
		hash__tlbiel_all(TLB_INVAL_SCOPE_LPID);
}

#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
static inline void flush_pmd_tlb_range(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_pmd_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

#define __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
static inline void flush_hugetlb_tlb_range(struct vm_area_struct *vma,
					   unsigned long start,
					   unsigned long end)
{
	if (radix_enabled())
		return radix__flush_hugetlb_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_range(vma, start, end);
	return hash__flush_tlb_range(vma, start, end);
}

static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	if (radix_enabled())
		return radix__flush_tlb_kernel_range(start, end);
	return hash__flush_tlb_kernel_range(start, end);
}

static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_tlb_mm(mm);
	return hash__local_flush_tlb_mm(mm);
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__local_flush_tlb_page(vma, vmaddr);
	return hash__local_flush_tlb_page(vma, vmaddr);
}

static inline void local_flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__local_flush_all_mm(mm);
	return hash__local_flush_all_mm(mm);
}
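
/*
 * tlb_flush() is the arch hook called by the generic mmu_gather code
 * (tlb_flush_mmu()/tlb_finish_mmu()) once unmapping is done; here it
 * simply dispatches to the radix or hash implementation.
 */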
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (radix_enabled())
		return radix__tlb_flush(tlb);
	return hash__tlb_flush(tlb);
}
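
/*
 * The flush_tlb_mm/flush_tlb_page/flush_all_mm variants below must also
 * invalidate stale translations on other CPUs. On !SMP builds there are
 * no other CPUs, so they collapse to the local_ versions via the defines
 * in the #else branch.
 */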
#ifdef CONFIG_SMP
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_tlb_mm(mm);
	return hash__flush_tlb_mm(mm);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long vmaddr)
{
	if (radix_enabled())
		return radix__flush_tlb_page(vma, vmaddr);
	return hash__flush_tlb_page(vma, vmaddr);
}

static inline void flush_all_mm(struct mm_struct *mm)
{
	if (radix_enabled())
		return radix__flush_all_mm(mm);
	return hash__flush_all_mm(mm);
}
#else
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
#define flush_all_mm(mm)		local_flush_all_mm(mm)
#endif /* CONFIG_SMP */

#define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address)
{
	/*
	 * Book3S 64 does not require spurious fault flushes because the PTE
	 * must be re-fetched in case of an access permission problem. So the
	 * only reason for a spurious fault should be concurrent modification
	 * to the PTE, in which case the PTE will eventually be re-fetched by
	 * the MMU when it attempts the access again.
	 *
	 * See: Power ISA Version 3.1B, 6.10.1.2 Modifying a Translation Table
	 * Entry, Setting a Reference or Change Bit or Upgrading Access
	 * Authority (PTE Subject to Atomic Hardware Updates):
	 *
	 * "If the only change being made to a valid PTE that is subject to
	 *  atomic hardware updates is to set the Reference or Change bit to
	 *  1 or to upgrade access authority, a simpler sequence suffices
	 *  because the translation hardware will refetch the PTE if an
	 *  access is attempted for which the only problems were reference
	 *  and/or change bits needing to be set or insufficient access
	 *  authority."
	 *
	 * The nest MMU in POWER9 does not perform this PTE re-fetch, but
	 * it avoids the spurious fault problem by flushing the TLB before
	 * upgrading PTE permissions, see radix__ptep_set_access_flags.
	 */
}
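
/*
 * __pte_flags_need_flush() backs pte_needs_flush()/huge_pmd_needs_flush()
 * below, which generic mm code (e.g. the mprotect path) consults to decide
 * whether a protection change actually requires a TLB flush.
 */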
static inline bool __pte_flags_need_flush(unsigned long oldval,
					  unsigned long newval)
{
	unsigned long delta = oldval ^ newval;

	/*
	 * The return value of this function doesn't matter for hash,
	 * ptep_modify_prot_start() does a pte_update() which does or schedules
	 * any necessary hash table update and flush.
	 */
	if (!radix_enabled())
		return true;

	/*
	 * We do not expect kernel mappings or non-PTEs or not-present PTEs.
	 */
	VM_WARN_ON_ONCE(oldval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(newval & _PAGE_PRIVILEGED);
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PTE));
	VM_WARN_ON_ONCE(!(oldval & _PAGE_PRESENT));
	VM_WARN_ON_ONCE(!(newval & _PAGE_PRESENT));

	/*
	 * Must flush on any change except READ, WRITE, EXEC, DIRTY, ACCESSED.
	 *
	 * In theory, some changed software bits could be tolerated, in
	 * practice those should rarely if ever matter.
	 */
	if (delta & ~(_PAGE_RWX | _PAGE_DIRTY | _PAGE_ACCESSED))
		return true;

	/*
	 * If any of the above was present in old but cleared in new, flush.
	 * With the exception of _PAGE_ACCESSED, don't worry about flushing
	 * if that was cleared (see the comment in ptep_clear_flush_young()).
	 */
	if ((delta & ~_PAGE_ACCESSED) & oldval)
		return true;

	return false;
}

static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return __pte_flags_need_flush(pte_val(oldpte), pte_val(newpte));
}
#define pte_needs_flush pte_needs_flush

static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return __pte_flags_need_flush(pmd_val(oldpmd), pmd_val(newpmd));
}
#define huge_pmd_needs_flush huge_pmd_needs_flush
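
/*
 * tlbie_capable says whether the global (broadcast) tlbie instruction can
 * be used on this system at all; tlbie_enabled says whether the kernel is
 * currently using it. When tlbie is not used, global flushes fall back to
 * local tlbiel plus IPIs.
 */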
extern bool tlbie_capable;
extern bool tlbie_enabled;

static inline bool cputlb_use_tlbie(void)
{
	return tlbie_enabled;
}

#endif /* _ASM_POWERPC_BOOK3S_64_TLBFLUSH_H */