#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
        asm volatile("ptlb" : : : "memory");
}
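
/*
 * PTLB purges the TLB of the issuing CPU only; it is not broadcast to
 * other CPUs, which is why the SMP paths below need IDTE or CSP. The
 * "memory" clobber keeps the compiler from reordering memory accesses
 * across the purge.
 */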

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
        unsigned long opt;

        opt = IDTE_PTOA;
        if (MACHINE_HAS_TLB_GUEST)
                opt |= IDTE_GUEST_ASCE;
        /* Global TLB flush for the mm */
        asm volatile(
                "       .insn   rrf,0xb98e0000,0,%0,%1,0"
                : : "a" (opt), "a" (asce) : "cc");
}
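
/*
 * The hand-coded .insn is IDTE (invalidate DAT table entry, opcode
 * 0xb98e). With the options passed in %0 it invalidates the TLB
 * entries formed under the ASCE in %1 on every CPU in the
 * configuration, so this flush needs no IPIs.
 */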

#ifdef CONFIG_SMP
void smp_ptlb_all(void);

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
        unsigned int dummy = 0;

        csp(&dummy, 0, 0);
}
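
/*
 * csp() wraps the CSP (compare and swap and purge) instruction. As a
 * side effect of the dummy compare-and-swap it purges the TLBs of all
 * CPUs in the configuration, giving a machine-wide flush without
 * sending IPIs.
 */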

/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
 * this implies multiple ASCEs!).
 */
static inline void __tlb_flush_full(struct mm_struct *mm)
{
        preempt_disable();
        atomic_inc(&mm->context.flush_count);
        if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
                /* Local TLB flush */
                __tlb_flush_local();
        } else {
                /* Global TLB flush */
                __tlb_flush_global();
                /* Reset TLB flush mask */
                cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
        }
        atomic_dec(&mm->context.flush_count);
        preempt_enable();
}
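
/*
 * The flush_count/preempt_disable pair marks a flush in progress so
 * the context switch code can synchronize against concurrent updates
 * of mm_cpumask(). Resetting the mask to cpu_attach_mask narrows it
 * back to the CPUs the mm is currently attached to, letting later
 * flushes take the cheap local path again.
 */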

static inline void __tlb_flush_mm(struct mm_struct *mm)
{
        unsigned long gmap_asce;

        /*
         * If the machine has IDTE we prefer to do a per-mm flush
         * on all CPUs instead of doing a local flush if the mm
         * only ran on the local CPU.
         */
        preempt_disable();
        atomic_inc(&mm->context.flush_count);
        gmap_asce = READ_ONCE(mm->context.gmap_asce);
        if (MACHINE_HAS_IDTE && gmap_asce != -1UL) {
                if (gmap_asce)
                        __tlb_flush_idte(gmap_asce);
                __tlb_flush_idte(mm->context.asce);
        } else {
                __tlb_flush_full(mm);
        }
        /* Reset TLB flush mask */
        cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
        atomic_dec(&mm->context.flush_count);
        preempt_enable();
}
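
/*
 * gmap_asce encodes three states, as used above: 0 means no guest
 * (gmap) mappings exist, so only the mm's own ASCE is flushed; a
 * nonzero value is the single guest ASCE to flush alongside it; -1UL
 * means the set of guest ASCEs is unknown, forcing the full-flush
 * fallback.
 */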

static inline void __tlb_flush_kernel(void)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte(init_mm.context.asce);
        else
                __tlb_flush_global();
}
#else
#define __tlb_flush_global()    __tlb_flush_local()
#define __tlb_flush_full(mm)    __tlb_flush_local()

/*
 * Flush TLB entries for a specific mm; without SMP there is only the
 * local CPU to flush.
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
        __tlb_flush_local();
}

static inline void __tlb_flush_kernel(void)
{
        __tlb_flush_local();
}
#endif

static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
        if (mm->context.flush_mm) {
                __tlb_flush_mm(mm);
                mm->context.flush_mm = 0;
        }
}
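
/*
 * Lazy flushing pattern: pte helpers that want to defer the flush set
 * mm->context.flush_mm instead of flushing immediately; the
 * flush_tlb_mm()/flush_tlb_range() wrappers below then perform a
 * single deferred flush via __tlb_flush_mm_lazy().
 */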

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation, and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update, flush_tlb_mm and
 * flush_tlb_range must perform the flush.
 */
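
/*
 * The deferred-flush sequence described above, as a simplified sketch
 * (not a verbatim call chain):
 *
 *      ptep_set_wrprotect(src_mm, addr, ptep);  (per pte, no flush)
 *      ...
 *      flush_tlb_mm(src_mm);                    (single deferred flush)
 */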
#define flush_tlb()                             do { } while (0)
#define flush_tlb_all()                         do { } while (0)
#define flush_tlb_page(vma, addr)               do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        __tlb_flush_mm_lazy(mm);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        __tlb_flush_mm_lazy(vma->vm_mm);
}
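
/*
 * Note that start/end are accepted for the common interface but
 * ignored: the hardware flush works per ASCE, so the whole address
 * space is flushed rather than just the range.
 */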

static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
        __tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */