// SPDX-License-Identifier: GPL-2.0

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
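
/*
 * Local (single-hart) flush primitives. Per the RISC-V privileged
 * specification, sfence.vma with rs1 = x0 and rs2 = asid invalidates all
 * non-global entries for that address space, while a non-zero rs1
 * restricts the fence to translations of that virtual address.
 */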
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma x0, %0"
				: : "r" (asid) : "memory");
	else
		local_flush_tlb_all();
}

static inline void local_flush_tlb_page_asid(unsigned long addr,
		unsigned long asid)
{
	if (asid != FLUSH_TLB_NO_ASID)
		__asm__ __volatile__ ("sfence.vma %0, %1"
				: : "r" (addr), "r" (asid) : "memory");
	else
		local_flush_tlb_page(addr);
}

/*
 * Flush entire TLB if number of entries to be flushed is greater
 * than the threshold below.
 */
static unsigned long tlb_flush_all_threshold __read_mostly = 64;
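
/*
 * Illustrative example: flushing 1MiB at a 4KiB stride covers 256 PTEs,
 * which exceeds the default threshold of 64, so the whole TLB (for the
 * given ASID) is flushed with a single fence instead of 256 page-wise
 * fences.
 */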
static void local_flush_tlb_range_threshold_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
	int i;

	if (nr_ptes_in_range > tlb_flush_all_threshold) {
		local_flush_tlb_all_asid(asid);
		return;
	}

	for (i = 0; i < nr_ptes_in_range; ++i) {
		local_flush_tlb_page_asid(start, asid);
		start += stride;
	}
}
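
/*
 * Dispatch helper: a range that fits in a single stride needs only one
 * page fence, FLUSH_TLB_MAX_SIZE means "everything", and anything in
 * between goes through the threshold heuristic above.
 */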
static inline void local_flush_tlb_range_asid(unsigned long start,
		unsigned long size, unsigned long stride, unsigned long asid)
{
	if (size <= stride)
		local_flush_tlb_page_asid(start, asid);
	else if (size == FLUSH_TLB_MAX_SIZE)
		local_flush_tlb_all_asid(asid);
	else
		local_flush_tlb_range_threshold_asid(start, size, stride, asid);
}

/* Flush a range of kernel pages without broadcasting */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}

static void __ipi_flush_tlb_all(void *info)
{
	local_flush_tlb_all();
}
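
/*
 * Full flush on all online harts: either IPI every CPU so each runs the
 * local fence itself, or ask the SBI firmware to broadcast the fence,
 * depending on how remote fences are driven on this platform.
 */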
void flush_tlb_all(void)
{
	if (riscv_use_ipi_for_rfence())
		on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
	else
		sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
}

struct flush_tlb_range_data {
	unsigned long asid;
	unsigned long start;
	unsigned long size;
	unsigned long stride;
};

static void __ipi_flush_tlb_range_asid(void *info)
{
	struct flush_tlb_range_data *d = info;

	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}
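
/*
 * Core range-flush: decide whether the flush can stay local (the mask
 * contains no CPU other than the current one) or must be broadcast via
 * IPI or an SBI remote fence.
 */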
static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
			      unsigned long start, unsigned long size,
			      unsigned long stride)
{
	struct flush_tlb_range_data ftd;
	bool broadcast;

	if (cpumask_empty(cmask))
		return;

	if (cmask != cpu_online_mask) {
		unsigned int cpuid = get_cpu();

		/* check if the tlbflush needs to be sent to other CPUs */
		broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
	} else {
		broadcast = true;
	}

	if (broadcast) {
		if (riscv_use_ipi_for_rfence()) {
			ftd.asid = asid;
			ftd.start = start;
			ftd.size = size;
			ftd.stride = stride;
			on_each_cpu_mask(cmask, __ipi_flush_tlb_range_asid, &ftd, 1);
		} else {
			sbi_remote_sfence_vma_asid(cmask, start, size, asid);
		}
	} else {
		local_flush_tlb_range_asid(start, size, stride, asid);
	}

	if (cmask != cpu_online_mask)
		put_cpu();
}
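
/*
 * With the ASID allocator enabled, the low bits of context.id hold the
 * hardware ASID; otherwise FLUSH_TLB_NO_ASID makes the local primitives
 * fall back to non-ASID fences.
 */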
static inline unsigned long get_mm_asid(struct mm_struct *mm)
{
	return static_branch_unlikely(&use_asid_allocator) ?
			atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
}
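
/*
 * An mm-wide flush passes FLUSH_TLB_MAX_SIZE, which makes
 * local_flush_tlb_range_asid() take the flush-all path for this ASID.
 */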
void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
			  0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}

void flush_tlb_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end,
			unsigned int page_size)
{
	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
			  start, end - start, page_size);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  addr, PAGE_SIZE, PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	unsigned long stride_size;

	if (!is_vm_hugetlb_page(vma)) {
		stride_size = PAGE_SIZE;
	} else {
		stride_size = huge_page_size(hstate_vma(vma));

		/*
		 * As stated in the privileged specification, every PTE in a
		 * NAPOT region must be invalidated, so reset the stride in
		 * that case.
		 */
		if (has_svnapot()) {
			if (stride_size >= PGDIR_SIZE)
				stride_size = PGDIR_SIZE;
			else if (stride_size >= P4D_SIZE)
				stride_size = P4D_SIZE;
			else if (stride_size >= PUD_SIZE)
				stride_size = PUD_SIZE;
			else if (stride_size >= PMD_SIZE)
				stride_size = PMD_SIZE;
			else
				stride_size = PAGE_SIZE;
		}
	}

	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  start, end - start, stride_size);
}

/* Flush a range of kernel pages */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	__flush_tlb_range((struct cpumask *)cpu_online_mask, FLUSH_TLB_NO_ASID,
			  start, end - start, PAGE_SIZE);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
			  start, end - start, PMD_SIZE);
}
#endif
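
/*
 * Batched unmap support: instead of fencing once per unmapped page,
 * reclaim accumulates the CPUs that may hold stale user translations
 * and issues a single flush over the whole batch at the end.
 */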
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	return true;
}

void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
			       struct mm_struct *mm,
			       unsigned long uaddr)
{
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
}

void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	flush_tlb_mm(mm);
}

void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	__flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
			  FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}
#endif