/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>
/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg) asm ("tlbi " #op "\n"				       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op,	       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_QCOM_FALKOR_ERRATUM_1009)		       \
			    : : )
#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n"			       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op ", %0",     \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_QCOM_FALKOR_ERRATUM_1009)		       \
			    : : "r" (arg))
#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
#define __tlbi_user(op, arg) do {						\
	if (arm64_kernel_unmapped_at_el0())					\
		__tlbi(op, (arg) | USER_ASID_FLAG);				\
} while (0)
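
/*
 * Illustrative only (not part of the upstream header): with the
 * argument-counting dispatch above, both invocation forms expand to a
 * single TLBI instruction, with or without a register operand:
 *
 *	__tlbi(vmalle1is);		// no operand  -> __TLBI_0
 *	__tlbi(vae1is, addr);		// one operand -> __TLBI_1
 *	__tlbi_user(vae1is, addr);	// repeats the operation for the user
 *					// ASID when the kernel is unmapped
 *					// at EL0 (kpti)
 */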
/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
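
/*
 * Worked example (illustrative values, not from the original header): for a
 * user address uaddr = 0x0000007fb2345000 and asid = 0x42, the macro yields
 *
 *	(uaddr >> 12) & GENMASK_ULL(43, 0)	= 0x0000000007fb2345
 *	| ((unsigned long)0x42 << 48)		= 0x0042000007fb2345
 *
 * i.e. the page number in bits [43:0] and the ASID in bits [63:48], which is
 * the operand layout expected by the VA-based TLBI instructions.
 */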
/*
 *	TLB Invalidation
 *	================
 *
 *	This header file implements the low-level TLB invalidation routines
 *	(sometimes referred to as "flushing" in the kernel) for arm64.
 *
 *	Every invalidation operation uses the following template:
 *
 *	DSB ISHST	// Ensure prior page-table updates have completed
 *	TLBI ...	// Invalidate the TLB
 *	DSB ISH		// Ensure the TLB invalidation has completed
 *	if (invalidated kernel mappings)
 *		ISB	// Discard any instructions fetched from the old mapping
 *
 *
 *	The following functions form part of the "core" TLB invalidation API,
 *	as documented in Documentation/core-api/cachetlb.rst:
 *
 *	flush_tlb_all()
 *		Invalidate the entire TLB (kernel + user) on all CPUs
 *
 *	flush_tlb_mm(mm)
 *		Invalidate an entire user address space on all CPUs.
 *		The 'mm' argument identifies the ASID to invalidate.
 *
 *	flush_tlb_range(vma, start, end)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->vm_mm'.
 *		Note that this operation also invalidates any walk-cache
 *		entries associated with translations for the specified address
 *		range.
 *
 *	flush_tlb_kernel_range(start, end)
 *		Same as flush_tlb_range(..., start, end), but applies to
 *		kernel mappings rather than a particular user address space.
 *		Whilst not explicitly documented, this function is used when
 *		unmapping pages from vmalloc/io space.
 *
 *	flush_tlb_page(vma, addr)
 *		Invalidate a single user mapping for address 'addr' in the
 *		address space corresponding to 'vma->vm_mm'.  Note that this
 *		operation only invalidates a single, last-level page-table
 *		entry and therefore does not affect any walk-caches.
 *
 *
 *	Next, we have some undocumented invalidation routines that you probably
 *	don't want to call unless you know what you're doing:
 *
 *	local_flush_tlb_all()
 *		Same as flush_tlb_all(), but only applies to the calling CPU.
 *
 *	__flush_tlb_kernel_pgtable(addr)
 *		Invalidate a single kernel mapping for address 'addr' on all
 *		CPUs, ensuring that any walk-cache entries associated with the
 *		translation are also invalidated.
 *
 *	__flush_tlb_range(vma, start, end, stride, last_level)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->vm_mm'.
 *		The invalidation operations are issued at a granularity
 *		determined by 'stride' and only affect any walk-cache entries
 *		if 'last_level' is false.
 *
 *
 *	Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
 *	on top of these routines, since that is our interface to the mmu_gather
 *	API as used by munmap() and friends.
 */
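
/*
 * Illustrative sketch of how the core API above is typically reached (the
 * call sites below are hypothetical, not part of this header): code that
 * tears down a kernel mapping pairs the page-table update with a
 * kernel-range invalidation, while user mappings normally go through the
 * mmu_gather machinery rather than calling these functions directly:
 *
 *	// e.g. after unmapping [start, end) from vmalloc/io space
 *	flush_tlb_kernel_range(start, end);
 *
 *	// e.g. after changing a single last-level user PTE for 'uaddr'
 *	flush_tlb_page(vma, uaddr);
 */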
static inline void local_flush_tlb_all(void)
{
	/* Local variant: non-shareable barriers and a non-IS TLBI */
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = __TLBI_VADDR(0, ASID(mm));

	dsb(ishst);
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

	dsb(ishst);
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	dsb(ish);
}

/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLBI_OPS	1024UL
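
/*
 * Illustrative arithmetic (not from the original header): with the 4 KiB
 * stride used by flush_tlb_range() on a 4 KiB page kernel, the cut-off
 * above corresponds to
 *
 *	MAX_TLBI_OPS * PAGE_SIZE = 1024 * 4 KiB = 4 MiB
 *
 * so ranges larger than that fall back to invalidating the whole ASID
 * (or, for kernel ranges, the whole TLB) instead of issuing thousands of
 * per-page TLBI operations.
 */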

static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm);
	unsigned long addr;

	if ((end - start) > (MAX_TLBI_OPS * stride)) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	/* Convert the stride into units of 4k */
	stride >>= 12;

	start = __TLBI_VADDR(start, asid);
	end = __TLBI_VADDR(end, asid);

	dsb(ishst);
	for (addr = start; addr < end; addr += stride) {
		if (last_level) {
			__tlbi(vale1is, addr);
			__tlbi_user(vale1is, addr);
		} else {
			__tlbi(vae1is, addr);
			__tlbi_user(vae1is, addr);
		}
	}
	dsb(ish);
}
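
/*
 * Illustrative call (not from the original header): when invalidating a
 * range known to consist of 2 MiB block mappings whose leaf (PMD) entries
 * have already been updated, a caller could issue one operation per block
 * and leave the walk-caches alone:
 *
 *	__flush_tlb_range(vma, start, end, PMD_SIZE, true);
 *
 * PAGE_SIZE with last_level == false, as used by flush_tlb_range() below,
 * is the conservative choice when table entries may also have changed.
 */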

static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/*
	 * We cannot use leaf-only invalidation here, since we may be invalidating
	 * table entries as part of collapsing hugepages or moving page tables.
	 */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false);
}

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaale1is, addr);
	dsb(ish);
	isb();
}

/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
}

#endif

#endif