/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#include <linux/mm_types.h>
#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>
/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg)	asm ("tlbi " #op "\n"			       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op,	       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : )

#define __TLBI_1(op, arg)	asm ("tlbi " #op ", %0\n"		       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op ", %0",     \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
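
/*
 * For illustration, a sketch of how __tlbi() selects between the two
 * forms above (the operands shown are hypothetical):
 *
 *	__tlbi(vmalle1is);		// no argument: expands via __TLBI_0
 *	__tlbi(vale1is, addr);		// one argument: expands via __TLBI_1
 *
 * The ##__VA_ARGS__ trick shifts the trailing "1, 0" so that 'n' in
 * __TLBI_N() binds to 1 when an argument is supplied and 0 when it is
 * not, dispatching to the matching __TLBI_##n macro.
 */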
#define __tlbi_user(op, arg) do {				\
	if (arm64_kernel_unmapped_at_el0())			\
		__tlbi(op, (arg) | USER_ASID_FLAG);		\
} while (0)

/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
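
/*
 * A worked example (values are hypothetical): for user address
 * 0x0000ffffb7f01000 in ASID 0x42, __TLBI_VADDR() yields
 *
 *	(0x0000ffffb7f01000 >> 12) & GENMASK_ULL(43, 0)	== 0x0000000ffffb7f01
 *	| (0x42UL << 48)				== 0x0042000ffffb7f01
 *
 * i.e. bits [43:0] carry VA[55:12] and bits [63:48] carry the ASID,
 * matching the TLBI VA operand layout.
 */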
/*
 *	TLB Invalidation
 *	================
 *
 *	This header file implements the low-level TLB invalidation routines
 *	(sometimes referred to as "flushing" in the kernel) for arm64.
 *
 *	Every invalidation operation uses the following template:
 *
 *	DSB ISHST	// Ensure prior page-table updates have completed
 *	TLBI ...	// Invalidate the TLB
 *	DSB ISH		// Ensure the TLB invalidation has completed
 *	if (invalidated kernel mappings)
 *		ISB	// Discard any instructions fetched from the old mapping
 *
 *	(see the sketch following this comment for the template in use)
 *
 *
 *	The following functions form part of the "core" TLB invalidation API,
 *	as documented in Documentation/core-api/cachetlb.rst:
 *
 *	flush_tlb_all()
 *		Invalidate the entire TLB (kernel + user) on all CPUs
 *
 *	flush_tlb_mm(mm)
 *		Invalidate an entire user address space on all CPUs.
 *		The 'mm' argument identifies the ASID to invalidate.
 *
 *	flush_tlb_range(vma, start, end)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->vm_mm'.
 *		Note that this operation also invalidates any walk-cache
 *		entries associated with translations for the specified address
 *		range.
 *
 *	flush_tlb_kernel_range(start, end)
 *		Same as flush_tlb_range(..., start, end), but applies to
 *		kernel mappings rather than a particular user address space.
 *		Whilst not explicitly documented, this function is used when
 *		unmapping pages from vmalloc/io space.
 *
 *	flush_tlb_page(vma, addr)
 *		Invalidate a single user mapping for address 'addr' in the
 *		address space corresponding to 'vma->vm_mm'. Note that this
 *		operation only invalidates a single, last-level page-table
 *		entry and therefore does not affect any walk-caches.
 *
 *
 *	Next, we have some undocumented invalidation routines that you probably
 *	don't want to call unless you know what you're doing:
 *
 *	local_flush_tlb_all()
 *		Same as flush_tlb_all(), but only applies to the calling CPU.
 *
 *	__flush_tlb_kernel_pgtable(addr)
 *		Invalidate a single kernel mapping for address 'addr' on all
 *		CPUs, ensuring that any walk-cache entries associated with the
 *		translation are also invalidated.
 *
 *	__flush_tlb_range(vma, start, end, stride, last_level)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->vm_mm'.
 *		The invalidation operations are issued at a granularity
 *		determined by 'stride' and only affect any walk-cache entries
 *		if 'last_level' is false.
 *
 *
 *	Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
 *	on top of these routines, since that is our interface to the mmu_gather
 *	API as used by munmap() and friends.
 */
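/*
 * A minimal sketch of the template above from a caller's point of view
 * (hypothetical caller, shown for illustration only):
 *
 *	set_pte_at(mm, uaddr, ptep, pte);	// update the page table
 *	flush_tlb_page(vma, uaddr);		// DSB ISHST; TLBI; DSB ISH
 *
 * The DSB ISHST inside the helper orders the PTE write before the TLBI,
 * and the trailing DSB ISH waits for the invalidation to complete before
 * the helper returns. Helpers that touch kernel mappings additionally
 * end with an ISB, per the template.
 */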
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid = __TLBI_VADDR(0, ASID(mm));

	dsb(ishst);
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
}

static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));

	dsb(ishst);
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	flush_tlb_page_nosync(vma, uaddr);
	dsb(ish);
}
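
/*
 * A sketch of what the _nosync variant permits (hypothetical caller):
 * several pages can be invalidated back-to-back with a single barrier
 * paid at the end, rather than one DSB ISH per page:
 *
 *	for (addr = start; addr < end; addr += PAGE_SIZE)
 *		flush_tlb_page_nosync(vma, addr);
 *	dsb(ish);		// wait for all of the TLBIs at once
 */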
/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_TLBI_OPS	PTRS_PER_PTE
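
/*
 * For example (assuming 4K pages, where PTRS_PER_PTE is 512): a
 * PAGE_SIZE-stride flush of 512 * 4K = 2M or more falls back to a
 * full-ASID (or, for kernel ranges, full-TLB) invalidation rather than
 * issuing hundreds of individual TLBI operations.
 */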
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level)
{
	unsigned long asid = ASID(vma->vm_mm);
	unsigned long addr;

	start = round_down(start, stride);
	end = round_up(end, stride);

	if ((end - start) >= (MAX_TLBI_OPS * stride)) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	/* Convert the stride into units of 4k */
	stride >>= 12;

	start = __TLBI_VADDR(start, asid);
	end = __TLBI_VADDR(end, asid);

	dsb(ishst);
	for (addr = start; addr < end; addr += stride) {
		if (last_level) {
			__tlbi(vale1is, addr);
			__tlbi_user(vale1is, addr);
		} else {
			__tlbi(vae1is, addr);
			__tlbi_user(vae1is, addr);
		}
	}
	dsb(ish);
}
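
/*
 * For illustration, a hypothetical caller invalidating a range of 2M
 * hugepage mappings can issue one TLBI per PMD entry instead of one per
 * 4K page, and may use leaf-only invalidation when no table entries are
 * being changed:
 *
 *	__flush_tlb_range(vma, start, end, PMD_SIZE, true);
 */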
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/*
	 * We cannot use leaf-only invalidation here, since we may be invalidating
	 * table entries as part of collapsing hugepages or moving page tables.
	 */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false);
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaale1is, addr);
	dsb(ish);
	isb();
}
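
/*
 * A note on the loop step above: __TLBI_VADDR() works in units of 4K
 * (address >> 12), so advancing by 1 << (PAGE_SHIFT - 12) moves exactly
 * one page per iteration regardless of the configured page size. For
 * example, with 64K pages (PAGE_SHIFT == 16) the step is 16 units of
 * 4K, i.e. one 64K page per TLBI.
 */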
/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
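/*
 * A hypothetical use, for illustration: after clearing a kernel pmd that
 * pointed to a pte table, the walk-cache entry for the old table must be
 * invalidated before the pte page can be freed:
 *
 *	pmd_clear(pmdp);
 *	__flush_tlb_kernel_pgtable(addr);
 *	pte_free_kernel(&init_mm, ptep);
 */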
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
}

#endif /* __ASM_TLBFLUSH_H */