/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/tlbflush.h
 *
 * Copyright (C) 1999-2003 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H

#include <linux/bitfield.h>
#include <linux/mm_types.h>
#include <linux/sched.h>
#include <linux/mmu_notifier.h>
#include <asm/cputype.h>
/*
 * Raw TLBI operations.
 *
 * Where necessary, use the __tlbi() macro to avoid asm()
 * boilerplate. Drivers and most kernel code should use the TLB
 * management routines in preference to the macro below.
 *
 * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
 * on whether a particular TLBI operation takes an argument or
 * not. The macro handles invoking the asm with or without the
 * register argument as appropriate.
 */
#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE			\
			       "tlbi " #op "\n"				\
		   ALTERNATIVE("nop\n	nop",				\
			       "dsb ish\n	tlbi " #op,		\
			       ARM64_WORKAROUND_REPEAT_TLBI,		\
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	\
			    : : )

#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE			\
			       "tlbi " #op ", %0\n"			\
		   ALTERNATIVE("nop\n	nop",				\
			       "dsb ish\n	tlbi " #op ", %0",	\
			       ARM64_WORKAROUND_REPEAT_TLBI,		\
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	\
			    : : "r" (arg))

#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
#define __tlbi_user(op, arg) do {					\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi(op, (arg) | USER_ASID_FLAG);			\
} while (0)
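
/*
 * Usage sketch (illustrative only; 'va' and 'asid' are hypothetical values,
 * and __TLBI_VADDR() is defined just below):
 *
 *	__tlbi(vmalle1is);				// no register operand
 *	__tlbi(vale1is, __TLBI_VADDR(va, asid));	// operand in a register
 *	__tlbi_user(vale1is, __TLBI_VADDR(va, asid));	// repeat for the kpti
 *							// user ASID, if enabled
 */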
/* This macro creates a properly formatted VA operand for the TLBI */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __ta = (addr) >> 12;		\
		__ta &= GENMASK_ULL(43, 0);			\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
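
/*
 * Worked example (hypothetical values): __TLBI_VADDR(va, asid) packs the page
 * number (va >> 12) into bits [43:0] and the ASID into bits [63:48], which is
 * the operand layout expected by the VA-based TLBI instructions. Any excess
 * page-number bits are simply truncated by the GENMASK_ULL(43, 0) mask.
 */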
/*
 * Get the translation granule of the system, which is decided by
 * PAGE_SIZE. Used by TTL.
 */
#define TLBI_TTL_TG_4K		1
#define TLBI_TTL_TG_16K		2
#define TLBI_TTL_TG_64K		3
static inline unsigned long get_trans_granule(void)
{
	switch (PAGE_SIZE) {
	case SZ_4K:
		return TLBI_TTL_TG_4K;
	case SZ_16K:
		return TLBI_TTL_TG_16K;
	case SZ_64K:
		return TLBI_TTL_TG_64K;
	default:
		return 0;
	}
}
/*
 * Level-based TLBI operations.
 *
 * When ARMv8.4-TTL exists, TLBI operations take an additional hint for
 * the level at which the invalidation must take place. If the level is
 * wrong, no invalidation may take place. In the case where the level
 * cannot be easily determined, the value TLBI_TTL_UNKNOWN will perform
 * a non-hinted invalidation. Any provided level outside the hint range
 * will also cause fall-back to non-hinted invalidation.
 *
 * For Stage-2 invalidation, use the level values provided to that effect
 * in asm/stage2_pgtable.h.
 */
#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)

#define TLBI_TTL_UNKNOWN	INT_MAX
#define __tlbi_level(op, addr, level) do {				\
	u64 arg = addr;							\
									\
	if (alternative_has_cap_unlikely(ARM64_HAS_ARMv8_4_TTL) &&	\
	    level >= 0 && level <= 3) {					\
		u64 ttl = level & 3;					\
		ttl |= get_trans_granule() << 2;			\
		arg &= ~TLBI_TTL_MASK;					\
		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);			\
	}								\
									\
	__tlbi(op, arg);						\
} while (0)
#define __tlbi_user_level(op, arg, level) do {				\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
} while (0)
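
/*
 * Worked example: with 4K pages and a last-level (level 3) entry,
 * ttl = (TLBI_TTL_TG_4K << 2) | 3 = 0x7, which FIELD_PREP() places into bits
 * [47:44] of the operand. Passing TLBI_TTL_UNKNOWN (or any level outside 0..3)
 * skips the hint and results in a non-hinted invalidation.
 */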
/*
 * This macro creates a properly formatted VA operand for the TLB RANGE. The
 * value bit assignments are:
 *
 *	+----------+------+-------+-------+-------+----------------------+
 *	|   ASID   |  TG  | SCALE |  NUM  |  TTL  |        BADDR         |
 *	+-----------------+-------+-------+-------+----------------------+
 *	|63      48|47  46|45   44|43   39|38   37|36                   0|
 *
 * The address range is determined by the formula:
 *	[BADDR, BADDR + (NUM + 1) * 2^(5 * SCALE + 1) * PAGESIZE)
 *
 * Note that the first argument, baddr, is pre-shifted; if LPA2 is in use, BADDR
 * holds addr[52:16], otherwise BADDR holds the page number. See for example ARM
 * DDI 0487J.a section C5.5.60 "TLBI VAE1IS, TLBI VAE1ISNXS, TLB Invalidate by
 * VA, EL1, Inner Shareable".
 */
#define __TLBI_VADDR_RANGE(baddr, asid, scale, num, ttl)		\
	({								\
		unsigned long __ta = (baddr);				\
		unsigned long __ttl = (ttl >= 1 && ttl <= 3) ? ttl : 0;	\
		__ta &= GENMASK_ULL(36, 0);				\
		__ta |= __ttl << 37;					\
		__ta |= (unsigned long)(num) << 39;			\
		__ta |= (unsigned long)(scale) << 44;			\
		__ta |= get_trans_granule() << 46;			\
		__ta |= (unsigned long)(asid) << 48;			\
		__ta;							\
	})
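
/*
 * Worked example (hypothetical values): on a non-LPA2, 4K-page kernel, a range
 * starting at page number 'baddr' with scale = 1, num = 1 and a level-3 hint is
 * encoded as BADDR in [36:0], TTL = 3 in [38:37], NUM = 1 in [43:39],
 * SCALE = 1 in [45:44], TG = 1 in [47:46] and the ASID in [63:48], covering
 * (num + 1) * 2^(5 * scale + 1) = 128 pages.
 */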
/* These macros are used by the TLBI RANGE feature. */
#define __TLBI_RANGE_PAGES(num, scale)	\
	((unsigned long)((num) + 1) << (5 * (scale) + 1))
#define MAX_TLBI_RANGE_PAGES	__TLBI_RANGE_PAGES(31, 3)
/*
 * Generate 'num' values from -1 to 31 with -1 rejected by the
 * __flush_tlb_range() loop below. Its return value is only
 * significant for a maximum of MAX_TLBI_RANGE_PAGES pages. If
 * 'pages' is more than that, you must iterate over the overall
 * range.
 */
#define __TLBI_RANGE_NUM(pages, scale)					\
	({								\
		int __pages = min((pages),				\
				  __TLBI_RANGE_PAGES(31, (scale)));	\
		(__pages >> (5 * (scale) + 1)) - 1;			\
	})
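
/*
 * Worked example: __TLBI_RANGE_PAGES(31, 3) = 32 << 16 = 2097152 pages, i.e.
 * MAX_TLBI_RANGE_PAGES corresponds to 8GB of 4K pages. For pages = 512 and
 * scale = 1, __TLBI_RANGE_NUM() returns (512 >> 6) - 1 = 7, so a single range
 * TLBI with num = 7 covers exactly (7 + 1) * 2^6 = 512 pages.
 */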
/*
 * This header file implements the low-level TLB invalidation routines
 * (sometimes referred to as "flushing" in the kernel) for arm64.
 *
 * Every invalidation operation uses the following template:
 *
 *	DSB ISHST	// Ensure prior page-table updates have completed
 *	TLBI ...	// Invalidate the TLB
 *	DSB ISH		// Ensure the TLB invalidation has completed
 *	if (invalidated kernel mappings)
 *		ISB	// Discard any instructions fetched from the old mapping
 *
 *
 * The following functions form part of the "core" TLB invalidation API,
 * as documented in Documentation/core-api/cachetlb.rst:
 *
 *	flush_tlb_all()
 *		Invalidate the entire TLB (kernel + user) on all CPUs
 *
 *	flush_tlb_mm(mm)
 *		Invalidate an entire user address space on all CPUs.
 *		The 'mm' argument identifies the ASID to invalidate.
 *
 *	flush_tlb_range(vma, start, end)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->vm_mm'.
 *		Note that this operation also invalidates any walk-cache
 *		entries associated with translations for the specified address
 *		range.
 *
 *	flush_tlb_kernel_range(start, end)
 *		Same as flush_tlb_range(..., start, end), but applies to
 *		kernel mappings rather than a particular user address space.
 *		Whilst not explicitly documented, this function is used when
 *		unmapping pages from vmalloc/io space.
 *
 *	flush_tlb_page(vma, addr)
 *		Invalidate a single user mapping for address 'addr' in the
 *		address space corresponding to 'vma->vm_mm'. Note that this
 *		operation only invalidates a single, last-level page-table
 *		entry and therefore does not affect any walk-caches.
 *
 *
 * Next, we have some undocumented invalidation routines that you probably
 * don't want to call unless you know what you're doing:
 *
 *	local_flush_tlb_all()
 *		Same as flush_tlb_all(), but only applies to the calling CPU.
 *
 *	__flush_tlb_kernel_pgtable(addr)
 *		Invalidate a single kernel mapping for address 'addr' on all
 *		CPUs, ensuring that any walk-cache entries associated with the
 *		translation are also invalidated.
 *
 *	__flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
 *		Invalidate the virtual-address range '[start, end)' on all
 *		CPUs for the user address space corresponding to 'vma->vm_mm'.
 *		The invalidation operations are issued at a granularity
 *		determined by 'stride' and only affect any walk-cache entries
 *		if 'last_level' is false. tlb_level is the level at which the
 *		invalidation must take place. If the level is wrong, no
 *		invalidation may take place. In the case where the level
 *		cannot be easily determined, the value TLBI_TTL_UNKNOWN will
 *		perform a non-hinted invalidation.
 *
 * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
 * on top of these routines, since that is our interface to the mmu_gather
 * API as used by munmap() and friends.
 */
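
/*
 * Usage sketch (simplified, not verbatim kernel code): after clearing a single
 * user PTE a caller would typically issue
 *
 *	ptep_get_and_clear(mm, addr, ptep);
 *	flush_tlb_page(vma, addr);
 *
 * whereas tearing down a vmalloc/io mapping ends with
 *
 *	flush_tlb_kernel_range(start, end);
 */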
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);
	__tlbi(vmalle1);
	dsb(nsh);
	isb();
}

static inline void flush_tlb_all(void)
{
	dsb(ishst);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid;

	dsb(ishst);
	asid = __TLBI_VADDR(0, ASID(mm));
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}
static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
					   unsigned long uaddr)
{
	unsigned long addr;

	dsb(ishst);
	addr = __TLBI_VADDR(uaddr, ASID(mm));
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, uaddr & PAGE_MASK,
						(uaddr & PAGE_MASK) + PAGE_SIZE);
}
static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
					 unsigned long uaddr)
{
	return __flush_tlb_page_nosync(vma->vm_mm, uaddr);
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	flush_tlb_page_nosync(vma, uaddr);
	dsb(ish);
}
static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
	/*
	 * TLB flush deferral is not required on systems which are affected by
	 * ARM64_WORKAROUND_REPEAT_TLBI, as the __tlbi()/__tlbi_user()
	 * implementation will emit two consecutive TLBI instructions with a
	 * dsb(ish) in between, defeating the purpose (i.e. saving the overall
	 * 'dsb ish' cost).
	 */
	if (alternative_has_cap_unlikely(ARM64_WORKAROUND_REPEAT_TLBI))
		return false;

	return true;
}
static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
					     struct mm_struct *mm,
					     unsigned long uaddr)
{
	__flush_tlb_page_nosync(mm, uaddr);
}
/*
 * If mprotect/munmap/etc occurs during TLB batched flushing, we need to
 * synchronise all the TLBIs issued with a DSB to avoid the race mentioned in
 * flush_tlb_batched_pending().
 */
static inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
{
	dsb(ish);
}
/*
 * To support TLB batched flushing of multiple pages being unmapped, we only
 * send the TLBI for each page in arch_tlbbatch_add_pending() and wait for the
 * completion at the end in arch_tlbbatch_flush(). Since we've already issued
 * a TLBI for each page, only a DSB is needed to synchronise its effect on the
 * pages.
 *
 * This saves the time spent waiting on the DSB compared with issuing a
 * TLBI;DSB sequence for each page.
 */
static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
{
	dsb(ish);
}
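
/*
 * Ordering sketch (simplified, not verbatim kernel code): the core mm issues
 * one nosync TLBI per page and a single barrier at the end of the batch:
 *
 *	for each page being unmapped:
 *		arch_tlbbatch_add_pending(&batch, mm, uaddr);	// TLBI only
 *	...
 *	arch_tlbbatch_flush(&batch);				// one dsb(ish)
 */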
/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.
 */
#define MAX_DVM_OPS	PTRS_PER_PTE
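
/*
 * Worked example: with 4K pages, PTRS_PER_PTE is 512, so a non-range flush loop
 * is capped at 512 TLBIs, i.e. a 2MB range at PAGE_SIZE stride, before the
 * callers below fall back to a full MM or TLB flush.
 */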
/*
 * __flush_tlb_range_op - Perform TLBI operation upon a range
 *
 * @op:	TLBI instruction that operates on a range (has 'r' prefix)
 * @start:	The start address of the range
 * @pages:	Range as the number of pages from 'start'
 * @stride:	Flush granularity
 * @asid:	The ASID of the task (0 for IPA instructions)
 * @tlb_level:	Translation Table level hint, if known
 * @tlbi_user:	If 'true', call an additional __tlbi_user()
 *		(typically for user ASIDs). 'false' for IPA instructions
 * @lpa2:	If 'true', the lpa2 scheme is used as set out below
 *
 * When the CPU does not support TLB range operations, flush the TLB
 * entries one by one at the granularity of 'stride'. If the TLB
 * range ops are supported, then:
 *
 * 1. If FEAT_LPA2 is in use, the start address of a range operation must be
 *    64KB aligned, so flush pages one by one until the alignment is reached
 *    using the non-range operations. This step is skipped if LPA2 is not in
 *    use.
 *
 * 2. The minimum range granularity is decided by 'scale', so multiple range
 *    TLBI operations may be required. Start from scale = 3, flush the largest
 *    possible number of pages ((num+1)*2^(5*scale+1)) that fit into the
 *    requested range, then decrement scale and continue until one or zero
 *    pages are left. We must start from the highest scale to ensure 64KB
 *    start alignment is maintained in the LPA2 case.
 *
 * 3. If there is 1 page remaining, flush it through non-range operations. Range
 *    operations can only span an even number of pages. We save this for last to
 *    ensure 64KB start alignment is maintained for the LPA2 case.
 */
#define __flush_tlb_range_op(op, start, pages, stride,			\
				asid, tlb_level, tlbi_user, lpa2)	\
do {									\
	int num = 0;							\
	int scale = 3;							\
	int shift = lpa2 ? 16 : PAGE_SHIFT;				\
	unsigned long addr;						\
									\
	while (pages > 0) {						\
		if (!system_supports_tlb_range() ||			\
		    pages == 1 ||					\
		    (lpa2 && start != ALIGN(start, SZ_64K))) {		\
			addr = __TLBI_VADDR(start, asid);		\
			__tlbi_level(op, addr, tlb_level);		\
			if (tlbi_user)					\
				__tlbi_user_level(op, addr, tlb_level);	\
			start += stride;				\
			pages -= stride >> PAGE_SHIFT;			\
			continue;					\
		}							\
									\
		num = __TLBI_RANGE_NUM(pages, scale);			\
		if (num >= 0) {						\
			addr = __TLBI_VADDR_RANGE(start >> shift, asid,	\
						scale, num, tlb_level);	\
			__tlbi(r##op, addr);				\
			if (tlbi_user)					\
				__tlbi_user(r##op, addr);		\
			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
			pages -= __TLBI_RANGE_PAGES(num, scale);	\
		}							\
		scale--;						\
	}								\
} while (0)
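
/*
 * Worked trace (hypothetical: 4K pages, range ops supported, no LPA2): for
 * pages = 130, scale 3 and 2 yield num = -1 and are skipped; scale = 1 gives
 * num = 1 and a range TLBI covering 128 pages; the remaining 2 pages are
 * covered by a final range TLBI with scale = 0, num = 0.
 */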
#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level)	\
	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
				     int tlb_level)
{
	unsigned long asid, pages;

	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	/*
	 * When not using TLB range ops, we can handle up to
	 * (MAX_DVM_OPS - 1) pages;
	 * When using TLB range ops, we can handle up to
	 * (MAX_TLBI_RANGE_PAGES - 1) pages.
	 */
	if ((!system_supports_tlb_range() &&
	     (end - start) >= (MAX_DVM_OPS * stride)) ||
	    pages >= MAX_TLBI_RANGE_PAGES) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	dsb(ishst);
	asid = ASID(vma->vm_mm);

	if (last_level)
		__flush_tlb_range_op(vale1is, start, pages, stride, asid,
				     tlb_level, true, lpa2_is_enabled());
	else
		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
				     tlb_level, true, lpa2_is_enabled());

	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
}
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
				     int tlb_level)
{
	__flush_tlb_range_nosync(vma, start, end, stride,
				 last_level, tlb_level);
	dsb(ish);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/*
	 * We cannot use leaf-only invalidation here, since we may be invalidating
	 * table entries as part of collapsing hugepages or moving page tables.
	 * Set the tlb_level to TLBI_TTL_UNKNOWN because we cannot get enough
	 * information here.
	 */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, TLBI_TTL_UNKNOWN);
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > (MAX_DVM_OPS * PAGE_SIZE)) {
		flush_tlb_all();
		return;
	}

	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	/* The operands encode VA >> 12, so each step advances by one page */
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaale1is, addr);
	dsb(ish);
	isb();
}
/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).
 */
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}