// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

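/*
 * Worked example: with a 4KiB granule, arm_lpae_iopte is 8 bytes, so
 * (d)->bits_per_level = 9 and ilog2(sizeof(arm_lpae_iopte)) = 3, giving:
 *
 *	ARM_LPAE_LVL_SHIFT(3,d) = (4 - 3) * 9 + 3 = 12
 *	ARM_LPAE_LVL_SHIFT(2,d) = (4 - 2) * 9 + 3 = 21
 *	ARM_LPAE_LVL_SHIFT(1,d) = (4 - 1) * 9 + 3 = 30
 *
 * which are exactly the 4KiB/2MiB/1GiB translation boundaries.
 */
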
#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))

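/*
 * Worked example: continuing with the 4KiB granule above,
 * ARM_LPAE_BLOCK_SIZE() yields 1GiB at level 1, 2MiB at level 2 and 4KiB
 * at level 3, matching the (SZ_4K | SZ_2M | SZ_1G) pgsize_bitmap that
 * arm_lpae_restrict_pgsizes() advertises for this granule.
 */
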
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}

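/*
 * Worked example: with a 64KiB granule and a 52-bit OAS, PA bits 51:48 are
 * packed into PTE bits 15:12 (RES0 for 64KiB-aligned tables and pages).
 * Packing paddr 0x000f_0000_2345_0000:
 *
 *	pte = (pa | pa >> 36) & GENMASK_ULL(47,12) = 0x2345_f000
 *
 * and iopte_to_paddr() rotates bits 15:12 back to 51:48 with the << 36
 * above. For granules below 64KiB the OAS is capped at 48 bits, so the
 * bits that would collide are always zero and the round-trip stays exact.
 */
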
static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg,
				    void *cookie)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));

	if (cfg->alloc) {
		pages = cfg->alloc(cookie, size, gfp);
	} else {
		struct page *p;

		p = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, order);
		pages = p ? page_address(p) : NULL;
	}

	if (!pages)
		return NULL;

	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);

out_free:
	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		free_pages((unsigned long)pages, order);

	return NULL;
}

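/*
 * Illustrative sketch (hypothetical, not part of the driver): a minimal
 * cfg->alloc/cfg->free pair built on the page allocator, assuming the
 * contract relied on above holds: tables are returned zeroed and naturally
 * aligned for their order. Names are invented for illustration.
 *
 *	static void *my_pgtable_alloc(void *cookie, size_t size, gfp_t gfp)
 *	{
 *		return (void *)__get_free_pages(gfp | __GFP_ZERO,
 *						get_order(size));
 *	}
 *
 *	static void my_pgtable_free(void *cookie, void *pages, size_t size)
 *	{
 *		free_pages((unsigned long)pages, get_order(size));
 *	}
 */
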
static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg,
				  void *cookie)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);

	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg)
{
	*ptep = 0;

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, 1, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

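/*
 * Note on the ARM_LPAE_PTE_SW_SYNC handshake above: on a non-coherent walk,
 * two mappers can race to install the same next-level table. The cmpxchg
 * picks a winner; any thread that then finds the PTE without SW_SYNC set
 * cleans it to the point of coherency itself, and the actual winner
 * (old == curr) republishes the entry with SW_SYNC so that later observers
 * can skip the redundant sync.
 */
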
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - map_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}

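/*
 * Worked example: mapping a 2MiB page with a 4KiB granule recurses from the
 * start level until size == ARM_LPAE_BLOCK_SIZE(lvl, data), which happens
 * at level 2, where a block PTE is installed and the walk stops without
 * ever touching level 3. Missing intermediate tables are created on
 * demand, with arm_lpae_install_table() arbitrating concurrent mappers.
 */
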
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}

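/*
 * Summary of the encoding above: IOMMU_READ/IOMMU_WRITE select the AP bits
 * at stage 1 and the HAP bits at stage 2; IOMMU_MMIO and IOMMU_CACHE pick
 * the memory type (a MAIR index at stage 1, a direct MemAttr encoding at
 * stage 2); IOMMU_NOEXEC sets XN; and every format except Mali sets the
 * Access Flag up front so that hardware AF faults never occur.
 */
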
static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       struct iommu_iotlb_gather *gather,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep, size_t pgcount)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);
	int i, unmap_idx_start = -1, num_entries = 0, max_entries;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg, data->iop.cookie);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz) {
		unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
		max_entries = ptes_per_table - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);
	}

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < ptes_per_table; i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i >= unmap_idx_start && i < (unmap_idx_start + num_entries))
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, 1, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, data);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg, data->iop.cookie);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx_start >= 0) {
		for (i = 0; i < num_entries; i++)
			io_pgtable_tlb_add_page(&data->iop, gather, iova + i * size, size);

		return num_entries * size;
	}

	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl, tablep);
}

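/*
 * Note: the replacement table is allocated with GFP_ATOMIC because unmap
 * may run in contexts that cannot sleep, and allocation failure is simply
 * reported as 0 bytes unmapped. If the cmpxchg inside
 * arm_lpae_install_table() is lost, a concurrent walker has already split
 * this block; we then free our own table and retry against the winner's.
 */
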
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = ARM_LPAE_PTES_PER_TABLE(data) - unmap_idx_start;
		num_entries = min_t(int, pgcount, max_entries);

		while (i < num_entries) {
			pte = READ_ONCE(*ptep);
			if (WARN_ON(!pte))
				break;

			__arm_lpae_clear_pte(ptep, &iop->cfg);

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			} else if (!iommu_iotlb_gather_queued(gather)) {
				io_pgtable_tlb_add_page(iop, gather, iova + i * size, size);
			}

			ptep++;
			i++;
		}

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, gather, iova, size, pte,
						lvl + 1, ptep, pgcount);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas == 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

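/*
 * Worked example: on a host with 4KiB PAGE_SIZE, a hardware pgsize_bitmap
 * of (SZ_4K | SZ_64K | SZ_2M | SZ_1G) matches PAGE_SIZE, so granule =
 * SZ_4K and the bitmap is cut down to (SZ_4K | SZ_2M | SZ_1G): the 64KiB
 * leaf belongs to a different translation granule and is dropped. Only the
 * 64KiB granule lifts max_addr_bits from 48 to 52.
 */
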
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map_pages	= arm_lpae_map_pages,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

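/*
 * Worked example: ias = 48 with a 4KiB granule gives pg_shift = 12,
 * bits_per_level = 9, va_bits = 36 and levels = DIV_ROUND_UP(36, 9) = 4,
 * so start_level = 0 and pgd_bits = 36 - 3 * 9 = 9 (a single 4KiB pgd of
 * 512 entries). With ias = 32 instead, levels = 3, start_level = 1 and
 * pgd_bits = 2, i.e. a four-entry pgd.
 */
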
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

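/*
 * Worked example: ias = 40 with a 4KiB granule would need a 4-level walk
 * (va_bits = 28, pgd_bits = 1, i.e. two level-0 entries). Since 2 <=
 * ARM_LPAE_S2_MAX_CONCAT_PAGES, the code above concatenates two level-1
 * tables instead (pgd_bits becomes 10) and starts the walk at level 1,
 * trimming one level off every stage-2 walk.
 */
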
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size, mapped;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_READ | IOMMU_WRITE |
					   IOMMU_NOEXEC | IOMMU_CACHE,
					   GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map_pages(ops, iova, iova + size, size, 1,
					    IOMMU_READ | IOMMU_NOEXEC,
					    GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap_pages(ops, SZ_1G + size, size, 1, NULL) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map_pages(ops, SZ_1G + size, size, size, 1,
				   IOMMU_READ, GFP_KERNEL, &mapped))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_WRITE, GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct device dev;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
		.iommu_dev = &dev,
	};

	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
	set_dev_node(&dev, NUMA_NO_NODE);

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif