// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0.
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

#define ARM_LPAE_GRANULE(d)		(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))

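/*
 * Worked example (illustrative, not from the original file): with a 4K
 * granule, pg_shift = 12 and bits_per_level = 12 - ilog2(sizeof(u64)) = 9.
 * A 48-bit IAS then needs levels = DIV_ROUND_UP(48 - 12, 9) = 4, so
 * ARM_LPAE_LVL_SHIFT() evaluates to 39/30/21/12 for levels 0-3 and
 * ARM_LPAE_BLOCK_SIZE() to 512G/1G/2M/4K respectively.
 */
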
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

/* IOPTE accessors */
#define iopte_deref(pte,d)	__va(iopte_to_paddr(pte, d))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (data->pg_shift < 16)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}

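/*
 * Example of the packing above (illustrative, not from the original file):
 * with a 64K granule, PTE bits 15:12 are RES0, so a 52-bit OAS stashes PA
 * bits 51:48 there. paddr_to_iopte() shifts bits 51:48 down by 36 into
 * 15:12 before masking; iopte_to_paddr() shifts them back up and masks
 * with ADDR_MASK << 4 to recover PA bits 51:16.
 */
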
static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;
	pte |= ARM_LPAE_PTE_SH_IS;
	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

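/*
 * Illustrative result (not from the original file): for a stage-1 2MB block
 * at the 4K granule (installed at level 2), the PTE built above is
 * prot | ARM_LPAE_PTE_TYPE_BLOCK | ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS,
 * with bits 47:21 holding the output address.
 */
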
static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl, data->iop.fmt)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
	    (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

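/*
 * Illustrative race (not from the original file): if two CPUs race to
 * install a table for the same IOVA, both allocate, one wins the cmpxchg
 * above and the loser frees its copy (see __arm_lpae_map()). The loser
 * cannot tell whether the winner has synced its PTE to the device yet, so
 * it kicks the sync itself rather than wait, while the winner marks the
 * entry with ARM_LPAE_PTE_SW_SYNC so later walkers can skip the redundant
 * sync.
 */
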
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
		   !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

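/*
 * Illustrative walk (not from the original file): mapping 2MB at IOVA
 * 0x40200000 in a 4-level, 4K-granule table recurses from level 0; levels
 * 0 and 1 take the "grab a pointer to the next level" path (allocating
 * tables on demand), and level 2 satisfies size == block_size, so a single
 * block PTE is installed there.
 */
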
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}

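/*
 * Illustrative encoding (not from the original file): a stage-1
 * IOMMU_READ | IOMMU_CACHE request comes out as ARM_LPAE_PTE_nG |
 * ARM_LPAE_PTE_AP_RDONLY | ARM_LPAE_PTE_AP_UNPRIV with MAIR index
 * ARM_LPAE_MAIR_ATTR_IDX_CACHE; the same prot at stage 2 becomes
 * ARM_LPAE_PTE_HAP_READ | ARM_LPAE_PTE_MEMATTR_OIWB.
 */
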
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

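/*
 * Typical usage from an IOMMU driver (sketch only, mirroring the selftests
 * at the bottom of this file; error handling elided):
 *
 *	struct io_pgtable_ops *ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
 *							  &cfg, cookie);
 *	ops->map(ops, iova, paddr, SZ_2M, IOMMU_READ | IOMMU_WRITE);
 *	WARN_ON(ops->iova_to_phys(ops, iova) != paddr);
 *	ops->unmap(ops, iova, SZ_2M);
 */
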
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
		io_pgtable_tlb_sync(&data->iop);
		return size;
	}

	return __arm_lpae_unmap(data, iova, size, lvl, tablep);
}

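/*
 * Illustrative split (not from the original file): unmapping 4K out of a
 * 2M block with a 4K granule allocates a 512-entry level-3 table, fills
 * every slot except the one being unmapped with the old block's attributes,
 * then swaps the table in with arm_lpae_install_table().
 */
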
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl, iop->fmt)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_add_flush(iop, iova, size,
						 ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
			/*
			 * Order the PTE update against queueing the IOVA, to
			 * guarantee that a flush callback from a different CPU
			 * has observed it before the TLBIALL can be issued.
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

	return __arm_lpae_unmap(data, iova, size, lvl, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

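/*
 * Illustrative restriction (not from the original file): on a 4K-page CPU,
 * an incoming pgsize_bitmap of SZ_4K | SZ_64K | SZ_2M | SZ_1G selects the
 * 4K granule and is trimmed to SZ_4K | SZ_2M | SZ_1G; a 64K-only bitmap
 * would instead select the 64K granule and allow SZ_64K | SZ_512M.
 */
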
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

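/*
 * Illustrative sizing (not from the original file): for ias = 48 with a 4K
 * granule, va_bits = 36, levels = 4 and pgd_size = 4K; for ias = 42 with a
 * 64K granule, va_bits = 26, levels = 2 and the pgd is a single 64K table
 * (pgd_bits = 13).
 */
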
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

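/*
 * Illustrative concatenation (not from the original file): a stage-2 config
 * with ias = 40 and a 4K granule would otherwise need 4 levels with a
 * 16-byte level-0 pgd; concatenating instead yields two level-1 tables
 * (pgd_size = 8K) and a 3-level walk, which is what the block above does.
 */
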
static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias != 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		u64 mair, ttbr;

		/* Copy values as union fields overlap */
		mair = cfg->arm_lpae_s1_cfg.mair[0];
		ttbr = cfg->arm_lpae_s1_cfg.ttbr[0];

		cfg->arm_mali_lpae_cfg.memattr = mair;
		cfg->arm_mali_lpae_cfg.transtab = ttbr |
			ARM_MALI_LPAE_TTBR_READ_INNER |
			ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	}

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.quirks = IO_PGTABLE_QUIRK_NO_DMA,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif