/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */
#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"
/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		128

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))
/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))
#ifdef CONFIG_64BIT
#define smmu_writeq		writeq_relaxed
#else
#define smmu_writeq(reg64, addr)				\
	do {							\
		u64 __val = (reg64);				\
		void __iomem *__addr = (addr);			\
		writel_relaxed(__val >> 32, __addr + 4);	\
		writel_relaxed(__val, __addr);			\
	} while (0)
#endif
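/*
 * Note that the 32-bit fallback above issues two 32-bit writes (high word
 * first), so the 64-bit register update is not atomic on 32-bit hosts. The
 * registers written this way (e.g. the TTBRs below) tolerate this because
 * the context bank is still disabled while it is being initialised.
 */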
/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff
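/*
 * A stream match register matches an incoming StreamID when the bits that
 * are clear in its MASK field agree with its ID field; set mask bits are
 * "don't care", i.e. roughly: hit = ((sid ^ smr.id) & ~smr.mask) == 0.
 * This driver currently programs a zero mask, so each allocated SMR matches
 * exactly one StreamID.
 */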
#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_UNPRIV		(2 << S2CR_PRIVCFG_SHIFT)
/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))
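/*
 * The register file is therefore laid out as: global register space 0 at
 * the base (one page), global register space 1 in the following page, and
 * the translation context banks, one page each, occupying the upper half
 * of the mapped region.
 */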
#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR_LO		0x50
#define ARM_SMMU_CB_PAR_HI		0x54
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0
#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)
static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
enum arm_smmu_arch_version {
	ARM_SMMU_V1 = 1,
	ARM_SMMU_V2,
};

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master_cfg {
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_master {
	struct device_node		*of_node;
	struct rb_node			node;
	struct arm_smmu_master_cfg	cfg;
};
struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	struct list_head		list;
	struct rb_root			masters;
};
struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(cfg)		((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg)		((cfg)->cbndx + 1)
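/*
 * Deriving the ASID/VMID directly from the context bank index keeps the TLB
 * tags unique per context bank without needing a separate allocator; the
 * VMID is offset by one so that VMID 0 is never handed out.
 */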
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};
struct arm_smmu_phandle_args {
	struct device_node *np;
	int args_count;
	uint32_t args[MAX_MASTER_STREAMIDS];
};

static struct iommu_ops arm_smmu_ops;

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);
struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};
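/*
 * An illustrative device-tree fragment selecting the Calxeda workaround
 * (the only option currently defined); properties abridged:
 *
 *	smmu {
 *		compatible = "arm,smmu-v1";
 *		...
 *		calxeda,smmu-secure-config-access;
 *	};
 */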
static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}
static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return bus->bridge->parent->of_node;
	}

	return dev->of_node;
}
static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;

		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}
static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
	struct arm_smmu_master_cfg *cfg = NULL;
	struct iommu_group *group = iommu_group_get(dev);

	if (group) {
		cfg = iommu_group_get_iommudata(group);
		iommu_group_put(group);
	}

	return cfg;
}
static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this
			= container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}
static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct arm_smmu_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node			= masterspec->np;
	master->cfg.num_streamids	= masterspec->args_count;

	for (i = 0; i < master->cfg.num_streamids; ++i) {
		u16 streamid = masterspec->args[i];

		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
		     (streamid >= smmu->num_mapping_groups)) {
			dev_err(dev,
				"stream ID for master device %s greater than maximum allowed (%d)\n",
				masterspec->np->name, smmu->num_mapping_groups);
			return -ERANGE;
		}
		master->cfg.streamids[i] = streamid;
	}
	return insert_smmu_master(smmu, master);
}
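/*
 * Masters are described to this driver with the legacy "mmu-masters"
 * binding; an illustrative fragment (properties abridged):
 *
 *	smmu {
 *		compatible = "arm,smmu-v1";
 *		...
 *		mmu-masters = <&dma0 0xd01d 0xd01e>,
 *			      <&dma1 0xd11c>;
 *	};
 *
 * where each phandle is followed by that master's stream IDs, and each
 * master node sets #stream-id-cells to the number of IDs it carries.
 */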
static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master *master = NULL;
	struct device_node *dev_node = dev_get_dev_node(dev);

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(smmu, &arm_smmu_devices, list) {
		master = find_smmu_master(smmu, dev_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	return master ? smmu : NULL;
}
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;

	__arm_smmu_tlb_sync(smmu_domain->smmu);
}
static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
#ifdef CONFIG_64BIT
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
#endif
		}
#ifdef CONFIG_64BIT
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			writeq_relaxed(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
#endif
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg);
	}
}
static struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
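/*
 * These callbacks implement the io-pgtable TLB contract: the page-table
 * code calls tlb_add_flush() as it unmaps each region (no synchronisation
 * implied), then tlb_sync() once it needs those invalidations to have
 * completed.
 */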
static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		dev_err_ratelimited(smmu->dev,
		    "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
		    iova, fsynr, cfg->cbndx);
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}
static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		/*
		 * CBA2R.
		 * *Must* be initialised before CBAR thanks to a VMID16
		 * architectural oversight that affected some implementations.
		 */
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version == ARM_SMMU_V1)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else {
		reg |= ARM_SMMU_CB_VMID(cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];

		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);

		reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
		reg64 |= ((u64)ARM_SMMU_CB_ASID(cfg)) << TTBRn_ASID_SHIFT;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
		if (smmu->version > ARM_SMMU_V1) {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg |= TTBCR2_SEP_UPSTREAM;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
		}
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
	}

	/* MAIRs (stage-1 only) */
	if (stage1) {
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}
static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 * S1               N                S1
	 * S1               S1+S2            S1
	 * S1               S2               S2
	 * S1               S1               S1
	 * N                N                N
	 * N                S1+S2            NESTED
	 * N                S2               S2
	 * N                S1               S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S1;
		else
			fmt = ARM_32_LPAE_S1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (IS_ENABLED(CONFIG_64BIT))
			fmt = ARM_64_LPAE_S2;
		else
			fmt = ARM_32_LPAE_S2;
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version == ARM_SMMU_V1) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= arm_smmu_ops.pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update our support page sizes to reflect the page table format */
	arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		free_irq(irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}
static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}
static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (cfg->smrs)
		return -EEXIST;

	smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs\n",
			cfg->num_streamids);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the SMMU */
	for (i = 0; i < cfg->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= cfg->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	cfg->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}
static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master_cfg *cfg)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = cfg->smrs;

	if (!smrs)
		return;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u8 idx = smrs[i].idx;

		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	cfg->smrs = NULL;
	kfree(smrs);
}
static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master_cfg *cfg)
{
	int i, ret;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* Devices in an IOMMU group may already be configured */
	ret = arm_smmu_master_configure_smrs(smmu, cfg);
	if (ret)
		return ret == -EEXIST ? 0 : ret;

	/*
	 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
	 * for all devices behind the SMMU.
	 */
	if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
		return 0;

	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx, s2cr;

		idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
		       (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}
static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master_cfg *cfg)
{
	int i;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	/* An IOMMU group is torn down by the first device to be removed */
	if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
		return;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	for (i = 0; i < cfg->num_streamids; ++i) {
		u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
		u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;

		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	arm_smmu_master_free_smrs(smmu, cfg);
}
static void arm_smmu_detach_dev(struct device *dev,
				struct arm_smmu_master_cfg *cfg)
{
	struct iommu_domain *domain = dev->archdata.iommu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	dev->archdata.iommu = NULL;
	arm_smmu_domain_remove_master(smmu_domain, cfg);
}
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;

	smmu = find_smmu_for_device(dev);
	if (!smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (IS_ERR_VALUE(ret))
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	cfg = find_smmu_master_cfg(dev);
	if (!cfg)
		return -ENODEV;

	/* Detach the dev from its current domain */
	if (dev->archdata.iommu)
		arm_smmu_detach_dev(dev, cfg);

	ret = arm_smmu_domain_add_master(smmu_domain, cfg);
	if (!ret)
		dev->archdata.iommu = domain;
	return ret;
}
static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
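/*
 * Illustrative (hypothetical) client usage via the generic IOMMU API, which
 * ends up in the map/unmap callbacks above:
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (domain && !iommu_attach_device(domain, dev))
 *		iommu_map(domain, iova, paddr, SZ_4K,
 *			  IOMMU_READ | IOMMU_WRITE);
 */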
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}
static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
	phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;

	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
			smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((u16 *)data) = alias;
	return 0; /* Continue walking */
}

static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}
static int arm_smmu_init_pci_device(struct pci_dev *pdev,
				    struct iommu_group *group)
{
	struct arm_smmu_master_cfg *cfg;
	u16 sid;
	int i;

	cfg = iommu_group_get_iommudata(group);
	if (!cfg) {
		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg)
			return -ENOMEM;

		iommu_group_set_iommudata(group, cfg,
					  __arm_smmu_release_pci_iommudata);
	}

	if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
		return -ENOSPC;

	/*
	 * Assume Stream ID == Requester ID for now.
	 * We need a way to describe the ID mappings in FDT.
	 */
	pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
	for (i = 0; i < cfg->num_streamids; ++i)
		if (cfg->streamids[i] == sid)
			break;

	/* Avoid duplicate SIDs, as this can lead to SMR conflicts */
	if (i == cfg->num_streamids)
		cfg->streamids[cfg->num_streamids++] = sid;

	return 0;
}
static int arm_smmu_init_platform_device(struct device *dev,
					 struct iommu_group *group)
{
	struct arm_smmu_device *smmu = find_smmu_for_device(dev);
	struct arm_smmu_master *master;

	if (!smmu)
		return -ENODEV;

	master = find_smmu_master(smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	iommu_group_set_iommudata(group, &master->cfg, NULL);

	return 0;
}
static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i = 0;
	u32 reg;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
	reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_dt, cttw_reg;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
		(ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
		((smmu->version == ARM_SMMU_V1) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the DT says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_dt)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
	if (cttw_dt || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_dt ? "" : "non-");
	if (cttw_dt != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by dma-coherent property)\n");

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	} else {
		smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
					   ID0_NUMSIDB_MASK;
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;

	/* Check for size mismatch of SMMU address space from mapped region */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= 2 << smmu->pgshift;
	if (smmu->size != size)
		dev_warn(smmu->dev,
			"SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
			size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version == ARM_SMMU_V1) {
		smmu->va_size = smmu->ipa_size;
		size = SZ_4K | SZ_2M | SZ_1G;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
#ifndef CONFIG_64BIT
		smmu->va_size = min(32UL, smmu->va_size);
#endif
		size = 0;
		if (id & ID2_PTFS_4K)
			size |= SZ_4K | SZ_2M | SZ_1G;
		if (id & ID2_PTFS_16K)
			size |= SZ_16K | SZ_32M;
		if (id & ID2_PTFS_64K)
			size |= SZ_64K | SZ_512M;
	}

	arm_smmu_ops.pgsize_bitmap &= size;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}
static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,smmu-v2", .data = (void *)ARM_SMMU_V2 },
	{ .compatible = "arm,mmu-400", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,mmu-401", .data = (void *)ARM_SMMU_V1 },
	{ .compatible = "arm,mmu-500", .data = (void *)ARM_SMMU_V2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
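/*
 * An illustrative (abridged, hypothetical addresses/IRQ numbers) device-tree
 * node matching this driver, with one global interrupt followed by the
 * per-context-bank interrupts:
 *
 *	smmu@2b400000 {
 *		compatible = "arm,mmu-500", "arm,smmu-v2";
 *		reg = <0x2b400000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 229 4>, <0 230 4>, <0 231 4>;
 *	};
 */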
static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id;
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_iterator it;
	struct arm_smmu_phandle_args *masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	of_id = of_match_node(arm_smmu_of_match, dev->of_node);
	smmu->version = (enum arm_smmu_arch_version)of_id->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	i = 0;
	smmu->masters = RB_ROOT;

	err = -ENOMEM;
	/* No need to zero the memory for masterspec */
	masterspec = kmalloc(sizeof(*masterspec), GFP_KERNEL);
	if (!masterspec)
		goto out_put_masters;

	of_for_each_phandle(&it, err, dev->of_node,
			    "mmu-masters", "#stream-id-cells", 0) {
		int count = of_phandle_iterator_args(&it, masterspec->args,
						     MAX_MASTER_STREAMIDS);
		masterspec->np		= of_node_get(it.node);
		masterspec->args_count	= count;

		err = register_smmu_master(smmu, dev, masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec->np->name);
			kfree(masterspec);
			goto out_put_masters;
		}

		i++;
	}

	dev_notice(dev, "registered %d master devices\n", i);
	kfree(masterspec);

	parse_driver_options(smmu);

	if (smmu->version > ARM_SMMU_V1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_masters;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);

	arm_smmu_device_reset(smmu);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master
			= container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};
static int __init arm_smmu_init(void)
{
	struct device_node *np;
	int ret;

	/*
	 * Play nice with systems that don't have an ARM SMMU by checking that
	 * an ARM SMMU exists in the system before proceeding with the driver
	 * and IOMMU bus operation registration.
	 */
	np = of_find_matching_node(NULL, arm_smmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif

#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type))
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
#endif

	return 0;
}
static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");