/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *      - SMMUv1 and v2 implementations
 *      - Stream-matching and stream-indexing
 *      - v7/v8 long-descriptor format
 *      - Non-secure access to the SMMU
 *      - Context fault reporting
 *      - Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS                128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)              ((smmu)->base)
#define ARM_SMMU_GR1(smmu)              ((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)                                           \
        ((smmu)->base +                                                 \
                ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)       \
                        ? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq            writeq_relaxed
#else
#define smmu_write_atomic_lq            writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0               0x0
#define sCR0_CLIENTPD                   (1 << 0)
#define sCR0_GFRE                       (1 << 1)
#define sCR0_GFIE                       (1 << 2)
#define sCR0_EXIDENABLE                 (1 << 3)
#define sCR0_GCFGFRE                    (1 << 4)
#define sCR0_GCFGFIE                    (1 << 5)
#define sCR0_USFCFG                     (1 << 10)
#define sCR0_VMIDPNE                    (1 << 11)
#define sCR0_PTM                        (1 << 12)
#define sCR0_FB                         (1 << 13)
#define sCR0_VMID16EN                   (1 << 31)
#define sCR0_BSU_SHIFT                  14
#define sCR0_BSU_MASK                   0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR               0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0                0x20
#define ARM_SMMU_GR0_ID1                0x24
#define ARM_SMMU_GR0_ID2                0x28
#define ARM_SMMU_GR0_ID3                0x2c
#define ARM_SMMU_GR0_ID4                0x30
#define ARM_SMMU_GR0_ID5                0x34
#define ARM_SMMU_GR0_ID6                0x38
#define ARM_SMMU_GR0_ID7                0x3c
#define ARM_SMMU_GR0_sGFSR              0x48
#define ARM_SMMU_GR0_sGFSYNR0           0x50
#define ARM_SMMU_GR0_sGFSYNR1           0x54
#define ARM_SMMU_GR0_sGFSYNR2           0x58

#define ID0_S1TS                        (1 << 30)
#define ID0_S2TS                        (1 << 29)
#define ID0_NTS                         (1 << 28)
#define ID0_SMS                         (1 << 27)
#define ID0_ATOSNS                      (1 << 26)
#define ID0_PTFS_NO_AARCH32             (1 << 25)
#define ID0_PTFS_NO_AARCH32S            (1 << 24)
#define ID0_CTTW                        (1 << 14)
#define ID0_NUMIRPT_SHIFT               16
#define ID0_NUMIRPT_MASK                0xff
#define ID0_NUMSIDB_SHIFT               9
#define ID0_NUMSIDB_MASK                0xf
#define ID0_EXIDS                       (1 << 8)
#define ID0_NUMSMRG_SHIFT               0
#define ID0_NUMSMRG_MASK                0xff

#define ID1_PAGESIZE                    (1 << 31)
#define ID1_NUMPAGENDXB_SHIFT           28
#define ID1_NUMPAGENDXB_MASK            7
#define ID1_NUMS2CB_SHIFT               16
#define ID1_NUMS2CB_MASK                0xff
#define ID1_NUMCB_SHIFT                 0
#define ID1_NUMCB_MASK                  0xff

#define ID2_OAS_SHIFT                   4
#define ID2_OAS_MASK                    0xf
#define ID2_IAS_SHIFT                   0
#define ID2_IAS_MASK                    0xf
#define ID2_UBS_SHIFT                   8
#define ID2_UBS_MASK                    0xf
#define ID2_PTFS_4K                     (1 << 12)
#define ID2_PTFS_16K                    (1 << 13)
#define ID2_PTFS_64K                    (1 << 14)
#define ID2_VMID16                      (1 << 15)

#define ID7_MAJOR_SHIFT                 4
#define ID7_MAJOR_MASK                  0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID           0x64
#define ARM_SMMU_GR0_TLBIALLNSNH        0x68
#define ARM_SMMU_GR0_TLBIALLH           0x6c
#define ARM_SMMU_GR0_sTLBGSYNC          0x70
#define ARM_SMMU_GR0_sTLBGSTATUS        0x74
#define sTLBGSTATUS_GSACTIVE            (1 << 0)
#define TLB_LOOP_TIMEOUT                1000000 /* 1s! */
#define TLB_SPIN_COUNT                  10

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)             (0x800 + ((n) << 2))
#define SMR_VALID                       (1 << 31)
#define SMR_MASK_SHIFT                  16
#define SMR_ID_SHIFT                    0

#define ARM_SMMU_GR0_S2CR(n)            (0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT                0
#define S2CR_CBNDX_MASK                 0xff
#define S2CR_EXIDVALID                  (1 << 10)
#define S2CR_TYPE_SHIFT                 16
#define S2CR_TYPE_MASK                  0x3
enum arm_smmu_s2cr_type {
        S2CR_TYPE_TRANS,
        S2CR_TYPE_BYPASS,
        S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT              24
#define S2CR_PRIVCFG_MASK               0x3
enum arm_smmu_s2cr_privcfg {
        S2CR_PRIVCFG_DEFAULT,
        S2CR_PRIVCFG_DIPAN,
        S2CR_PRIVCFG_UNPRIV,
        S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)            (0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT                 0
#define CBAR_VMID_MASK                  0xff
#define CBAR_S1_BPSHCFG_SHIFT           8
#define CBAR_S1_BPSHCFG_MASK            3
#define CBAR_S1_BPSHCFG_NSH             3
#define CBAR_S1_MEMATTR_SHIFT           12
#define CBAR_S1_MEMATTR_MASK            0xf
#define CBAR_S1_MEMATTR_WB              0xf
#define CBAR_TYPE_SHIFT                 16
#define CBAR_TYPE_MASK                  0x3
#define CBAR_TYPE_S2_TRANS              (0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS    (1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT     (2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS     (3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT              24
#define CBAR_IRPTNDX_MASK               0xff

#define ARM_SMMU_GR1_CBA2R(n)           (0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT                (0 << 0)
#define CBA2R_RW64_64BIT                (1 << 0)
#define CBA2R_VMID_SHIFT                16
#define CBA2R_VMID_MASK                 0xffff

/* Translation context bank */
#define ARM_SMMU_CB(smmu, n)    ((smmu)->cb_base + ((n) << (smmu)->pgshift))

#define ARM_SMMU_CB_SCTLR               0x0
#define ARM_SMMU_CB_ACTLR               0x4
#define ARM_SMMU_CB_RESUME              0x8
#define ARM_SMMU_CB_TTBCR2              0x10
#define ARM_SMMU_CB_TTBR0               0x20
#define ARM_SMMU_CB_TTBR1               0x28
#define ARM_SMMU_CB_TTBCR               0x30
#define ARM_SMMU_CB_CONTEXTIDR          0x34
#define ARM_SMMU_CB_S1_MAIR0            0x38
#define ARM_SMMU_CB_S1_MAIR1            0x3c
#define ARM_SMMU_CB_PAR                 0x50
#define ARM_SMMU_CB_FSR                 0x58
#define ARM_SMMU_CB_FAR                 0x60
#define ARM_SMMU_CB_FSYNR0              0x68
#define ARM_SMMU_CB_S1_TLBIVA           0x600
#define ARM_SMMU_CB_S1_TLBIASID         0x610
#define ARM_SMMU_CB_S1_TLBIVAL          0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2        0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L       0x638
#define ARM_SMMU_CB_TLBSYNC             0x7f0
#define ARM_SMMU_CB_TLBSTATUS           0x7f4
#define ARM_SMMU_CB_ATS1PR              0x800
#define ARM_SMMU_CB_ATSR                0x8f0

#define SCTLR_S1_ASIDPNE                (1 << 12)
#define SCTLR_CFCFG                     (1 << 7)
#define SCTLR_CFIE                      (1 << 6)
#define SCTLR_CFRE                      (1 << 5)
#define SCTLR_E                         (1 << 4)
#define SCTLR_AFE                       (1 << 2)
#define SCTLR_TRE                       (1 << 1)
#define SCTLR_M                         (1 << 0)

#define ARM_MMU500_ACTLR_CPRE           (1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK       (1 << 26)
#define ARM_MMU500_ACR_SMTNMB_TLBEN     (1 << 8)

#define CB_PAR_F                        (1 << 0)

#define ATSR_ACTIVE                     (1 << 0)

#define RESUME_RETRY                    (0 << 0)
#define RESUME_TERMINATE                (1 << 0)

#define TTBCR2_SEP_SHIFT                15
#define TTBCR2_SEP_UPSTREAM             (0x7 << TTBCR2_SEP_SHIFT)
#define TTBCR2_AS                       (1 << 4)

#define TTBRn_ASID_SHIFT                48

#define FSR_MULTI                       (1 << 31)
#define FSR_SS                          (1 << 30)
#define FSR_UUT                         (1 << 8)
#define FSR_ASF                         (1 << 7)
#define FSR_TLBLKF                      (1 << 6)
#define FSR_TLBMCF                      (1 << 5)
#define FSR_EF                          (1 << 4)
#define FSR_PF                          (1 << 3)
#define FSR_AFF                         (1 << 2)
#define FSR_TF                          (1 << 1)

#define FSR_IGN                         (FSR_AFF | FSR_ASF | \
                                         FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT                       (FSR_MULTI | FSR_SS | FSR_UUT | \
                                         FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR                      (1 << 4)

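/* Fixed IOVA window reserved for mapping MSI doorbells */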
#define MSI_IOVA_BASE                   0x8000000
#define MSI_IOVA_LENGTH                 0x100000

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
        "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
        "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
        ARM_SMMU_V1,
        ARM_SMMU_V1_64K,
        ARM_SMMU_V2,
};

enum arm_smmu_implementation {
        GENERIC_SMMU,
        ARM_MMU500,
        CAVIUM_SMMUV2,
};

/* Until ACPICA headers cover IORT rev. C */
#ifndef ACPI_IORT_SMMU_CORELINK_MMU401
#define ACPI_IORT_SMMU_CORELINK_MMU401  0x4
#endif
#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX
#define ACPI_IORT_SMMU_CAVIUM_THUNDERX  0x5
#endif

struct arm_smmu_s2cr {
        struct iommu_group              *group;
        int                             count;
        enum arm_smmu_s2cr_type         type;
        enum arm_smmu_s2cr_privcfg      privcfg;
        u8                              cbndx;
};

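/*
 * Compound literal giving the reset value for an S2CR: unused streams
 * either bypass translation or fault, depending on the module parameter.
 */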
#define s2cr_init_val (struct arm_smmu_s2cr){                           \
        .type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,    \
}

struct arm_smmu_smr {
        u16                             mask;
        u16                             id;
        bool                            valid;
};

struct arm_smmu_master_cfg {
        struct arm_smmu_device          *smmu;
        s16                             smendx[];
};
#define INVALID_SMENDX                  -1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
        (i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
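/*
 * Walk a master's stream map entries: the comma expression refreshes idx
 * from fwspec_smendx() on every iteration before i is tested against
 * num_ids, so idx is always in sync with i inside the loop body.
 */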
#define for_each_cfg_sme(fw, i, idx) \
        for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)

struct arm_smmu_device {
        struct device                   *dev;

        void __iomem                    *base;
        void __iomem                    *cb_base;
        unsigned long                   pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK     (1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH      (1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1          (1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2          (1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED      (1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS         (1 << 5)
#define ARM_SMMU_FEAT_VMID16            (1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K    (1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K   (1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K   (1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L     (1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S     (1 << 11)
#define ARM_SMMU_FEAT_EXIDS             (1 << 12)
        u32                             features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
        u32                             options;
        enum arm_smmu_arch_version      version;
        enum arm_smmu_implementation    model;

        u32                             num_context_banks;
        u32                             num_s2_context_banks;
        DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
        atomic_t                        irptndx;

        u32                             num_mapping_groups;
        u16                             streamid_mask;
        u16                             smr_mask_mask;
        struct arm_smmu_smr             *smrs;
        struct arm_smmu_s2cr            *s2crs;
        struct mutex                    stream_map_mutex;

        unsigned long                   va_size;
        unsigned long                   ipa_size;
        unsigned long                   pa_size;
        unsigned long                   pgsize_bitmap;

        u32                             num_global_irqs;
        u32                             num_context_irqs;
        unsigned int                    *irqs;

        u32                             cavium_id_base; /* Specific to Cavium */

        spinlock_t                      global_sync_lock;

        /* IOMMU core code handle */
        struct iommu_device             iommu;
};

enum arm_smmu_context_fmt {
        ARM_SMMU_CTX_FMT_NONE,
        ARM_SMMU_CTX_FMT_AARCH64,
        ARM_SMMU_CTX_FMT_AARCH32_L,
        ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
        u8                              cbndx;
        u8                              irptndx;
        union {
                u16                     asid;
                u16                     vmid;
        };
        u32                             cbar;
        enum arm_smmu_context_fmt       fmt;
};
#define INVALID_IRPTNDX                 0xff

enum arm_smmu_domain_stage {
        ARM_SMMU_DOMAIN_S1 = 0,
        ARM_SMMU_DOMAIN_S2,
        ARM_SMMU_DOMAIN_NESTED,
        ARM_SMMU_DOMAIN_BYPASS,
};

struct arm_smmu_domain {
        struct arm_smmu_device          *smmu;
        struct io_pgtable_ops           *pgtbl_ops;
        struct arm_smmu_cfg             cfg;
        enum arm_smmu_domain_stage      stage;
        struct mutex                    init_mutex; /* Protects smmu pointer */
        spinlock_t                      cb_lock; /* Serialises ATS1* ops and TLB syncs */
        struct iommu_domain             domain;
};

struct arm_smmu_option_prop {
        u32 opt;
        const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
        { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
        { 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
        int i = 0;

        do {
                if (of_property_read_bool(smmu->dev->of_node,
                                                arm_smmu_options[i].prop)) {
                        smmu->options |= arm_smmu_options[i].opt;
                        dev_notice(smmu->dev, "option %s\n",
                                arm_smmu_options[i].prop);
                }
        } while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
        if (dev_is_pci(dev)) {
                struct pci_bus *bus = to_pci_dev(dev)->bus;

                while (!pci_is_root_bus(bus))
                        bus = bus->parent;
                return of_node_get(bus->bridge->parent->of_node);
        }

        return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
        *((__be32 *)data) = cpu_to_be32(alias);
        return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
        struct of_phandle_iterator *it = *(void **)data;
        struct device_node *np = it->node;
        int err;

        of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
                            "#stream-id-cells", 0)
                if (it->node == np) {
                        *(void **)data = dev;
                        return 1;
                }
        it->node = np;
        return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
                                           struct arm_smmu_device **smmu)
{
        struct device *smmu_dev;
        struct device_node *np;
        struct of_phandle_iterator it;
        void *data = &it;
        u32 *sids;
        __be32 pci_sid;
        int err;

        np = dev_get_dev_node(dev);
        if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
                of_node_put(np);
                return -ENODEV;
        }

        it.node = np;
        err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
                                     __find_legacy_master_phandle);
        smmu_dev = data;
        of_node_put(np);
        if (err == 0)
                return -ENODEV;
        if (err < 0)
                return err;

        if (dev_is_pci(dev)) {
                /* "mmu-masters" assumes Stream ID == Requester ID */
                pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
                                       &pci_sid);
                it.cur = &pci_sid;
                it.cur_count = 1;
        }

        err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
                                &arm_smmu_ops);
        if (err)
                return err;

        sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
        if (!sids)
                return -ENOMEM;

        *smmu = dev_get_drvdata(smmu_dev);
        of_phandle_iterator_args(&it, sids, it.cur_count);
        err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
        kfree(sids);
        return err;
}

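/*
 * Lock-free bitmap allocator: find_next_zero_bit() may race with another
 * caller, so loop until test_and_set_bit() confirms we own the index.
 */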
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
        int idx;

        do {
                idx = find_next_zero_bit(map, end, start);
                if (idx == end)
                        return -ENOSPC;
        } while (test_and_set_bit(idx, map));

        return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
                                void __iomem *sync, void __iomem *status)
{
        unsigned int spin_cnt, delay;

        writel_relaxed(0, sync);
        for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
                for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
                        if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
                                return;
                        cpu_relax();
                }
                udelay(delay);
        }
        dev_err_ratelimited(smmu->dev,
                            "TLB sync timed out -- SMMU may be deadlocked\n");
}

static void arm_smmu_tlb_sync_global(struct arm_smmu_device *smmu)
{
        void __iomem *base = ARM_SMMU_GR0(smmu);
        unsigned long flags;

        spin_lock_irqsave(&smmu->global_sync_lock, flags);
        __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_GR0_sTLBGSYNC,
                            base + ARM_SMMU_GR0_sTLBGSTATUS);
        spin_unlock_irqrestore(&smmu->global_sync_lock, flags);
}

static void arm_smmu_tlb_sync_context(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *base = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
        unsigned long flags;

        spin_lock_irqsave(&smmu_domain->cb_lock, flags);
        __arm_smmu_tlb_sync(smmu, base + ARM_SMMU_CB_TLBSYNC,
                            base + ARM_SMMU_CB_TLBSTATUS);
        spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
}

static void arm_smmu_tlb_sync_vmid(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;

        arm_smmu_tlb_sync_global(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context_s1(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        void __iomem *base = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

        writel_relaxed(cfg->asid, base + ARM_SMMU_CB_S1_TLBIASID);
        arm_smmu_tlb_sync_context(cookie);
}

static void arm_smmu_tlb_inv_context_s2(void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *base = ARM_SMMU_GR0(smmu);

        writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
        arm_smmu_tlb_sync_global(smmu);
}

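/*
 * Per-page invalidation: the AArch32 TLBIVA(L) format takes VA[31:12]
 * with the ASID in the low byte, the AArch64 format takes VA >> 12 with
 * the ASID in bits [63:48], and stage 2 invalidations take IPA >> 12.
 */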
static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
                                          size_t granule, bool leaf, void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
        void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);

        if (stage1) {
                reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

                if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
                        iova &= ~12UL;
                        iova |= cfg->asid;
                        do {
                                writel_relaxed(iova, reg);
                                iova += granule;
                        } while (size -= granule);
                } else {
                        iova >>= 12;
                        iova |= (u64)cfg->asid << 48;
                        do {
                                writeq_relaxed(iova, reg);
                                iova += granule >> 12;
                        } while (size -= granule);
                }
        } else {
                reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
                              ARM_SMMU_CB_S2_TLBIIPAS2;
                iova >>= 12;
                do {
                        smmu_write_atomic_lq(iova, reg);
                        iova += granule >> 12;
                } while (size -= granule);
        }
}

/*
 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
 * almost negligible, but the benefit of getting the first one in as far ahead
 * of the sync as possible is significant, hence we don't just make this a
 * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
 */
static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
                                         size_t granule, bool leaf, void *cookie)
{
        struct arm_smmu_domain *smmu_domain = cookie;
        void __iomem *base = ARM_SMMU_GR0(smmu_domain->smmu);

        writel_relaxed(smmu_domain->cfg.vmid, base + ARM_SMMU_GR0_TLBIVMID);
}

static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s1,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
        .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
        .tlb_sync       = arm_smmu_tlb_sync_context,
};

static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v1 = {
        .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
        .tlb_add_flush  = arm_smmu_tlb_inv_vmid_nosync,
        .tlb_sync       = arm_smmu_tlb_sync_vmid,
};

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
        u32 fsr, fsynr;
        unsigned long iova;
        struct iommu_domain *domain = dev;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *cb_base;

        cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
        fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

        if (!(fsr & FSR_FAULT))
                return IRQ_NONE;

        fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
        iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

        dev_err_ratelimited(smmu->dev,
        "Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
                            fsr, iova, fsynr, cfg->cbndx);

        writel(fsr, cb_base + ARM_SMMU_CB_FSR);
        return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
        u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
        struct arm_smmu_device *smmu = dev;
        void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

        gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
        gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
        gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
        gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

        if (!gfsr)
                return IRQ_NONE;

        dev_err_ratelimited(smmu->dev,
                "Unexpected global fault, this could be serious\n");
        dev_err_ratelimited(smmu->dev,
                "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
                gfsr, gfsynr0, gfsynr1, gfsynr2);

        writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
        return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
                                       struct io_pgtable_cfg *pgtbl_cfg)
{
        u32 reg, reg2;
        u64 reg64;
        bool stage1;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        void __iomem *cb_base, *gr1_base;

        gr1_base = ARM_SMMU_GR1(smmu);
        stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
        cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);

        if (smmu->version > ARM_SMMU_V1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
                        reg = CBA2R_RW64_64BIT;
                else
                        reg = CBA2R_RW64_32BIT;
                /* 16-bit VMIDs live in CBA2R */
                if (smmu->features & ARM_SMMU_FEAT_VMID16)
                        reg |= cfg->vmid << CBA2R_VMID_SHIFT;

                writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
        }

        /* CBAR */
        reg = cfg->cbar;
        if (smmu->version < ARM_SMMU_V2)
                reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

        /*
         * Use the weakest shareability/memory types, so they are
         * overridden by the ttbcr/pte.
         */
        if (stage1) {
                reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
                        (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
        } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
                /* 8-bit VMIDs live in CBAR */
                reg |= cfg->vmid << CBAR_VMID_SHIFT;
        }
        writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

        /*
         * TTBCR
         * We must write this before the TTBRs, since it determines the
         * access behaviour of some fields (in particular, ASID[15:8]).
         */
        if (stage1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
                        reg = pgtbl_cfg->arm_v7s_cfg.tcr;
                        reg2 = 0;
                } else {
                        reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
                        reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
                        reg2 |= TTBCR2_SEP_UPSTREAM;
                        if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
                                reg2 |= TTBCR2_AS;
                }
                if (smmu->version > ARM_SMMU_V1)
                        writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
        } else {
                reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
        }
        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

        /* TTBRs */
        if (stage1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
                        reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
                        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
                        reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
                        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
                        writel_relaxed(cfg->asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
                } else {
                        reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
                        reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
                        writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
                        reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
                        reg64 |= (u64)cfg->asid << TTBRn_ASID_SHIFT;
                        writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
                }
        } else {
                reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
                writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
        }

        /* MAIRs (stage-1 only) */
        if (stage1) {
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
                        reg = pgtbl_cfg->arm_v7s_cfg.prrr;
                        reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
                } else {
                        reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
                        reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
                }
                writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
                writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
        }

        /* SCTLR */
        reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
        if (stage1)
                reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
        reg |= SCTLR_E;
#endif
        writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                                        struct arm_smmu_device *smmu)
{
        int irq, start, ret = 0;
        unsigned long ias, oas;
        struct io_pgtable_ops *pgtbl_ops;
        struct io_pgtable_cfg pgtbl_cfg;
        enum io_pgtable_fmt fmt;
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        const struct iommu_gather_ops *tlb_ops;

        mutex_lock(&smmu_domain->init_mutex);
        if (smmu_domain->smmu)
                goto out_unlock;

        if (domain->type == IOMMU_DOMAIN_IDENTITY) {
                smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
                smmu_domain->smmu = smmu;
                goto out_unlock;
        }

        /*
         * Mapping the requested stage onto what we support is surprisingly
         * complicated, mainly because the spec allows S1+S2 SMMUs without
         * support for nested translation. That means we end up with the
         * following table:
         *
         * Requested        Supported        Actual
         *     S1               N              S1
         *     S1             S1+S2            S1
         *     S1               S2             S2
         *     S1               S1             S1
         *     N                N              N
         *     N              S1+S2            S2
         *     N                S2             S2
         *     N                S1             S1
         *
         * Note that you can't actually request stage-2 mappings.
         */
        if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
                smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
        if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
                smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

        /*
         * Choosing a suitable context format is even more fiddly. Until we
         * grow some way for the caller to express a preference, and/or move
         * the decision into the io-pgtable code where it arguably belongs,
         * just aim for the closest thing to the rest of the system, and hope
         * that the hardware isn't esoteric enough that we can't assume AArch64
         * support to be a superset of AArch32 support...
         */
        if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
                cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
        if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
            !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
            (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
            (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
                cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
        if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
            (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
                               ARM_SMMU_FEAT_FMT_AARCH64_16K |
                               ARM_SMMU_FEAT_FMT_AARCH64_4K)))
                cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

        if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
                ret = -EINVAL;
                goto out_unlock;
        }

        switch (smmu_domain->stage) {
        case ARM_SMMU_DOMAIN_S1:
                cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
                start = smmu->num_s2_context_banks;
                ias = smmu->va_size;
                oas = smmu->ipa_size;
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
                        fmt = ARM_64_LPAE_S1;
                } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
                        fmt = ARM_32_LPAE_S1;
                        ias = min(ias, 32UL);
                        oas = min(oas, 40UL);
                } else {
                        fmt = ARM_V7S;
                        ias = min(ias, 32UL);
                        oas = min(oas, 32UL);
                }
                tlb_ops = &arm_smmu_s1_tlb_ops;
                break;
        case ARM_SMMU_DOMAIN_NESTED:
                /*
                 * We will likely want to change this if/when KVM gets
                 * involved.
                 */
        case ARM_SMMU_DOMAIN_S2:
                cfg->cbar = CBAR_TYPE_S2_TRANS;
                start = 0;
                ias = smmu->ipa_size;
                oas = smmu->pa_size;
                if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
                        fmt = ARM_64_LPAE_S2;
                } else {
                        fmt = ARM_32_LPAE_S2;
                        ias = min(ias, 40UL);
                        oas = min(oas, 40UL);
                }
                if (smmu->version == ARM_SMMU_V2)
                        tlb_ops = &arm_smmu_s2_tlb_ops_v2;
                else
                        tlb_ops = &arm_smmu_s2_tlb_ops_v1;
                break;
        default:
                ret = -EINVAL;
                goto out_unlock;
        }
        ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
                                      smmu->num_context_banks);
        if (ret < 0)
                goto out_unlock;

        cfg->cbndx = ret;
        if (smmu->version < ARM_SMMU_V2) {
                cfg->irptndx = atomic_inc_return(&smmu->irptndx);
                cfg->irptndx %= smmu->num_context_irqs;
        } else {
                cfg->irptndx = cfg->cbndx;
        }

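        /*
         * On Cavium ThunderX (erratum 27704) the ASID and VMID namespaces
         * are shared by both SMMU instances, so cavium_id_base offsets the
         * values to keep this device's contexts distinct.
         */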
        if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
                cfg->vmid = cfg->cbndx + 1 + smmu->cavium_id_base;
        else
                cfg->asid = cfg->cbndx + smmu->cavium_id_base;

        pgtbl_cfg = (struct io_pgtable_cfg) {
                .pgsize_bitmap  = smmu->pgsize_bitmap,
                .ias            = ias,
                .oas            = oas,
                .tlb            = tlb_ops,
                .iommu_dev      = smmu->dev,
        };

        if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
                pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA;

        smmu_domain->smmu = smmu;
        pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
        if (!pgtbl_ops) {
                ret = -ENOMEM;
                goto out_clear_smmu;
        }

        /* Update the domain's page sizes to reflect the page table format */
        domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
        domain->geometry.aperture_end = (1UL << ias) - 1;
        domain->geometry.force_aperture = true;

        /* Initialise the context bank with our page table cfg */
        arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

        /*
         * Request context fault interrupt. Do this last to avoid the
         * handler seeing a half-initialised domain state.
         */
        irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
        ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
                               IRQF_SHARED, "arm-smmu-context-fault", domain);
        if (ret < 0) {
                dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
                        cfg->irptndx, irq);
                cfg->irptndx = INVALID_IRPTNDX;
        }

        mutex_unlock(&smmu_domain->init_mutex);

        /* Publish page table ops for map/unmap */
        smmu_domain->pgtbl_ops = pgtbl_ops;
        return 0;

out_clear_smmu:
        smmu_domain->smmu = NULL;
out_unlock:
        mutex_unlock(&smmu_domain->init_mutex);
        return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
        void __iomem *cb_base;
        int irq;

        if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
                return;

        /*
         * Disable the context bank and free the page tables before freeing
         * it.
         */
        cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
        writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

        if (cfg->irptndx != INVALID_IRPTNDX) {
                irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
                devm_free_irq(smmu->dev, irq, domain);
        }

        free_io_pgtable_ops(smmu_domain->pgtbl_ops);
        __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
        struct arm_smmu_domain *smmu_domain;

        if (type != IOMMU_DOMAIN_UNMANAGED &&
            type != IOMMU_DOMAIN_DMA &&
            type != IOMMU_DOMAIN_IDENTITY)
                return NULL;
        /*
         * Allocate the domain and initialise some of its data structures.
         * We can't really do anything meaningful until we've added a
         * master.
         */
        smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
        if (!smmu_domain)
                return NULL;

        if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
            iommu_get_dma_cookie(&smmu_domain->domain))) {
                kfree(smmu_domain);
                return NULL;
        }

        mutex_init(&smmu_domain->init_mutex);
        spin_lock_init(&smmu_domain->cb_lock);

        return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
        struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

        /*
         * Free the domain resources. We assume that all devices have
         * already been detached.
         */
        iommu_put_dma_cookie(domain);
        arm_smmu_destroy_domain_context(domain);
        kfree(smmu_domain);
}

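/*
 * With EXIDS enabled, SMR.VALID is repurposed as an extra ID bit and
 * stream-match validity moves to S2CR.EXIDVALID instead, hence the
 * feature checks when writing these registers.
 */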
1142 static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
1143 {
1144         struct arm_smmu_smr *smr = smmu->smrs + idx;
1145         u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;
1146
1147         if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
1148                 reg |= SMR_VALID;
1149         writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
1150 }
1151
1152 static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
1153 {
1154         struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
1155         u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
1156                   (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
1157                   (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;
1158
1159         if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
1160             smmu->smrs[idx].valid)
1161                 reg |= S2CR_EXIDVALID;
1162         writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
1163 }
1164
1165 static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
1166 {
1167         arm_smmu_write_s2cr(smmu, idx);
1168         if (smmu->smrs)
1169                 arm_smmu_write_smr(smmu, idx);
1170 }
1171
1172 /*
1173  * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
1174  * should be called after sCR0 is written.
1175  */
1176 static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
1177 {
1178         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1179         u32 smr;
1180
1181         if (!smmu->smrs)
1182                 return;
1183
1184         /*
1185          * SMR.ID bits may not be preserved if the corresponding MASK
1186          * bits are set, so check each one separately. We can reject
1187          * masters later if they try to claim IDs outside these masks.
1188          */
1189         smr = smmu->streamid_mask << SMR_ID_SHIFT;
1190         writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1191         smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1192         smmu->streamid_mask = smr >> SMR_ID_SHIFT;
1193
1194         smr = smmu->streamid_mask << SMR_MASK_SHIFT;
1195         writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1196         smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1197         smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
1198 }
1199
1200 static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
1201 {
1202         struct arm_smmu_smr *smrs = smmu->smrs;
1203         int i, free_idx = -ENOSPC;
1204
1205         /* Stream indexing is blissfully easy */
1206         if (!smrs)
1207                 return id;
1208
1209         /* Validating SMRs is... less so */
1210         for (i = 0; i < smmu->num_mapping_groups; ++i) {
1211                 if (!smrs[i].valid) {
1212                         /*
1213                          * Note the first free entry we come across, which
1214                          * we'll claim in the end if nothing else matches.
1215                          */
1216                         if (free_idx < 0)
1217                                 free_idx = i;
1218                         continue;
1219                 }
1220                 /*
1221                  * If the new entry is _entirely_ matched by an existing entry,
1222                  * then reuse that, with the guarantee that there also cannot
1223                  * be any subsequent conflicting entries. In normal use we'd
1224                  * expect simply identical entries for this case, but there's
1225                  * no harm in accommodating the generalisation.
1226                  */
1227                 if ((mask & smrs[i].mask) == mask &&
1228                     !((id ^ smrs[i].id) & ~smrs[i].mask))
1229                         return i;
1230                 /*
1231                  * If the new entry has any other overlap with an existing one,
1232                  * though, then there always exists at least one stream ID
1233                  * which would cause a conflict, and we can't allow that risk.
1234                  */
1235                 if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
1236                         return -EINVAL;
1237         }
1238
1239         return free_idx;
1240 }
1241
1242 static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
1243 {
1244         if (--smmu->s2crs[idx].count)
1245                 return false;
1246
1247         smmu->s2crs[idx] = s2cr_init_val;
1248         if (smmu->smrs)
1249                 smmu->smrs[idx].valid = false;
1250
1251         return true;
1252 }
1253
1254 static int arm_smmu_master_alloc_smes(struct device *dev)
1255 {
1256         struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1257         struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
1258         struct arm_smmu_device *smmu = cfg->smmu;
1259         struct arm_smmu_smr *smrs = smmu->smrs;
1260         struct iommu_group *group;
1261         int i, idx, ret;
1262
1263         mutex_lock(&smmu->stream_map_mutex);
1264         /* Figure out a viable stream map entry allocation */
1265         for_each_cfg_sme(fwspec, i, idx) {
1266                 u16 sid = fwspec->ids[i];
1267                 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1268
1269                 if (idx != INVALID_SMENDX) {
1270                         ret = -EEXIST;
1271                         goto out_err;
1272                 }
1273
1274                 ret = arm_smmu_find_sme(smmu, sid, mask);
1275                 if (ret < 0)
1276                         goto out_err;
1277
1278                 idx = ret;
1279                 if (smrs && smmu->s2crs[idx].count == 0) {
1280                         smrs[idx].id = sid;
1281                         smrs[idx].mask = mask;
1282                         smrs[idx].valid = true;
1283                 }
1284                 smmu->s2crs[idx].count++;
1285                 cfg->smendx[i] = (s16)idx;
1286         }
1287
1288         group = iommu_group_get_for_dev(dev);
1289         if (!group)
1290                 group = ERR_PTR(-ENOMEM);
1291         if (IS_ERR(group)) {
1292                 ret = PTR_ERR(group);
1293                 goto out_err;
1294         }
1295         iommu_group_put(group);
1296
1297         /* It worked! Now, poke the actual hardware */
1298         for_each_cfg_sme(fwspec, i, idx) {
1299                 arm_smmu_write_sme(smmu, idx);
1300                 smmu->s2crs[idx].group = group;
1301         }
1302
1303         mutex_unlock(&smmu->stream_map_mutex);
1304         return 0;
1305
1306 out_err:
1307         while (i--) {
1308                 arm_smmu_free_sme(smmu, cfg->smendx[i]);
1309                 cfg->smendx[i] = INVALID_SMENDX;
1310         }
1311         mutex_unlock(&smmu->stream_map_mutex);
1312         return ret;
1313 }
1314
1315 static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
1316 {
1317         struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1318         struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
1319         int i, idx;
1320
1321         mutex_lock(&smmu->stream_map_mutex);
1322         for_each_cfg_sme(fwspec, i, idx) {
1323                 if (arm_smmu_free_sme(smmu, idx))
1324                         arm_smmu_write_sme(smmu, idx);
1325                 cfg->smendx[i] = INVALID_SMENDX;
1326         }
1327         mutex_unlock(&smmu->stream_map_mutex);
1328 }
1329
1330 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1331                                       struct iommu_fwspec *fwspec)
1332 {
1333         struct arm_smmu_device *smmu = smmu_domain->smmu;
1334         struct arm_smmu_s2cr *s2cr = smmu->s2crs;
1335         u8 cbndx = smmu_domain->cfg.cbndx;
1336         enum arm_smmu_s2cr_type type;
1337         int i, idx;
1338
1339         if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS)
1340                 type = S2CR_TYPE_BYPASS;
1341         else
1342                 type = S2CR_TYPE_TRANS;
1343
1344         for_each_cfg_sme(fwspec, i, idx) {
1345                 if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
1346                         continue;
1347
1348                 s2cr[idx].type = type;
1349                 s2cr[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
1350                 s2cr[idx].cbndx = cbndx;
1351                 arm_smmu_write_s2cr(smmu, idx);
1352         }
1353         return 0;
1354 }
1355
1356 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1357 {
1358         int ret;
1359         struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1360         struct arm_smmu_device *smmu;
1361         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1362
1363         if (!fwspec || fwspec->ops != &arm_smmu_ops) {
1364                 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1365                 return -ENXIO;
1366         }
1367
1368         /*
1369          * FIXME: The arch/arm DMA API code tries to attach devices to its own
1370          * domains between of_xlate() and add_device() - we have no way to cope
1371          * with that, so until ARM gets converted to rely on groups and default
1372          * domains, just say no (but more politely than by dereferencing NULL).
1373          * This should be at least a WARN_ON once that's sorted.
1374          */
1375         if (!fwspec->iommu_priv)
1376                 return -ENODEV;
1377
1378         smmu = fwspec_smmu(fwspec);
1379         /* Ensure that the domain is finalised */
1380         ret = arm_smmu_init_domain_context(domain, smmu);
1381         if (ret < 0)
1382                 return ret;
1383
1384         /*
1385          * Sanity check the domain. We don't support domains across
1386          * different SMMUs.
1387          */
1388         if (smmu_domain->smmu != smmu) {
1389                 dev_err(dev,
1390                         "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
1391                         dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1392                 return -EINVAL;
1393         }
1394
1395         /* Looks ok, so add the device to the domain */
1396         return arm_smmu_domain_add_master(smmu_domain, fwspec);
1397 }
1398
1399 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1400                         phys_addr_t paddr, size_t size, int prot)
1401 {
1402         struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1403
1404         if (!ops)
1405                 return -ENODEV;
1406
1407         return ops->map(ops, iova, paddr, size, prot);
1408 }
1409
1410 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1411                              size_t size)
1412 {
1413         struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops;
1414
1415         if (!ops)
1416                 return 0;
1417
1418         return ops->unmap(ops, iova, size);
1419 }
1420
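/*
 * Hardware-assisted translation probe: write the page-aligned IOVA to
 * ATS1PR, poll ATSR until the walk completes, then read the resulting
 * PA from PAR. If the hardware walk times out, fall back to walking the
 * io-pgtable in software.
 */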
1421 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1422                                               dma_addr_t iova)
1423 {
1424         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1425         struct arm_smmu_device *smmu = smmu_domain->smmu;
1426         struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1427         struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1428         struct device *dev = smmu->dev;
1429         void __iomem *cb_base;
1430         u32 tmp;
1431         u64 phys;
1432         unsigned long va, flags;
1433
1434         cb_base = ARM_SMMU_CB(smmu, cfg->cbndx);
1435
1436         spin_lock_irqsave(&smmu_domain->cb_lock, flags);
1437         /* ATS1 registers can only be written atomically */
1438         va = iova & ~0xfffUL;
1439         if (smmu->version == ARM_SMMU_V2)
1440                 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1441         else /* Register is only 32-bit in v1 */
1442                 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
1443
1444         if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1445                                       !(tmp & ATSR_ACTIVE), 5, 50)) {
1446                 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1447                 dev_err(dev,
1448                         "iova to phys timed out on %pad. Falling back to software table walk.\n",
1449                         &iova);
1450                 return ops->iova_to_phys(ops, iova);
1451         }
1452
1453         phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
1454         spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
1455         if (phys & CB_PAR_F) {
1456                 dev_err(dev, "translation fault!\n");
1457                 dev_err(dev, "PAR = 0x%llx\n", phys);
1458                 return 0;
1459         }
1460
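        /* On success, PAR holds PA[39:12]; splice the page offset back in */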
1461         return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1462 }
1463
1464 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1465                                         dma_addr_t iova)
1466 {
1467         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1468         struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1469
1470         if (domain->type == IOMMU_DOMAIN_IDENTITY)
1471                 return iova;
1472
1473         if (!ops)
1474                 return 0;
1475
1476         if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1477                         smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
1478                 return arm_smmu_iova_to_phys_hard(domain, iova);
1479
1480         return ops->iova_to_phys(ops, iova);
1481 }
1482
1483 static bool arm_smmu_capable(enum iommu_cap cap)
1484 {
1485         switch (cap) {
1486         case IOMMU_CAP_CACHE_COHERENCY:
1487                 /*
1488                  * Return true here as the SMMU can always send out coherent
1489                  * requests.
1490                  */
1491                 return true;
1492         case IOMMU_CAP_NOEXEC:
1493                 return true;
1494         default:
1495                 return false;
1496         }
1497 }
1498
1499 static int arm_smmu_match_node(struct device *dev, void *data)
1500 {
1501         return dev->fwnode == data;
1502 }
1503
1504 static
1505 struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
1506 {
1507         struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
1508                                                 fwnode, arm_smmu_match_node);
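        /* driver_find_device() takes a device reference which we don't need */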
1509         put_device(dev);
1510         return dev ? dev_get_drvdata(dev) : NULL;
1511 }
1512
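/*
 * Each fwspec ID packs a 16-bit stream ID in its low half and an optional
 * SMR mask above SMR_MASK_SHIFT, so both halves are validated against the
 * limits probed from the hardware ID registers.
 */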
1513 static int arm_smmu_add_device(struct device *dev)
1514 {
1515         struct arm_smmu_device *smmu;
1516         struct arm_smmu_master_cfg *cfg;
1517         struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1518         int i, ret;
1519
1520         if (using_legacy_binding) {
1521                 ret = arm_smmu_register_legacy_master(dev, &smmu);
1522
1523                 /*
1524                  * If dev->iommu_fwspec is initially NULL, then
1525                  * arm_smmu_register_legacy_master() will allocate/initialise
1526                  * a new one. Thus we need to update fwspec for later use.
1527                  */
1528                 fwspec = dev->iommu_fwspec;
1529                 if (ret)
1530                         goto out_free;
1531         } else if (fwspec && fwspec->ops == &arm_smmu_ops) {
1532                 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
1533         } else {
1534                 return -ENODEV;
1535         }
1536
1537         ret = -EINVAL;
1538         for (i = 0; i < fwspec->num_ids; i++) {
1539                 u16 sid = fwspec->ids[i];
1540                 u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;
1541
1542                 if (sid & ~smmu->streamid_mask) {
1543                         dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
1544                                 sid, smmu->streamid_mask);
1545                         goto out_free;
1546                 }
1547                 if (mask & ~smmu->smr_mask_mask) {
1548                         dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
1549                                 mask, smmu->smr_mask_mask);
1550                         goto out_free;
1551                 }
1552         }
1553
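        /* Size cfg to hold one SME index per fwspec ID (i == num_ids here) */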
1554         ret = -ENOMEM;
1555         cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
1556                       GFP_KERNEL);
1557         if (!cfg)
1558                 goto out_free;
1559
1560         cfg->smmu = smmu;
1561         fwspec->iommu_priv = cfg;
1562         while (i--)
1563                 cfg->smendx[i] = INVALID_SMENDX;
1564
1565         ret = arm_smmu_master_alloc_smes(dev);
1566         if (ret)
1567                 goto out_cfg_free;
1568
1569         iommu_device_link(&smmu->iommu, dev);
1570
1571         return 0;
1572
1573 out_cfg_free:
1574         kfree(cfg);
1575 out_free:
1576         iommu_fwspec_free(dev);
1577         return ret;
1578 }
1579
1580 static void arm_smmu_remove_device(struct device *dev)
1581 {
1582         struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1583         struct arm_smmu_master_cfg *cfg;
1584         struct arm_smmu_device *smmu;
1585
1587         if (!fwspec || fwspec->ops != &arm_smmu_ops)
1588                 return;
1589
1590         cfg  = fwspec->iommu_priv;
1591         smmu = cfg->smmu;
1592
1593         iommu_device_unlink(&smmu->iommu, dev);
1594         arm_smmu_master_free_smes(fwspec);
1595         iommu_group_remove_device(dev);
1596         kfree(fwspec->iommu_priv);
1597         iommu_fwspec_free(dev);
1598 }
1599
1600 static struct iommu_group *arm_smmu_device_group(struct device *dev)
1601 {
1602         struct iommu_fwspec *fwspec = dev->iommu_fwspec;
1603         struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
1604         struct iommu_group *group = NULL;
1605         int i, idx;
1606
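        /*
         * Reuse any group already associated with this master's stream map
         * entries; entries claimed by two different groups would mean an
         * unresolvable aliasing problem, hence -EINVAL.
         */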
1607         for_each_cfg_sme(fwspec, i, idx) {
1608                 if (group && smmu->s2crs[idx].group &&
1609                     group != smmu->s2crs[idx].group)
1610                         return ERR_PTR(-EINVAL);
1611
1612                 group = smmu->s2crs[idx].group;
1613         }
1614
1615         if (group)
1616                 return iommu_group_ref_get(group);
1617
1618         if (dev_is_pci(dev))
1619                 group = pci_device_group(dev);
1620         else
1621                 group = generic_device_group(dev);
1622
1623         return group;
1624 }
1625
1626 static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1627                                     enum iommu_attr attr, void *data)
1628 {
1629         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1630
1631         if (domain->type != IOMMU_DOMAIN_UNMANAGED)
1632                 return -EINVAL;
1633
1634         switch (attr) {
1635         case DOMAIN_ATTR_NESTING:
1636                 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1637                 return 0;
1638         default:
1639                 return -ENODEV;
1640         }
1641 }
1642
1643 static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1644                                     enum iommu_attr attr, void *data)
1645 {
1646         int ret = 0;
1647         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1648
1649         if (domain->type != IOMMU_DOMAIN_UNMANAGED)
1650                 return -EINVAL;
1651
1652         mutex_lock(&smmu_domain->init_mutex);
1653
1654         switch (attr) {
1655         case DOMAIN_ATTR_NESTING:
1656                 if (smmu_domain->smmu) {
1657                         ret = -EPERM;
1658                         goto out_unlock;
1659                 }
1660
1661                 if (*(int *)data)
1662                         smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1663                 else
1664                         smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1665
1666                 break;
1667         default:
1668                 ret = -ENODEV;
1669         }
1670
1671 out_unlock:
1672         mutex_unlock(&smmu_domain->init_mutex);
1673         return ret;
1674 }
1675
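/*
 * As a hypothetical example, a two-cell specifier such as
 *
 *      iommus = <&smmu 0x400 0x7f80>;
 *
 * packs stream ID 0x400 into the low 16 bits of the firmware ID and SMR
 * mask 0x7f80 above SMR_MASK_SHIFT; with a single cell, an optional
 * "stream-match-mask" property on the SMMU node supplies the mask instead.
 */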
1676 static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
1677 {
1678         u32 mask, fwid = 0;
1679
1680         if (args->args_count > 0)
1681                 fwid |= (u16)args->args[0];
1682
1683         if (args->args_count > 1)
1684                 fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;
1685         else if (!of_property_read_u32(args->np, "stream-match-mask", &mask))
1686                 fwid |= (u16)mask << SMR_MASK_SHIFT;
1687
1688         return iommu_fwspec_add_ids(dev, &fwid, 1);
1689 }
1690
1691 static void arm_smmu_get_resv_regions(struct device *dev,
1692                                       struct list_head *head)
1693 {
1694         struct iommu_resv_region *region;
1695         int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
1696
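        /*
         * Reserve a software-managed MSI window so the DMA layer has
         * somewhere to map MSI doorbells for devices behind this SMMU.
         */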
1697         region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
1698                                          prot, IOMMU_RESV_SW_MSI);
1699         if (!region)
1700                 return;
1701
1702         list_add_tail(&region->list, head);
1703
1704         iommu_dma_get_resv_regions(dev, head);
1705 }
1706
1707 static void arm_smmu_put_resv_regions(struct device *dev,
1708                                       struct list_head *head)
1709 {
1710         struct iommu_resv_region *entry, *next;
1711
1712         list_for_each_entry_safe(entry, next, head, list)
1713                 kfree(entry);
1714 }
1715
1716 static struct iommu_ops arm_smmu_ops = {
1717         .capable                = arm_smmu_capable,
1718         .domain_alloc           = arm_smmu_domain_alloc,
1719         .domain_free            = arm_smmu_domain_free,
1720         .attach_dev             = arm_smmu_attach_dev,
1721         .map                    = arm_smmu_map,
1722         .unmap                  = arm_smmu_unmap,
1723         .map_sg                 = default_iommu_map_sg,
1724         .iova_to_phys           = arm_smmu_iova_to_phys,
1725         .add_device             = arm_smmu_add_device,
1726         .remove_device          = arm_smmu_remove_device,
1727         .device_group           = arm_smmu_device_group,
1728         .domain_get_attr        = arm_smmu_domain_get_attr,
1729         .domain_set_attr        = arm_smmu_domain_set_attr,
1730         .of_xlate               = arm_smmu_of_xlate,
1731         .get_resv_regions       = arm_smmu_get_resv_regions,
1732         .put_resv_regions       = arm_smmu_put_resv_regions,
1733         .pgsize_bitmap          = -1UL, /* Restricted during device attach */
1734 };
1735
1736 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1737 {
1738         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1739         void __iomem *cb_base;
1740         int i;
1741         u32 reg, major;
1742
1743         /* clear global FSR */
1744         reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1745         writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1746
1747         /*
1748          * Reset stream mapping groups: Initial values mark all SMRn as
1749          * invalid and all S2CRn as bypass unless overridden.
1750          */
1751         for (i = 0; i < smmu->num_mapping_groups; ++i)
1752                 arm_smmu_write_sme(smmu, i);
1753
1754         if (smmu->model == ARM_MMU500) {
1755                 /*
1756                  * Before clearing ARM_MMU500_ACTLR_CPRE, we need to
1757                  * clear the CACHE_LOCK bit of ACR first, noting that
1758                  * CACHE_LOCK is only present in MMU-500r2 onwards.
1759                  */
1760                 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
1761                 major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
1762                 reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
1763                 if (major >= 2)
1764                         reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
1765                 /*
1766                  * Allow unmatched Stream IDs to allocate bypass
1767                  * TLB entries for reduced latency.
1768                  */
1769                 reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
1770                 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
1771         }
1772
1773         /* Make sure all context banks are disabled and clear CB_FSR */
1774         for (i = 0; i < smmu->num_context_banks; ++i) {
1775                 cb_base = ARM_SMMU_CB(smmu, i);
1776                 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1777                 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
1778                 /*
1779                  * Disable MMU-500's not-particularly-beneficial next-page
1780                  * prefetcher for the sake of errata #841119 and #826419.
1781                  */
1782                 if (smmu->model == ARM_MMU500) {
1783                         reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1784                         reg &= ~ARM_MMU500_ACTLR_CPRE;
1785                         writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1786                 }
1787         }
1788
1789         /* Invalidate the TLB, just in case */
1790         writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1791         writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1792
1793         reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
1794
1795         /* Enable fault reporting */
1796         reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
1797
1798         /* Disable TLB broadcasting. */
1799         reg |= (sCR0_VMIDPNE | sCR0_PTM);
1800
1801         /* Enable client access, handling unmatched streams as appropriate */
1802         reg &= ~sCR0_CLIENTPD;
1803         if (disable_bypass)
1804                 reg |= sCR0_USFCFG;
1805         else
1806                 reg &= ~sCR0_USFCFG;
1807
1808         /* Disable forced broadcasting */
1809         reg &= ~sCR0_FB;
1810
1811         /* Don't upgrade barriers */
1812         reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
1813
1814         if (smmu->features & ARM_SMMU_FEAT_VMID16)
1815                 reg |= sCR0_VMID16EN;
1816
1817         if (smmu->features & ARM_SMMU_FEAT_EXIDS)
1818                 reg |= sCR0_EXIDENABLE;
1819
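        /*
         * Ensure the TLB invalidations issued above have completed before
         * translation is (re-)enabled via sCR0.
         */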
1820         /* Push the button */
1821         arm_smmu_tlb_sync_global(smmu);
1822         writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
1823 }
1824
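/*
 * Translate the 3-bit address size fields of the ID registers (which
 * follow the usual ARMv8 PARange-style encoding) into a width in bits.
 */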
1825 static int arm_smmu_id_size_to_bits(int size)
1826 {
1827         switch (size) {
1828         case 0:
1829                 return 32;
1830         case 1:
1831                 return 36;
1832         case 2:
1833                 return 40;
1834         case 3:
1835                 return 42;
1836         case 4:
1837                 return 44;
1838         case 5:
1839         default:
1840                 return 48;
1841         }
1842 }
1843
1844 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1845 {
1846         unsigned long size;
1847         void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1848         u32 id;
1849         bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
1850         int i;
1851
1852         dev_notice(smmu->dev, "probing hardware configuration...\n");
1853         dev_notice(smmu->dev, "SMMUv%d with:\n",
1854                         smmu->version == ARM_SMMU_V2 ? 2 : 1);
1855
1856         /* ID0 */
1857         id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
1858
1859         /* Restrict available stages based on module parameter */
1860         if (force_stage == 1)
1861                 id &= ~(ID0_S2TS | ID0_NTS);
1862         else if (force_stage == 2)
1863                 id &= ~(ID0_S1TS | ID0_NTS);
1864
1865         if (id & ID0_S1TS) {
1866                 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1867                 dev_notice(smmu->dev, "\tstage 1 translation\n");
1868         }
1869
1870         if (id & ID0_S2TS) {
1871                 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1872                 dev_notice(smmu->dev, "\tstage 2 translation\n");
1873         }
1874
1875         if (id & ID0_NTS) {
1876                 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1877                 dev_notice(smmu->dev, "\tnested translation\n");
1878         }
1879
1880         if (!(smmu->features &
1881                 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
1882                 dev_err(smmu->dev, "\tno translation support!\n");
1883                 return -ENODEV;
1884         }
1885
1886         if ((id & ID0_S1TS) &&
1887                 ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
1888                 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1889                 dev_notice(smmu->dev, "\taddress translation ops\n");
1890         }
1891
1892         /*
1893          * In order for DMA API calls to work properly, we must defer to what
1894          * the FW says about coherency, regardless of what the hardware claims.
1895          * Fortunately, this also opens up a workaround for systems where the
1896          * ID register value has ended up configured incorrectly.
1897          */
1898         cttw_reg = !!(id & ID0_CTTW);
1899         if (cttw_fw || cttw_reg)
1900                 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1901                            cttw_fw ? "" : "non-");
1902         if (cttw_fw != cttw_reg)
1903                 dev_notice(smmu->dev,
1904                            "\t(IDR0.CTTW overridden by FW configuration)\n");
1905
1906         /* Max. number of entries we have for stream matching/indexing */
1907         if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
1908                 smmu->features |= ARM_SMMU_FEAT_EXIDS;
1909                 size = 1 << 16;
1910         } else {
1911                 size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
1912         }
1913         smmu->streamid_mask = size - 1;
1914         if (id & ID0_SMS) {
1915                 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1916                 size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
1917                 if (size == 0) {
1918                         dev_err(smmu->dev,
1919                                 "stream-matching supported, but no SMRs present!\n");
1920                         return -ENODEV;
1921                 }
1922
1923                 /* Zero-initialised to mark as invalid */
1924                 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
1925                                           GFP_KERNEL);
1926                 if (!smmu->smrs)
1927                         return -ENOMEM;
1928
1929                 dev_notice(smmu->dev,
1930                            "\tstream matching with %lu register groups", size);
1931         }
1932         /* s2cr->type == 0 means translation, so initialise explicitly */
1933         smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1934                                          GFP_KERNEL);
1935         if (!smmu->s2crs)
1936                 return -ENOMEM;
1937         for (i = 0; i < size; i++)
1938                 smmu->s2crs[i] = s2cr_init_val;
1939
1940         smmu->num_mapping_groups = size;
1941         mutex_init(&smmu->stream_map_mutex);
1942         spin_lock_init(&smmu->global_sync_lock);
1943
1944         if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1945                 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1946                 if (!(id & ID0_PTFS_NO_AARCH32S))
1947                         smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1948         }
1949
1950         /* ID1 */
1951         id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
1952         smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
1953
1954         /* Check for size mismatch of SMMU address space from mapped region */
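        /*
         * The global space and the context banks each span
         * 2^(NUMPAGENDXB + 1) pages; e.g. NUMPAGENDXB = 7 with 4K pages
         * gives two 1MB halves, i.e. a 2MB SMMU region in total.
         */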
1955         size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
1956         size <<= smmu->pgshift;
1957         if (smmu->cb_base != gr0_base + size)
1958                 dev_warn(smmu->dev,
1959                         "SMMU address space size (0x%lx) differs from mapped region size (0x%tx)!\n",
1960                         size * 2, (smmu->cb_base - gr0_base) * 2);
1961
1962         smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
1963         smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1964         if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1965                 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1966                 return -ENODEV;
1967         }
1968         dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1969                    smmu->num_context_banks, smmu->num_s2_context_banks);
1970         /*
1971          * Cavium CN88xx erratum #27704.
1972          * Ensure ASID and VMID allocation is unique across all SMMUs in
1973          * the system.
1974          */
1975         if (smmu->model == CAVIUM_SMMUV2) {
1976                 smmu->cavium_id_base =
1977                         atomic_add_return(smmu->num_context_banks,
1978                                           &cavium_smmu_context_count);
1979                 smmu->cavium_id_base -= smmu->num_context_banks;
1980                 dev_notice(smmu->dev, "\tenabling workaround for Cavium erratum 27704\n");
1981         }
1982
1983         /* ID2 */
1984         id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1985         size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
1986         smmu->ipa_size = size;
1987
1988         /* The output mask is also applied for bypass */
1989         size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
1990         smmu->pa_size = size;
1991
1992         if (id & ID2_VMID16)
1993                 smmu->features |= ARM_SMMU_FEAT_VMID16;
1994
1995         /*
1996          * What the page table walker can address actually depends on which
1997          * descriptor format is in use, but since a) we don't know that yet,
1998          * and b) it can vary per context bank, this will have to do...
1999          */
2000         if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
2001                 dev_warn(smmu->dev,
2002                          "failed to set DMA mask for table walker\n");
2003
2004         if (smmu->version < ARM_SMMU_V2) {
2005                 smmu->va_size = smmu->ipa_size;
2006                 if (smmu->version == ARM_SMMU_V1_64K)
2007                         smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
2008         } else {
2009                 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
2010                 smmu->va_size = arm_smmu_id_size_to_bits(size);
2011                 if (id & ID2_PTFS_4K)
2012                         smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
2013                 if (id & ID2_PTFS_16K)
2014                         smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
2015                 if (id & ID2_PTFS_64K)
2016                         smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
2017         }
2018
2019         /* Now we've corralled the various formats, what'll it do? */
2020         if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
2021                 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
2022         if (smmu->features &
2023             (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
2024                 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
2025         if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
2026                 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
2027         if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
2028                 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
2029
2030         if (arm_smmu_ops.pgsize_bitmap == -1UL)
2031                 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2032         else
2033                 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
2034         dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
2035                    smmu->pgsize_bitmap);
2036
2038         if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
2039                 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
2040                            smmu->va_size, smmu->ipa_size);
2041
2042         if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
2043                 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
2044                            smmu->ipa_size, smmu->pa_size);
2045
2046         return 0;
2047 }
2048
2049 struct arm_smmu_match_data {
2050         enum arm_smmu_arch_version version;
2051         enum arm_smmu_implementation model;
2052 };
2053
2054 #define ARM_SMMU_MATCH_DATA(name, ver, imp)     \
2055 static struct arm_smmu_match_data name = { .version = ver, .model = imp }
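/*
 * e.g. ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500) expands to:
 *
 *      static struct arm_smmu_match_data arm_mmu500 =
 *              { .version = ARM_SMMU_V2, .model = ARM_MMU500 };
 */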
2056
2057 ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
2058 ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
2059 ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
2060 ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
2061 ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
2062
2063 static const struct of_device_id arm_smmu_of_match[] = {
2064         { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
2065         { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
2066         { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
2067         { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
2068         { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
2069         { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
2070         { },
2071 };
2072 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
2073
2074 #ifdef CONFIG_ACPI
2075 static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
2076 {
2077         int ret = 0;
2078
2079         switch (model) {
2080         case ACPI_IORT_SMMU_V1:
2081         case ACPI_IORT_SMMU_CORELINK_MMU400:
2082                 smmu->version = ARM_SMMU_V1;
2083                 smmu->model = GENERIC_SMMU;
2084                 break;
2085         case ACPI_IORT_SMMU_CORELINK_MMU401:
2086                 smmu->version = ARM_SMMU_V1_64K;
2087                 smmu->model = GENERIC_SMMU;
2088                 break;
2089         case ACPI_IORT_SMMU_V2:
2090                 smmu->version = ARM_SMMU_V2;
2091                 smmu->model = GENERIC_SMMU;
2092                 break;
2093         case ACPI_IORT_SMMU_CORELINK_MMU500:
2094                 smmu->version = ARM_SMMU_V2;
2095                 smmu->model = ARM_MMU500;
2096                 break;
2097         case ACPI_IORT_SMMU_CAVIUM_THUNDERX:
2098                 smmu->version = ARM_SMMU_V2;
2099                 smmu->model = CAVIUM_SMMUV2;
2100                 break;
2101         default:
2102                 ret = -ENODEV;
2103         }
2104
2105         return ret;
2106 }
2107
2108 static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2109                                       struct arm_smmu_device *smmu)
2110 {
2111         struct device *dev = smmu->dev;
2112         struct acpi_iort_node *node =
2113                 *(struct acpi_iort_node **)dev_get_platdata(dev);
2114         struct acpi_iort_smmu *iort_smmu;
2115         int ret;
2116
2117         /* Retrieve SMMU1/2 specific data */
2118         iort_smmu = (struct acpi_iort_smmu *)node->node_data;
2119
2120         ret = acpi_smmu_get_data(iort_smmu->model, smmu);
2121         if (ret < 0)
2122                 return ret;
2123
2124         /* Ignore the configuration access interrupt */
2125         smmu->num_global_irqs = 1;
2126
2127         if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
2128                 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2129
2130         return 0;
2131 }
2132 #else
2133 static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2134                                              struct arm_smmu_device *smmu)
2135 {
2136         return -ENODEV;
2137 }
2138 #endif
2139
2140 static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2141                                     struct arm_smmu_device *smmu)
2142 {
2143         const struct arm_smmu_match_data *data;
2144         struct device *dev = &pdev->dev;
2145         bool legacy_binding;
2146
2147         if (of_property_read_u32(dev->of_node, "#global-interrupts",
2148                                  &smmu->num_global_irqs)) {
2149                 dev_err(dev, "missing #global-interrupts property\n");
2150                 return -ENODEV;
2151         }
2152
2153         data = of_device_get_match_data(dev);
2154         smmu->version = data->version;
2155         smmu->model = data->model;
2156
2157         parse_driver_options(smmu);
2158
2159         legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
2160         if (legacy_binding && !using_generic_binding) {
2161                 if (!using_legacy_binding)
2162                         pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
2163                 using_legacy_binding = true;
2164         } else if (!legacy_binding && !using_legacy_binding) {
2165                 using_generic_binding = true;
2166         } else {
2167                 dev_err(dev, "not probing due to mismatched DT properties\n");
2168                 return -ENODEV;
2169         }
2170
2171         if (of_dma_is_coherent(dev->of_node))
2172                 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
2173
2174         return 0;
2175 }
2176
2177 static void arm_smmu_bus_init(void)
2178 {
2179         /* Oh, for a proper bus abstraction */
2180         if (!iommu_present(&platform_bus_type))
2181                 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2182 #ifdef CONFIG_ARM_AMBA
2183         if (!iommu_present(&amba_bustype))
2184                 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2185 #endif
2186 #ifdef CONFIG_PCI
2187         if (!iommu_present(&pci_bus_type)) {
2188                 pci_request_acs();
2189                 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2190         }
2191 #endif
2192 }
2193
2194 static int arm_smmu_device_probe(struct platform_device *pdev)
2195 {
2196         struct resource *res;
2197         resource_size_t ioaddr;
2198         struct arm_smmu_device *smmu;
2199         struct device *dev = &pdev->dev;
2200         int num_irqs, i, err;
2201
2202         smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2203         if (!smmu) {
2204                 dev_err(dev, "failed to allocate arm_smmu_device\n");
2205                 return -ENOMEM;
2206         }
2207         smmu->dev = dev;
2208
2209         if (dev->of_node)
2210                 err = arm_smmu_device_dt_probe(pdev, smmu);
2211         else
2212                 err = arm_smmu_device_acpi_probe(pdev, smmu);
2213
2214         if (err)
2215                 return err;
2216
2217         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2218         smmu->base = devm_ioremap_resource(dev, res);
2219         if (IS_ERR(smmu->base))
2220                 return PTR_ERR(smmu->base);
2221         ioaddr = res->start;
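        /* Context banks occupy the upper half of the SMMU register space */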
2222         smmu->cb_base = smmu->base + resource_size(res) / 2;
2223
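        /*
         * IRQ resources are laid out as num_global_irqs global fault
         * interrupts followed by one interrupt per context bank.
         */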
2224         num_irqs = 0;
2225         while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
2226                 num_irqs++;
2227                 if (num_irqs > smmu->num_global_irqs)
2228                         smmu->num_context_irqs++;
2229         }
2230
2231         if (!smmu->num_context_irqs) {
2232                 dev_err(dev, "found %d interrupts but expected at least %d\n",
2233                         num_irqs, smmu->num_global_irqs + 1);
2234                 return -ENODEV;
2235         }
2236
2237         smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
2238                                   GFP_KERNEL);
2239         if (!smmu->irqs) {
2240                 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
2241                 return -ENOMEM;
2242         }
2243
2244         for (i = 0; i < num_irqs; ++i) {
2245                 int irq = platform_get_irq(pdev, i);
2246
2247                 if (irq < 0) {
2248                         dev_err(dev, "failed to get irq index %d\n", i);
2249                         return -ENODEV;
2250                 }
2251                 smmu->irqs[i] = irq;
2252         }
2253
2254         err = arm_smmu_device_cfg_probe(smmu);
2255         if (err)
2256                 return err;
2257
2258         if (smmu->version == ARM_SMMU_V2 &&
2259             smmu->num_context_banks != smmu->num_context_irqs) {
2260                 dev_err(dev,
2261                         "found only %d context interrupt(s) but %d required\n",
2262                         smmu->num_context_irqs, smmu->num_context_banks);
2263                 return -ENODEV;
2264         }
2265
2266         for (i = 0; i < smmu->num_global_irqs; ++i) {
2267                 err = devm_request_irq(smmu->dev, smmu->irqs[i],
2268                                        arm_smmu_global_fault,
2269                                        IRQF_SHARED,
2270                                        "arm-smmu global fault",
2271                                        smmu);
2272                 if (err) {
2273                         dev_err(dev, "failed to request global IRQ %d (%u)\n",
2274                                 i, smmu->irqs[i]);
2275                         return err;
2276                 }
2277         }
2278
2279         err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL,
2280                                      "smmu.%pa", &ioaddr);
2281         if (err) {
2282                 dev_err(dev, "Failed to register iommu in sysfs\n");
2283                 return err;
2284         }
2285
2286         iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
2287         iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);
2288
2289         err = iommu_device_register(&smmu->iommu);
2290         if (err) {
2291                 dev_err(dev, "Failed to register iommu\n");
2292                 return err;
2293         }
2294
2295         platform_set_drvdata(pdev, smmu);
2296         arm_smmu_device_reset(smmu);
2297         arm_smmu_test_smr_masks(smmu);
2298
2299         /*
2300          * For ACPI and generic DT bindings, an SMMU will be probed before
2301          * any device which might need it, so we want the bus ops in place
2302          * ready to handle default domain setup as soon as any SMMU exists.
2303          */
2304         if (!using_legacy_binding)
2305                 arm_smmu_bus_init();
2306
2307         return 0;
2308 }
2309
2310 /*
2311  * With the legacy DT binding in play, though, we have no guarantees about
2312  * probe order, but then we're also not doing default domains, so we can
2313  * delay setting bus ops until we're sure every possible SMMU is ready,
2314  * and that way ensure that no add_device() calls get missed.
2315  */
2316 static int arm_smmu_legacy_bus_init(void)
2317 {
2318         if (using_legacy_binding)
2319                 arm_smmu_bus_init();
2320         return 0;
2321 }
2322 device_initcall_sync(arm_smmu_legacy_bus_init);
2323
2324 static int arm_smmu_device_remove(struct platform_device *pdev)
2325 {
2326         struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
2327
2328         if (!smmu)
2329                 return -ENODEV;
2330
2331         if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
2332                 dev_err(&pdev->dev, "removing device with active domains!\n");
2333
2334         /* Turn the thing off */
2335         writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
2336         return 0;
2337 }
2338
2339 static struct platform_driver arm_smmu_driver = {
2340         .driver = {
2341                 .name           = "arm-smmu",
2342                 .of_match_table = of_match_ptr(arm_smmu_of_match),
2343         },
2344         .probe  = arm_smmu_device_probe,
2345         .remove = arm_smmu_device_remove,
2346 };
2347 module_platform_driver(arm_smmu_driver);
2348
2349 IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", NULL);
2350 IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", NULL);
2351 IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", NULL);
2352 IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", NULL);
2353 IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", NULL);
2354 IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", NULL);
2355
2356 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2357 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2358 MODULE_LICENSE("GPL v2");