drivers/iommu/mtk_iommu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 */
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR                    0x000

#define REG_MMU_INVALIDATE                      0x020
#define F_ALL_INVLD                             0x2
#define F_MMU_INV_RANGE                         0x1

#define REG_MMU_INVLD_START_A                   0x024
#define REG_MMU_INVLD_END_A                     0x028

#define REG_MMU_INV_SEL                         0x038
#define F_INVLD_EN0                             BIT(0)
#define F_INVLD_EN1                             BIT(1)

#define REG_MMU_STANDARD_AXI_MODE               0x048
#define REG_MMU_DCM_DIS                         0x050

#define REG_MMU_CTRL_REG                        0x110
#define F_MMU_PREFETCH_RT_REPLACE_MOD           BIT(4)
#define F_MMU_TF_PROTECT_SEL_SHIFT(data) \
        ((data)->m4u_plat == M4U_MT2712 ? 4 : 5)
/* This field is named F_MMU_TF_PROT_SEL on mt2712. */
#define F_MMU_TF_PROTECT_SEL(prot, data) \
        (((prot) & 0x3) << F_MMU_TF_PROTECT_SEL_SHIFT(data))

#define REG_MMU_IVRP_PADDR                      0x114

#define REG_MMU_VLD_PA_RNG                      0x118
#define F_MMU_VLD_PA_RNG(EA, SA)                (((EA) << 8) | (SA))

#define REG_MMU_INT_CONTROL0                    0x120
#define F_L2_MULIT_HIT_EN                       BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN               BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN          BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN             BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN              BIT(5)
#define F_MISS_FIFO_ERR_INT_EN                  BIT(6)
#define F_INT_CLR_BIT                           BIT(12)

#define REG_MMU_INT_MAIN_CONTROL                0x124
#define F_INT_TRANSLATION_FAULT                 BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT              BIT(1)
#define F_INT_INVALID_PA_FAULT                  BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT           BIT(3)
#define F_INT_TLB_MISS_FAULT                    BIT(4)
#define F_INT_MISS_TRANSACTION_FIFO_FAULT       BIT(5)
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT    BIT(6)

#define REG_MMU_CPE_DONE                        0x12C

#define REG_MMU_FAULT_ST1                       0x134

#define REG_MMU_FAULT_VA                        0x13c
#define F_MMU_FAULT_VA_WRITE_BIT                BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT                BIT(0)

#define REG_MMU_INVLD_PA                        0x140
#define REG_MMU_INT_ID                          0x150
#define F_MMU0_INT_ID_LARB_ID(a)                (((a) >> 7) & 0x7)
#define F_MMU0_INT_ID_PORT_ID(a)                (((a) >> 2) & 0x1f)

#define MTK_PROTECT_PA_ALIGN                    128

/*
 * Get the local arbiter (larb) ID and the port ID within that larb from
 * an mtk_m4u_id, as encoded by MTK_M4U_ID.
 */
#define MTK_M4U_TO_LARB(id)             (((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id)             ((id) & 0x1f)

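/*
 * A worked example of the encoding that the two macros above decode; the
 * matching MTK_M4U_ID() encode macro lives in the dt-bindings headers:
 *
 *	MTK_M4U_ID(2, 3)      = (2 << 5) | 3       = 0x43
 *	MTK_M4U_TO_LARB(0x43) = (0x43 >> 5) & 0xf  = 2
 *	MTK_M4U_TO_PORT(0x43) = 0x43 & 0x1f        = 3
 */
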
struct mtk_iommu_domain {
        spinlock_t                      pgtlock; /* lock for page table */

        struct io_pgtable_cfg           cfg;
        struct io_pgtable_ops           *iop;

        struct iommu_domain             domain;
};

static const struct iommu_ops mtk_iommu_ops;

static LIST_HEAD(m4ulist);      /* List all the M4U HWs */

#define for_each_m4u(data)      list_for_each_entry(data, &m4ulist, list)

/*
 * There may be one or two M4U HWs, but we always expect them to be in the
 * same domain for performance reasons.
 *
 * This always returns the mtk_iommu_data of the first probed M4U, which is
 * where the iommu domain information is recorded.
 */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
        struct mtk_iommu_data *data;

        for_each_m4u(data)
                return data;

        return NULL;
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct mtk_iommu_domain, domain);
}

static void mtk_iommu_tlb_flush_all(void *cookie)
{
        struct mtk_iommu_data *data = cookie;

        for_each_m4u(data) {
                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                               data->base + REG_MMU_INV_SEL);
                writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
                wmb(); /* Make sure the TLB flush is complete */
        }
}

static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
                                           size_t granule, bool leaf,
                                           void *cookie)
{
        struct mtk_iommu_data *data = cookie;

        for_each_m4u(data) {
                writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
                               data->base + REG_MMU_INV_SEL);

                writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
                writel_relaxed(iova + size - 1,
                               data->base + REG_MMU_INVLD_END_A);
                writel_relaxed(F_MMU_INV_RANGE,
                               data->base + REG_MMU_INVALIDATE);
                data->tlb_flush_active = true;
        }
}

static void mtk_iommu_tlb_sync(void *cookie)
{
        struct mtk_iommu_data *data = cookie;
        int ret;
        u32 tmp;

        for_each_m4u(data) {
                /* Avoid timing out if there's nothing to wait for */
                if (!data->tlb_flush_active)
                        return;

                ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
                                                tmp, tmp != 0, 10, 100000);
                if (ret) {
                        dev_warn(data->dev,
                                 "Partial TLB flush timed out, falling back to full flush\n");
                        mtk_iommu_tlb_flush_all(cookie);
                }
                /* Clear the CPE status */
                writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
                data->tlb_flush_active = false;
        }
}

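/*
 * The three helpers above implement the io-pgtable TLB callbacks wired up
 * in mtk_iommu_gather_ops below. A rough sketch of how the io-pgtable core
 * is expected to drive them (order only; the core may batch several range
 * invalidations before one sync):
 *
 *	mtk_iommu_tlb_add_flush_nosync(iova, size, ...);   // post range inv.
 *	mtk_iommu_tlb_add_flush_nosync(iova2, size2, ...); // post another one
 *	mtk_iommu_tlb_sync(cookie);  // poll REG_MMU_CPE_DONE for completion
 *
 * If the sync times out, mtk_iommu_tlb_sync() falls back to
 * mtk_iommu_tlb_flush_all().
 */
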
static const struct iommu_gather_ops mtk_iommu_gather_ops = {
        .tlb_flush_all = mtk_iommu_tlb_flush_all,
        .tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
        .tlb_sync = mtk_iommu_tlb_sync,
};

static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
        struct mtk_iommu_data *data = dev_id;
        struct mtk_iommu_domain *dom = data->m4u_dom;
        u32 int_state, regval, fault_iova, fault_pa;
        unsigned int fault_larb, fault_port;
        bool layer, write;

        /* Read error info from registers */
        int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
        fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
        layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
        write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
        fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
        regval = readl_relaxed(data->base + REG_MMU_INT_ID);
        fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
        fault_port = F_MMU0_INT_ID_PORT_ID(regval);

        if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
                               write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
                dev_err_ratelimited(
                        data->dev,
                        "fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
                        int_state, fault_iova, fault_pa, fault_larb, fault_port,
                        layer, write ? "write" : "read");
        }

        /* Clear the interrupt */
        regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
        regval |= F_INT_CLR_BIT;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

        mtk_iommu_tlb_flush_all(data);

        return IRQ_HANDLED;
}

static void mtk_iommu_config(struct mtk_iommu_data *data,
                             struct device *dev, bool enable)
{
        struct mtk_smi_larb_iommu    *larb_mmu;
        unsigned int                 larbid, portid;
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        int i;

        for (i = 0; i < fwspec->num_ids; ++i) {
                larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
                portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
                larb_mmu = &data->smi_imu.larb_imu[larbid];

                dev_dbg(dev, "%s iommu port: %d\n",
                        enable ? "enable" : "disable", portid);

                if (enable)
                        larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
                else
                        larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
        }
}

static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

        spin_lock_init(&dom->pgtlock);

        dom->cfg = (struct io_pgtable_cfg) {
                .quirks = IO_PGTABLE_QUIRK_ARM_NS |
                        IO_PGTABLE_QUIRK_NO_PERMS |
                        IO_PGTABLE_QUIRK_TLBI_ON_MAP,
                .pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
                .ias = 32,
                .oas = 32,
                .tlb = &mtk_iommu_gather_ops,
                .iommu_dev = data->dev,
        };

        if (data->enable_4GB)
                dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;

        dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
        if (!dom->iop) {
                dev_err(data->dev, "Failed to alloc io pgtable\n");
                return -EINVAL;
        }

        /* Update our supported page sizes bitmap */
        dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
        return 0;
}

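/*
 * Notes on the io_pgtable_cfg quirks chosen in mtk_iommu_domain_finalise()
 * above, paraphrased from the io-pgtable header's documentation:
 *
 *  - IO_PGTABLE_QUIRK_ARM_NS: set the non-secure bits in the pgtable
 *    descriptors.
 *  - IO_PGTABLE_QUIRK_NO_PERMS: the HW has no per-mapping permission
 *    controls, so everything is mapped with full access and the prot
 *    flags are ignored.
 *  - IO_PGTABLE_QUIRK_TLBI_ON_MAP: TLB maintenance is required on map as
 *    well as on unmap.
 */
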
static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
        struct mtk_iommu_domain *dom;

        if (type != IOMMU_DOMAIN_DMA)
                return NULL;

        dom = kzalloc(sizeof(*dom), GFP_KERNEL);
        if (!dom)
                return NULL;

        if (iommu_get_dma_cookie(&dom->domain))
                goto free_dom;

        if (mtk_iommu_domain_finalise(dom))
                goto put_dma_cookie;

        dom->domain.geometry.aperture_start = 0;
        dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
        dom->domain.geometry.force_aperture = true;

        return &dom->domain;

put_dma_cookie:
        iommu_put_dma_cookie(&dom->domain);
free_dom:
        kfree(dom);
        return NULL;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);

        free_io_pgtable_ops(dom->iop);
        iommu_put_dma_cookie(domain);
        kfree(dom);
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;

        if (!data)
                return -ENODEV;

        /* Update the pgtable base address register of the M4U HW */
        if (!data->m4u_dom) {
                data->m4u_dom = dom;
                writel(dom->cfg.arm_v7s_cfg.ttbr[0],
                       data->base + REG_MMU_PT_BASE_ADDR);
        }

        mtk_iommu_config(data, dev, true);
        return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
                                    struct device *dev)
{
        struct mtk_iommu_data *data = dev_iommu_fwspec_get(dev)->iommu_priv;

        if (!data)
                return;

        mtk_iommu_config(data, dev, false);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
                         phys_addr_t paddr, size_t size, int prot)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&dom->pgtlock, flags);
        ret = dom->iop->map(dom->iop, iova, paddr & DMA_BIT_MASK(32),
                            size, prot);
        spin_unlock_irqrestore(&dom->pgtlock, flags);

        return ret;
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
                              unsigned long iova, size_t size)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        unsigned long flags;
        size_t unmapsz;

        spin_lock_irqsave(&dom->pgtlock, flags);
        unmapsz = dom->iop->unmap(dom->iop, iova, size);
        spin_unlock_irqrestore(&dom->pgtlock, flags);

        return unmapsz;
}

static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
{
        mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
                                          dma_addr_t iova)
{
        struct mtk_iommu_domain *dom = to_mtk_domain(domain);
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
        unsigned long flags;
        phys_addr_t pa;

        spin_lock_irqsave(&dom->pgtlock, flags);
        pa = dom->iop->iova_to_phys(dom->iop, iova);
        spin_unlock_irqrestore(&dom->pgtlock, flags);

        if (data->enable_4GB)
                pa |= BIT_ULL(32);

        return pa;
}

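/*
 * A worked example for the enable_4GB fixup in mtk_iommu_iova_to_phys()
 * above, assuming the valid-PA-range comment in mtk_iommu_hw_init() holds:
 * in 4GB mode the DRAM lives at 0x1_0000_0000..0x1_ffff_ffff, but the v7s
 * page table only stores the low 32 bits of the PA, so a stored value of
 * e.g. 0x4000_0000 really means 0x1_4000_0000 and bit 32 must be OR'ed
 * back in before the address is returned to the caller.
 */
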
static int mtk_iommu_add_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct mtk_iommu_data *data;
        struct iommu_group *group;

        if (!fwspec || fwspec->ops != &mtk_iommu_ops)
                return -ENODEV; /* Not an IOMMU client device */

        data = fwspec->iommu_priv;
        iommu_device_link(&data->iommu, dev);

        group = iommu_group_get_for_dev(dev);
        if (IS_ERR(group))
                return PTR_ERR(group);

        iommu_group_put(group);
        return 0;
}

static void mtk_iommu_remove_device(struct device *dev)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct mtk_iommu_data *data;

        if (!fwspec || fwspec->ops != &mtk_iommu_ops)
                return;

        data = fwspec->iommu_priv;
        iommu_device_unlink(&data->iommu, dev);

        iommu_group_remove_device(dev);
        iommu_fwspec_free(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
        struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

        if (!data)
                return ERR_PTR(-ENODEV);

        /* All the client devices are put in the same M4U iommu group */
        if (!data->m4u_group) {
                data->m4u_group = iommu_group_alloc();
                if (IS_ERR(data->m4u_group))
                        dev_err(dev, "Failed to allocate M4U IOMMU group\n");
        } else {
                iommu_group_ref_get(data->m4u_group);
        }
        return data->m4u_group;
}

static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
        struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
        struct platform_device *m4updev;

        if (args->args_count != 1) {
                dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
                        args->args_count);
                return -EINVAL;
        }

        if (!fwspec->iommu_priv) {
                /* Get the m4u device */
                m4updev = of_find_device_by_node(args->np);
                if (WARN_ON(!m4updev))
                        return -EINVAL;

                fwspec->iommu_priv = platform_get_drvdata(m4updev);
        }

        return iommu_fwspec_add_ids(dev, args->args, 1);
}

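/*
 * An illustrative devicetree fragment for the of_xlate translation above
 * (node names and addresses are made up for the example; the real port
 * macros come from the dt-bindings larb-port headers):
 *
 *	iommu: m4u@10205000 {
 *		compatible = "mediatek,mt8173-m4u";
 *		#iommu-cells = <1>;
 *	};
 *
 *	display@14007000 {
 *		iommus = <&iommu M4U_PORT_DISP_OVL0>;
 *	};
 *
 * Each master references the M4U with exactly one cell, the
 * MTK_M4U_ID-encoded larb/port number checked by the args_count test above.
 */
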
static const struct iommu_ops mtk_iommu_ops = {
        .domain_alloc   = mtk_iommu_domain_alloc,
        .domain_free    = mtk_iommu_domain_free,
        .attach_dev     = mtk_iommu_attach_device,
        .detach_dev     = mtk_iommu_detach_device,
        .map            = mtk_iommu_map,
        .unmap          = mtk_iommu_unmap,
        .flush_iotlb_all = mtk_iommu_iotlb_sync,
        .iotlb_sync     = mtk_iommu_iotlb_sync,
        .iova_to_phys   = mtk_iommu_iova_to_phys,
        .add_device     = mtk_iommu_add_device,
        .remove_device  = mtk_iommu_remove_device,
        .device_group   = mtk_iommu_device_group,
        .of_xlate       = mtk_iommu_of_xlate,
        .pgsize_bitmap  = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

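/*
 * The pgsize_bitmap above corresponds to the four mapping sizes of the ARM
 * short-descriptor (v7s) format this driver uses: 4K small pages, 64K
 * large pages, 1M sections and 16M supersections.
 */
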
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
        u32 regval;
        int ret;

        ret = clk_prepare_enable(data->bclk);
        if (ret) {
                dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
                return ret;
        }

        regval = F_MMU_TF_PROTECT_SEL(2, data);
        if (data->m4u_plat == M4U_MT8173)
                regval |= F_MMU_PREFETCH_RT_REPLACE_MOD;
        writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

        regval = F_L2_MULIT_HIT_EN |
                F_TABLE_WALK_FAULT_INT_EN |
                F_PREETCH_FIFO_OVERFLOW_INT_EN |
                F_MISS_FIFO_OVERFLOW_INT_EN |
                F_PREFETCH_FIFO_ERR_INT_EN |
                F_MISS_FIFO_ERR_INT_EN;
        writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

        regval = F_INT_TRANSLATION_FAULT |
                F_INT_MAIN_MULTI_HIT_FAULT |
                F_INT_INVALID_PA_FAULT |
                F_INT_ENTRY_REPLACEMENT_FAULT |
                F_INT_TLB_MISS_FAULT |
                F_INT_MISS_TRANSACTION_FIFO_FAULT |
                F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
        writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

        if (data->m4u_plat == M4U_MT8173)
                regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
        else
                regval = lower_32_bits(data->protect_base) |
                         upper_32_bits(data->protect_base);
        writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

        if (data->enable_4GB && data->m4u_plat != M4U_MT8173) {
                /*
                 * If 4GB mode is enabled, the valid PA range is
                 * 0x1_0000_0000 to 0x1_ffff_ffff, and we record bits[32:30]
                 * here: the start address has bits[32:30] = 0b100 = 4 (SA)
                 * and the end address has bits[32:30] = 0b111 = 7 (EA).
                 */
                regval = F_MMU_VLD_PA_RNG(7, 4);
                writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
        }
        writel_relaxed(0, data->base + REG_MMU_DCM_DIS);

        /* The default of this MISC control register is fine except on mt8173. */
        if (data->m4u_plat == M4U_MT8173)
                writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);

        if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
                             dev_name(data->dev), (void *)data)) {
                writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
                clk_disable_unprepare(data->bclk);
                dev_err(data->dev, "Failed to request IRQ %d\n", data->irq);
                return -ENODEV;
        }

        return 0;
}

static const struct component_master_ops mtk_iommu_com_ops = {
        .bind           = mtk_iommu_bind,
        .unbind         = mtk_iommu_unbind,
};

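/*
 * mtk_iommu_bind()/mtk_iommu_unbind() are small helpers provided by the
 * shared mtk_iommu.h header. Through the component framework, probe below
 * registers one component match per SMI larb, and the master's ->bind()
 * only runs once every matched larb has probed, so the M4U does not start
 * serving masters while a larb is still missing.
 */
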
static int mtk_iommu_probe(struct platform_device *pdev)
{
        struct mtk_iommu_data   *data;
        struct device           *dev = &pdev->dev;
        struct resource         *res;
        resource_size_t         ioaddr;
        struct component_match  *match = NULL;
        void                    *protect;
        int                     i, larb_nr, ret;

        data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        data->dev = dev;
        data->m4u_plat = (enum mtk_iommu_plat)of_device_get_match_data(dev);

        /* Protect memory: the HW accesses this region on a translation fault. */
        protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
        if (!protect)
                return -ENOMEM;
        data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

        /* Check whether the current DRAM extends beyond 4GB */
        data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        data->base = devm_ioremap_resource(dev, res);
        if (IS_ERR(data->base))
                return PTR_ERR(data->base);
        ioaddr = res->start;

        data->irq = platform_get_irq(pdev, 0);
        if (data->irq < 0)
                return data->irq;

        data->bclk = devm_clk_get(dev, "bclk");
        if (IS_ERR(data->bclk))
                return PTR_ERR(data->bclk);

        larb_nr = of_count_phandle_with_args(dev->of_node,
                                             "mediatek,larbs", NULL);
        if (larb_nr < 0)
                return larb_nr;
        data->smi_imu.larb_nr = larb_nr;

        for (i = 0; i < larb_nr; i++) {
                struct device_node *larbnode;
                struct platform_device *plarbdev;
                u32 id;

                larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
                if (!larbnode)
                        return -EINVAL;

                if (!of_device_is_available(larbnode)) {
                        of_node_put(larbnode);
                        continue;
                }

                ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
                /* The larb IDs are consecutive if this property is absent */
                if (ret)
                        id = i;

                plarbdev = of_find_device_by_node(larbnode);
                if (!plarbdev) {
                        of_node_put(larbnode);
                        return -EPROBE_DEFER;
                }
                data->smi_imu.larb_imu[id].dev = &plarbdev->dev;

                component_match_add_release(dev, &match, release_of,
                                            compare_of, larbnode);
        }

        platform_set_drvdata(pdev, data);

        ret = mtk_iommu_hw_init(data);
        if (ret)
                return ret;

        ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
                                     "mtk-iommu.%pa", &ioaddr);
        if (ret)
                return ret;

        iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
        iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&data->iommu);
        if (ret)
                return ret;

        list_add_tail(&data->list, &m4ulist);

        if (!iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

        return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
        struct mtk_iommu_data *data = platform_get_drvdata(pdev);

        iommu_device_sysfs_remove(&data->iommu);
        iommu_device_unregister(&data->iommu);

        if (iommu_present(&platform_bus_type))
                bus_set_iommu(&platform_bus_type, NULL);

        clk_disable_unprepare(data->bclk);
        devm_free_irq(&pdev->dev, data->irq, data);
        component_master_del(&pdev->dev, &mtk_iommu_com_ops);
        return 0;
}

static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;

        reg->standard_axi_mode = readl_relaxed(base +
                                               REG_MMU_STANDARD_AXI_MODE);
        reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
        reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
        reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
        reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
        reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
        clk_disable_unprepare(data->bclk);
        return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
        struct mtk_iommu_data *data = dev_get_drvdata(dev);
        struct mtk_iommu_suspend_reg *reg = &data->reg;
        void __iomem *base = data->base;
        int ret;

        ret = clk_prepare_enable(data->bclk);
        if (ret) {
                dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
                return ret;
        }
        writel_relaxed(reg->standard_axi_mode,
                       base + REG_MMU_STANDARD_AXI_MODE);
        writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
        writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
        writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
        writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
        writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
        if (data->m4u_dom)
                writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
                       base + REG_MMU_PT_BASE_ADDR);
        return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
        SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static const struct of_device_id mtk_iommu_of_ids[] = {
        { .compatible = "mediatek,mt2712-m4u", .data = (void *)M4U_MT2712},
        { .compatible = "mediatek,mt8173-m4u", .data = (void *)M4U_MT8173},
        {}
};

static struct platform_driver mtk_iommu_driver = {
        .probe  = mtk_iommu_probe,
        .remove = mtk_iommu_remove,
        .driver = {
                .name = "mtk-iommu",
                .of_match_table = of_match_ptr(mtk_iommu_of_ids),
                .pm = &mtk_iommu_pm_ops,
        }
};

static int __init mtk_iommu_init(void)
{
        int ret;

        ret = platform_driver_register(&mtk_iommu_driver);
        if (ret != 0)
                pr_err("Failed to register MTK IOMMU driver\n");

        return ret;
}

subsys_initcall(mtk_iommu_init)