/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/bootmem.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <dt-bindings/memory/mt8173-larb-port.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"
#define REG_MMU_PT_BASE_ADDR			0x000

#define REG_MMU_INVALIDATE			0x020
#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1

#define REG_MMU_INVLD_START_A			0x024
#define REG_MMU_INVLD_END_A			0x028

#define REG_MMU_INV_SEL				0x038
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define REG_MMU_STANDARD_AXI_MODE		0x048
#define REG_MMU_DCM_DIS				0x050

#define REG_MMU_CTRL_REG			0x110
#define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
#define F_MMU_TF_PROTECT_SEL(prot)		(((prot) & 0x3) << 5)

#define REG_MMU_IVRP_PADDR			0x114
#define F_MMU_IVRP_PA_SET(pa, ext)		(((pa) >> 1) | ((!!(ext)) << 31))

#define REG_MMU_INT_CONTROL0			0x120
#define F_L2_MULIT_HIT_EN			BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
#define F_MISS_FIFO_ERR_INT_EN			BIT(6)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_INT_MAIN_CONTROL		0x124
#define F_INT_TRANSLATION_FAULT			BIT(0)
#define F_INT_MAIN_MULTI_HIT_FAULT		BIT(1)
#define F_INT_INVALID_PA_FAULT			BIT(2)
#define F_INT_ENTRY_REPLACEMENT_FAULT		BIT(3)
#define F_INT_TLB_MISS_FAULT			BIT(4)
#define F_INT_MISS_TRANSACTION_FIFO_FAULT	BIT(5)
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	BIT(6)

#define REG_MMU_CPE_DONE			0x12C

#define REG_MMU_FAULT_ST1			0x134

#define REG_MMU_FAULT_VA			0x13c
#define F_MMU_FAULT_VA_MSK			0xfffff000
#define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)

#define REG_MMU_INVLD_PA			0x140
#define REG_MMU_INT_ID				0x150
#define F_MMU0_INT_ID_LARB_ID(a)		(((a) >> 7) & 0x7)
#define F_MMU0_INT_ID_PORT_ID(a)		(((a) >> 2) & 0x1f)

#define MTK_PROTECT_PA_ALIGN			128
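
/*
 * The M4U keeps a single page table for all of its client devices, so the
 * driver maintains one mtk_iommu_domain (and one iommu_group) per M4U
 * instance; see mtk_iommu_attach_device() and mtk_iommu_device_group()
 * below.
 */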
struct mtk_iommu_domain {
	spinlock_t			pgtlock; /* lock for page table */

	struct io_pgtable_cfg		cfg;
	struct io_pgtable_ops		*iop;

	struct iommu_domain		domain;
};

static struct iommu_ops mtk_iommu_ops;
static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}
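
/*
 * TLB maintenance callbacks used as the io-pgtable gather ops:
 * mtk_iommu_tlb_flush_all() invalidates the whole TLB, while the
 * nosync/sync pair below programs a range invalidation and then polls
 * REG_MMU_CPE_DONE for completion, falling back to a full flush on
 * timeout.
 */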
static void mtk_iommu_tlb_flush_all(void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);
	writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
	wmb(); /* Make sure the tlb flush all done */
}
static void mtk_iommu_tlb_add_flush_nosync(unsigned long iova, size_t size,
					   size_t granule, bool leaf,
					   void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0, data->base + REG_MMU_INV_SEL);

	writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
	writel_relaxed(iova + size - 1, data->base + REG_MMU_INVLD_END_A);
	writel_relaxed(F_MMU_INV_RANGE, data->base + REG_MMU_INVALIDATE);
	data->tlb_flush_active = true;
}
static void mtk_iommu_tlb_sync(void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	int ret;
	u32 tmp;

	/* Avoid timing out if there's nothing to wait for */
	if (!data->tlb_flush_active)
		return;

	ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE, tmp,
					tmp != 0, 10, 100000);
	if (ret) {
		dev_warn(data->dev,
			 "Partial TLB flush timed out, falling back to full flush\n");
		mtk_iommu_tlb_flush_all(cookie);
	}
	/* Clear the CPE status */
	writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
	data->tlb_flush_active = false;
}
static const struct iommu_gather_ops mtk_iommu_gather_ops = {
	.tlb_flush_all = mtk_iommu_tlb_flush_all,
	.tlb_add_flush = mtk_iommu_tlb_add_flush_nosync,
	.tlb_sync = mtk_iommu_tlb_sync,
};
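
/*
 * Translation-fault interrupt handler: decode the faulting IOVA/PA and the
 * larb/port from the fault registers, hand the fault to any registered
 * handler via report_iommu_fault() (logging it if unhandled), then clear
 * the interrupt status and flush the whole TLB.
 */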
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port;
	bool layer, write;

	/* Read error info from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
	fault_iova = readl_relaxed(data->base + REG_MMU_FAULT_VA);
	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
	fault_iova &= F_MMU_FAULT_VA_MSK;
	fault_pa = readl_relaxed(data->base + REG_MMU_INVLD_PA);
	regval = readl_relaxed(data->base + REG_MMU_INT_ID);
	fault_larb = F_MMU0_INT_ID_LARB_ID(regval);
	fault_port = F_MMU0_INT_ID_PORT_ID(regval);

	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
			int_state, fault_iova, fault_pa, fault_larb, fault_port,
			layer, write ? "write" : "read");
	}

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}
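
/*
 * Translate each fwspec ID into its larb/port and set or clear the
 * corresponding MTK_SMI_MMU_EN() bit in that larb's mmu bitmap; the bitmap
 * is consumed by the MediaTek SMI larb driver when it configures the larb.
 */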
static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
		larb_mmu = &data->smi_imu.larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}
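
/*
 * Allocate the shared ARM v7s (short-descriptor) io-pgtable for this M4U,
 * applying the MTK 4GB quirk when DRAM extends above 4GB, and program the
 * page-table base into REG_MMU_PT_BASE_ADDR.
 */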
static int mtk_iommu_domain_finalise(struct mtk_iommu_data *data)
{
	struct mtk_iommu_domain *dom = data->m4u_dom;

	spin_lock_init(&dom->pgtlock);

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			IO_PGTABLE_QUIRK_NO_PERMS |
			IO_PGTABLE_QUIRK_TLBI_ON_MAP,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 32,
		.tlb = &mtk_iommu_gather_ops,
		.iommu_dev = data->dev,
	};

	if (data->enable_4GB)
		dom->cfg.quirks |= IO_PGTABLE_QUIRK_ARM_MTK_4GB;

	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}

	/* Update our support page sizes bitmap */
	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;

	writel(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
	       data->base + REG_MMU_PT_BASE_ADDR);
	return 0;
}
static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	if (iommu_get_dma_cookie(&dom->domain)) {
		kfree(dom);
		return NULL;
	}

	dom->domain.geometry.aperture_start = 0;
	dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	dom->domain.geometry.force_aperture = true;

	return &dom->domain;
}
static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	iommu_put_dma_cookie(domain);
	kfree(to_mtk_domain(domain));
}
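
/*
 * All client devices attach to the same shared M4U domain: the io-pgtable
 * is finalised on the first attach, and attaching to any other domain is
 * rejected.
 */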
static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;
	int ret;

	if (!data)
		return -ENODEV;

	/* Create the shared domain. This only happens on the first attach. */
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		ret = mtk_iommu_domain_finalise(data);
		if (ret) {
			data->m4u_dom = NULL;
			return ret;
		}
	} else if (data->m4u_dom != dom) {
		/* All the client devices should be in the same m4u domain */
		dev_err(dev, "try to attach into the error iommu domain\n");
		return -EPERM;
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}
static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;

	if (!data)
		return;

	mtk_iommu_config(data, dev, false);
}
static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dom->pgtlock, flags);
	ret = dom->iop->map(dom->iop, iova, paddr, size, prot);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return ret;
}
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	size_t unmapsz;

	spin_lock_irqsave(&dom->pgtlock, flags);
	unmapsz = dom->iop->unmap(dom->iop, iova, size);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return unmapsz;
}
static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	unsigned long flags;
	phys_addr_t pa;

	spin_lock_irqsave(&dom->pgtlock, flags);
	pa = dom->iop->iova_to_phys(dom->iop, iova);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return pa;
}
static int mtk_iommu_add_device(struct device *dev)
{
	struct mtk_iommu_data *data;
	struct iommu_group *group;

	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
		return -ENODEV; /* Not an iommu client device */

	data = dev->iommu_fwspec->iommu_priv;
	iommu_device_link(&data->iommu, dev);

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}
static void mtk_iommu_remove_device(struct device *dev)
{
	struct mtk_iommu_data *data;

	if (!dev->iommu_fwspec || dev->iommu_fwspec->ops != &mtk_iommu_ops)
		return;

	data = dev->iommu_fwspec->iommu_priv;
	iommu_device_unlink(&data->iommu, dev);

	iommu_group_remove_device(dev);
	iommu_fwspec_free(dev);
}
static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data = dev->iommu_fwspec->iommu_priv;

	if (!data)
		return ERR_PTR(-ENODEV);

	/* All the client devices are in the same m4u iommu-group */
	if (!data->m4u_group) {
		data->m4u_group = iommu_group_alloc();
		if (IS_ERR(data->m4u_group))
			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
	} else {
		iommu_group_ref_get(data->m4u_group);
	}
	return data->m4u_group;
}
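
/*
 * A device-tree sketch of how a client might reference this IOMMU,
 * assuming the mt8173 binding with #iommu-cells = <1> and a port ID taken
 * from dt-bindings/memory/mt8173-larb-port.h (the node and port names here
 * are illustrative only):
 *
 *	ovl0: ovl@1400c000 {
 *		...
 *		iommus = <&iommu M4U_PORT_DISP_OVL0>;
 *	};
 *
 * mtk_iommu_of_xlate() below consumes exactly one such cell per reference.
 */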
static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct platform_device *m4updev;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!dev->iommu_fwspec->iommu_priv) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		dev->iommu_fwspec->iommu_priv = platform_get_drvdata(m4updev);
	}

	return iommu_fwspec_add_ids(dev, args->args, 1);
}
static struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.add_device	= mtk_iommu_add_device,
	.remove_device	= mtk_iommu_remove_device,
	.device_group	= mtk_iommu_device_group,
	.of_xlate	= mtk_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};
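
/*
 * One-time hardware bring-up: enable the bus clock, program
 * REG_MMU_CTRL_REG (prefetch replacement and translation-fault protect
 * mode), unmask the fault interrupts, set the protect buffer address used
 * on translation faults, clear the DCM/standard-AXI-mode registers and
 * request the fault IRQ.
 */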
static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
		F_MMU_TF_PROTECT_SEL(2);
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_L2_MULIT_HIT_EN |
		F_TABLE_WALK_FAULT_INT_EN |
		F_PREETCH_FIFO_OVERFLOW_INT_EN |
		F_MISS_FIFO_OVERFLOW_INT_EN |
		F_PREFETCH_FIFO_ERR_INT_EN |
		F_MISS_FIFO_ERR_INT_EN;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_MISS_TRANSACTION_FIFO_FAULT |
		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
		       data->base + REG_MMU_IVRP_PADDR);

	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);
	writel_relaxed(0, data->base + REG_MMU_STANDARD_AXI_MODE);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}
static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};
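
/*
 * Probe flow: allocate the protect buffer the hardware writes to on
 * translation faults, map the M4U registers, grab the bus clock, build a
 * component match from the "mediatek,larbs" phandles, initialise the
 * hardware, register the IOMMU and finally hand the larb binding over to
 * the component framework.
 */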
static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data   *data;
	struct device           *dev = &pdev->dev;
	struct resource         *res;
	resource_size_t		ioaddr;
	struct component_match  *match = NULL;
	void                    *protect;
	int                     i, larb_nr, ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->dev = dev;

	/* Protect memory. HW will access here on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	/* Whether the current dram is over 4GB */
	data->enable_4GB = !!(max_pfn > (0xffffffffUL >> PAGE_SHIFT));

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);
	ioaddr = res->start;

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	data->bclk = devm_clk_get(dev, "bclk");
	if (IS_ERR(data->bclk))
		return PTR_ERR(data->bclk);

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;
	data->smi_imu.larb_nr = larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode))
			continue;

		plarbdev = of_find_device_by_node(larbnode);
		if (!plarbdev) {
			plarbdev = of_platform_device_create(
						larbnode, NULL,
						platform_bus_type.dev_root);
			if (!plarbdev) {
				of_node_put(larbnode);
				return -EPROBE_DEFER;
			}
		}
		data->smi_imu.larb_imu[i].dev = &plarbdev->dev;

		component_match_add_release(dev, &match, release_of,
					    compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
				     "mtk-iommu.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}
static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	free_io_pgtable_ops(data->m4u_dom->iop);
	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}
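
/*
 * System sleep support: save the MMU configuration registers on suspend
 * and restore them (plus the page-table base and protect address) on
 * resume, presumably because the register contents may be lost while the
 * system is suspended.
 */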
static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->standard_axi_mode = readl_relaxed(base +
					       REG_MMU_STANDARD_AXI_MODE);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
	return 0;
}
static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	writel_relaxed(data->m4u_dom->cfg.arm_v7s_cfg.ttbr[0],
		       base + REG_MMU_PT_BASE_ADDR);
	writel_relaxed(reg->standard_axi_mode,
		       base + REG_MMU_STANDARD_AXI_MODE);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
	writel_relaxed(F_MMU_IVRP_PA_SET(data->protect_base, data->enable_4GB),
		       base + REG_MMU_IVRP_PADDR);
	return 0;
}
static const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};
static const struct of_device_id mtk_iommu_of_ids[] = {
	{ .compatible = "mediatek,mt8173-m4u", },
	{}
};
static struct platform_driver mtk_iommu_driver = {
	.probe	= mtk_iommu_probe,
	.remove	= mtk_iommu_remove,
	.driver	= {
		.name = "mtk-iommu",
		.of_match_table = mtk_iommu_of_ids,
		.pm = &mtk_iommu_pm_ops,
	}
};
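
/*
 * Early init hook used by IOMMU_OF_DECLARE(): create the m4u platform
 * device for the matching DT node and register the driver, intended to let
 * the IOMMU come up before its client devices probe.
 */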
static int mtk_iommu_init_fn(struct device_node *np)
{
	int ret;
	struct platform_device *pdev;

	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
	if (!pdev)
		return -ENOMEM;

	ret = platform_driver_register(&mtk_iommu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		return ret;
	}

	return 0;
}

IOMMU_OF_DECLARE(mtkm4u, "mediatek,mt8173-m4u", mtk_iommu_init_fn);