// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Yong Wu <yong.wu@mediatek.com>
 */
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include <soc/mediatek/smi.h>

#include "mtk_iommu.h"

#define REG_MMU_PT_BASE_ADDR			0x000
#define MMU_PT_ADDR_MASK			GENMASK(31, 7)

#define REG_MMU_INVALIDATE			0x020
#define F_ALL_INVLD				0x2
#define F_MMU_INV_RANGE				0x1

#define REG_MMU_INVLD_START_A			0x024
#define REG_MMU_INVLD_END_A			0x028

#define REG_MMU_INV_SEL_GEN1			0x038
#define F_INVLD_EN0				BIT(0)
#define F_INVLD_EN1				BIT(1)

#define REG_MMU_MISC_CTRL			0x048
#define F_MMU_IN_ORDER_WR_EN_MASK		(BIT(1) | BIT(17))
#define F_MMU_STANDARD_AXI_MODE_MASK		(BIT(3) | BIT(19))

#define REG_MMU_DCM_DIS				0x050

#define REG_MMU_CTRL_REG			0x110
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR		(2 << 4)
#define F_MMU_PREFETCH_RT_REPLACE_MOD		BIT(4)
#define F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173	(2 << 5)

#define REG_MMU_IVRP_PADDR			0x114

#define REG_MMU_VLD_PA_RNG			0x118
#define F_MMU_VLD_PA_RNG(EA, SA)		(((EA) << 8) | (SA))
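/*
 * Editor's note (illustration, not from the original source): for the
 * 4GB-mode range 0x1_0000_0000..0x1_ffff_ffff programmed in
 * mtk_iommu_hw_init() below, bits[32:30] of the start PA are 0b100 (4)
 * and of the end PA 0b111 (7), so F_MMU_VLD_PA_RNG(7, 4) evaluates to
 * (7 << 8) | 4 = 0x704.
 */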

#define REG_MMU_INT_CONTROL0			0x120
#define F_L2_MULIT_HIT_EN			BIT(0)
#define F_TABLE_WALK_FAULT_INT_EN		BIT(1)
#define F_PREETCH_FIFO_OVERFLOW_INT_EN		BIT(2)
#define F_MISS_FIFO_OVERFLOW_INT_EN		BIT(3)
#define F_PREFETCH_FIFO_ERR_INT_EN		BIT(5)
#define F_MISS_FIFO_ERR_INT_EN			BIT(6)
#define F_INT_CLR_BIT				BIT(12)

#define REG_MMU_INT_MAIN_CONTROL		0x124
						/* mmu0 | mmu1 */
#define F_INT_TRANSLATION_FAULT			(BIT(0) | BIT(7))
#define F_INT_MAIN_MULTI_HIT_FAULT		(BIT(1) | BIT(8))
#define F_INT_INVALID_PA_FAULT			(BIT(2) | BIT(9))
#define F_INT_ENTRY_REPLACEMENT_FAULT		(BIT(3) | BIT(10))
#define F_INT_TLB_MISS_FAULT			(BIT(4) | BIT(11))
#define F_INT_MISS_TRANSACTION_FIFO_FAULT	(BIT(5) | BIT(12))
#define F_INT_PRETETCH_TRANSATION_FIFO_FAULT	(BIT(6) | BIT(13))

#define REG_MMU_CPE_DONE			0x12C

#define REG_MMU_FAULT_ST1			0x134
#define F_REG_MMU0_FAULT_MASK			GENMASK(6, 0)
#define F_REG_MMU1_FAULT_MASK			GENMASK(13, 7)

#define REG_MMU0_FAULT_VA			0x13c
#define F_MMU_FAULT_VA_WRITE_BIT		BIT(1)
#define F_MMU_FAULT_VA_LAYER_BIT		BIT(0)

#define REG_MMU0_INVLD_PA			0x140
#define REG_MMU1_FAULT_VA			0x144
#define REG_MMU1_INVLD_PA			0x148
#define REG_MMU0_INT_ID				0x150
#define REG_MMU1_INT_ID				0x154
#define F_MMU_INT_ID_COMM_ID(a)			(((a) >> 9) & 0x7)
#define F_MMU_INT_ID_SUB_COMM_ID(a)		(((a) >> 7) & 0x3)
#define F_MMU_INT_ID_LARB_ID(a)			(((a) >> 7) & 0x7)
#define F_MMU_INT_ID_PORT_ID(a)			(((a) >> 2) & 0x1f)

#define MTK_PROTECT_PA_ALIGN			128

/*
 * Get the local arbiter ID and the portid within the larb arbiter
 * from mtk_m4u_id which is defined by MTK_M4U_ID.
 */
#define MTK_M4U_TO_LARB(id)		(((id) >> 5) & 0xf)
#define MTK_M4U_TO_PORT(id)		((id) & 0x1f)

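/*
 * Editor's note (illustration, not from the original source): an id of
 * 0xa3 (binary 101_00011) decodes as MTK_M4U_TO_LARB(0xa3) = 5 and
 * MTK_M4U_TO_PORT(0xa3) = 3, i.e. larb 5, port 3 — the inverse of the
 * (larb << 5) | port packing implied by the two macros above.
 */
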
#define HAS_4GB_MODE			BIT(0)
/* HW will use the EMI clock if there is no "bclk". */
#define HAS_BCLK			BIT(1)
#define HAS_VLD_PA_RNG			BIT(2)
#define RESET_AXI			BIT(3)
#define OUT_ORDER_WR_EN			BIT(4)
#define HAS_SUB_COMM			BIT(5)

#define MTK_IOMMU_HAS_FLAG(pdata, _x) \
	((((pdata)->flags) & (_x)) == (_x))

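/*
 * Editor's note (illustration, not from the original source): because
 * the macro compares against _x rather than testing for a nonzero
 * intersection, passing an OR of several flags checks that ALL of them
 * are set, e.g. MTK_IOMMU_HAS_FLAG(pdata, HAS_4GB_MODE | HAS_BCLK) is
 * true only when both bits are present in pdata->flags.
 */
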
struct mtk_iommu_domain {
	struct io_pgtable_cfg		cfg;
	struct io_pgtable_ops		*iop;

	struct iommu_domain		domain;
};

static const struct iommu_ops mtk_iommu_ops;

/*
 * In M4U 4GB mode, the physical address is remapped as below:
 *
 * CPU Physical address:
 * ====================
 *
 * 0      1G       2G     3G       4G     5G
 * |---A---|---B---|---C---|---D---|---E---|
 * +--I/O--+------------Memory-------------+
 *
 * IOMMU output physical address:
 * =============================
 *
 *  4G      5G     6G      7G      8G
 *  |---E---|---B---|---C---|---D---|
 *  +------------Memory-------------+
 *
 * Region 'A' (I/O) can NOT be mapped by the M4U; for regions 'B'/'C'/'D',
 * bit 32 of the CPU physical address always needs to be set, and for
 * region 'E' the CPU physical address is kept as is.
 * Additionally, the iommu consumers always use the CPU physical address.
 */
#define MTK_IOMMU_4GB_MODE_REMAP_BASE	 0x140000000UL

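/*
 * Editor's note (worked example, not from the original source): a CPU PA
 * of 0x4000_0000 (1G, region 'B') is programmed into the pagetable as
 * 0x1_4000_0000 by mtk_iommu_map() setting BIT_ULL(32); conversely,
 * mtk_iommu_iova_to_phys() clears bit 32 again for any output PA at or
 * above MTK_IOMMU_4GB_MODE_REMAP_BASE, so consumers keep seeing the CPU
 * view of the address.
 */
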
static LIST_HEAD(m4ulist);	/* List all the M4U HWs */

#define for_each_m4u(data)	list_for_each_entry(data, &m4ulist, list)

/*
 * There may be 1 or 2 M4U HWs, but we always expect them to be in the
 * same domain for performance.
 *
 * Here we always return the mtk_iommu_data of the first probed M4U,
 * where the iommu domain information is recorded.
 */
static struct mtk_iommu_data *mtk_iommu_get_m4u_data(void)
{
	struct mtk_iommu_data *data;

	for_each_m4u(data)
		return data;

	return NULL;
}

static struct mtk_iommu_domain *to_mtk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct mtk_iommu_domain, domain);
}

static void mtk_iommu_tlb_flush_all(void *cookie)
{
	struct mtk_iommu_data *data = cookie;

	for_each_m4u(data) {
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + data->plat_data->inv_sel_reg);
		writel_relaxed(F_ALL_INVLD, data->base + REG_MMU_INVALIDATE);
		wmb(); /* Make sure the tlb flush all done */
	}
}

static void mtk_iommu_tlb_flush_range_sync(unsigned long iova, size_t size,
					   size_t granule, void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	unsigned long flags;
	int ret;
	u32 tmp;

	for_each_m4u(data) {
		spin_lock_irqsave(&data->tlb_lock, flags);
		writel_relaxed(F_INVLD_EN1 | F_INVLD_EN0,
			       data->base + data->plat_data->inv_sel_reg);

		writel_relaxed(iova, data->base + REG_MMU_INVLD_START_A);
		writel_relaxed(iova + size - 1,
			       data->base + REG_MMU_INVLD_END_A);
		writel_relaxed(F_MMU_INV_RANGE,
			       data->base + REG_MMU_INVALIDATE);

		/* tlb sync */
		ret = readl_poll_timeout_atomic(data->base + REG_MMU_CPE_DONE,
						tmp, tmp != 0, 10, 1000);
		if (ret) {
			dev_warn(data->dev,
				 "Partial TLB flush timed out, falling back to full flush\n");
			mtk_iommu_tlb_flush_all(cookie);
		}
		/* Clear the CPE status */
		writel_relaxed(0, data->base + REG_MMU_CPE_DONE);
		spin_unlock_irqrestore(&data->tlb_lock, flags);
	}
}

static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
					    unsigned long iova, size_t granule,
					    void *cookie)
{
	struct mtk_iommu_data *data = cookie;
	struct iommu_domain *domain = &data->m4u_dom->domain;

	iommu_iotlb_gather_add_page(domain, gather, iova, granule);
}

static const struct iommu_flush_ops mtk_iommu_flush_ops = {
	.tlb_flush_all = mtk_iommu_tlb_flush_all,
	.tlb_flush_walk = mtk_iommu_tlb_flush_range_sync,
	.tlb_flush_leaf = mtk_iommu_tlb_flush_range_sync,
	.tlb_add_page = mtk_iommu_tlb_flush_page_nosync,
};

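/*
 * Editor's note (illustration, not from the original source):
 * .tlb_add_page above only accumulates the page into the
 * iommu_iotlb_gather; no hardware invalidation happens at that point.
 * The actual flush is deferred until mtk_iommu_iotlb_sync() calls
 * mtk_iommu_tlb_flush_range_sync() over the gathered range.
 */
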
static irqreturn_t mtk_iommu_isr(int irq, void *dev_id)
{
	struct mtk_iommu_data *data = dev_id;
	struct mtk_iommu_domain *dom = data->m4u_dom;
	u32 int_state, regval, fault_iova, fault_pa;
	unsigned int fault_larb, fault_port, sub_comm = 0;
	bool layer, write;

	/* Read error info from registers */
	int_state = readl_relaxed(data->base + REG_MMU_FAULT_ST1);
	if (int_state & F_REG_MMU0_FAULT_MASK) {
		regval = readl_relaxed(data->base + REG_MMU0_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU0_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU0_INVLD_PA);
	} else {
		regval = readl_relaxed(data->base + REG_MMU1_INT_ID);
		fault_iova = readl_relaxed(data->base + REG_MMU1_FAULT_VA);
		fault_pa = readl_relaxed(data->base + REG_MMU1_INVLD_PA);
	}
	layer = fault_iova & F_MMU_FAULT_VA_LAYER_BIT;
	write = fault_iova & F_MMU_FAULT_VA_WRITE_BIT;
	fault_port = F_MMU_INT_ID_PORT_ID(regval);
	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_SUB_COMM)) {
		fault_larb = F_MMU_INT_ID_COMM_ID(regval);
		sub_comm = F_MMU_INT_ID_SUB_COMM_ID(regval);
	} else {
		fault_larb = F_MMU_INT_ID_LARB_ID(regval);
	}
	fault_larb = data->plat_data->larbid_remap[fault_larb][sub_comm];

	if (report_iommu_fault(&dom->domain, data->dev, fault_iova,
			       write ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ)) {
		dev_err_ratelimited(
			data->dev,
			"fault type=0x%x iova=0x%x pa=0x%x larb=%d port=%d layer=%d %s\n",
			int_state, fault_iova, fault_pa, fault_larb, fault_port,
			layer, write ? "write" : "read");
	}

	/* Interrupt clear */
	regval = readl_relaxed(data->base + REG_MMU_INT_CONTROL0);
	regval |= F_INT_CLR_BIT;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	mtk_iommu_tlb_flush_all(data);

	return IRQ_HANDLED;
}

static void mtk_iommu_config(struct mtk_iommu_data *data,
			     struct device *dev, bool enable)
{
	struct mtk_smi_larb_iommu *larb_mmu;
	unsigned int larbid, portid;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	int i;

	for (i = 0; i < fwspec->num_ids; ++i) {
		larbid = MTK_M4U_TO_LARB(fwspec->ids[i]);
		portid = MTK_M4U_TO_PORT(fwspec->ids[i]);
		larb_mmu = &data->larb_imu[larbid];

		dev_dbg(dev, "%s iommu port: %d\n",
			enable ? "enable" : "disable", portid);

		if (enable)
			larb_mmu->mmu |= MTK_SMI_MMU_EN(portid);
		else
			larb_mmu->mmu &= ~MTK_SMI_MMU_EN(portid);
	}
}

static int mtk_iommu_domain_finalise(struct mtk_iommu_domain *dom)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	dom->cfg = (struct io_pgtable_cfg) {
		.quirks = IO_PGTABLE_QUIRK_ARM_NS |
			IO_PGTABLE_QUIRK_NO_PERMS |
			IO_PGTABLE_QUIRK_TLBI_ON_MAP |
			IO_PGTABLE_QUIRK_ARM_MTK_EXT,
		.pgsize_bitmap = mtk_iommu_ops.pgsize_bitmap,
		.ias = 32,
		.oas = 34,
		.tlb = &mtk_iommu_flush_ops,
		.iommu_dev = data->dev,
	};

	dom->iop = alloc_io_pgtable_ops(ARM_V7S, &dom->cfg, data);
	if (!dom->iop) {
		dev_err(data->dev, "Failed to alloc io pgtable\n");
		return -EINVAL;
	}

	/* Update our supported page sizes bitmap */
	dom->domain.pgsize_bitmap = dom->cfg.pgsize_bitmap;
	return 0;
}

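/*
 * Editor's note (illustration, not from the original source): .ias = 32
 * matches the 32-bit iova aperture set up in mtk_iommu_domain_alloc()
 * below, while .oas = 34 leaves room for the above-4GB output addresses
 * produced by the 4GB-mode remap (bit 32 set in mtk_iommu_map());
 * IO_PGTABLE_QUIRK_ARM_MTK_EXT is what lets the ARM v7s format emit
 * such wide physical addresses.
 */
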
static struct iommu_domain *mtk_iommu_domain_alloc(unsigned type)
{
	struct mtk_iommu_domain *dom;

	if (type != IOMMU_DOMAIN_DMA)
		return NULL;

	dom = kzalloc(sizeof(*dom), GFP_KERNEL);
	if (!dom)
		return NULL;

	if (iommu_get_dma_cookie(&dom->domain))
		goto free_dom;

	if (mtk_iommu_domain_finalise(dom))
		goto put_dma_cookie;

	dom->domain.geometry.aperture_start = 0;
	dom->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	dom->domain.geometry.force_aperture = true;

	return &dom->domain;

put_dma_cookie:
	iommu_put_dma_cookie(&dom->domain);
free_dom:
	kfree(dom);
	return NULL;
}

static void mtk_iommu_domain_free(struct iommu_domain *domain)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	free_io_pgtable_ops(dom->iop);
	iommu_put_dma_cookie(domain);
	kfree(to_mtk_domain(domain));
}

static int mtk_iommu_attach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	if (!data)
		return -ENODEV;

	/* Update the pgtable base address register of the M4U HW */
	if (!data->m4u_dom) {
		data->m4u_dom = dom;
		writel(dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
		       data->base + REG_MMU_PT_BASE_ADDR);
	}

	mtk_iommu_config(data, dev, true);
	return 0;
}

static void mtk_iommu_detach_device(struct iommu_domain *domain,
				    struct device *dev)
{
	struct mtk_iommu_data *data = dev_iommu_priv_get(dev);

	if (!data)
		return;

	mtk_iommu_config(data, dev, false);
}

static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	/* The "4GB mode" M4U physically can not use the lower remap of Dram. */
	if (data->enable_4GB)
		paddr |= BIT_ULL(32);

	/* Synchronize with the tlb_lock */
	return dom->iop->map(dom->iop, iova, paddr, size, prot);
}

static size_t mtk_iommu_unmap(struct iommu_domain *domain,
			      unsigned long iova, size_t size,
			      struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);

	return dom->iop->unmap(dom->iop, iova, size, gather);
}

static void mtk_iommu_flush_iotlb_all(struct iommu_domain *domain)
{
	mtk_iommu_tlb_flush_all(mtk_iommu_get_m4u_data());
}

static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
				 struct iommu_iotlb_gather *gather)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	size_t length = gather->end - gather->start;

	if (gather->start == ULONG_MAX)
		return;

	mtk_iommu_tlb_flush_range_sync(gather->start, length, gather->pgsize,
				       data);
}

static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
{
	struct mtk_iommu_domain *dom = to_mtk_domain(domain);
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
	phys_addr_t pa;

	pa = dom->iop->iova_to_phys(dom->iop, iova);
	if (data->enable_4GB && pa >= MTK_IOMMU_4GB_MODE_REMAP_BASE)
		pa &= ~BIT_ULL(32);

	return pa;
}

static struct iommu_device *mtk_iommu_probe_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct mtk_iommu_data *data;

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return ERR_PTR(-ENODEV); /* Not an iommu client device */

	data = dev_iommu_priv_get(dev);

	return &data->iommu;
}

static void mtk_iommu_release_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &mtk_iommu_ops)
		return;

	iommu_fwspec_free(dev);
}

static struct iommu_group *mtk_iommu_device_group(struct device *dev)
{
	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();

	if (!data)
		return ERR_PTR(-ENODEV);

	/* All the client devices are in the same m4u iommu-group */
	if (!data->m4u_group) {
		data->m4u_group = iommu_group_alloc();
		if (IS_ERR(data->m4u_group))
			dev_err(dev, "Failed to allocate M4U IOMMU group\n");
	} else {
		iommu_group_ref_get(data->m4u_group);
	}
	return data->m4u_group;
}

static int mtk_iommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	struct platform_device *m4updev;

	if (args->args_count != 1) {
		dev_err(dev, "invalid #iommu-cells(%d) property for IOMMU\n",
			args->args_count);
		return -EINVAL;
	}

	if (!dev_iommu_priv_get(dev)) {
		/* Get the m4u device */
		m4updev = of_find_device_by_node(args->np);
		if (WARN_ON(!m4updev))
			return -EINVAL;

		dev_iommu_priv_set(dev, platform_get_drvdata(m4updev));
	}

	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static const struct iommu_ops mtk_iommu_ops = {
	.domain_alloc	= mtk_iommu_domain_alloc,
	.domain_free	= mtk_iommu_domain_free,
	.attach_dev	= mtk_iommu_attach_device,
	.detach_dev	= mtk_iommu_detach_device,
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.flush_iotlb_all = mtk_iommu_flush_iotlb_all,
	.iotlb_sync	= mtk_iommu_iotlb_sync,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.probe_device	= mtk_iommu_probe_device,
	.release_device	= mtk_iommu_release_device,
	.device_group	= mtk_iommu_device_group,
	.of_xlate	= mtk_iommu_of_xlate,
	.pgsize_bitmap	= SZ_4K | SZ_64K | SZ_1M | SZ_16M,
};

static int mtk_iommu_hw_init(const struct mtk_iommu_data *data)
{
	u32 regval;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable iommu bclk(%d)\n", ret);
		return ret;
	}

	if (data->plat_data->m4u_plat == M4U_MT8173)
		regval = F_MMU_PREFETCH_RT_REPLACE_MOD |
			 F_MMU_TF_PROT_TO_PROGRAM_ADDR_MT8173;
	else
		regval = F_MMU_TF_PROT_TO_PROGRAM_ADDR;
	writel_relaxed(regval, data->base + REG_MMU_CTRL_REG);

	regval = F_L2_MULIT_HIT_EN |
		F_TABLE_WALK_FAULT_INT_EN |
		F_PREETCH_FIFO_OVERFLOW_INT_EN |
		F_MISS_FIFO_OVERFLOW_INT_EN |
		F_PREFETCH_FIFO_ERR_INT_EN |
		F_MISS_FIFO_ERR_INT_EN;
	writel_relaxed(regval, data->base + REG_MMU_INT_CONTROL0);

	regval = F_INT_TRANSLATION_FAULT |
		F_INT_MAIN_MULTI_HIT_FAULT |
		F_INT_INVALID_PA_FAULT |
		F_INT_ENTRY_REPLACEMENT_FAULT |
		F_INT_TLB_MISS_FAULT |
		F_INT_MISS_TRANSACTION_FIFO_FAULT |
		F_INT_PRETETCH_TRANSATION_FIFO_FAULT;
	writel_relaxed(regval, data->base + REG_MMU_INT_MAIN_CONTROL);

	if (data->plat_data->m4u_plat == M4U_MT8173)
		regval = (data->protect_base >> 1) | (data->enable_4GB << 31);
	else
		regval = lower_32_bits(data->protect_base) |
			 upper_32_bits(data->protect_base);
	writel_relaxed(regval, data->base + REG_MMU_IVRP_PADDR);

	if (data->enable_4GB &&
	    MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_VLD_PA_RNG)) {
		/*
		 * If 4GB mode is enabled, the valid PA range is from
		 * 0x1_0000_0000 to 0x1_ffff_ffff. Here we record bits[32:30].
		 */
		regval = F_MMU_VLD_PA_RNG(7, 4);
		writel_relaxed(regval, data->base + REG_MMU_VLD_PA_RNG);
	}
	writel_relaxed(0, data->base + REG_MMU_DCM_DIS);

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, RESET_AXI)) {
		/* The register is called STANDARD_AXI_MODE in this case */
		regval = 0;
	} else {
		regval = readl_relaxed(data->base + REG_MMU_MISC_CTRL);
		regval &= ~F_MMU_STANDARD_AXI_MODE_MASK;
		if (MTK_IOMMU_HAS_FLAG(data->plat_data, OUT_ORDER_WR_EN))
			regval &= ~F_MMU_IN_ORDER_WR_EN_MASK;
	}
	writel_relaxed(regval, data->base + REG_MMU_MISC_CTRL);

	if (devm_request_irq(data->dev, data->irq, mtk_iommu_isr, 0,
			     dev_name(data->dev), (void *)data)) {
		writel_relaxed(0, data->base + REG_MMU_PT_BASE_ADDR);
		clk_disable_unprepare(data->bclk);
		dev_err(data->dev, "Failed @ IRQ-%d Request\n", data->irq);
		return -ENODEV;
	}

	return 0;
}

static const struct component_master_ops mtk_iommu_com_ops = {
	.bind		= mtk_iommu_bind,
	.unbind		= mtk_iommu_unbind,
};

static int mtk_iommu_probe(struct platform_device *pdev)
{
	struct mtk_iommu_data   *data;
	struct device           *dev = &pdev->dev;
	struct resource         *res;
	resource_size_t		ioaddr;
	struct component_match  *match = NULL;
	void                    *protect;
	int                     i, larb_nr, ret;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->dev = dev;
	data->plat_data = of_device_get_match_data(dev);

	/* Protect memory. HW will access here on a translation fault. */
	protect = devm_kzalloc(dev, MTK_PROTECT_PA_ALIGN * 2, GFP_KERNEL);
	if (!protect)
		return -ENOMEM;
	data->protect_base = ALIGN(virt_to_phys(protect), MTK_PROTECT_PA_ALIGN);

	/* Whether the current DRAM is over 4GB */
	data->enable_4GB = !!(max_pfn > (BIT_ULL(32) >> PAGE_SHIFT));
	if (!MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_4GB_MODE))
		data->enable_4GB = false;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->base))
		return PTR_ERR(data->base);
	ioaddr = res->start;

	data->irq = platform_get_irq(pdev, 0);
	if (data->irq < 0)
		return data->irq;

	if (MTK_IOMMU_HAS_FLAG(data->plat_data, HAS_BCLK)) {
		data->bclk = devm_clk_get(dev, "bclk");
		if (IS_ERR(data->bclk))
			return PTR_ERR(data->bclk);
	}

	larb_nr = of_count_phandle_with_args(dev->of_node,
					     "mediatek,larbs", NULL);
	if (larb_nr < 0)
		return larb_nr;

	for (i = 0; i < larb_nr; i++) {
		struct device_node *larbnode;
		struct platform_device *plarbdev;
		u32 id;

		larbnode = of_parse_phandle(dev->of_node, "mediatek,larbs", i);
		if (!larbnode)
			return -EINVAL;

		if (!of_device_is_available(larbnode)) {
			of_node_put(larbnode);
			continue;
		}

		ret = of_property_read_u32(larbnode, "mediatek,larb-id", &id);
		if (ret)/* The larb IDs are consecutive if this property is absent */
			id = i;

		plarbdev = of_find_device_by_node(larbnode);
		if (!plarbdev) {
			of_node_put(larbnode);
			return -EPROBE_DEFER;
		}
		data->larb_imu[id].dev = &plarbdev->dev;

		component_match_add_release(dev, &match, release_of,
					    compare_of, larbnode);
	}

	platform_set_drvdata(pdev, data);

	ret = mtk_iommu_hw_init(data);
	if (ret)
		return ret;

	ret = iommu_device_sysfs_add(&data->iommu, dev, NULL,
				     "mtk-iommu.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &mtk_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &pdev->dev.of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	spin_lock_init(&data->tlb_lock);
	list_add_tail(&data->list, &m4ulist);

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &mtk_iommu_ops);

	return component_master_add_with_match(dev, &mtk_iommu_com_ops, match);
}

static int mtk_iommu_remove(struct platform_device *pdev)
{
	struct mtk_iommu_data *data = platform_get_drvdata(pdev);

	iommu_device_sysfs_remove(&data->iommu);
	iommu_device_unregister(&data->iommu);

	if (iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, NULL);

	clk_disable_unprepare(data->bclk);
	devm_free_irq(&pdev->dev, data->irq, data);
	component_master_del(&pdev->dev, &mtk_iommu_com_ops);
	return 0;
}

static int __maybe_unused mtk_iommu_suspend(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	void __iomem *base = data->base;

	reg->misc_ctrl = readl_relaxed(base + REG_MMU_MISC_CTRL);
	reg->dcm_dis = readl_relaxed(base + REG_MMU_DCM_DIS);
	reg->ctrl_reg = readl_relaxed(base + REG_MMU_CTRL_REG);
	reg->int_control0 = readl_relaxed(base + REG_MMU_INT_CONTROL0);
	reg->int_main_control = readl_relaxed(base + REG_MMU_INT_MAIN_CONTROL);
	reg->ivrp_paddr = readl_relaxed(base + REG_MMU_IVRP_PADDR);
	reg->vld_pa_rng = readl_relaxed(base + REG_MMU_VLD_PA_RNG);
	clk_disable_unprepare(data->bclk);
	return 0;
}

static int __maybe_unused mtk_iommu_resume(struct device *dev)
{
	struct mtk_iommu_data *data = dev_get_drvdata(dev);
	struct mtk_iommu_suspend_reg *reg = &data->reg;
	struct mtk_iommu_domain *m4u_dom = data->m4u_dom;
	void __iomem *base = data->base;
	int ret;

	ret = clk_prepare_enable(data->bclk);
	if (ret) {
		dev_err(data->dev, "Failed to enable clk(%d) in resume\n", ret);
		return ret;
	}
	writel_relaxed(reg->misc_ctrl, base + REG_MMU_MISC_CTRL);
	writel_relaxed(reg->dcm_dis, base + REG_MMU_DCM_DIS);
	writel_relaxed(reg->ctrl_reg, base + REG_MMU_CTRL_REG);
	writel_relaxed(reg->int_control0, base + REG_MMU_INT_CONTROL0);
	writel_relaxed(reg->int_main_control, base + REG_MMU_INT_MAIN_CONTROL);
	writel_relaxed(reg->ivrp_paddr, base + REG_MMU_IVRP_PADDR);
	writel_relaxed(reg->vld_pa_rng, base + REG_MMU_VLD_PA_RNG);
	if (m4u_dom)
		writel(m4u_dom->cfg.arm_v7s_cfg.ttbr & MMU_PT_ADDR_MASK,
		       base + REG_MMU_PT_BASE_ADDR);
	return 0;
}

static const struct dev_pm_ops mtk_iommu_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_iommu_suspend, mtk_iommu_resume)
};

static const struct mtk_iommu_plat_data mt2712_data = {
	.m4u_plat     = M4U_MT2712,
	.flags        = HAS_4GB_MODE | HAS_BCLK | HAS_VLD_PA_RNG,
	.inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
	.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}},
};

static const struct mtk_iommu_plat_data mt8173_data = {
	.m4u_plat     = M4U_MT8173,
	.flags	      = HAS_4GB_MODE | HAS_BCLK | RESET_AXI,
	.inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
	.larbid_remap = {{0}, {1}, {2}, {3}, {4}, {5}}, /* Linear mapping. */
};

static const struct mtk_iommu_plat_data mt8183_data = {
	.m4u_plat     = M4U_MT8183,
	.flags        = RESET_AXI,
	.inv_sel_reg  = REG_MMU_INV_SEL_GEN1,
	.larbid_remap = {{0}, {4}, {5}, {6}, {7}, {2}, {3}, {1}},
};

cecdce9d YW |
813 | { .compatible = "mediatek,mt2712-m4u", .data = &mt2712_data}, |
814 | { .compatible = "mediatek,mt8173-m4u", .data = &mt8173_data}, | |
907ba6a1 | 815 | { .compatible = "mediatek,mt8183-m4u", .data = &mt8183_data}, |
0df4fabe YW |
816 | {} |
817 | }; | |
818 | ||
819 | static struct platform_driver mtk_iommu_driver = { | |
820 | .probe = mtk_iommu_probe, | |
821 | .remove = mtk_iommu_remove, | |
822 | .driver = { | |
823 | .name = "mtk-iommu", | |
e6dec923 | 824 | .of_match_table = of_match_ptr(mtk_iommu_of_ids), |
0df4fabe YW |
825 | .pm = &mtk_iommu_pm_ops, |
826 | } | |
827 | }; | |
828 | ||
e6dec923 | 829 | static int __init mtk_iommu_init(void) |
0df4fabe YW |
830 | { |
831 | int ret; | |
0df4fabe YW |
832 | |
833 | ret = platform_driver_register(&mtk_iommu_driver); | |
e6dec923 YW |
834 | if (ret != 0) |
835 | pr_err("Failed to register MTK IOMMU driver\n"); | |
0df4fabe | 836 | |
e6dec923 | 837 | return ret; |
0df4fabe YW |
838 | } |
839 | ||
e6dec923 | 840 | subsys_initcall(mtk_iommu_init) |