/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/** MMU register offsets */
#define RK_MMU_DTE_ADDR		0x00	/* Directory table address */
#define RK_MMU_STATUS		0x04
#define RK_MMU_COMMAND		0x08
#define RK_MMU_PAGE_FAULT_ADDR	0x0C	/* IOVA of last page fault */
#define RK_MMU_ZAP_ONE_LINE	0x10	/* Shootdown one IOTLB entry */
#define RK_MMU_INT_RAWSTAT	0x14	/* IRQ status ignoring mask */
#define RK_MMU_INT_CLEAR	0x18	/* Acknowledge and re-arm irq */
#define RK_MMU_INT_MASK		0x1C	/* IRQ enable */
#define RK_MMU_INT_STATUS	0x20	/* IRQ status after masking */
#define RK_MMU_AUTO_GATING	0x24

#define DTE_ADDR_DUMMY		0xCAFEBABE

#define RK_MMU_POLL_PERIOD_US		100
#define RK_MMU_FORCE_RESET_TIMEOUT_US	100000
#define RK_MMU_POLL_TIMEOUT_US		1000

/* RK_MMU_STATUS fields */
#define RK_MMU_STATUS_PAGING_ENABLED       BIT(0)
#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE    BIT(1)
#define RK_MMU_STATUS_STALL_ACTIVE         BIT(2)
#define RK_MMU_STATUS_IDLE                 BIT(3)
#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY  BIT(4)
#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE  BIT(5)
#define RK_MMU_STATUS_STALL_NOT_ACTIVE     BIT(31)

/* RK_MMU_COMMAND command values */
#define RK_MMU_CMD_ENABLE_PAGING    0  /* Enable memory translation */
#define RK_MMU_CMD_DISABLE_PAGING   1  /* Disable memory translation */
#define RK_MMU_CMD_ENABLE_STALL     2  /* Stall paging to allow other cmds */
#define RK_MMU_CMD_DISABLE_STALL    3  /* End the stall, re-enabling paging */
#define RK_MMU_CMD_ZAP_CACHE        4  /* Shoot down entire IOTLB */
#define RK_MMU_CMD_PAGE_FAULT_DONE  5  /* Clear page fault */
#define RK_MMU_CMD_FORCE_RESET      6  /* Reset all registers */

/* RK_MMU_INT_* register fields */
#define RK_MMU_IRQ_PAGE_FAULT    0x01  /* page fault */
#define RK_MMU_IRQ_BUS_ERROR     0x02  /* bus read error */
#define RK_MMU_IRQ_MASK          (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)

#define NUM_DT_ENTRIES 1024
#define NUM_PT_ENTRIES 1024

#define SPAGE_ORDER 12
#define SPAGE_SIZE (1 << SPAGE_ORDER)

/*
 * Support mapping any size that fits in one page table:
 * 4 KiB to 4 MiB
 */
#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
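
/*
 * Illustrative note (not from the original source): the bitmap above has
 * bits 12..22 set, i.e. it advertises the power-of-two sizes 2^12 (4 KiB)
 * through 2^22 (4 MiB). Any size-aligned chunk of such a size fits within
 * a single page table, since one PT covers 1024 * 4 KiB = 4 MiB of iova
 * space.
 */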

struct rk_iommu_domain {
	struct list_head iommus;
	struct platform_device *pdev;
	u32 *dt; /* page directory table */
	dma_addr_t dt_dma;
	spinlock_t iommus_lock; /* lock for iommus list */
	spinlock_t dt_lock; /* lock for modifying page directory table */

	struct iommu_domain domain;
};

struct rk_iommu {
	struct device *dev;
	void __iomem **bases;
	int num_mmu;
	bool reset_disabled;
	struct iommu_device iommu;
	struct list_head node; /* entry in rk_iommu_domain.iommus */
	struct iommu_domain *domain; /* domain to which iommu is attached */
};

static inline void rk_table_flush(struct rk_iommu_domain *dom, dma_addr_t dma,
				  unsigned int count)
{
	size_t size = count * sizeof(u32); /* count of u32 entry */

	dma_sync_single_for_device(&dom->pdev->dev, dma, size, DMA_TO_DEVICE);
}

static struct rk_iommu_domain *to_rk_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct rk_iommu_domain, domain);
}

/*
 * The Rockchip rk3288 iommu uses a 2-level page table.
 * The first level is the "Directory Table" (DT).
 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
 * to a "Page Table".
 * The second level consists of up to 1024 Page Tables (PTs).
 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
 * a 4 KB page of physical memory.
 *
 * The DT and each PT fit in a single 4 KB page (1024 entries * 4 bytes).
 * Each iommu device has an MMU_DTE_ADDR register that contains the physical
 * address of the start of the DT page.
 *
 * The structure of the page table is as follows:
 *
 *                   DT
 * MMU_DTE_ADDR -> +-----+
 *                 |     |
 *                 +-----+     PT
 *                 | DTE | -> +-----+
 *                 +-----+    |     |     Memory
 *                 |     |    +-----+     Page
 *                 |     |    | PTE | -> +-----+
 *                 +-----+    +-----+    |     |
 *                            |     |    |     |
 *                            |     |    |     |
 *                            +-----+    |     |
 *                                       |     |
 *                                       |     |
 *                                       +-----+
 */
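
/*
 * Illustrative note (not from the original source): with 1024 DTEs, 1024
 * PTEs per PT, and 4 KiB pages, a fully populated DT covers
 * 1024 * 1024 * 4 KiB = 4 GiB, i.e. the full 32-bit iova space.
 */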

/*
 * Each DTE has a PT address and a valid bit:
 * +---------------------+-----------+-+
 * | PT address          | Reserved  |V|
 * +---------------------+-----------+-+
 *  31:12 - PT address (PTs always start on a 4 KB boundary)
 *  11: 1 - Reserved
 *      0 - 1 if PT @ PT address is valid
 */
#define RK_DTE_PT_ADDRESS_MASK    0xfffff000
#define RK_DTE_PT_VALID           BIT(0)

static inline phys_addr_t rk_dte_pt_address(u32 dte)
{
	return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
}

static inline bool rk_dte_is_pt_valid(u32 dte)
{
	return dte & RK_DTE_PT_VALID;
}

static inline u32 rk_mk_dte(dma_addr_t pt_dma)
{
	return (pt_dma & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
}
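
/*
 * Illustrative example (hypothetical value, not from the original source):
 * for a page table at DMA address 0x12345000, rk_mk_dte() yields
 * 0x12345001 - the 4 KiB aligned PT address with the valid bit set.
 */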

/*
 * Each PTE has a Page address, some flags and a valid bit:
 * +---------------------+---+-------+-+
 * | Page address        |Rsv| Flags |V|
 * +---------------------+---+-------+-+
 *  31:12 - Page address (Pages always start on a 4 KB boundary)
 *  11: 9 - Reserved
 *   8: 1 - Flags
 *      8 - Read allocate - allocate cache space on read misses
 *      7 - Read cache - enable cache & prefetch of data
 *      6 - Write buffer - enable delaying writes on their way to memory
 *      5 - Write allocate - allocate cache space on write misses
 *      4 - Write cache - different writes can be merged together
 *      3 - Override cache attributes
 *          if 1, bits 4-8 control cache attributes
 *          if 0, the system bus defaults are used
 *      2 - Writable
 *      1 - Readable
 *      0 - 1 if Page @ Page address is valid
 */
#define RK_PTE_PAGE_ADDRESS_MASK  0xfffff000
#define RK_PTE_PAGE_FLAGS_MASK    0x000001fe
#define RK_PTE_PAGE_WRITABLE      BIT(2)
#define RK_PTE_PAGE_READABLE      BIT(1)
#define RK_PTE_PAGE_VALID         BIT(0)

static inline phys_addr_t rk_pte_page_address(u32 pte)
{
	return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
}

static inline bool rk_pte_is_page_valid(u32 pte)
{
	return pte & RK_PTE_PAGE_VALID;
}

/* TODO: set cache flags per prot IOMMU_CACHE */
static u32 rk_mk_pte(phys_addr_t page, int prot)
{
	u32 flags = 0;

	flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
	flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
	page &= RK_PTE_PAGE_ADDRESS_MASK;
	return page | flags | RK_PTE_PAGE_VALID;
}

static u32 rk_mk_pte_invalid(u32 pte)
{
	return pte & ~RK_PTE_PAGE_VALID;
}
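
/*
 * Illustrative example (hypothetical values, not from the original source):
 * rk_mk_pte(0x12345000, IOMMU_READ | IOMMU_WRITE) yields 0x12345007 -
 * readable (bit 1), writable (bit 2) and valid (bit 0) all set.
 * rk_mk_pte_invalid() clears only the valid bit, leaving the page address
 * bits in place.
 */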

/*
 * rk3288 iova (IOMMU Virtual Address) format
 *  31       22.21       12.11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 *  31:22 - DTE index   - index of DTE in DT
 *  21:12 - PTE index   - index of PTE in PT @ DTE.pt_address
 *  11: 0 - Page offset - offset into page @ PTE.page_address
 */
#define RK_IOVA_DTE_MASK    0xffc00000
#define RK_IOVA_DTE_SHIFT   22
#define RK_IOVA_PTE_MASK    0x003ff000
#define RK_IOVA_PTE_SHIFT   12
#define RK_IOVA_PAGE_MASK   0x00000fff
#define RK_IOVA_PAGE_SHIFT  0

static u32 rk_iova_dte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
}

static u32 rk_iova_pte_index(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
}

static u32 rk_iova_page_offset(dma_addr_t iova)
{
	return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
}
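
/*
 * Illustrative worked example (hypothetical iova, not from the original
 * source): for iova = 0x00501008,
 *   rk_iova_dte_index()   = 0x001  (bits 31:22)
 *   rk_iova_pte_index()   = 0x101  (bits 21:12)
 *   rk_iova_page_offset() = 0x008  (bits 11:0)
 * so translation walks dt[0x001] to find the PT, pt[0x101] to find the
 * page, and phys = page address + 0x008.
 */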

static u32 rk_iommu_read(void __iomem *base, u32 offset)
{
	return readl(base + offset);
}

static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
{
	writel(value, base + offset);
}

static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
{
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
}

static void rk_iommu_base_command(void __iomem *base, u32 command)
{
	writel(command, base + RK_MMU_COMMAND);
}

static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
{
	int i;
	dma_addr_t iova_end = iova_start + size;
	/*
	 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
	 * entire iotlb rather than iterate over individual iovas.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		dma_addr_t iova;

		for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)
			rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
	}
}

static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
{
	bool active = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_STALL_ACTIVE);

	return active;
}

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
{
	bool enable = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
			     RK_MMU_STATUS_PAGING_ENABLED);

	return enable;
}

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
{
	bool done = true;
	int i;

	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;

	return done;
}

static int rk_iommu_enable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_stall_active(iommu))
		return 0;

	/* Stall can only be enabled if paging is enabled */
	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_stall(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_stall_active(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);

	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_enable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_disable_paging(struct rk_iommu *iommu)
{
	int ret, i;
	bool val;

	if (!rk_iommu_is_paging_enabled(iommu))
		return 0;

	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);

	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 !val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		for (i = 0; i < iommu->num_mmu; i++)
			dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
				rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));

	return ret;
}

static int rk_iommu_force_reset(struct rk_iommu *iommu)
{
	int ret, i;
	u32 dte_addr;
	bool val;

	if (iommu->reset_disabled)
		return 0;

	/*
	 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
	 * and verifying that the upper 5 nybbles (0xCAFEB000) are read back.
	 */
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);

		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
		if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
			dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
			return -EFAULT;
		}
	}

	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);

	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
				 val, RK_MMU_FORCE_RESET_TIMEOUT_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret) {
		dev_err(iommu->dev, "FORCE_RESET command timed out\n");
		return ret;
	}

	return 0;
}

static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
{
	void __iomem *base = iommu->bases[index];
	u32 dte_index, pte_index, page_offset;
	u32 mmu_dte_addr;
	phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
	u32 *dte_addr;
	u32 dte;
	phys_addr_t pte_addr_phys = 0;
	u32 *pte_addr = NULL;
	u32 pte = 0;
	phys_addr_t page_addr_phys = 0;
	u32 page_flags = 0;

	dte_index = rk_iova_dte_index(iova);
	pte_index = rk_iova_pte_index(iova);
	page_offset = rk_iova_page_offset(iova);

	mmu_dte_addr = rk_iommu_read(base, RK_MMU_DTE_ADDR);
	mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;

	dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
	dte_addr = phys_to_virt(dte_addr_phys);
	dte = *dte_addr;

	if (!rk_dte_is_pt_valid(dte))
		goto print_it;

	pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
	pte_addr = phys_to_virt(pte_addr_phys);
	pte = *pte_addr;

	if (!rk_pte_is_page_valid(pte))
		goto print_it;

	page_addr_phys = rk_pte_page_address(pte) + page_offset;
	page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;

print_it:
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
		&iova, dte_index, pte_index, page_offset);
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
		&mmu_dte_addr_phys, &dte_addr_phys, dte,
		rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
		rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
}

static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
{
	struct rk_iommu *iommu = dev_id;
	u32 status;
	u32 int_status;
	dma_addr_t iova;
	irqreturn_t ret = IRQ_NONE;
	int i;

	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		if (int_status == 0)
			continue;

		ret = IRQ_HANDLED;
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);

		if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
			int flags;

			status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
			flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
					IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

			dev_err(iommu->dev, "Page fault at %pad of type %s\n",
				&iova,
				(flags == IOMMU_FAULT_WRITE) ? "write" : "read");

			log_iova(iommu, i, iova);

			/*
			 * Report page fault to any installed handlers.
			 * Ignore the return code, though, since we always zap cache
			 * and clear the page fault anyway.
			 */
			if (iommu->domain)
				report_iommu_fault(iommu->domain, iommu->dev, iova,
						   flags);
			else
				dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");

			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
			rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		}

		if (int_status & RK_MMU_IRQ_BUS_ERROR)
			dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);

		if (int_status & ~RK_MMU_IRQ_MASK)
			dev_err(iommu->dev, "unexpected int_status: %#08x\n",
				int_status);

		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	}

	return ret;
}

static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	phys_addr_t pt_phys, phys = 0;
	u32 dte, pte;
	u32 *page_table;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	if (!rk_dte_is_pt_valid(dte))
		goto out;

	pt_phys = rk_dte_pt_address(dte);
	page_table = (u32 *)phys_to_virt(pt_phys);
	pte = page_table[rk_iova_pte_index(iova)];
	if (!rk_pte_is_page_valid(pte))
		goto out;

	phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
out:
	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return phys;
}

static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
			      dma_addr_t iova, size_t size)
{
	struct list_head *pos;
	unsigned long flags;

	/* shootdown these iova from all iommus using this domain */
	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu;

		iommu = list_entry(pos, struct rk_iommu, node);
		rk_iommu_zap_lines(iommu, iova, size);
	}
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
}

static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain,
					 dma_addr_t iova, size_t size)
{
	rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
	if (size > SPAGE_SIZE)
		rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE,
				  SPAGE_SIZE);
}

static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
				  dma_addr_t iova)
{
	struct device *dev = &rk_domain->pdev->dev;
	u32 *page_table, *dte_addr;
	u32 dte_index, dte;
	phys_addr_t pt_phys;
	dma_addr_t pt_dma;

	assert_spin_locked(&rk_domain->dt_lock);

	dte_index = rk_iova_dte_index(iova);
	dte_addr = &rk_domain->dt[dte_index];
	dte = *dte_addr;
	if (rk_dte_is_pt_valid(dte))
		goto done;

	page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
	if (!page_table)
		return ERR_PTR(-ENOMEM);

	pt_dma = dma_map_single(dev, page_table, SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, pt_dma)) {
		dev_err(dev, "DMA mapping error while allocating page table\n");
		free_page((unsigned long)page_table);
		return ERR_PTR(-ENOMEM);
	}

	dte = rk_mk_dte(pt_dma);
	*dte_addr = dte;

	rk_table_flush(rk_domain, pt_dma, NUM_PT_ENTRIES);
	rk_table_flush(rk_domain,
		       rk_domain->dt_dma + dte_index * sizeof(u32), 1);
done:
	pt_phys = rk_dte_pt_address(dte);
	return (u32 *)phys_to_virt(pt_phys);
}

static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
				  u32 *pte_addr, dma_addr_t pte_dma,
				  size_t size)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (!rk_pte_is_page_valid(pte))
			break;

		pte_addr[pte_count] = rk_mk_pte_invalid(pte);
	}

	rk_table_flush(rk_domain, pte_dma, pte_count);

	return pte_count * SPAGE_SIZE;
}

static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
			     dma_addr_t pte_dma, dma_addr_t iova,
			     phys_addr_t paddr, size_t size, int prot)
{
	unsigned int pte_count;
	unsigned int pte_total = size / SPAGE_SIZE;
	phys_addr_t page_phys;

	assert_spin_locked(&rk_domain->dt_lock);

	for (pte_count = 0; pte_count < pte_total; pte_count++) {
		u32 pte = pte_addr[pte_count];

		if (rk_pte_is_page_valid(pte))
			goto unwind;

		pte_addr[pte_count] = rk_mk_pte(paddr, prot);

		paddr += SPAGE_SIZE;
	}

	rk_table_flush(rk_domain, pte_dma, pte_total);

	/*
	 * Zap the first and last iova to evict from iotlb any previously
	 * mapped cachelines holding stale values for its dte and pte.
	 * We only zap the first and last iova, since only they could have
	 * dte or pte shared with an existing mapping.
	 */
	rk_iommu_zap_iova_first_last(rk_domain, iova, size);

	return 0;
unwind:
	/* Unmap the range of iovas that we just mapped */
	rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma,
			    pte_count * SPAGE_SIZE);

	iova += pte_count * SPAGE_SIZE;
	page_phys = rk_pte_page_address(pte_addr[pte_count]);
	pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
	       &iova, &page_phys, &paddr, prot);

	return -EADDRINUSE;
}

static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
			phys_addr_t paddr, size_t size, int prot)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	u32 *page_table, *pte_addr;
	u32 dte_index, pte_index;
	int ret;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_map() guarantees that both iova and size will be
	 * aligned, we will always only be mapping from a single dte here.
	 */
	page_table = rk_dte_get_page_table(rk_domain, iova);
	if (IS_ERR(page_table)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return PTR_ERR(page_table);
	}

	/* Note: despite its name, dte_index holds the dte value itself */
	dte_index = rk_domain->dt[rk_iova_dte_index(iova)];
	pte_index = rk_iova_pte_index(iova);
	pte_addr = &page_table[pte_index];
	pte_dma = rk_dte_pt_address(dte_index) + pte_index * sizeof(u32);
	ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova,
				paddr, size, prot);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	return ret;
}

static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
			     size_t size)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	dma_addr_t pte_dma, iova = (dma_addr_t)_iova;
	phys_addr_t pt_phys;
	u32 dte;
	u32 *pte_addr;
	size_t unmap_size;

	spin_lock_irqsave(&rk_domain->dt_lock, flags);

	/*
	 * pgsize_bitmap specifies iova sizes that fit in one page table
	 * (1024 4-KiB pages = 4 MiB).
	 * So, size will always be 4096 <= size <= 4194304.
	 * Since iommu_unmap() guarantees that both iova and size will be
	 * aligned, we will always only be unmapping from a single dte here.
	 */
	dte = rk_domain->dt[rk_iova_dte_index(iova)];
	/* Just return 0 if iova is unmapped */
	if (!rk_dte_is_pt_valid(dte)) {
		spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
		return 0;
	}

	pt_phys = rk_dte_pt_address(dte);
	pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
	pte_dma = pt_phys + rk_iova_pte_index(iova) * sizeof(u32);
	unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size);

	spin_unlock_irqrestore(&rk_domain->dt_lock, flags);

	/* Shootdown iotlb entries for iova range that was just unmapped */
	rk_iommu_zap_iova(rk_domain, iova, unmap_size);

	return unmap_size;
}

static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
{
	struct iommu_group *group;
	struct device *iommu_dev;
	struct rk_iommu *rk_iommu;

	group = iommu_group_get(dev);
	if (!group)
		return NULL;
	iommu_dev = iommu_group_get_iommudata(group);
	rk_iommu = dev_get_drvdata(iommu_dev);
	iommu_group_put(group);

	return rk_iommu;
}

static int rk_iommu_attach_device(struct iommu_domain *domain,
				  struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int ret, i;

	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return 0;

	ret = rk_iommu_enable_stall(iommu);
	if (ret)
		return ret;

	ret = rk_iommu_force_reset(iommu);
	if (ret)
		goto out_disable_stall;

	iommu->domain = domain;

	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_domain->dt_dma);
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}

	ret = rk_iommu_enable_paging(iommu);
	if (ret)
		goto out_disable_stall;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_add_tail(&iommu->node, &rk_domain->iommus);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	dev_dbg(dev, "Attached to iommu domain\n");

out_disable_stall:
	rk_iommu_disable_stall(iommu);
	return ret;
}

static void rk_iommu_detach_device(struct iommu_domain *domain,
				   struct device *dev)
{
	struct rk_iommu *iommu;
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	unsigned long flags;
	int i;

	/* Allow 'virtual devices' (e.g., drm) to detach from domain */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
		return;

	spin_lock_irqsave(&rk_domain->iommus_lock, flags);
	list_del_init(&iommu->node);
	spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);

	/* Ignore error while disabling, just keep going */
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);

	iommu->domain = NULL;

	dev_dbg(dev, "Detached from iommu domain\n");
}

static struct iommu_domain *rk_iommu_domain_alloc(unsigned type)
{
	struct rk_iommu_domain *rk_domain;
	struct platform_device *pdev;
	struct device *iommu_dev;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	/*
	 * Register a pdev per domain, so the DMA API has a *dev to work
	 * with even when a virtual master doesn't have an iommu slave.
	 */
	pdev = platform_device_register_simple("rk_iommu_domain",
					       PLATFORM_DEVID_AUTO, NULL, 0);
	if (IS_ERR(pdev))
		return NULL;

	rk_domain = devm_kzalloc(&pdev->dev, sizeof(*rk_domain), GFP_KERNEL);
	if (!rk_domain)
		goto err_unreg_pdev;

	rk_domain->pdev = pdev;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&rk_domain->domain))
		goto err_unreg_pdev;

	/*
	 * rk32xx iommus use a 2 level pagetable.
	 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
	 * Allocate one 4 KiB page for each table.
	 */
	rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!rk_domain->dt)
		goto err_put_cookie;

	iommu_dev = &pdev->dev;
	rk_domain->dt_dma = dma_map_single(iommu_dev, rk_domain->dt,
					   SPAGE_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(iommu_dev, rk_domain->dt_dma)) {
		dev_err(iommu_dev, "DMA map error for DT\n");
		goto err_free_dt;
	}

	rk_table_flush(rk_domain, rk_domain->dt_dma, NUM_DT_ENTRIES);

	spin_lock_init(&rk_domain->iommus_lock);
	spin_lock_init(&rk_domain->dt_lock);
	INIT_LIST_HEAD(&rk_domain->iommus);

	rk_domain->domain.geometry.aperture_start = 0;
	rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32);
	rk_domain->domain.geometry.force_aperture = true;

	return &rk_domain->domain;

err_free_dt:
	free_page((unsigned long)rk_domain->dt);
err_put_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);
err_unreg_pdev:
	platform_device_unregister(pdev);

	return NULL;
}

static void rk_iommu_domain_free(struct iommu_domain *domain)
{
	struct rk_iommu_domain *rk_domain = to_rk_domain(domain);
	int i;

	WARN_ON(!list_empty(&rk_domain->iommus));

	for (i = 0; i < NUM_DT_ENTRIES; i++) {
		u32 dte = rk_domain->dt[i];

		if (rk_dte_is_pt_valid(dte)) {
			phys_addr_t pt_phys = rk_dte_pt_address(dte);
			u32 *page_table = phys_to_virt(pt_phys);

			dma_unmap_single(&rk_domain->pdev->dev, pt_phys,
					 SPAGE_SIZE, DMA_TO_DEVICE);
			free_page((unsigned long)page_table);
		}
	}

	dma_unmap_single(&rk_domain->pdev->dev, rk_domain->dt_dma,
			 SPAGE_SIZE, DMA_TO_DEVICE);
	free_page((unsigned long)rk_domain->dt);

	if (domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&rk_domain->domain);

	platform_device_unregister(rk_domain->pdev);
}

static bool rk_iommu_is_dev_iommu_master(struct device *dev)
{
	struct device_node *np = dev->of_node;
	int ret;

	/*
	 * An iommu master has an iommus property containing a list of phandles
	 * to iommu nodes, each with an #iommu-cells property with value 0.
	 */
	ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
	return (ret > 0);
}
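
/*
 * Illustrative device tree fragment (node names and addresses are
 * hypothetical, not taken from this driver) showing such a master/iommu
 * pair under the rockchip,iommu binding:
 *
 *	vop_mmu: iommu@ff930300 {
 *		compatible = "rockchip,iommu";
 *		reg = <0xff930300 0x100>;
 *		...
 *		#iommu-cells = <0>;
 *	};
 *
 *	vop: vop@ff930000 {
 *		...
 *		iommus = <&vop_mmu>;
 *	};
 */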

static int rk_iommu_group_set_iommudata(struct iommu_group *group,
					struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct platform_device *pd;
	int ret;
	struct of_phandle_args args;

	/*
	 * An iommu master has an iommus property containing a list of phandles
	 * to iommu nodes, each with an #iommu-cells property with value 0.
	 */
	ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
					 &args);
	if (ret) {
		dev_err(dev, "of_parse_phandle_with_args(%pOF) => %d\n",
			np, ret);
		return ret;
	}
	if (args.args_count != 0) {
		dev_err(dev, "incorrect number of iommu params found for %pOF (found %d, expected 0)\n",
			args.np, args.args_count);
		return -EINVAL;
	}

	pd = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!pd) {
		dev_err(dev, "iommu %pOF not found\n", args.np);
		return -EPROBE_DEFER;
	}

	/* TODO(djkurtz): handle multiple slave iommus for a single master */
	iommu_group_set_iommudata(group, &pd->dev, NULL);

	return 0;
}

static int rk_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	struct rk_iommu *iommu;
	int ret;

	if (!rk_iommu_is_dev_iommu_master(dev))
		return -ENODEV;

	group = iommu_group_get(dev);
	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	if (ret)
		goto err_put_group;

	ret = rk_iommu_group_set_iommudata(group, dev);
	if (ret)
		goto err_remove_device;

	iommu = rk_iommu_from_dev(dev);
	if (iommu)
		iommu_device_link(&iommu->iommu, dev);

	iommu_group_put(group);

	return 0;

err_remove_device:
	iommu_group_remove_device(dev);
err_put_group:
	iommu_group_put(group);
	return ret;
}

static void rk_iommu_remove_device(struct device *dev)
{
	struct rk_iommu *iommu;

	if (!rk_iommu_is_dev_iommu_master(dev))
		return;

	iommu = rk_iommu_from_dev(dev);
	if (iommu)
		iommu_device_unlink(&iommu->iommu, dev);

	iommu_group_remove_device(dev);
}

static const struct iommu_ops rk_iommu_ops = {
	.domain_alloc = rk_iommu_domain_alloc,
	.domain_free = rk_iommu_domain_free,
	.attach_dev = rk_iommu_attach_device,
	.detach_dev = rk_iommu_detach_device,
	.map = rk_iommu_map,
	.unmap = rk_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.add_device = rk_iommu_add_device,
	.remove_device = rk_iommu_remove_device,
	.iova_to_phys = rk_iommu_iova_to_phys,
	.pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
};

static int rk_iommu_domain_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
	if (!dev->dma_parms)
		return -ENOMEM;

	/* Set dma_ops for dev, otherwise it would be dummy_dma_ops */
	arch_setup_dma_ops(dev, 0, DMA_BIT_MASK(32), NULL, false);

	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(32));

	return 0;
}

static struct platform_driver rk_iommu_domain_driver = {
	.probe = rk_iommu_domain_probe,
	.driver = {
		   .name = "rk_iommu_domain",
	},
};

static int rk_iommu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rk_iommu *iommu;
	struct resource *res;
	int num_res = pdev->num_resources;
	int err, i, irq;

	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;

	iommu->bases = devm_kzalloc(dev, sizeof(*iommu->bases) * num_res,
				    GFP_KERNEL);
	if (!iommu->bases)
		return -ENOMEM;

	for (i = 0; i < num_res; i++) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res)
			continue;
		iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(iommu->bases[i]))
			continue;
		iommu->num_mmu++;
	}
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);

	i = 0;
	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO) {
		if (irq < 0)
			return irq;

		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);
		if (err)
			return err;
	}

	iommu->reset_disabled = device_property_read_bool(dev,
					"rockchip,disable-mmu-reset");

	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	if (err)
		return err;

	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
	err = iommu_device_register(&iommu->iommu);
	if (err)
		iommu_device_sysfs_remove(&iommu->iommu);

	return err;
}

static void rk_iommu_shutdown(struct platform_device *pdev)
{
	struct rk_iommu *iommu = platform_get_drvdata(pdev);

	/*
	 * Be careful not to try to shutdown an otherwise unused
	 * IOMMU, as it is likely not to be clocked, and accessing it
	 * would just block. An IOMMU without a domain is likely to be
	 * unused, so let's use this as a (weak) guard.
	 */
	if (iommu && iommu->domain) {
		rk_iommu_enable_stall(iommu);
		rk_iommu_disable_paging(iommu);
		rk_iommu_force_reset(iommu);
	}
}

static const struct of_device_id rk_iommu_dt_ids[] = {
	{ .compatible = "rockchip,iommu" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);

static struct platform_driver rk_iommu_driver = {
	.probe = rk_iommu_probe,
	.shutdown = rk_iommu_shutdown,
	.driver = {
		   .name = "rk_iommu",
		   .of_match_table = rk_iommu_dt_ids,
		   .suppress_bind_attrs = true,
	},
};

static int __init rk_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, rk_iommu_dt_ids);
	if (!np)
		return 0;

	of_node_put(np);

	ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
	if (ret)
		return ret;

	ret = platform_driver_register(&rk_iommu_domain_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&rk_iommu_driver);
	if (ret)
		platform_driver_unregister(&rk_iommu_domain_driver);
	return ret;
}
subsys_initcall(rk_iommu_init);

MODULE_DESCRIPTION("IOMMU API for Rockchip");
MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
MODULE_ALIAS("platform:rockchip-iommu");
MODULE_LICENSE("GPL v2");