/*
 * IPMMU VMSA
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>

#include "io-pgtable.h"

#define IPMMU_CTX_MAX 1

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct list_head list;

	unsigned int num_utlbs;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];

	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	spinlock_t lock;			/* Protects mappings */
};

struct ipmmu_vmsa_archdata {
	struct ipmmu_vmsa_device *mmu;
	unsigned int *utlbs;
	unsigned int num_utlbs;
};

static DEFINE_SPINLOCK(ipmmu_devices_lock);
static LIST_HEAD(ipmmu_devices);

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Register Definitions
 */

#define IM_NS_ALIAS_OFFSET		0x800

#define IM_CTX_SIZE			0x40

#define IMCTR				0x0000
#define IMCTR_TRE			(1 << 17)
#define IMCTR_AFE			(1 << 16)
#define IMCTR_RTSEL_MASK		(3 << 4)
#define IMCTR_RTSEL_SHIFT		4
#define IMCTR_TREN			(1 << 3)
#define IMCTR_INTEN			(1 << 2)
#define IMCTR_FLUSH			(1 << 1)
#define IMCTR_MMUEN			(1 << 0)

#define IMCAAR				0x0004

#define IMTTBCR				0x0008
#define IMTTBCR_EAE			(1 << 31)
#define IMTTBCR_PMB			(1 << 30)
#define IMTTBCR_SH1_NON_SHAREABLE	(0 << 28)
#define IMTTBCR_SH1_OUTER_SHAREABLE	(2 << 28)
#define IMTTBCR_SH1_INNER_SHAREABLE	(3 << 28)
#define IMTTBCR_SH1_MASK		(3 << 28)
#define IMTTBCR_ORGN1_NC		(0 << 26)
#define IMTTBCR_ORGN1_WB_WA		(1 << 26)
#define IMTTBCR_ORGN1_WT		(2 << 26)
#define IMTTBCR_ORGN1_WB		(3 << 26)
#define IMTTBCR_ORGN1_MASK		(3 << 26)
#define IMTTBCR_IRGN1_NC		(0 << 24)
#define IMTTBCR_IRGN1_WB_WA		(1 << 24)
#define IMTTBCR_IRGN1_WT		(2 << 24)
#define IMTTBCR_IRGN1_WB		(3 << 24)
#define IMTTBCR_IRGN1_MASK		(3 << 24)
#define IMTTBCR_TSZ1_MASK		(7 << 16)
#define IMTTBCR_TSZ1_SHIFT		16
#define IMTTBCR_SH0_NON_SHAREABLE	(0 << 12)
#define IMTTBCR_SH0_OUTER_SHAREABLE	(2 << 12)
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)
#define IMTTBCR_SH0_MASK		(3 << 12)
#define IMTTBCR_ORGN0_NC		(0 << 10)
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)
#define IMTTBCR_ORGN0_WT		(2 << 10)
#define IMTTBCR_ORGN0_WB		(3 << 10)
#define IMTTBCR_ORGN0_MASK		(3 << 10)
#define IMTTBCR_IRGN0_NC		(0 << 8)
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)
#define IMTTBCR_IRGN0_WT		(2 << 8)
#define IMTTBCR_IRGN0_WB		(3 << 8)
#define IMTTBCR_IRGN0_MASK		(3 << 8)
#define IMTTBCR_SL0_LVL_2		(0 << 4)
#define IMTTBCR_SL0_LVL_1		(1 << 4)
#define IMTTBCR_TSZ0_MASK		(7 << 0)
#define IMTTBCR_TSZ0_SHIFT		0

#define IMBUSCR				0x000c
#define IMBUSCR_DVM			(1 << 2)
#define IMBUSCR_BUSSEL_SYS		(0 << 0)
#define IMBUSCR_BUSSEL_CCI		(1 << 0)
#define IMBUSCR_BUSSEL_IMCAAR		(2 << 0)
#define IMBUSCR_BUSSEL_CCI_IMCAAR	(3 << 0)
#define IMBUSCR_BUSSEL_MASK		(3 << 0)

#define IMTTLBR0			0x0010
#define IMTTUBR0			0x0014
#define IMTTLBR1			0x0018
#define IMTTUBR1			0x001c

#define IMSTR				0x0020
#define IMSTR_ERRLVL_MASK		(3 << 12)
#define IMSTR_ERRLVL_SHIFT		12
#define IMSTR_ERRCODE_TLB_FORMAT	(1 << 8)
#define IMSTR_ERRCODE_ACCESS_PERM	(4 << 8)
#define IMSTR_ERRCODE_SECURE_ACCESS	(5 << 8)
#define IMSTR_ERRCODE_MASK		(7 << 8)
#define IMSTR_MHIT			(1 << 4)
#define IMSTR_ABORT			(1 << 2)
#define IMSTR_PF			(1 << 1)
#define IMSTR_TF			(1 << 0)

#define IMMAIR0				0x0028
#define IMMAIR1				0x002c
#define IMMAIR_ATTR_MASK		0xff
#define IMMAIR_ATTR_DEVICE		0x04
#define IMMAIR_ATTR_NC			0x44
#define IMMAIR_ATTR_WBRWA		0xff
#define IMMAIR_ATTR_SHIFT(n)		((n) << 3)
#define IMMAIR_ATTR_IDX_NC		0
#define IMMAIR_ATTR_IDX_WBRWA		1
#define IMMAIR_ATTR_IDX_DEV		2

#define IMEAR				0x0030

#define IMPCTR				0x0200
#define IMPSTR				0x0208
#define IMPEAR				0x020c
#define IMPMBA(n)			(0x0280 + ((n) * 4))
#define IMPMBD(n)			(0x02c0 + ((n) * 4))

#define IMUCTR(n)			(0x0300 + ((n) * 16))
#define IMUCTR_FIXADDEN			(1 << 31)
#define IMUCTR_FIXADD_MASK		(0xff << 16)
#define IMUCTR_FIXADD_SHIFT		16
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)
#define IMUCTR_TTSEL_PMB		(8 << 4)
#define IMUCTR_TTSEL_MASK		(15 << 4)
#define IMUCTR_FLUSH			(1 << 1)
#define IMUCTR_MMUEN			(1 << 0)

#define IMUASID(n)			(0x0308 + ((n) * 16))
#define IMUASID_ASID8_MASK		(0xff << 8)
#define IMUASID_ASID8_SHIFT		8
#define IMUASID_ASID0_MASK		(0xff << 0)
#define IMUASID_ASID0_SHIFT		0

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

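/*
 * Context registers are banked: the registers for context n live at offset
 * n * IM_CTX_SIZE from the context register base.
 */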
static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
{
	return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
			    u32 data)
{
	ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete. */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
					    "TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_write(mmu, IMUASID(utlb), 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_write(mmu, IMUCTR(utlb),
		    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
		    IMUCTR_MMUEN);
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_write(mmu, IMUCTR(utlb), 0);
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	/* The hardware doesn't support selective TLB flush. */
}

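/*
 * As the hardware only supports full TLB invalidation, .tlb_add_flush is a
 * no-op and .tlb_sync falls back to a full flush.
 */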
static struct iommu_gather_ops ipmmu_gather_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_add_flush = ipmmu_tlb_add_flush,
	.tlb_sync = ipmmu_tlb_flush_all,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

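/*
 * Find a free context slot and reserve it for @domain. Returns the context
 * index, or IPMMU_CTX_MAX if all contexts are in use.
 */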
static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, IPMMU_CTX_MAX);
	if (ret != IPMMU_CTX_MAX) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_gather_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.iommu_dev = domain->mmu->dev;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop)
		return -EINVAL;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu, domain);
	if (ret == IPMMU_CTX_MAX) {
		free_io_pgtable_ops(domain->iop);
		return -EBUSY;
	}

	domain->context_id = ret;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
	ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors with inner-shareable WBWA tables and allocate
	 * the whole 32-bit VA space to TTBR0.
	 */
	ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
			IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
			IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);

	/* MAIR0 */
	ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);

	/* IMBUSCR */
	ipmmu_ctx_write(domain, IMBUSCR,
			ipmmu_ctx_read(domain, IMBUSCR) &
			~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);

	return 0;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

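/*
 * Handle faults reported by a single context: read and clear the status
 * flags, log fatal errors, and forward page and translation faults to the
 * IOMMU core.
 */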
static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	u32 status;
	u32 iova;

	status = ipmmu_ctx_read(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read(domain, IMEAR);

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%08x\n",
			    status, iova);

	return IRQ_HANDLED;
}

static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < IPMMU_CTX_MAX; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	spin_lock_init(&domain->lock);

	return &domain->io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
	struct ipmmu_vmsa_device *mmu = archdata->mmu;
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned long flags;
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&domain->lock, flags);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (ret < 0)
		return ret;

	for (i = 0; i < archdata->num_utlbs; ++i)
		ipmmu_utlb_enable(domain, archdata->utlbs[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < archdata->num_utlbs; ++i)
		ipmmu_utlb_disable(domain, archdata->utlbs[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

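/*
 * Retrieve the microTLB numbers for @dev from its "iommus" OF property,
 * verifying that every entry references @mmu with a single argument cell.
 */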
static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
			    unsigned int *utlbs, unsigned int num_utlbs)
{
	unsigned int i;

	for (i = 0; i < num_utlbs; ++i) {
		struct of_phandle_args args;
		int ret;

		ret = of_parse_phandle_with_args(dev->of_node, "iommus",
						 "#iommu-cells", i, &args);
		if (ret < 0)
			return ret;

		of_node_put(args.np);

		if (args.np != mmu->dev->of_node || args.args_count != 1)
			return -EINVAL;

		utlbs[i] = args.args[0];
	}

	return 0;
}

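/*
 * Bind a bus master to its IPMMU: look up the IPMMU instance and microTLB
 * numbers from DT, create an IOMMU group and attach an ARM DMA mapping.
 */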
static int ipmmu_add_device(struct device *dev)
{
	struct ipmmu_vmsa_archdata *archdata;
	struct ipmmu_vmsa_device *mmu;
	struct iommu_group *group = NULL;
	unsigned int *utlbs;
	unsigned int i;
	int num_utlbs;
	int ret = -ENODEV;

	if (dev->archdata.iommu) {
		dev_warn(dev, "IOMMU driver already assigned to device %s\n",
			 dev_name(dev));
		return -EINVAL;
	}

	/* Find the master corresponding to the device. */

	num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus",
					       "#iommu-cells");
	if (num_utlbs < 0)
		return -ENODEV;

	utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL);
	if (!utlbs)
		return -ENOMEM;

	spin_lock(&ipmmu_devices_lock);

	list_for_each_entry(mmu, &ipmmu_devices, list) {
		ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs);
		if (!ret) {
			/*
			 * TODO Take a reference to the MMU to protect
			 * against device removal.
			 */
			break;
		}
	}

	spin_unlock(&ipmmu_devices_lock);

	if (ret < 0)
		goto error;

	for (i = 0; i < num_utlbs; ++i) {
		if (utlbs[i] >= mmu->num_utlbs) {
			ret = -EINVAL;
			goto error;
		}
	}

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		ret = PTR_ERR(group);
		goto error;
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		group = NULL;
		goto error;
	}

	archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
	if (!archdata) {
		ret = -ENOMEM;
		goto error;
	}

	archdata->mmu = mmu;
	archdata->utlbs = utlbs;
	archdata->num_utlbs = num_utlbs;
	dev->archdata.iommu = archdata;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	arm_iommu_release_mapping(mmu->mapping);

	kfree(dev->archdata.iommu);
	kfree(utlbs);

	dev->archdata.iommu = NULL;

	if (!IS_ERR_OR_NULL(group))
		iommu_group_remove_device(dev);

	return ret;
}

static void ipmmu_remove_device(struct device *dev)
{
	struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu;

	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);

	kfree(archdata->utlbs);
	kfree(archdata);

	dev->archdata.iommu = NULL;
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
};

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

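/*
 * Disable all four hardware contexts at reset, even though the driver
 * currently only ever allocates IPMMU_CTX_MAX of them.
 */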
static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < 4; ++i)
		ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	mmu->num_utlbs = 32;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	mmu->base += IM_NS_ALIAS_OFFSET;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ found\n");
		return irq;
	}

	ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
			       dev_name(&pdev->dev), mmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
		return ret;
	}

	ipmmu_device_reset(mmu);

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	spin_lock(&ipmmu_devices_lock);
	list_add(&mmu->list, &ipmmu_devices);
	spin_unlock(&ipmmu_devices_lock);

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	spin_lock(&ipmmu_devices_lock);
	list_del(&mmu->list);
	spin_unlock(&ipmmu_devices_lock);

	arm_iommu_release_mapping(mmu->mapping);

	ipmmu_device_reset(mmu);

	return 0;
}

static const struct of_device_id ipmmu_of_ids[] = {
	{ .compatible = "renesas,ipmmu-vmsa", },
	{ }
};

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};

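/*
 * Register the platform driver, then install the IOMMU ops on the platform
 * bus so that devices probed afterwards are handled by the IOMMU core.
 */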
static int __init ipmmu_init(void)
{
	int ret;

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);

	return 0;
}

static void __exit ipmmu_exit(void)
{
	return platform_driver_unregister(&ipmmu_driver);
}

subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);

MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");