/*
 * IPMMU VMSA
 *
 * Copyright (C) 2014 Renesas Electronics Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/bitmap.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
#include <asm/dma-iommu.h>
#include <asm/pgalloc.h>
#endif

#include "io-pgtable.h"

#define IPMMU_CTX_MAX 1

struct ipmmu_vmsa_device {
	struct device *dev;
	void __iomem *base;
	struct list_head list;

	unsigned int num_utlbs;
	spinlock_t lock;			/* Protects ctx and domains[] */
	DECLARE_BITMAP(ctx, IPMMU_CTX_MAX);
	struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX];

	struct dma_iommu_mapping *mapping;
};

struct ipmmu_vmsa_domain {
	struct ipmmu_vmsa_device *mmu;
	struct iommu_domain io_domain;

	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops *iop;

	unsigned int context_id;
	spinlock_t lock;			/* Protects mappings */
};

struct ipmmu_vmsa_iommu_priv {
	struct ipmmu_vmsa_device *mmu;
	unsigned int *utlbs;
	unsigned int num_utlbs;
	struct device *dev;
	struct list_head list;
};

static DEFINE_SPINLOCK(ipmmu_devices_lock);
static LIST_HEAD(ipmmu_devices);

static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct ipmmu_vmsa_domain, io_domain);
}

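/*
 * Per-device driver data lives in dev->archdata.iommu on ARM and in the
 * generic iommu_fwspec on other architectures. These two helpers hide that
 * difference from the rest of the driver.
 */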
static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev)
{
#if defined(CONFIG_ARM)
	return dev->archdata.iommu;
#else
	return dev->iommu_fwspec->iommu_priv;
#endif
}

static void set_priv(struct device *dev, struct ipmmu_vmsa_iommu_priv *p)
{
#if defined(CONFIG_ARM)
	dev->archdata.iommu = p;
#else
	dev->iommu_fwspec->iommu_priv = p;
#endif
}

#define TLB_LOOP_TIMEOUT		100	/* 100us */

/* -----------------------------------------------------------------------------
 * Registers Definition
 */

#define IM_NS_ALIAS_OFFSET		0x800

#define IM_CTX_SIZE			0x40

#define IMCTR				0x0000
#define IMCTR_TRE			(1 << 17)
#define IMCTR_AFE			(1 << 16)
#define IMCTR_RTSEL_MASK		(3 << 4)
#define IMCTR_RTSEL_SHIFT		4
#define IMCTR_TREN			(1 << 3)
#define IMCTR_INTEN			(1 << 2)
#define IMCTR_FLUSH			(1 << 1)
#define IMCTR_MMUEN			(1 << 0)

#define IMCAAR				0x0004

#define IMTTBCR				0x0008
#define IMTTBCR_EAE			(1 << 31)
#define IMTTBCR_PMB			(1 << 30)
#define IMTTBCR_SH1_NON_SHAREABLE	(0 << 28)
#define IMTTBCR_SH1_OUTER_SHAREABLE	(2 << 28)
#define IMTTBCR_SH1_INNER_SHAREABLE	(3 << 28)
#define IMTTBCR_SH1_MASK		(3 << 28)
#define IMTTBCR_ORGN1_NC		(0 << 26)
#define IMTTBCR_ORGN1_WB_WA		(1 << 26)
#define IMTTBCR_ORGN1_WT		(2 << 26)
#define IMTTBCR_ORGN1_WB		(3 << 26)
#define IMTTBCR_ORGN1_MASK		(3 << 26)
#define IMTTBCR_IRGN1_NC		(0 << 24)
#define IMTTBCR_IRGN1_WB_WA		(1 << 24)
#define IMTTBCR_IRGN1_WT		(2 << 24)
#define IMTTBCR_IRGN1_WB		(3 << 24)
#define IMTTBCR_IRGN1_MASK		(3 << 24)
#define IMTTBCR_TSZ1_MASK		(7 << 16)
#define IMTTBCR_TSZ1_SHIFT		16
#define IMTTBCR_SH0_NON_SHAREABLE	(0 << 12)
#define IMTTBCR_SH0_OUTER_SHAREABLE	(2 << 12)
#define IMTTBCR_SH0_INNER_SHAREABLE	(3 << 12)
#define IMTTBCR_SH0_MASK		(3 << 12)
#define IMTTBCR_ORGN0_NC		(0 << 10)
#define IMTTBCR_ORGN0_WB_WA		(1 << 10)
#define IMTTBCR_ORGN0_WT		(2 << 10)
#define IMTTBCR_ORGN0_WB		(3 << 10)
#define IMTTBCR_ORGN0_MASK		(3 << 10)
#define IMTTBCR_IRGN0_NC		(0 << 8)
#define IMTTBCR_IRGN0_WB_WA		(1 << 8)
#define IMTTBCR_IRGN0_WT		(2 << 8)
#define IMTTBCR_IRGN0_WB		(3 << 8)
#define IMTTBCR_IRGN0_MASK		(3 << 8)
#define IMTTBCR_SL0_LVL_2		(0 << 4)
#define IMTTBCR_SL0_LVL_1		(1 << 4)
#define IMTTBCR_TSZ0_MASK		(7 << 0)
#define IMTTBCR_TSZ0_SHIFT		0

#define IMBUSCR				0x000c
#define IMBUSCR_DVM			(1 << 2)
#define IMBUSCR_BUSSEL_SYS		(0 << 0)
#define IMBUSCR_BUSSEL_CCI		(1 << 0)
#define IMBUSCR_BUSSEL_IMCAAR		(2 << 0)
#define IMBUSCR_BUSSEL_CCI_IMCAAR	(3 << 0)
#define IMBUSCR_BUSSEL_MASK		(3 << 0)

#define IMTTLBR0			0x0010
#define IMTTUBR0			0x0014
#define IMTTLBR1			0x0018
#define IMTTUBR1			0x001c

#define IMSTR				0x0020
#define IMSTR_ERRLVL_MASK		(3 << 12)
#define IMSTR_ERRLVL_SHIFT		12
#define IMSTR_ERRCODE_TLB_FORMAT	(1 << 8)
#define IMSTR_ERRCODE_ACCESS_PERM	(4 << 8)
#define IMSTR_ERRCODE_SECURE_ACCESS	(5 << 8)
#define IMSTR_ERRCODE_MASK		(7 << 8)
#define IMSTR_MHIT			(1 << 4)
#define IMSTR_ABORT			(1 << 2)
#define IMSTR_PF			(1 << 1)
#define IMSTR_TF			(1 << 0)

#define IMMAIR0				0x0028
#define IMMAIR1				0x002c
#define IMMAIR_ATTR_MASK		0xff
#define IMMAIR_ATTR_DEVICE		0x04
#define IMMAIR_ATTR_NC			0x44
#define IMMAIR_ATTR_WBRWA		0xff
#define IMMAIR_ATTR_SHIFT(n)		((n) << 3)
#define IMMAIR_ATTR_IDX_NC		0
#define IMMAIR_ATTR_IDX_WBRWA		1
#define IMMAIR_ATTR_IDX_DEV		2

#define IMEAR				0x0030

#define IMPCTR				0x0200
#define IMPSTR				0x0208
#define IMPEAR				0x020c
#define IMPMBA(n)			(0x0280 + ((n) * 4))
#define IMPMBD(n)			(0x02c0 + ((n) * 4))

#define IMUCTR(n)			(0x0300 + ((n) * 16))
#define IMUCTR_FIXADDEN			(1 << 31)
#define IMUCTR_FIXADD_MASK		(0xff << 16)
#define IMUCTR_FIXADD_SHIFT		16
#define IMUCTR_TTSEL_MMU(n)		((n) << 4)
#define IMUCTR_TTSEL_PMB		(8 << 4)
#define IMUCTR_TTSEL_MASK		(15 << 4)
#define IMUCTR_FLUSH			(1 << 1)
#define IMUCTR_MMUEN			(1 << 0)

#define IMUASID(n)			(0x0308 + ((n) * 16))
#define IMUASID_ASID8_MASK		(0xff << 8)
#define IMUASID_ASID8_SHIFT		8
#define IMUASID_ASID0_MASK		(0xff << 0)
#define IMUASID_ASID0_SHIFT		0

/* -----------------------------------------------------------------------------
 * Read/Write Access
 */

static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset)
{
	return ioread32(mmu->base + offset);
}

static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset,
			u32 data)
{
	iowrite32(data, mmu->base + offset);
}

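/*
 * Context registers are banked: context n occupies the IM_CTX_SIZE (0x40)
 * byte window starting at n * IM_CTX_SIZE, so the two helpers below simply
 * offset the register address by the domain's context window.
 */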
static u32 ipmmu_ctx_read(struct ipmmu_vmsa_domain *domain, unsigned int reg)
{
	return ipmmu_read(domain->mmu, domain->context_id * IM_CTX_SIZE + reg);
}

static void ipmmu_ctx_write(struct ipmmu_vmsa_domain *domain, unsigned int reg,
			    u32 data)
{
	ipmmu_write(domain->mmu, domain->context_id * IM_CTX_SIZE + reg, data);
}

/* -----------------------------------------------------------------------------
 * TLB and microTLB Management
 */

/* Wait for any pending TLB invalidations to complete */
static void ipmmu_tlb_sync(struct ipmmu_vmsa_domain *domain)
{
	unsigned int count = 0;

	while (ipmmu_ctx_read(domain, IMCTR) & IMCTR_FLUSH) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(domain->mmu->dev,
					    "TLB sync timed out -- MMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void ipmmu_tlb_invalidate(struct ipmmu_vmsa_domain *domain)
{
	u32 reg;

	reg = ipmmu_ctx_read(domain, IMCTR);
	reg |= IMCTR_FLUSH;
	ipmmu_ctx_write(domain, IMCTR, reg);

	ipmmu_tlb_sync(domain);
}

/*
 * Enable MMU translation for the microTLB.
 */
static void ipmmu_utlb_enable(struct ipmmu_vmsa_domain *domain,
			      unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	/*
	 * TODO: Reference-count the microTLB as several bus masters can be
	 * connected to the same microTLB.
	 */

	/* TODO: What should we set the ASID to ? */
	ipmmu_write(mmu, IMUASID(utlb), 0);
	/* TODO: Do we need to flush the microTLB ? */
	ipmmu_write(mmu, IMUCTR(utlb),
		    IMUCTR_TTSEL_MMU(domain->context_id) | IMUCTR_FLUSH |
		    IMUCTR_MMUEN);
}

/*
 * Disable MMU translation for the microTLB.
 */
static void ipmmu_utlb_disable(struct ipmmu_vmsa_domain *domain,
			       unsigned int utlb)
{
	struct ipmmu_vmsa_device *mmu = domain->mmu;

	ipmmu_write(mmu, IMUCTR(utlb), 0);
}

static void ipmmu_tlb_flush_all(void *cookie)
{
	struct ipmmu_vmsa_domain *domain = cookie;

	ipmmu_tlb_invalidate(domain);
}

static void ipmmu_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	/* The hardware doesn't support selective TLB flush. */
}

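/*
 * io-pgtable callbacks. As the hardware only supports full invalidation,
 * ipmmu_tlb_add_flush() is a no-op and both tlb_flush_all and tlb_sync map
 * to a full TLB invalidate.
 */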
static struct iommu_gather_ops ipmmu_gather_ops = {
	.tlb_flush_all = ipmmu_tlb_flush_all,
	.tlb_add_flush = ipmmu_tlb_add_flush,
	.tlb_sync = ipmmu_tlb_flush_all,
};

/* -----------------------------------------------------------------------------
 * Domain/Context Management
 */

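/*
 * Allocate a free context for the given domain, under mmu->lock. Returns the
 * context index, or IPMMU_CTX_MAX if all contexts are in use.
 */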
static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu,
					 struct ipmmu_vmsa_domain *domain)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&mmu->lock, flags);

	ret = find_first_zero_bit(mmu->ctx, IPMMU_CTX_MAX);
	if (ret != IPMMU_CTX_MAX) {
		mmu->domains[ret] = domain;
		set_bit(ret, mmu->ctx);
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return ret;
}

static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
{
	u64 ttbr;
	int ret;

	/*
	 * Allocate the page table operations.
	 *
	 * VMSA states in section B3.6.3 "Control of Secure or Non-secure memory
	 * access, Long-descriptor format" that the NStable bit being set in a
	 * table descriptor will result in the NStable and NS bits of all child
	 * entries being ignored and considered as being set. The IPMMU seems
	 * not to comply with this, as it generates a secure access page fault
	 * if any of the NStable and NS bits isn't set when running in
	 * non-secure mode.
	 */
	domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS;
	domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K;
	domain->cfg.ias = 32;
	domain->cfg.oas = 40;
	domain->cfg.tlb = &ipmmu_gather_ops;
	domain->io_domain.geometry.aperture_end = DMA_BIT_MASK(32);
	domain->io_domain.geometry.force_aperture = true;
	/*
	 * TODO: Add support for coherent walk through CCI with DVM and remove
	 * cache handling. For now, delegate it to the io-pgtable code.
	 */
	domain->cfg.iommu_dev = domain->mmu->dev;

	domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
					   domain);
	if (!domain->iop)
		return -EINVAL;

	/*
	 * Find an unused context.
	 */
	ret = ipmmu_domain_allocate_context(domain->mmu, domain);
	if (ret == IPMMU_CTX_MAX) {
		free_io_pgtable_ops(domain->iop);
		return -EBUSY;
	}

	domain->context_id = ret;

	/* TTBR0 */
	ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0];
	ipmmu_ctx_write(domain, IMTTLBR0, ttbr);
	ipmmu_ctx_write(domain, IMTTUBR0, ttbr >> 32);

	/*
	 * TTBCR
	 * We use long descriptors with inner-shareable WBWA tables and allocate
	 * the whole 32-bit VA space to TTBR0.
	 */
	ipmmu_ctx_write(domain, IMTTBCR, IMTTBCR_EAE |
			IMTTBCR_SH0_INNER_SHAREABLE | IMTTBCR_ORGN0_WB_WA |
			IMTTBCR_IRGN0_WB_WA | IMTTBCR_SL0_LVL_1);

	/* MAIR0 */
	ipmmu_ctx_write(domain, IMMAIR0, domain->cfg.arm_lpae_s1_cfg.mair[0]);

	/* IMBUSCR */
	ipmmu_ctx_write(domain, IMBUSCR,
			ipmmu_ctx_read(domain, IMBUSCR) &
			~(IMBUSCR_DVM | IMBUSCR_BUSSEL_MASK));

	/*
	 * IMSTR
	 * Clear all interrupt flags.
	 */
	ipmmu_ctx_write(domain, IMSTR, ipmmu_ctx_read(domain, IMSTR));

	/*
	 * IMCTR
	 * Enable the MMU and interrupt generation. The long-descriptor
	 * translation table format doesn't use TEX remapping. Don't enable AF
	 * software management as we have no use for it. Flush the TLB as
	 * required when modifying the context registers.
	 */
	ipmmu_ctx_write(domain, IMCTR, IMCTR_INTEN | IMCTR_FLUSH | IMCTR_MMUEN);

	return 0;
}

static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu,
				      unsigned int context_id)
{
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	clear_bit(context_id, mmu->ctx);
	mmu->domains[context_id] = NULL;

	spin_unlock_irqrestore(&mmu->lock, flags);
}

static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain)
{
	/*
	 * Disable the context. Flush the TLB as required when modifying the
	 * context registers.
	 *
	 * TODO: Is TLB flush really needed ?
	 */
	ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH);
	ipmmu_tlb_sync(domain);
	ipmmu_domain_free_context(domain->mmu, domain->context_id);
}

/* -----------------------------------------------------------------------------
 * Fault Handling
 */

static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain)
{
	const u32 err_mask = IMSTR_MHIT | IMSTR_ABORT | IMSTR_PF | IMSTR_TF;
	struct ipmmu_vmsa_device *mmu = domain->mmu;
	u32 status;
	u32 iova;

	status = ipmmu_ctx_read(domain, IMSTR);
	if (!(status & err_mask))
		return IRQ_NONE;

	iova = ipmmu_ctx_read(domain, IMEAR);

	/*
	 * Clear the error status flags. Unlike traditional interrupt flag
	 * registers that must be cleared by writing 1, this status register
	 * seems to require 0. The error address register must be read before,
	 * otherwise its value will be 0.
	 */
	ipmmu_ctx_write(domain, IMSTR, 0);

	/* Log fatal errors. */
	if (status & IMSTR_MHIT)
		dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%08x\n",
				    iova);
	if (status & IMSTR_ABORT)
		dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%08x\n",
				    iova);

	if (!(status & (IMSTR_PF | IMSTR_TF)))
		return IRQ_NONE;

	/*
	 * Try to handle page faults and translation faults.
	 *
	 * TODO: We need to look up the faulty device based on the I/O VA. Use
	 * the IOMMU device for now.
	 */
	if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0))
		return IRQ_HANDLED;

	dev_err_ratelimited(mmu->dev,
			    "Unhandled fault: status 0x%08x iova 0x%08x\n",
			    status, iova);

	return IRQ_HANDLED;
}

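/*
 * All contexts share a single IRQ line, so the top-level handler walks every
 * active domain under mmu->lock and lets each one check its own status
 * register.
 */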
static irqreturn_t ipmmu_irq(int irq, void *dev)
{
	struct ipmmu_vmsa_device *mmu = dev;
	irqreturn_t status = IRQ_NONE;
	unsigned int i;
	unsigned long flags;

	spin_lock_irqsave(&mmu->lock, flags);

	/*
	 * Check interrupts for all active contexts.
	 */
	for (i = 0; i < IPMMU_CTX_MAX; i++) {
		if (!mmu->domains[i])
			continue;
		if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED)
			status = IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&mmu->lock, flags);

	return status;
}

/* -----------------------------------------------------------------------------
 * IOMMU Operations
 */

static struct iommu_domain *__ipmmu_domain_alloc(unsigned type)
{
	struct ipmmu_vmsa_domain *domain;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	spin_lock_init(&domain->lock);

	return &domain->io_domain;
}

static void ipmmu_domain_free(struct iommu_domain *io_domain)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/*
	 * Free the domain resources. We assume that all devices have already
	 * been detached.
	 */
	ipmmu_domain_destroy_context(domain);
	free_io_pgtable_ops(domain->iop);
	kfree(domain);
}

static int ipmmu_attach_device(struct iommu_domain *io_domain,
			       struct device *dev)
{
	struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
	struct ipmmu_vmsa_device *mmu = priv->mmu;
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned long flags;
	unsigned int i;
	int ret = 0;

	if (!mmu) {
		dev_err(dev, "Cannot attach to IPMMU\n");
		return -ENXIO;
	}

	spin_lock_irqsave(&domain->lock, flags);

	if (!domain->mmu) {
		/* The domain hasn't been used yet, initialize it. */
		domain->mmu = mmu;
		ret = ipmmu_domain_init_context(domain);
	} else if (domain->mmu != mmu) {
		/*
		 * Something is wrong, we can't attach two devices using
		 * different IOMMUs to the same domain.
		 */
		dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n",
			dev_name(mmu->dev), dev_name(domain->mmu->dev));
		ret = -EINVAL;
	} else
		dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id);

	spin_unlock_irqrestore(&domain->lock, flags);

	if (ret < 0)
		return ret;

	for (i = 0; i < priv->num_utlbs; ++i)
		ipmmu_utlb_enable(domain, priv->utlbs[i]);

	return 0;
}

static void ipmmu_detach_device(struct iommu_domain *io_domain,
				struct device *dev)
{
	struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);
	unsigned int i;

	for (i = 0; i < priv->num_utlbs; ++i)
		ipmmu_utlb_disable(domain, priv->utlbs[i]);

	/*
	 * TODO: Optimize by disabling the context when no device is attached.
	 */
}

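/*
 * The map/unmap/iova_to_phys operations below simply forward to the
 * io-pgtable code. A (hypothetical) caller would reach them through the
 * IOMMU core, e.g.:
 *
 *	iommu_map(io_domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	...
 *	iommu_unmap(io_domain, iova, SZ_4K);
 */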
static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
		     phys_addr_t paddr, size_t size, int prot)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	if (!domain)
		return -ENODEV;

	return domain->iop->map(domain->iop, iova, paddr, size, prot);
}

static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
			  size_t size)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	return domain->iop->unmap(domain->iop, iova, size);
}

static phys_addr_t ipmmu_iova_to_phys(struct iommu_domain *io_domain,
				      dma_addr_t iova)
{
	struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain);

	/* TODO: Is locking needed ? */

	return domain->iop->iova_to_phys(domain->iop, iova);
}

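/*
 * Bus masters reference the IPMMU through "iommus" phandles whose single
 * argument selects a micro-TLB, along the lines of this hypothetical DT
 * fragment (node names and numbers are illustrative only):
 *
 *	ipmmu: mmu@e6740000 {
 *		compatible = "renesas,ipmmu-vmsa";
 *		#iommu-cells = <1>;
 *	};
 *
 *	master {
 *		iommus = <&ipmmu 13>;
 *	};
 */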
static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev,
			    unsigned int *utlbs, unsigned int num_utlbs)
{
	unsigned int i;

	for (i = 0; i < num_utlbs; ++i) {
		struct of_phandle_args args;
		int ret;

		ret = of_parse_phandle_with_args(dev->of_node, "iommus",
						 "#iommu-cells", i, &args);
		if (ret < 0)
			return ret;

		of_node_put(args.np);

		if (args.np != mmu->dev->of_node || args.args_count != 1)
			return -EINVAL;

		utlbs[i] = args.args[0];
	}

	return 0;
}

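/*
 * Match the device's "iommus" phandles against the list of registered IPMMU
 * instances and record the resulting micro-TLB list as per-device data. On
 * success the priv structure takes ownership of the utlbs array.
 */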
static int ipmmu_init_platform_device(struct device *dev)
{
	struct ipmmu_vmsa_iommu_priv *priv;
	struct ipmmu_vmsa_device *mmu;
	unsigned int *utlbs;
	unsigned int i;
	int num_utlbs;
	int ret = -ENODEV;

	/* Find the master corresponding to the device. */

	num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus",
					       "#iommu-cells");
	if (num_utlbs < 0)
		return -ENODEV;

	utlbs = kcalloc(num_utlbs, sizeof(*utlbs), GFP_KERNEL);
	if (!utlbs)
		return -ENOMEM;

	spin_lock(&ipmmu_devices_lock);

	list_for_each_entry(mmu, &ipmmu_devices, list) {
		ret = ipmmu_find_utlbs(mmu, dev, utlbs, num_utlbs);
		if (!ret) {
			/*
			 * TODO Take a reference to the MMU to protect
			 * against device removal.
			 */
			break;
		}
	}

	spin_unlock(&ipmmu_devices_lock);

	if (ret < 0)
		goto error;

	for (i = 0; i < num_utlbs; ++i) {
		if (utlbs[i] >= mmu->num_utlbs) {
			ret = -EINVAL;
			goto error;
		}
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		ret = -ENOMEM;
		goto error;
	}

	priv->mmu = mmu;
	priv->utlbs = utlbs;
	priv->num_utlbs = num_utlbs;
	priv->dev = dev;
	set_priv(dev, priv);
	return 0;

error:
	kfree(utlbs);
	return ret;
}

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)

static struct iommu_domain *ipmmu_domain_alloc(unsigned type)
{
	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	return __ipmmu_domain_alloc(type);
}

static int ipmmu_add_device(struct device *dev)
{
	struct ipmmu_vmsa_device *mmu = NULL;
	struct iommu_group *group;
	int ret;

	if (to_priv(dev)) {
		dev_warn(dev, "IOMMU driver already assigned to device %s\n",
			 dev_name(dev));
		return -EINVAL;
	}

	/* Create a device group and add the device to it. */
	group = iommu_group_alloc();
	if (IS_ERR(group)) {
		dev_err(dev, "Failed to allocate IOMMU group\n");
		ret = PTR_ERR(group);
		goto error;
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	if (ret < 0) {
		dev_err(dev, "Failed to add device to IPMMU group\n");
		group = NULL;
		goto error;
	}

	ret = ipmmu_init_platform_device(dev);
	if (ret < 0)
		goto error;

	/*
	 * Create the ARM mapping, used by the ARM DMA mapping core to allocate
	 * VAs. This will allocate a corresponding IOMMU domain.
	 *
	 * TODO:
	 * - Create one mapping per context (TLB).
	 * - Make the mapping size configurable ? We currently use a 2GB mapping
	 *   at a 1GB offset to ensure that NULL VAs will fault.
	 */
	mmu = to_priv(dev)->mmu;
	if (!mmu->mapping) {
		struct dma_iommu_mapping *mapping;

		mapping = arm_iommu_create_mapping(&platform_bus_type,
						   SZ_1G, SZ_2G);
		if (IS_ERR(mapping)) {
			dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n");
			ret = PTR_ERR(mapping);
			goto error;
		}

		mmu->mapping = mapping;
	}

	/* Attach the ARM VA mapping to the device. */
	ret = arm_iommu_attach_device(dev, mmu->mapping);
	if (ret < 0) {
		dev_err(dev, "Failed to attach device to VA mapping\n");
		goto error;
	}

	return 0;

error:
	if (mmu)
		arm_iommu_release_mapping(mmu->mapping);

	if (!IS_ERR_OR_NULL(group))
		iommu_group_remove_device(dev);

	/*
	 * The per-device data may not have been set yet if we failed before
	 * or in ipmmu_init_platform_device(), so only free it when present.
	 */
	if (to_priv(dev)) {
		kfree(to_priv(dev)->utlbs);
		kfree(to_priv(dev));
		set_priv(dev, NULL);
	}

	return ret;
}

static void ipmmu_remove_device(struct device *dev)
{
	struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);

	arm_iommu_detach_device(dev);
	iommu_group_remove_device(dev);

	kfree(priv->utlbs);
	kfree(priv);

	set_priv(dev, NULL);
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc,
	.domain_free = ipmmu_domain_free,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device,
	.remove_device = ipmmu_remove_device,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
};

#endif /* CONFIG_ARM && !CONFIG_IOMMU_DMA */

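/*
 * Depending on the configuration, the driver exposes one of two iommu_ops
 * tables: the ARM 32-bit table above, built around arm_iommu_create_mapping(),
 * or the generic DMA variant below, built on the IOMMU DMA cookie API.
 */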
#ifdef CONFIG_IOMMU_DMA

static DEFINE_SPINLOCK(ipmmu_slave_devices_lock);
static LIST_HEAD(ipmmu_slave_devices);

static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type)
{
	struct iommu_domain *io_domain = NULL;

	switch (type) {
	case IOMMU_DOMAIN_UNMANAGED:
		io_domain = __ipmmu_domain_alloc(type);
		break;

	case IOMMU_DOMAIN_DMA:
		io_domain = __ipmmu_domain_alloc(type);
		if (io_domain)
			iommu_get_dma_cookie(io_domain);
		break;
	}

	return io_domain;
}

static void ipmmu_domain_free_dma(struct iommu_domain *io_domain)
{
	switch (io_domain->type) {
	case IOMMU_DOMAIN_DMA:
		iommu_put_dma_cookie(io_domain);
		/* fall-through */
	default:
		ipmmu_domain_free(io_domain);
		break;
	}
}

static int ipmmu_add_device_dma(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct iommu_group *group;

	/*
	 * Only let through devices that have been verified in xlate().
	 * We may get called with dev->iommu_fwspec set to NULL.
	 */
	if (!fwspec || !fwspec->iommu_priv)
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	spin_lock(&ipmmu_slave_devices_lock);
	list_add(&to_priv(dev)->list, &ipmmu_slave_devices);
	spin_unlock(&ipmmu_slave_devices_lock);
	return 0;
}

static void ipmmu_remove_device_dma(struct device *dev)
{
	struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);

	spin_lock(&ipmmu_slave_devices_lock);
	list_del(&priv->list);
	spin_unlock(&ipmmu_slave_devices_lock);

	iommu_group_remove_device(dev);
}

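/*
 * All slave devices behind the same IPMMU instance are expected to share one
 * iommu_group. The sibling lookup below finds an already-registered device
 * that uses the same IPMMU so its group can be reused.
 */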
static struct device *ipmmu_find_sibling_device(struct device *dev)
{
	struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev);
	struct ipmmu_vmsa_iommu_priv *sibling_priv = NULL;
	bool found = false;

	spin_lock(&ipmmu_slave_devices_lock);

	list_for_each_entry(sibling_priv, &ipmmu_slave_devices, list) {
		if (priv == sibling_priv)
			continue;
		if (sibling_priv->mmu == priv->mmu) {
			found = true;
			break;
		}
	}

	spin_unlock(&ipmmu_slave_devices_lock);

	return found ? sibling_priv->dev : NULL;
}

static struct iommu_group *ipmmu_find_group_dma(struct device *dev)
{
	struct iommu_group *group;
	struct device *sibling;

	sibling = ipmmu_find_sibling_device(dev);
	if (sibling)
		group = iommu_group_get(sibling);
	if (!sibling || IS_ERR(group))
		group = generic_device_group(dev);

	return group;
}

static int ipmmu_of_xlate_dma(struct device *dev,
			      struct of_phandle_args *spec)
{
	/*
	 * If the IPMMU device is disabled in DT then return error
	 * to make sure the of_iommu code does not install ops
	 * even though the iommu device is disabled.
	 */
	if (!of_device_is_available(spec->np))
		return -ENODEV;

	return ipmmu_init_platform_device(dev);
}

static const struct iommu_ops ipmmu_ops = {
	.domain_alloc = ipmmu_domain_alloc_dma,
	.domain_free = ipmmu_domain_free_dma,
	.attach_dev = ipmmu_attach_device,
	.detach_dev = ipmmu_detach_device,
	.map = ipmmu_map,
	.unmap = ipmmu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = ipmmu_iova_to_phys,
	.add_device = ipmmu_add_device_dma,
	.remove_device = ipmmu_remove_device_dma,
	.device_group = ipmmu_find_group_dma,
	.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K,
	.of_xlate = ipmmu_of_xlate_dma,
};

#endif /* CONFIG_IOMMU_DMA */

/* -----------------------------------------------------------------------------
 * Probe/remove and init
 */

static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu)
{
	unsigned int i;

	/* Disable all contexts. */
	for (i = 0; i < 4; ++i)
		ipmmu_write(mmu, i * IM_CTX_SIZE + IMCTR, 0);
}

static int ipmmu_probe(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu;
	struct resource *res;
	int irq;
	int ret;

	mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL);
	if (!mmu) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}

	mmu->dev = &pdev->dev;
	mmu->num_utlbs = 32;
	spin_lock_init(&mmu->lock);
	bitmap_zero(mmu->ctx, IPMMU_CTX_MAX);

	/* Map I/O memory and request IRQ. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mmu->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mmu->base))
		return PTR_ERR(mmu->base);

	/*
	 * The IPMMU has two register banks, for secure and non-secure modes.
	 * The bank mapped at the beginning of the IPMMU address space
	 * corresponds to the running mode of the CPU. When running in secure
	 * mode the non-secure register bank is also available at an offset.
	 *
	 * Secure mode operation isn't clearly documented and is thus currently
	 * not implemented in the driver. Furthermore, preliminary tests of
	 * non-secure operation with the main register bank were not successful.
	 * Offset the registers base unconditionally to point to the non-secure
	 * alias space for now.
	 */
	mmu->base += IM_NS_ALIAS_OFFSET;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no IRQ found\n");
		return irq;
	}

	ret = devm_request_irq(&pdev->dev, irq, ipmmu_irq, 0,
			       dev_name(&pdev->dev), mmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
		return ret;
	}

	ipmmu_device_reset(mmu);

	/*
	 * We can't create the ARM mapping here as it requires the bus to have
	 * an IOMMU, which only happens when bus_set_iommu() is called in
	 * ipmmu_init() after the probe function returns.
	 */

	spin_lock(&ipmmu_devices_lock);
	list_add(&mmu->list, &ipmmu_devices);
	spin_unlock(&ipmmu_devices_lock);

	platform_set_drvdata(pdev, mmu);

	return 0;
}

static int ipmmu_remove(struct platform_device *pdev)
{
	struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev);

	spin_lock(&ipmmu_devices_lock);
	list_del(&mmu->list);
	spin_unlock(&ipmmu_devices_lock);

#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA)
	arm_iommu_release_mapping(mmu->mapping);
#endif

	ipmmu_device_reset(mmu);

	return 0;
}

static const struct of_device_id ipmmu_of_ids[] = {
	{ .compatible = "renesas,ipmmu-vmsa", },
	{ }
};

static struct platform_driver ipmmu_driver = {
	.driver = {
		.name = "ipmmu-vmsa",
		.of_match_table = of_match_ptr(ipmmu_of_ids),
	},
	.probe = ipmmu_probe,
	.remove = ipmmu_remove,
};

static int __init ipmmu_init(void)
{
	int ret;

	ret = platform_driver_register(&ipmmu_driver);
	if (ret < 0)
		return ret;

	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &ipmmu_ops);

	return 0;
}

static void __exit ipmmu_exit(void)
{
	return platform_driver_unregister(&ipmmu_driver);
}

subsys_initcall(ipmmu_init);
module_exit(ipmmu_exit);

MODULE_DESCRIPTION("IOMMU API for Renesas VMSA-compatible IPMMU");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");