drivers/iommu/arm-smmu.c
1 /*
2 * IOMMU API for ARM architected SMMU implementations.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 * Copyright (C) 2013 ARM Limited
18 *
19 * Author: Will Deacon <will.deacon@arm.com>
20 *
21 * This driver currently supports:
22 * - SMMUv1 and v2 implementations
23 * - Stream-matching and stream-indexing
24 * - v7/v8 long-descriptor format
25 * - Non-secure access to the SMMU
26 * - Context fault reporting
27 */
28
29 #define pr_fmt(fmt) "arm-smmu: " fmt
30
31 #include <linux/delay.h>
32 #include <linux/dma-iommu.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/err.h>
35 #include <linux/interrupt.h>
36 #include <linux/io.h>
37 #include <linux/iommu.h>
38 #include <linux/iopoll.h>
39 #include <linux/module.h>
40 #include <linux/of.h>
41 #include <linux/of_address.h>
42 #include <linux/pci.h>
43 #include <linux/platform_device.h>
44 #include <linux/slab.h>
45 #include <linux/spinlock.h>
46
47 #include <linux/amba/bus.h>
48
49 #include "io-pgtable.h"
50
51 /* Maximum number of stream IDs assigned to a single device */
52 #define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS
53
54 /* Maximum number of context banks per SMMU */
55 #define ARM_SMMU_MAX_CBS 128
56
57 /* Maximum number of mapping groups per SMMU */
58 #define ARM_SMMU_MAX_SMRS 128
59
60 /* SMMU global address space */
61 #define ARM_SMMU_GR0(smmu) ((smmu)->base)
62 #define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
63
64 /*
65 * SMMU global address space with conditional offset to access secure
66 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
67 * nsGFSYNR0: 0x450)
68 */
69 #define ARM_SMMU_GR0_NS(smmu) \
70 ((smmu)->base + \
71 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
72 ? 0x400 : 0))
73
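/*
 * 64-bit register write helper: use a native writeq where available,
 * otherwise split the access into two 32-bit writes (upper word first).
 */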
74 #ifdef CONFIG_64BIT
75 #define smmu_writeq writeq_relaxed
76 #else
77 #define smmu_writeq(reg64, addr) \
78 do { \
79 u64 __val = (reg64); \
80 void __iomem *__addr = (addr); \
81 writel_relaxed(__val >> 32, __addr + 4); \
82 writel_relaxed(__val, __addr); \
83 } while (0)
84 #endif
85
86 /* Configuration registers */
87 #define ARM_SMMU_GR0_sCR0 0x0
88 #define sCR0_CLIENTPD (1 << 0)
89 #define sCR0_GFRE (1 << 1)
90 #define sCR0_GFIE (1 << 2)
91 #define sCR0_GCFGFRE (1 << 4)
92 #define sCR0_GCFGFIE (1 << 5)
93 #define sCR0_USFCFG (1 << 10)
94 #define sCR0_VMIDPNE (1 << 11)
95 #define sCR0_PTM (1 << 12)
96 #define sCR0_FB (1 << 13)
97 #define sCR0_VMID16EN (1 << 31)
98 #define sCR0_BSU_SHIFT 14
99 #define sCR0_BSU_MASK 0x3
100
101 /* Identification registers */
102 #define ARM_SMMU_GR0_ID0 0x20
103 #define ARM_SMMU_GR0_ID1 0x24
104 #define ARM_SMMU_GR0_ID2 0x28
105 #define ARM_SMMU_GR0_ID3 0x2c
106 #define ARM_SMMU_GR0_ID4 0x30
107 #define ARM_SMMU_GR0_ID5 0x34
108 #define ARM_SMMU_GR0_ID6 0x38
109 #define ARM_SMMU_GR0_ID7 0x3c
110 #define ARM_SMMU_GR0_sGFSR 0x48
111 #define ARM_SMMU_GR0_sGFSYNR0 0x50
112 #define ARM_SMMU_GR0_sGFSYNR1 0x54
113 #define ARM_SMMU_GR0_sGFSYNR2 0x58
114
115 #define ID0_S1TS (1 << 30)
116 #define ID0_S2TS (1 << 29)
117 #define ID0_NTS (1 << 28)
118 #define ID0_SMS (1 << 27)
119 #define ID0_ATOSNS (1 << 26)
120 #define ID0_CTTW (1 << 14)
121 #define ID0_NUMIRPT_SHIFT 16
122 #define ID0_NUMIRPT_MASK 0xff
123 #define ID0_NUMSIDB_SHIFT 9
124 #define ID0_NUMSIDB_MASK 0xf
125 #define ID0_NUMSMRG_SHIFT 0
126 #define ID0_NUMSMRG_MASK 0xff
127
128 #define ID1_PAGESIZE (1 << 31)
129 #define ID1_NUMPAGENDXB_SHIFT 28
130 #define ID1_NUMPAGENDXB_MASK 7
131 #define ID1_NUMS2CB_SHIFT 16
132 #define ID1_NUMS2CB_MASK 0xff
133 #define ID1_NUMCB_SHIFT 0
134 #define ID1_NUMCB_MASK 0xff
135
136 #define ID2_OAS_SHIFT 4
137 #define ID2_OAS_MASK 0xf
138 #define ID2_IAS_SHIFT 0
139 #define ID2_IAS_MASK 0xf
140 #define ID2_UBS_SHIFT 8
141 #define ID2_UBS_MASK 0xf
142 #define ID2_PTFS_4K (1 << 12)
143 #define ID2_PTFS_16K (1 << 13)
144 #define ID2_PTFS_64K (1 << 14)
145 #define ID2_VMID16 (1 << 15)
146
147 /* Global TLB invalidation */
148 #define ARM_SMMU_GR0_TLBIVMID 0x64
149 #define ARM_SMMU_GR0_TLBIALLNSNH 0x68
150 #define ARM_SMMU_GR0_TLBIALLH 0x6c
151 #define ARM_SMMU_GR0_sTLBGSYNC 0x70
152 #define ARM_SMMU_GR0_sTLBGSTATUS 0x74
153 #define sTLBGSTATUS_GSACTIVE (1 << 0)
154 #define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
155
156 /* Stream mapping registers */
157 #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
158 #define SMR_VALID (1 << 31)
159 #define SMR_MASK_SHIFT 16
160 #define SMR_MASK_MASK 0x7fff
161 #define SMR_ID_SHIFT 0
162 #define SMR_ID_MASK 0x7fff
163
164 #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
165 #define S2CR_CBNDX_SHIFT 0
166 #define S2CR_CBNDX_MASK 0xff
167 #define S2CR_TYPE_SHIFT 16
168 #define S2CR_TYPE_MASK 0x3
169 #define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT)
170 #define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT)
171 #define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT)
172
173 #define S2CR_PRIVCFG_SHIFT 24
174 #define S2CR_PRIVCFG_UNPRIV (2 << S2CR_PRIVCFG_SHIFT)
175
176 /* Context bank attribute registers */
177 #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
178 #define CBAR_VMID_SHIFT 0
179 #define CBAR_VMID_MASK 0xff
180 #define CBAR_S1_BPSHCFG_SHIFT 8
181 #define CBAR_S1_BPSHCFG_MASK 3
182 #define CBAR_S1_BPSHCFG_NSH 3
183 #define CBAR_S1_MEMATTR_SHIFT 12
184 #define CBAR_S1_MEMATTR_MASK 0xf
185 #define CBAR_S1_MEMATTR_WB 0xf
186 #define CBAR_TYPE_SHIFT 16
187 #define CBAR_TYPE_MASK 0x3
188 #define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
189 #define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
190 #define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
191 #define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
192 #define CBAR_IRPTNDX_SHIFT 24
193 #define CBAR_IRPTNDX_MASK 0xff
194
195 #define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
196 #define CBA2R_RW64_32BIT (0 << 0)
197 #define CBA2R_RW64_64BIT (1 << 0)
198 #define CBA2R_VMID_SHIFT 16
199 #define CBA2R_VMID_MASK 0xffff
200
201 /* Translation context bank */
202 #define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
203 #define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
204
205 #define ARM_SMMU_CB_SCTLR 0x0
206 #define ARM_SMMU_CB_RESUME 0x8
207 #define ARM_SMMU_CB_TTBCR2 0x10
208 #define ARM_SMMU_CB_TTBR0 0x20
209 #define ARM_SMMU_CB_TTBR1 0x28
210 #define ARM_SMMU_CB_TTBCR 0x30
211 #define ARM_SMMU_CB_S1_MAIR0 0x38
212 #define ARM_SMMU_CB_S1_MAIR1 0x3c
213 #define ARM_SMMU_CB_PAR_LO 0x50
214 #define ARM_SMMU_CB_PAR_HI 0x54
215 #define ARM_SMMU_CB_FSR 0x58
216 #define ARM_SMMU_CB_FAR_LO 0x60
217 #define ARM_SMMU_CB_FAR_HI 0x64
218 #define ARM_SMMU_CB_FSYNR0 0x68
219 #define ARM_SMMU_CB_S1_TLBIVA 0x600
220 #define ARM_SMMU_CB_S1_TLBIASID 0x610
221 #define ARM_SMMU_CB_S1_TLBIVAL 0x620
222 #define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
223 #define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
224 #define ARM_SMMU_CB_ATS1PR 0x800
225 #define ARM_SMMU_CB_ATSR 0x8f0
226
227 #define SCTLR_S1_ASIDPNE (1 << 12)
228 #define SCTLR_CFCFG (1 << 7)
229 #define SCTLR_CFIE (1 << 6)
230 #define SCTLR_CFRE (1 << 5)
231 #define SCTLR_E (1 << 4)
232 #define SCTLR_AFE (1 << 2)
233 #define SCTLR_TRE (1 << 1)
234 #define SCTLR_M (1 << 0)
235 #define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)
236
237 #define CB_PAR_F (1 << 0)
238
239 #define ATSR_ACTIVE (1 << 0)
240
241 #define RESUME_RETRY (0 << 0)
242 #define RESUME_TERMINATE (1 << 0)
243
244 #define TTBCR2_SEP_SHIFT 15
245 #define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
246
247 #define TTBRn_ASID_SHIFT 48
248
249 #define FSR_MULTI (1 << 31)
250 #define FSR_SS (1 << 30)
251 #define FSR_UUT (1 << 8)
252 #define FSR_ASF (1 << 7)
253 #define FSR_TLBLKF (1 << 6)
254 #define FSR_TLBMCF (1 << 5)
255 #define FSR_EF (1 << 4)
256 #define FSR_PF (1 << 3)
257 #define FSR_AFF (1 << 2)
258 #define FSR_TF (1 << 1)
259
260 #define FSR_IGN (FSR_AFF | FSR_ASF | \
261 FSR_TLBMCF | FSR_TLBLKF)
262 #define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
263 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
264
265 #define FSYNR0_WNR (1 << 4)
266
267 static int force_stage;
268 module_param(force_stage, int, S_IRUGO);
269 MODULE_PARM_DESC(force_stage,
270 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
271 static bool disable_bypass;
272 module_param(disable_bypass, bool, S_IRUGO);
273 MODULE_PARM_DESC(disable_bypass,
274 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
275
276 enum arm_smmu_arch_version {
277 ARM_SMMU_V1 = 1,
278 ARM_SMMU_V2,
279 };
280
281 enum arm_smmu_implementation {
282 GENERIC_SMMU,
283 CAVIUM_SMMUV2,
284 };
285
286 struct arm_smmu_smr {
287 u8 idx;
288 u16 mask;
289 u16 id;
290 };
291
292 struct arm_smmu_master_cfg {
293 int num_streamids;
294 u16 streamids[MAX_MASTER_STREAMIDS];
295 struct arm_smmu_smr *smrs;
296 };
297
298 struct arm_smmu_master {
299 struct device_node *of_node;
300 struct rb_node node;
301 struct arm_smmu_master_cfg cfg;
302 };
303
304 struct arm_smmu_device {
305 struct device *dev;
306
307 void __iomem *base;
308 unsigned long size;
309 unsigned long pgshift;
310
311 #define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
312 #define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
313 #define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
314 #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
315 #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
316 #define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
317 #define ARM_SMMU_FEAT_VMID16 (1 << 6)
318 u32 features;
319
320 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
321 u32 options;
322 enum arm_smmu_arch_version version;
323 enum arm_smmu_implementation model;
324
325 u32 num_context_banks;
326 u32 num_s2_context_banks;
327 DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
328 atomic_t irptndx;
329
330 u32 num_mapping_groups;
331 DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
332
333 unsigned long va_size;
334 unsigned long ipa_size;
335 unsigned long pa_size;
336
337 u32 num_global_irqs;
338 u32 num_context_irqs;
339 unsigned int *irqs;
340
341 struct list_head list;
342 struct rb_root masters;
343
344 u32 cavium_id_base; /* Specific to Cavium */
345 };
346
347 struct arm_smmu_cfg {
348 u8 cbndx;
349 u8 irptndx;
350 u32 cbar;
351 };
352 #define INVALID_IRPTNDX 0xff
353
354 #define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
355 #define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
356
357 enum arm_smmu_domain_stage {
358 ARM_SMMU_DOMAIN_S1 = 0,
359 ARM_SMMU_DOMAIN_S2,
360 ARM_SMMU_DOMAIN_NESTED,
361 };
362
363 struct arm_smmu_domain {
364 struct arm_smmu_device *smmu;
365 struct io_pgtable_ops *pgtbl_ops;
366 spinlock_t pgtbl_lock;
367 struct arm_smmu_cfg cfg;
368 enum arm_smmu_domain_stage stage;
369 struct mutex init_mutex; /* Protects smmu pointer */
370 struct iommu_domain domain;
371 };
372
373 static struct iommu_ops arm_smmu_ops;
374
375 static DEFINE_SPINLOCK(arm_smmu_devices_lock);
376 static LIST_HEAD(arm_smmu_devices);
377
378 struct arm_smmu_option_prop {
379 u32 opt;
380 const char *prop;
381 };
382
383 static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
384
385 static struct arm_smmu_option_prop arm_smmu_options[] = {
386 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
387 { 0, NULL},
388 };
389
390 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
391 {
392 return container_of(dom, struct arm_smmu_domain, domain);
393 }
394
395 static void parse_driver_options(struct arm_smmu_device *smmu)
396 {
397 int i = 0;
398
399 do {
400 if (of_property_read_bool(smmu->dev->of_node,
401 arm_smmu_options[i].prop)) {
402 smmu->options |= arm_smmu_options[i].opt;
403 dev_notice(smmu->dev, "option %s\n",
404 arm_smmu_options[i].prop);
405 }
406 } while (arm_smmu_options[++i].opt);
407 }
408
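/*
 * Resolve the OF node used to look up a master: for PCI devices, walk up
 * to the root bus and use the host controller's node (the root bus
 * bridge's parent), which is what gets matched against the SMMU's master
 * list; otherwise use the device's own node.
 */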
409 static struct device_node *dev_get_dev_node(struct device *dev)
410 {
411 if (dev_is_pci(dev)) {
412 struct pci_bus *bus = to_pci_dev(dev)->bus;
413
414 while (!pci_is_root_bus(bus))
415 bus = bus->parent;
416 return bus->bridge->parent->of_node;
417 }
418
419 return dev->of_node;
420 }
421
422 static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
423 struct device_node *dev_node)
424 {
425 struct rb_node *node = smmu->masters.rb_node;
426
427 while (node) {
428 struct arm_smmu_master *master;
429
430 master = container_of(node, struct arm_smmu_master, node);
431
432 if (dev_node < master->of_node)
433 node = node->rb_left;
434 else if (dev_node > master->of_node)
435 node = node->rb_right;
436 else
437 return master;
438 }
439
440 return NULL;
441 }
442
443 static struct arm_smmu_master_cfg *
444 find_smmu_master_cfg(struct device *dev)
445 {
446 struct arm_smmu_master_cfg *cfg = NULL;
447 struct iommu_group *group = iommu_group_get(dev);
448
449 if (group) {
450 cfg = iommu_group_get_iommudata(group);
451 iommu_group_put(group);
452 }
453
454 return cfg;
455 }
456
457 static int insert_smmu_master(struct arm_smmu_device *smmu,
458 struct arm_smmu_master *master)
459 {
460 struct rb_node **new, *parent;
461
462 new = &smmu->masters.rb_node;
463 parent = NULL;
464 while (*new) {
465 struct arm_smmu_master *this
466 = container_of(*new, struct arm_smmu_master, node);
467
468 parent = *new;
469 if (master->of_node < this->of_node)
470 new = &((*new)->rb_left);
471 else if (master->of_node > this->of_node)
472 new = &((*new)->rb_right);
473 else
474 return -EEXIST;
475 }
476
477 rb_link_node(&master->node, parent, new);
478 rb_insert_color(&master->node, &smmu->masters);
479 return 0;
480 }
481
482 static int register_smmu_master(struct arm_smmu_device *smmu,
483 struct device *dev,
484 struct of_phandle_args *masterspec)
485 {
486 int i;
487 struct arm_smmu_master *master;
488
489 master = find_smmu_master(smmu, masterspec->np);
490 if (master) {
491 dev_err(dev,
492 "rejecting multiple registrations for master device %s\n",
493 masterspec->np->name);
494 return -EBUSY;
495 }
496
497 if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
498 dev_err(dev,
499 "reached maximum number (%d) of stream IDs for master device %s\n",
500 MAX_MASTER_STREAMIDS, masterspec->np->name);
501 return -ENOSPC;
502 }
503
504 master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
505 if (!master)
506 return -ENOMEM;
507
508 master->of_node = masterspec->np;
509 master->cfg.num_streamids = masterspec->args_count;
510
511 for (i = 0; i < master->cfg.num_streamids; ++i) {
512 u16 streamid = masterspec->args[i];
513
514 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
515 (streamid >= smmu->num_mapping_groups)) {
516 dev_err(dev,
517 "stream ID for master device %s greater than maximum allowed (%d)\n",
518 masterspec->np->name, smmu->num_mapping_groups);
519 return -ERANGE;
520 }
521 master->cfg.streamids[i] = streamid;
522 }
523 return insert_smmu_master(smmu, master);
524 }
525
526 static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
527 {
528 struct arm_smmu_device *smmu;
529 struct arm_smmu_master *master = NULL;
530 struct device_node *dev_node = dev_get_dev_node(dev);
531
532 spin_lock(&arm_smmu_devices_lock);
533 list_for_each_entry(smmu, &arm_smmu_devices, list) {
534 master = find_smmu_master(smmu, dev_node);
535 if (master)
536 break;
537 }
538 spin_unlock(&arm_smmu_devices_lock);
539
540 return master ? smmu : NULL;
541 }
542
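/*
 * Allocate a free index from a context-bank/SMR bitmap. The
 * find_next_zero_bit()/test_and_set_bit() loop retries on races, so
 * callers need no external lock.
 */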
543 static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
544 {
545 int idx;
546
547 do {
548 idx = find_next_zero_bit(map, end, start);
549 if (idx == end)
550 return -ENOSPC;
551 } while (test_and_set_bit(idx, map));
552
553 return idx;
554 }
555
556 static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
557 {
558 clear_bit(idx, map);
559 }
560
561 /* Wait for any pending TLB invalidations to complete */
562 static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
563 {
564 int count = 0;
565 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
566
567 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
568 while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
569 & sTLBGSTATUS_GSACTIVE) {
570 cpu_relax();
571 if (++count == TLB_LOOP_TIMEOUT) {
572 dev_err_ratelimited(smmu->dev,
573 "TLB sync timed out -- SMMU may be deadlocked\n");
574 return;
575 }
576 udelay(1);
577 }
578 }
579
580 static void arm_smmu_tlb_sync(void *cookie)
581 {
582 struct arm_smmu_domain *smmu_domain = cookie;
583 __arm_smmu_tlb_sync(smmu_domain->smmu);
584 }
585
586 static void arm_smmu_tlb_inv_context(void *cookie)
587 {
588 struct arm_smmu_domain *smmu_domain = cookie;
589 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
590 struct arm_smmu_device *smmu = smmu_domain->smmu;
591 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
592 void __iomem *base;
593
594 if (stage1) {
595 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
596 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
597 base + ARM_SMMU_CB_S1_TLBIASID);
598 } else {
599 base = ARM_SMMU_GR0(smmu);
600 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
601 base + ARM_SMMU_GR0_TLBIVMID);
602 }
603
604 __arm_smmu_tlb_sync(smmu);
605 }
606
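/*
 * Invalidate the TLB for a range of addresses without waiting for
 * completion; the follow-up sync is issued separately via
 * arm_smmu_tlb_sync().
 */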
607 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
608 size_t granule, bool leaf, void *cookie)
609 {
610 struct arm_smmu_domain *smmu_domain = cookie;
611 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
612 struct arm_smmu_device *smmu = smmu_domain->smmu;
613 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
614 void __iomem *reg;
615
616 if (stage1) {
617 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
618 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
619
620 if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
621 iova &= ~12UL;
622 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
623 do {
624 writel_relaxed(iova, reg);
625 iova += granule;
626 } while (size -= granule);
627 #ifdef CONFIG_64BIT
628 } else {
629 iova >>= 12;
630 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
631 do {
632 writeq_relaxed(iova, reg);
633 iova += granule >> 12;
634 } while (size -= granule);
635 #endif
636 }
637 #ifdef CONFIG_64BIT
638 } else if (smmu->version == ARM_SMMU_V2) {
639 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
640 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
641 ARM_SMMU_CB_S2_TLBIIPAS2;
642 iova >>= 12;
643 do {
644 writeq_relaxed(iova, reg);
645 iova += granule >> 12;
646 } while (size -= granule);
647 #endif
648 } else {
649 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
650 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
651 }
652 }
653
654 static struct iommu_gather_ops arm_smmu_gather_ops = {
655 .tlb_flush_all = arm_smmu_tlb_inv_context,
656 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
657 .tlb_sync = arm_smmu_tlb_sync,
658 };
659
660 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
661 {
662 int flags, ret;
663 u32 fsr, far, fsynr, resume;
664 unsigned long iova;
665 struct iommu_domain *domain = dev;
666 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
667 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
668 struct arm_smmu_device *smmu = smmu_domain->smmu;
669 void __iomem *cb_base;
670
671 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
672 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
673
674 if (!(fsr & FSR_FAULT))
675 return IRQ_NONE;
676
677 if (fsr & FSR_IGN)
678 dev_err_ratelimited(smmu->dev,
679 "Unexpected context fault (fsr 0x%x)\n",
680 fsr);
681
682 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
683 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
684
685 far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
686 iova = far;
687 #ifdef CONFIG_64BIT
688 far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
689 iova |= ((unsigned long)far << 32);
690 #endif
691
692 if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
693 ret = IRQ_HANDLED;
694 resume = RESUME_RETRY;
695 } else {
696 dev_err_ratelimited(smmu->dev,
697 "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
698 iova, fsynr, cfg->cbndx);
699 ret = IRQ_NONE;
700 resume = RESUME_TERMINATE;
701 }
702
703 /* Clear the faulting FSR */
704 writel(fsr, cb_base + ARM_SMMU_CB_FSR);
705
706 /* Retry or terminate any stalled transactions */
707 if (fsr & FSR_SS)
708 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
709
710 return ret;
711 }
712
713 static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
714 {
715 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
716 struct arm_smmu_device *smmu = dev;
717 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
718
719 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
720 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
721 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
722 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
723
724 if (!gfsr)
725 return IRQ_NONE;
726
727 dev_err_ratelimited(smmu->dev,
728 "Unexpected global fault, this could be serious\n");
729 dev_err_ratelimited(smmu->dev,
730 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
731 gfsr, gfsynr0, gfsynr1, gfsynr2);
732
733 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
734 return IRQ_HANDLED;
735 }
736
737 static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
738 struct io_pgtable_cfg *pgtbl_cfg)
739 {
740 u32 reg;
741 u64 reg64;
742 bool stage1;
743 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
744 struct arm_smmu_device *smmu = smmu_domain->smmu;
745 void __iomem *cb_base, *gr1_base;
746
747 gr1_base = ARM_SMMU_GR1(smmu);
748 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
749 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
750
751 if (smmu->version > ARM_SMMU_V1) {
752 #ifdef CONFIG_64BIT
753 reg = CBA2R_RW64_64BIT;
754 #else
755 reg = CBA2R_RW64_32BIT;
756 #endif
757 /* 16-bit VMIDs live in CBA2R */
758 if (smmu->features & ARM_SMMU_FEAT_VMID16)
759 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
760
761 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
762 }
763
764 /* CBAR */
765 reg = cfg->cbar;
766 if (smmu->version == ARM_SMMU_V1)
767 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
768
769 /*
770 * Use the weakest shareability/memory types, so they are
771 * overridden by the ttbcr/pte.
772 */
773 if (stage1) {
774 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
775 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
776 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
777 /* 8-bit VMIDs live in CBAR */
778 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
779 }
780 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
781
782 /* TTBRs */
783 if (stage1) {
784 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
785
786 reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
787 smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
788
789 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
790 reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
791 smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR1);
792 } else {
793 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
794 smmu_writeq(reg64, cb_base + ARM_SMMU_CB_TTBR0);
795 }
796
797 /* TTBCR */
798 if (stage1) {
799 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
800 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
801 if (smmu->version > ARM_SMMU_V1) {
802 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
803 reg |= TTBCR2_SEP_UPSTREAM;
804 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
805 }
806 } else {
807 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
808 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
809 }
810
811 /* MAIRs (stage-1 only) */
812 if (stage1) {
813 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
814 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
815 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
816 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
817 }
818
819 /* SCTLR */
820 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
821 if (stage1)
822 reg |= SCTLR_S1_ASIDPNE;
823 #ifdef __BIG_ENDIAN
824 reg |= SCTLR_E;
825 #endif
826 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
827 }
828
829 static int arm_smmu_init_domain_context(struct iommu_domain *domain,
830 struct arm_smmu_device *smmu)
831 {
832 int irq, start, ret = 0;
833 unsigned long ias, oas;
834 struct io_pgtable_ops *pgtbl_ops;
835 struct io_pgtable_cfg pgtbl_cfg;
836 enum io_pgtable_fmt fmt;
837 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
838 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
839
840 mutex_lock(&smmu_domain->init_mutex);
841 if (smmu_domain->smmu)
842 goto out_unlock;
843
844 /*
845 * Mapping the requested stage onto what we support is surprisingly
846 * complicated, mainly because the spec allows S1+S2 SMMUs without
847 * support for nested translation. That means we end up with the
848 * following table:
849 *
850 * Requested Supported Actual
851 * S1 N S1
852 * S1 S1+S2 S1
853 * S1 S2 S2
854 * S1 S1 S1
855 * N N N
856 * N S1+S2 S2
857 * N S2 S2
858 * N S1 S1
859 *
860 * Note that you can't actually request stage-2 mappings.
861 */
862 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
863 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
864 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
865 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
866
867 switch (smmu_domain->stage) {
868 case ARM_SMMU_DOMAIN_S1:
869 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
870 start = smmu->num_s2_context_banks;
871 ias = smmu->va_size;
872 oas = smmu->ipa_size;
873 if (IS_ENABLED(CONFIG_64BIT))
874 fmt = ARM_64_LPAE_S1;
875 else
876 fmt = ARM_32_LPAE_S1;
877 break;
878 case ARM_SMMU_DOMAIN_NESTED:
879 /*
880 * We will likely want to change this if/when KVM gets
881 * involved.
882 */
883 case ARM_SMMU_DOMAIN_S2:
884 cfg->cbar = CBAR_TYPE_S2_TRANS;
885 start = 0;
886 ias = smmu->ipa_size;
887 oas = smmu->pa_size;
888 if (IS_ENABLED(CONFIG_64BIT))
889 fmt = ARM_64_LPAE_S2;
890 else
891 fmt = ARM_32_LPAE_S2;
892 break;
893 default:
894 ret = -EINVAL;
895 goto out_unlock;
896 }
897
898 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
899 smmu->num_context_banks);
900 if (IS_ERR_VALUE(ret))
901 goto out_unlock;
902
903 cfg->cbndx = ret;
904 if (smmu->version == ARM_SMMU_V1) {
905 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
906 cfg->irptndx %= smmu->num_context_irqs;
907 } else {
908 cfg->irptndx = cfg->cbndx;
909 }
910
911 pgtbl_cfg = (struct io_pgtable_cfg) {
912 .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
913 .ias = ias,
914 .oas = oas,
915 .tlb = &arm_smmu_gather_ops,
916 .iommu_dev = smmu->dev,
917 };
918
919 smmu_domain->smmu = smmu;
920 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
921 if (!pgtbl_ops) {
922 ret = -ENOMEM;
923 goto out_clear_smmu;
924 }
925
926 /* Update our support page sizes to reflect the page table format */
927 arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
928
929 /* Initialise the context bank with our page table cfg */
930 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
931
932 /*
933 * Request context fault interrupt. Do this last to avoid the
934 * handler seeing a half-initialised domain state.
935 */
936 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
937 ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
938 "arm-smmu-context-fault", domain);
939 if (IS_ERR_VALUE(ret)) {
940 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
941 cfg->irptndx, irq);
942 cfg->irptndx = INVALID_IRPTNDX;
943 }
944
945 mutex_unlock(&smmu_domain->init_mutex);
946
947 /* Publish page table ops for map/unmap */
948 smmu_domain->pgtbl_ops = pgtbl_ops;
949 return 0;
950
951 out_clear_smmu:
952 smmu_domain->smmu = NULL;
953 out_unlock:
954 mutex_unlock(&smmu_domain->init_mutex);
955 return ret;
956 }
957
958 static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
959 {
960 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
961 struct arm_smmu_device *smmu = smmu_domain->smmu;
962 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
963 void __iomem *cb_base;
964 int irq;
965
966 if (!smmu)
967 return;
968
969 /*
970 * Disable the context bank and free the page tables before freeing
971 * it.
972 */
973 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
974 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
975
976 if (cfg->irptndx != INVALID_IRPTNDX) {
977 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
978 free_irq(irq, domain);
979 }
980
981 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
982 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
983 }
984
985 static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
986 {
987 struct arm_smmu_domain *smmu_domain;
988
989 if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
990 return NULL;
991 /*
992 * Allocate the domain and initialise some of its data structures.
993 * We can't really do anything meaningful until we've added a
994 * master.
995 */
996 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
997 if (!smmu_domain)
998 return NULL;
999
1000 if (type == IOMMU_DOMAIN_DMA &&
1001 iommu_get_dma_cookie(&smmu_domain->domain)) {
1002 kfree(smmu_domain);
1003 return NULL;
1004 }
1005
1006 mutex_init(&smmu_domain->init_mutex);
1007 spin_lock_init(&smmu_domain->pgtbl_lock);
1008
1009 return &smmu_domain->domain;
1010 }
1011
1012 static void arm_smmu_domain_free(struct iommu_domain *domain)
1013 {
1014 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1015
1016 /*
1017 * Free the domain resources. We assume that all devices have
1018 * already been detached.
1019 */
1020 iommu_put_dma_cookie(domain);
1021 arm_smmu_destroy_domain_context(domain);
1022 kfree(smmu_domain);
1023 }
1024
1025 static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
1026 struct arm_smmu_master_cfg *cfg)
1027 {
1028 int i;
1029 struct arm_smmu_smr *smrs;
1030 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1031
1032 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
1033 return 0;
1034
1035 if (cfg->smrs)
1036 return -EEXIST;
1037
1038 smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
1039 if (!smrs) {
1040 dev_err(smmu->dev, "failed to allocate %d SMRs\n",
1041 cfg->num_streamids);
1042 return -ENOMEM;
1043 }
1044
1045 /* Allocate the SMRs on the SMMU */
1046 for (i = 0; i < cfg->num_streamids; ++i) {
1047 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1048 smmu->num_mapping_groups);
1049 if (IS_ERR_VALUE(idx)) {
1050 dev_err(smmu->dev, "failed to allocate free SMR\n");
1051 goto err_free_smrs;
1052 }
1053
1054 smrs[i] = (struct arm_smmu_smr) {
1055 .idx = idx,
1056 .mask = 0, /* We don't currently share SMRs */
1057 .id = cfg->streamids[i],
1058 };
1059 }
1060
1061 /* It worked! Now, poke the actual hardware */
1062 for (i = 0; i < cfg->num_streamids; ++i) {
1063 u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
1064 smrs[i].mask << SMR_MASK_SHIFT;
1065 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
1066 }
1067
1068 cfg->smrs = smrs;
1069 return 0;
1070
1071 err_free_smrs:
1072 while (--i >= 0)
1073 __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
1074 kfree(smrs);
1075 return -ENOSPC;
1076 }
1077
1078 static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
1079 struct arm_smmu_master_cfg *cfg)
1080 {
1081 int i;
1082 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1083 struct arm_smmu_smr *smrs = cfg->smrs;
1084
1085 if (!smrs)
1086 return;
1087
1088 /* Invalidate the SMRs before freeing back to the allocator */
1089 for (i = 0; i < cfg->num_streamids; ++i) {
1090 u8 idx = smrs[i].idx;
1091
1092 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1093 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1094 }
1095
1096 cfg->smrs = NULL;
1097 kfree(smrs);
1098 }
1099
1100 static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
1101 struct arm_smmu_master_cfg *cfg)
1102 {
1103 int i, ret;
1104 struct arm_smmu_device *smmu = smmu_domain->smmu;
1105 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1106
1107 /* Devices in an IOMMU group may already be configured */
1108 ret = arm_smmu_master_configure_smrs(smmu, cfg);
1109 if (ret)
1110 return ret == -EEXIST ? 0 : ret;
1111
1112 /*
1113 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1114 * for all devices behind the SMMU.
1115 */
1116 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1117 return 0;
1118
1119 for (i = 0; i < cfg->num_streamids; ++i) {
1120 u32 idx, s2cr;
1121
1122 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
1123 s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
1124 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
1125 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1126 }
1127
1128 return 0;
1129 }
1130
1131 static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
1132 struct arm_smmu_master_cfg *cfg)
1133 {
1134 int i;
1135 struct arm_smmu_device *smmu = smmu_domain->smmu;
1136 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1137
1138 /* An IOMMU group is torn down by the first device to be removed */
1139 if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1140 return;
1141
1142 /*
1143 * We *must* clear the S2CR first, because freeing the SMR means
1144 * that it can be re-allocated immediately.
1145 */
1146 for (i = 0; i < cfg->num_streamids; ++i) {
1147 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
1148 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
1149
1150 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1151 }
1152
1153 arm_smmu_master_free_smrs(smmu, cfg);
1154 }
1155
1156 static void arm_smmu_detach_dev(struct device *dev,
1157 struct arm_smmu_master_cfg *cfg)
1158 {
1159 struct iommu_domain *domain = dev->archdata.iommu;
1160 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1161
1162 dev->archdata.iommu = NULL;
1163 arm_smmu_domain_remove_master(smmu_domain, cfg);
1164 }
1165
1166 static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1167 {
1168 int ret;
1169 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1170 struct arm_smmu_device *smmu;
1171 struct arm_smmu_master_cfg *cfg;
1172
1173 smmu = find_smmu_for_device(dev);
1174 if (!smmu) {
1175 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1176 return -ENXIO;
1177 }
1178
1179 /* Ensure that the domain is finalised */
1180 ret = arm_smmu_init_domain_context(domain, smmu);
1181 if (IS_ERR_VALUE(ret))
1182 return ret;
1183
1184 /*
1185 * Sanity check the domain. We don't support domains across
1186 * different SMMUs.
1187 */
1188 if (smmu_domain->smmu != smmu) {
1189 dev_err(dev,
1190 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
1191 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1192 return -EINVAL;
1193 }
1194
1195 /* Looks ok, so add the device to the domain */
1196 cfg = find_smmu_master_cfg(dev);
1197 if (!cfg)
1198 return -ENODEV;
1199
1200 /* Detach the dev from its current domain */
1201 if (dev->archdata.iommu)
1202 arm_smmu_detach_dev(dev, cfg);
1203
1204 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
1205 if (!ret)
1206 dev->archdata.iommu = domain;
1207 return ret;
1208 }
1209
1210 static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
1211 phys_addr_t paddr, size_t size, int prot)
1212 {
1213 int ret;
1214 unsigned long flags;
1215 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1216         struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1217
1218 if (!ops)
1219 return -ENODEV;
1220
1221 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1222 ret = ops->map(ops, iova, paddr, size, prot);
1223 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1224 return ret;
1225 }
1226
1227 static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1228 size_t size)
1229 {
1230 size_t ret;
1231 unsigned long flags;
1232 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1233         struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1234
1235 if (!ops)
1236 return 0;
1237
1238 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1239 ret = ops->unmap(ops, iova, size);
1240 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1241 return ret;
1242 }
1243
1244 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1245 dma_addr_t iova)
1246 {
1247 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1248 struct arm_smmu_device *smmu = smmu_domain->smmu;
1249 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1250         struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1251 struct device *dev = smmu->dev;
1252 void __iomem *cb_base;
1253 u32 tmp;
1254 u64 phys;
1255 unsigned long va;
1256
1257 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1258
1259 /* ATS1 registers can only be written atomically */
1260 va = iova & ~0xfffUL;
1261 if (smmu->version == ARM_SMMU_V2)
1262 smmu_writeq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1263 else
1264 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
1265
1266 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1267 !(tmp & ATSR_ACTIVE), 5, 50)) {
1268 dev_err(dev,
1269 "iova to phys timed out on %pad. Falling back to software table walk.\n",
1270 &iova);
1271 return ops->iova_to_phys(ops, iova);
1272 }
1273
1274 phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO);
1275 phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32;
1276
1277 if (phys & CB_PAR_F) {
1278 dev_err(dev, "translation fault!\n");
1279 dev_err(dev, "PAR = 0x%llx\n", phys);
1280 return 0;
1281 }
1282
1283 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1284 }
1285
1286 static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
1287 dma_addr_t iova)
1288 {
1289 phys_addr_t ret;
1290 unsigned long flags;
1291 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1292         struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
1293
1294 if (!ops)
1295 return 0;
1296
1297 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1298 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1299 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
1300 ret = arm_smmu_iova_to_phys_hard(domain, iova);
1301 } else {
1302 ret = ops->iova_to_phys(ops, iova);
1303 }
1304
1305 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1306
1307 return ret;
1308 }
1309
1310 static bool arm_smmu_capable(enum iommu_cap cap)
1311 {
1312 switch (cap) {
1313 case IOMMU_CAP_CACHE_COHERENCY:
1314 /*
1315 * Return true here as the SMMU can always send out coherent
1316 * requests.
1317 */
1318 return true;
1319 case IOMMU_CAP_INTR_REMAP:
1320 return true; /* MSIs are just memory writes */
1321 case IOMMU_CAP_NOEXEC:
1322 return true;
1323 default:
1324 return false;
1325 }
1326 }
1327
1328 static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
1329 {
1330 *((u16 *)data) = alias;
1331 return 0; /* Continue walking */
1332 }
1333
1334 static void __arm_smmu_release_pci_iommudata(void *data)
1335 {
1336 kfree(data);
1337 }
1338
1339 static int arm_smmu_init_pci_device(struct pci_dev *pdev,
1340 struct iommu_group *group)
1341 {
1342 struct arm_smmu_master_cfg *cfg;
1343 u16 sid;
1344 int i;
1345
1346 cfg = iommu_group_get_iommudata(group);
1347 if (!cfg) {
1348 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
1349 if (!cfg)
1350 return -ENOMEM;
1351
1352 iommu_group_set_iommudata(group, cfg,
1353 __arm_smmu_release_pci_iommudata);
1354 }
1355
1356 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
1357 return -ENOSPC;
1358
1359 /*
1360 * Assume Stream ID == Requester ID for now.
1361 * We need a way to describe the ID mappings in FDT.
1362 */
1363 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1364 for (i = 0; i < cfg->num_streamids; ++i)
1365 if (cfg->streamids[i] == sid)
1366 break;
1367
1368 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1369 if (i == cfg->num_streamids)
1370 cfg->streamids[cfg->num_streamids++] = sid;
1371
1372 return 0;
1373 }
1374
1375 static int arm_smmu_init_platform_device(struct device *dev,
1376 struct iommu_group *group)
1377 {
1378 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
1379 struct arm_smmu_master *master;
1380
1381 if (!smmu)
1382 return -ENODEV;
1383
1384 master = find_smmu_master(smmu, dev->of_node);
1385 if (!master)
1386 return -ENODEV;
1387
1388 iommu_group_set_iommudata(group, &master->cfg, NULL);
1389
1390 return 0;
1391 }
1392
1393 static int arm_smmu_add_device(struct device *dev)
1394 {
1395 struct iommu_group *group;
1396
1397 group = iommu_group_get_for_dev(dev);
1398 if (IS_ERR(group))
1399 return PTR_ERR(group);
1400
1401 iommu_group_put(group);
1402 return 0;
1403 }
1404
1405 static void arm_smmu_remove_device(struct device *dev)
1406 {
1407 iommu_group_remove_device(dev);
1408 }
1409
1410 static struct iommu_group *arm_smmu_device_group(struct device *dev)
1411 {
1412 struct iommu_group *group;
1413 int ret;
1414
1415 if (dev_is_pci(dev))
1416 group = pci_device_group(dev);
1417 else
1418 group = generic_device_group(dev);
1419
1420 if (IS_ERR(group))
1421 return group;
1422
1423 if (dev_is_pci(dev))
1424 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
1425 else
1426 ret = arm_smmu_init_platform_device(dev, group);
1427
1428 if (ret) {
1429 iommu_group_put(group);
1430 group = ERR_PTR(ret);
1431 }
1432
1433 return group;
1434 }
1435
1436 static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1437 enum iommu_attr attr, void *data)
1438 {
1439 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1440
1441 switch (attr) {
1442 case DOMAIN_ATTR_NESTING:
1443 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1444 return 0;
1445 default:
1446 return -ENODEV;
1447 }
1448 }
1449
1450 static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1451 enum iommu_attr attr, void *data)
1452 {
1453 int ret = 0;
1454 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1455
1456 mutex_lock(&smmu_domain->init_mutex);
1457
1458 switch (attr) {
1459 case DOMAIN_ATTR_NESTING:
1460 if (smmu_domain->smmu) {
1461 ret = -EPERM;
1462 goto out_unlock;
1463 }
1464
1465 if (*(int *)data)
1466 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1467 else
1468 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1469
1470 break;
1471 default:
1472 ret = -ENODEV;
1473 }
1474
1475 out_unlock:
1476 mutex_unlock(&smmu_domain->init_mutex);
1477 return ret;
1478 }
1479
1480 static struct iommu_ops arm_smmu_ops = {
1481 .capable = arm_smmu_capable,
1482 .domain_alloc = arm_smmu_domain_alloc,
1483 .domain_free = arm_smmu_domain_free,
1484 .attach_dev = arm_smmu_attach_dev,
1485 .map = arm_smmu_map,
1486 .unmap = arm_smmu_unmap,
1487 .map_sg = default_iommu_map_sg,
1488 .iova_to_phys = arm_smmu_iova_to_phys,
1489 .add_device = arm_smmu_add_device,
1490 .remove_device = arm_smmu_remove_device,
1491 .device_group = arm_smmu_device_group,
1492 .domain_get_attr = arm_smmu_domain_get_attr,
1493 .domain_set_attr = arm_smmu_domain_set_attr,
1494 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1495 };
1496
1497 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1498 {
1499 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1500 void __iomem *cb_base;
1501 int i = 0;
1502 u32 reg;
1503
1504 /* clear global FSR */
1505 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1506 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1507
1508 /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
1509 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
1510 for (i = 0; i < smmu->num_mapping_groups; ++i) {
1511 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
1512 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
1513 }
1514
1515 /* Make sure all context banks are disabled and clear CB_FSR */
1516 for (i = 0; i < smmu->num_context_banks; ++i) {
1517 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
1518 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1519 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
1520 }
1521
1522 /* Invalidate the TLB, just in case */
1523 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1524 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1525
1526 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
1527
1528 /* Enable fault reporting */
1529 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
1530
1531 /* Disable TLB broadcasting. */
1532 reg |= (sCR0_VMIDPNE | sCR0_PTM);
1533
1534 /* Enable client access, handling unmatched streams as appropriate */
1535 reg &= ~sCR0_CLIENTPD;
1536 if (disable_bypass)
1537 reg |= sCR0_USFCFG;
1538 else
1539 reg &= ~sCR0_USFCFG;
1540
1541 /* Disable forced broadcasting */
1542 reg &= ~sCR0_FB;
1543
1544 /* Don't upgrade barriers */
1545 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
1546
1547 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1548 reg |= sCR0_VMID16EN;
1549
1550 /* Push the button */
1551 __arm_smmu_tlb_sync(smmu);
1552 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
1553 }
1554
1555 static int arm_smmu_id_size_to_bits(int size)
1556 {
1557 switch (size) {
1558 case 0:
1559 return 32;
1560 case 1:
1561 return 36;
1562 case 2:
1563 return 40;
1564 case 3:
1565 return 42;
1566 case 4:
1567 return 44;
1568 case 5:
1569 default:
1570 return 48;
1571 }
1572 }
1573
1574 static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1575 {
1576 unsigned long size;
1577 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1578 u32 id;
1579 bool cttw_dt, cttw_reg;
1580
1581 dev_notice(smmu->dev, "probing hardware configuration...\n");
1582 dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
1583
1584 /* ID0 */
1585 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
1586
1587 /* Restrict available stages based on module parameter */
1588 if (force_stage == 1)
1589 id &= ~(ID0_S2TS | ID0_NTS);
1590 else if (force_stage == 2)
1591 id &= ~(ID0_S1TS | ID0_NTS);
1592
1593 if (id & ID0_S1TS) {
1594 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1595 dev_notice(smmu->dev, "\tstage 1 translation\n");
1596 }
1597
1598 if (id & ID0_S2TS) {
1599 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1600 dev_notice(smmu->dev, "\tstage 2 translation\n");
1601 }
1602
1603 if (id & ID0_NTS) {
1604 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1605 dev_notice(smmu->dev, "\tnested translation\n");
1606 }
1607
1608 if (!(smmu->features &
1609 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
1610 dev_err(smmu->dev, "\tno translation support!\n");
1611 return -ENODEV;
1612 }
1613
1614         if ((id & ID0_S1TS) && ((smmu->version == ARM_SMMU_V1) || !(id & ID0_ATOSNS))) {
1615 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1616 dev_notice(smmu->dev, "\taddress translation ops\n");
1617 }
1618
1619 /*
1620 * In order for DMA API calls to work properly, we must defer to what
1621 * the DT says about coherency, regardless of what the hardware claims.
1622 * Fortunately, this also opens up a workaround for systems where the
1623 * ID register value has ended up configured incorrectly.
1624 */
1625 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
1626 cttw_reg = !!(id & ID0_CTTW);
1627 if (cttw_dt)
1628 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
1629 if (cttw_dt || cttw_reg)
1630 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1631 cttw_dt ? "" : "non-");
1632 if (cttw_dt != cttw_reg)
1633 dev_notice(smmu->dev,
1634 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
1635
1636 if (id & ID0_SMS) {
1637 u32 smr, sid, mask;
1638
1639 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1640 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
1641 ID0_NUMSMRG_MASK;
1642 if (smmu->num_mapping_groups == 0) {
1643 dev_err(smmu->dev,
1644 "stream-matching supported, but no SMRs present!\n");
1645 return -ENODEV;
1646 }
1647
1648 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
1649 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
1650 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1651 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1652
1653 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
1654 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
1655 if ((mask & sid) != sid) {
1656 dev_err(smmu->dev,
1657 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
1658 mask, sid);
1659 return -ENODEV;
1660 }
1661
1662 dev_notice(smmu->dev,
1663 "\tstream matching with %u register groups, mask 0x%x",
1664 smmu->num_mapping_groups, mask);
1665 } else {
1666 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
1667 ID0_NUMSIDB_MASK;
1668 }
1669
1670 /* ID1 */
1671 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
1672 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
1673
1674 /* Check for size mismatch of SMMU address space from mapped region */
1675 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
1676 size *= 2 << smmu->pgshift;
1677 if (smmu->size != size)
1678 dev_warn(smmu->dev,
1679 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1680 size, smmu->size);
1681
1682 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
1683 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1684 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1685 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1686 return -ENODEV;
1687 }
1688 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1689 smmu->num_context_banks, smmu->num_s2_context_banks);
1690 /*
1691 * Cavium CN88xx erratum #27704.
1692 * Ensure ASID and VMID allocation is unique across all SMMUs in
1693 * the system.
1694 */
1695 if (smmu->model == CAVIUM_SMMUV2) {
1696 smmu->cavium_id_base =
1697 atomic_add_return(smmu->num_context_banks,
1698 &cavium_smmu_context_count);
1699 smmu->cavium_id_base -= smmu->num_context_banks;
1700 }
1701
1702 /* ID2 */
1703 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1704 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
1705 smmu->ipa_size = size;
1706
1707 /* The output mask is also applied for bypass */
1708 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
1709 smmu->pa_size = size;
1710
1711 if (id & ID2_VMID16)
1712 smmu->features |= ARM_SMMU_FEAT_VMID16;
1713
1714 /*
1715 * What the page table walker can address actually depends on which
1716 * descriptor format is in use, but since a) we don't know that yet,
1717 * and b) it can vary per context bank, this will have to do...
1718 */
1719 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1720 dev_warn(smmu->dev,
1721 "failed to set DMA mask for table walker\n");
1722
1723 if (smmu->version == ARM_SMMU_V1) {
1724 smmu->va_size = smmu->ipa_size;
1725 size = SZ_4K | SZ_2M | SZ_1G;
1726 } else {
1727 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
1728 smmu->va_size = arm_smmu_id_size_to_bits(size);
1729 #ifndef CONFIG_64BIT
1730 smmu->va_size = min(32UL, smmu->va_size);
1731 #endif
1732 size = 0;
1733 if (id & ID2_PTFS_4K)
1734 size |= SZ_4K | SZ_2M | SZ_1G;
1735 if (id & ID2_PTFS_16K)
1736 size |= SZ_16K | SZ_32M;
1737 if (id & ID2_PTFS_64K)
1738 size |= SZ_64K | SZ_512M;
1739 }
1740
1741 arm_smmu_ops.pgsize_bitmap &= size;
1742 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
1743
1744 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1745 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1746 smmu->va_size, smmu->ipa_size);
1747
1748 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1749 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1750 smmu->ipa_size, smmu->pa_size);
1751
1752 return 0;
1753 }
1754
1755 struct arm_smmu_match_data {
1756 enum arm_smmu_arch_version version;
1757 enum arm_smmu_implementation model;
1758 };
1759
1760 #define ARM_SMMU_MATCH_DATA(name, ver, imp) \
1761 static struct arm_smmu_match_data name = { .version = ver, .model = imp }
1762
1763 ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1764 ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
1765 ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
1766
1767 static const struct of_device_id arm_smmu_of_match[] = {
1768 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1769 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1770 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1771 { .compatible = "arm,mmu-401", .data = &smmu_generic_v1 },
1772 { .compatible = "arm,mmu-500", .data = &smmu_generic_v2 },
1773 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1774 { },
1775 };
1776 MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
1777
1778 static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1779 {
1780 const struct of_device_id *of_id;
1781 const struct arm_smmu_match_data *data;
1782 struct resource *res;
1783 struct arm_smmu_device *smmu;
1784 struct device *dev = &pdev->dev;
1785 struct rb_node *node;
1786 struct of_phandle_args masterspec;
1787 int num_irqs, i, err;
1788
1789 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
1790 if (!smmu) {
1791 dev_err(dev, "failed to allocate arm_smmu_device\n");
1792 return -ENOMEM;
1793 }
1794 smmu->dev = dev;
1795
1796 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
1797 data = of_id->data;
1798 smmu->version = data->version;
1799 smmu->model = data->model;
1800
1801 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1802 smmu->base = devm_ioremap_resource(dev, res);
1803 if (IS_ERR(smmu->base))
1804 return PTR_ERR(smmu->base);
1805 smmu->size = resource_size(res);
1806
1807 if (of_property_read_u32(dev->of_node, "#global-interrupts",
1808 &smmu->num_global_irqs)) {
1809 dev_err(dev, "missing #global-interrupts property\n");
1810 return -ENODEV;
1811 }
1812
1813 num_irqs = 0;
1814 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
1815 num_irqs++;
1816 if (num_irqs > smmu->num_global_irqs)
1817 smmu->num_context_irqs++;
1818 }
1819
1820 if (!smmu->num_context_irqs) {
1821 dev_err(dev, "found %d interrupts but expected at least %d\n",
1822 num_irqs, smmu->num_global_irqs + 1);
1823 return -ENODEV;
1824 }
1825
1826 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
1827 GFP_KERNEL);
1828 if (!smmu->irqs) {
1829 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
1830 return -ENOMEM;
1831 }
1832
1833 for (i = 0; i < num_irqs; ++i) {
1834 int irq = platform_get_irq(pdev, i);
1835
1836 if (irq < 0) {
1837 dev_err(dev, "failed to get irq index %d\n", i);
1838 return -ENODEV;
1839 }
1840 smmu->irqs[i] = irq;
1841 }
1842
1843 err = arm_smmu_device_cfg_probe(smmu);
1844 if (err)
1845 return err;
1846
1847 i = 0;
1848 smmu->masters = RB_ROOT;
1849 while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
1850 "#stream-id-cells", i,
1851 &masterspec)) {
1852 err = register_smmu_master(smmu, dev, &masterspec);
1853 if (err) {
1854 dev_err(dev, "failed to add master %s\n",
1855 masterspec.np->name);
1856 goto out_put_masters;
1857 }
1858
1859 i++;
1860 }
1861 dev_notice(dev, "registered %d master devices\n", i);
1862
1863 parse_driver_options(smmu);
1864
1865 if (smmu->version > ARM_SMMU_V1 &&
1866 smmu->num_context_banks != smmu->num_context_irqs) {
1867 dev_err(dev,
1868 "found only %d context interrupt(s) but %d required\n",
1869 smmu->num_context_irqs, smmu->num_context_banks);
1870 err = -ENODEV;
1871 goto out_put_masters;
1872 }
1873
1874 for (i = 0; i < smmu->num_global_irqs; ++i) {
1875 err = request_irq(smmu->irqs[i],
1876 arm_smmu_global_fault,
1877 IRQF_SHARED,
1878 "arm-smmu global fault",
1879 smmu);
1880 if (err) {
1881 dev_err(dev, "failed to request global IRQ %d (%u)\n",
1882 i, smmu->irqs[i]);
1883 goto out_free_irqs;
1884 }
1885 }
1886
1887 INIT_LIST_HEAD(&smmu->list);
1888 spin_lock(&arm_smmu_devices_lock);
1889 list_add(&smmu->list, &arm_smmu_devices);
1890 spin_unlock(&arm_smmu_devices_lock);
1891
1892 arm_smmu_device_reset(smmu);
1893 return 0;
1894
1895 out_free_irqs:
1896 while (i--)
1897 free_irq(smmu->irqs[i], smmu);
1898
1899 out_put_masters:
1900 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
1901 struct arm_smmu_master *master
1902 = container_of(node, struct arm_smmu_master, node);
1903 of_node_put(master->of_node);
1904 }
1905
1906 return err;
1907 }
1908
1909 static int arm_smmu_device_remove(struct platform_device *pdev)
1910 {
1911 int i;
1912 struct device *dev = &pdev->dev;
1913 struct arm_smmu_device *curr, *smmu = NULL;
1914 struct rb_node *node;
1915
1916 spin_lock(&arm_smmu_devices_lock);
1917 list_for_each_entry(curr, &arm_smmu_devices, list) {
1918 if (curr->dev == dev) {
1919 smmu = curr;
1920 list_del(&smmu->list);
1921 break;
1922 }
1923 }
1924 spin_unlock(&arm_smmu_devices_lock);
1925
1926 if (!smmu)
1927 return -ENODEV;
1928
1929 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
1930 struct arm_smmu_master *master
1931 = container_of(node, struct arm_smmu_master, node);
1932 of_node_put(master->of_node);
1933 }
1934
1935 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
1936 dev_err(dev, "removing device with active domains!\n");
1937
1938 for (i = 0; i < smmu->num_global_irqs; ++i)
1939 free_irq(smmu->irqs[i], smmu);
1940
1941 /* Turn the thing off */
1942 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
1943 return 0;
1944 }
1945
1946 static struct platform_driver arm_smmu_driver = {
1947 .driver = {
1948 .name = "arm-smmu",
1949 .of_match_table = of_match_ptr(arm_smmu_of_match),
1950 },
1951 .probe = arm_smmu_device_dt_probe,
1952 .remove = arm_smmu_device_remove,
1953 };
1954
1955 static int __init arm_smmu_init(void)
1956 {
1957 struct device_node *np;
1958 int ret;
1959
1960 /*
1961 * Play nice with systems that don't have an ARM SMMU by checking that
1962 * an ARM SMMU exists in the system before proceeding with the driver
1963 * and IOMMU bus operation registration.
1964 */
1965 np = of_find_matching_node(NULL, arm_smmu_of_match);
1966 if (!np)
1967 return 0;
1968
1969 of_node_put(np);
1970
1971 ret = platform_driver_register(&arm_smmu_driver);
1972 if (ret)
1973 return ret;
1974
1975 /* Oh, for a proper bus abstraction */
1976 if (!iommu_present(&platform_bus_type))
1977 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
1978
1979 #ifdef CONFIG_ARM_AMBA
1980 if (!iommu_present(&amba_bustype))
1981 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
1982 #endif
1983
1984 #ifdef CONFIG_PCI
1985 if (!iommu_present(&pci_bus_type))
1986 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
1987 #endif
1988
1989 return 0;
1990 }
1991
1992 static void __exit arm_smmu_exit(void)
1993 {
1994 return platform_driver_unregister(&arm_smmu_driver);
1995 }
1996
1997 subsys_initcall(arm_smmu_init);
1998 module_exit(arm_smmu_exit);
1999
2000 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2001 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2002 MODULE_LICENSE("GPL v2");