/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 *	- Extended Stream ID (16 bit)
 */

#define pr_fmt(fmt) "arm-smmu: " fmt

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_iommu.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (1 << (smmu)->pgshift))

/*
 * SMMU global address space with conditional offset to access secure
 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
 * nsGFSYNR0: 0x450)
 */
#define ARM_SMMU_GR0_NS(smmu)						\
	((smmu)->base +							\
		((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS)	\
			? 0x400 : 0))

/*
 * Some 64-bit registers only make sense to write atomically, but in such
 * cases all the data relevant to AArch32 formats lies within the lower word,
 * therefore this actually makes more sense than it might first appear.
 */
#ifdef CONFIG_64BIT
#define smmu_write_atomic_lq		writeq_relaxed
#else
#define smmu_write_atomic_lq		writel_relaxed
#endif

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_EXIDENABLE			(1 << 3)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_VMID16EN			(1 << 31)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Auxiliary Configuration register */
#define ARM_SMMU_GR0_sACR		0x10

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_ATOSNS			(1 << 26)
#define ID0_PTFS_NO_AARCH32		(1 << 25)
#define ID0_PTFS_NO_AARCH32S		(1 << 24)
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSIDB_SHIFT		9
#define ID0_NUMSIDB_MASK		0xf
#define ID0_EXIDS			(1 << 8)
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)
#define ID2_VMID16			(1 << 15)

#define ID7_MAJOR_SHIFT			4
#define ID7_MAJOR_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_ID_SHIFT			0

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_EXIDVALID			(1 << 10)
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
enum arm_smmu_s2cr_type {
	S2CR_TYPE_TRANS,
	S2CR_TYPE_BYPASS,
	S2CR_TYPE_FAULT,
};

#define S2CR_PRIVCFG_SHIFT		24
#define S2CR_PRIVCFG_MASK		0x3
enum arm_smmu_s2cr_privcfg {
	S2CR_PRIVCFG_DEFAULT,
	S2CR_PRIVCFG_DIPAN,
	S2CR_PRIVCFG_UNPRIV,
	S2CR_PRIVCFG_PRIV,
};

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_BPSHCFG_SHIFT		8
#define CBAR_S1_BPSHCFG_MASK		3
#define CBAR_S1_BPSHCFG_NSH		3
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)
#define CBA2R_VMID_SHIFT		16
#define CBA2R_VMID_MASK			0xffff

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (1 << (smmu)->pgshift))
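/*
 * Context banks thus occupy the upper half of the SMMU register region,
 * one translation page (1 << pgshift) per bank. Illustrative values only:
 * with pgshift == 12 and a 128K region, bank 3's registers would start at
 * base + 0x10000 + (3 << 12).
 */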

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_ACTLR		0x4
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0		0x20
#define ARM_SMMU_CB_TTBR1		0x28
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_CONTEXTIDR		0x34
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_S1_MAIR1		0x3c
#define ARM_SMMU_CB_PAR			0x50
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR			0x60
#define ARM_SMMU_CB_FSYNR0		0x68
#define ARM_SMMU_CB_S1_TLBIVA		0x600
#define ARM_SMMU_CB_S1_TLBIASID		0x610
#define ARM_SMMU_CB_S1_TLBIVAL		0x620
#define ARM_SMMU_CB_S2_TLBIIPAS2	0x630
#define ARM_SMMU_CB_S2_TLBIIPAS2L	0x638
#define ARM_SMMU_CB_ATS1PR		0x800
#define ARM_SMMU_CB_ATSR		0x8f0

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)

#define ARM_MMU500_ACTLR_CPRE		(1 << 1)

#define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
#define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

#define CB_PAR_F			(1 << 0)

#define ATSR_ACTIVE			(1 << 0)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_UPSTREAM		(0x7 << TTBCR2_SEP_SHIFT)
#define TTBCR2_AS			(1 << 4)

#define TTBRn_ASID_SHIFT		48

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | \
					 FSR_TLBMCF | FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT | \
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)

static int force_stage;
module_param(force_stage, int, S_IRUGO);
MODULE_PARM_DESC(force_stage,
	"Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
static bool disable_bypass;
module_param(disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V1_64K,
	ARM_SMMU_V2,
};

enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};

struct arm_smmu_s2cr {
	struct iommu_group		*group;
	int				count;
	enum arm_smmu_s2cr_type		type;
	enum arm_smmu_s2cr_privcfg	privcfg;
	u8				cbndx;
};

#define s2cr_init_val (struct arm_smmu_s2cr){				\
	.type = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS,	\
}
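/*
 * Both fresh and newly-released stream map entries take this
 * compound-literal default, i.e. an unclaimed S2CR falls back to bypass,
 * or to faulting when the disable_bypass module parameter is set.
 */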

struct arm_smmu_smr {
	u16				mask;
	u16				id;
	bool				valid;
};

struct arm_smmu_master_cfg {
	struct arm_smmu_device		*smmu;
	s16				smendx[];
};
#define INVALID_SMENDX			-1
#define __fwspec_cfg(fw) ((struct arm_smmu_master_cfg *)fw->iommu_priv)
#define fwspec_smmu(fw)  (__fwspec_cfg(fw)->smmu)
#define fwspec_smendx(fw, i) \
	(i >= fw->num_ids ? INVALID_SMENDX : __fwspec_cfg(fw)->smendx[i])
#define for_each_cfg_sme(fw, i, idx) \
	for (i = 0; idx = fwspec_smendx(fw, i), i < fw->num_ids; ++i)
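/*
 * Note the comma operator in the loop condition above: idx is refreshed
 * from fwspec_smendx() on every pass (including the final one that fails
 * the i < fw->num_ids test), so idx reads as INVALID_SMENDX once i runs
 * off the end of the ID list.
 */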

struct arm_smmu_device {
	struct device			*dev;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pgshift;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
#define ARM_SMMU_FEAT_TRANS_OPS		(1 << 5)
#define ARM_SMMU_FEAT_VMID16		(1 << 6)
#define ARM_SMMU_FEAT_FMT_AARCH64_4K	(1 << 7)
#define ARM_SMMU_FEAT_FMT_AARCH64_16K	(1 << 8)
#define ARM_SMMU_FEAT_FMT_AARCH64_64K	(1 << 9)
#define ARM_SMMU_FEAT_FMT_AARCH32_L	(1 << 10)
#define ARM_SMMU_FEAT_FMT_AARCH32_S	(1 << 11)
#define ARM_SMMU_FEAT_EXIDS		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
	u32				options;
	enum arm_smmu_arch_version	version;
	enum arm_smmu_implementation	model;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	u16				streamid_mask;
	u16				smr_mask_mask;
	struct arm_smmu_smr		*smrs;
	struct arm_smmu_s2cr		*s2crs;
	struct mutex			stream_map_mutex;

	unsigned long			va_size;
	unsigned long			ipa_size;
	unsigned long			pa_size;
	unsigned long			pgsize_bitmap;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	u32				cavium_id_base; /* Specific to Cavium */
};

enum arm_smmu_context_fmt {
	ARM_SMMU_CTX_FMT_NONE,
	ARM_SMMU_CTX_FMT_AARCH64,
	ARM_SMMU_CTX_FMT_AARCH32_L,
	ARM_SMMU_CTX_FMT_AARCH32_S,
};

struct arm_smmu_cfg {
	u8				cbndx;
	u8				irptndx;
	u32				cbar;
	enum arm_smmu_context_fmt	fmt;
};
#define INVALID_IRPTNDX			0xff

#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
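/*
 * ASIDs and VMIDs are derived directly from the context bank index,
 * offset by cavium_id_base so that they stay unique across the multiple
 * SMMUs of a Cavium CN88xx (erratum #27704, handled in
 * arm_smmu_device_cfg_probe()); cavium_id_base is zero everywhere else.
 */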

enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;
	struct arm_smmu_cfg		cfg;
	enum arm_smmu_domain_stage	stage;
	struct mutex			init_mutex; /* Protects smmu pointer */
	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);

static bool using_legacy_binding, using_generic_binding;

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
					  arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				   arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

static struct device_node *dev_get_dev_node(struct device *dev)
{
	if (dev_is_pci(dev)) {
		struct pci_bus *bus = to_pci_dev(dev)->bus;

		while (!pci_is_root_bus(bus))
			bus = bus->parent;
		return of_node_get(bus->bridge->parent->of_node);
	}

	return of_node_get(dev->of_node);
}

static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
{
	*((__be32 *)data) = cpu_to_be32(alias);
	return 0; /* Continue walking */
}

static int __find_legacy_master_phandle(struct device *dev, void *data)
{
	struct of_phandle_iterator *it = *(void **)data;
	struct device_node *np = it->node;
	int err;

	of_for_each_phandle(it, err, dev->of_node, "mmu-masters",
			    "#stream-id-cells", 0)
		if (it->node == np) {
			*(void **)data = dev;
			return 1;
		}
	it->node = np;
	return err == -ENOENT ? 0 : err;
}

static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;

static int arm_smmu_register_legacy_master(struct device *dev,
					   struct arm_smmu_device **smmu)
{
	struct device *smmu_dev;
	struct device_node *np;
	struct of_phandle_iterator it;
	void *data = &it;
	u32 *sids;
	__be32 pci_sid;
	int err;

	np = dev_get_dev_node(dev);
	if (!np || !of_find_property(np, "#stream-id-cells", NULL)) {
		of_node_put(np);
		return -ENODEV;
	}

	it.node = np;
	err = driver_for_each_device(&arm_smmu_driver.driver, NULL, &data,
				     __find_legacy_master_phandle);
	smmu_dev = data;
	of_node_put(np);
	if (err == 0)
		return -ENODEV;
	if (err < 0)
		return err;

	if (dev_is_pci(dev)) {
		/* "mmu-masters" assumes Stream ID == Requester ID */
		pci_for_each_dma_alias(to_pci_dev(dev), __arm_smmu_get_pci_sid,
				       &pci_sid);
		it.cur = &pci_sid;
		it.cur_count = 1;
	}

	err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode,
				&arm_smmu_ops);
	if (err)
		return err;

	sids = kcalloc(it.cur_count, sizeof(*sids), GFP_KERNEL);
	if (!sids)
		return -ENOMEM;

	*smmu = dev_get_drvdata(smmu_dev);
	of_phandle_iterator_args(&it, sids, it.cur_count);
	err = iommu_fwspec_add_ids(dev, sids, it.cur_count);
	kfree(sids);
	return err;
}

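/*
 * Allocate a free index from the bitmap without holding a lock: find the
 * first clear bit, then claim it with an atomic test_and_set_bit(),
 * retrying the search if another CPU grabbed the same bit first.
 */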
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

/* Wait for any pending TLB invalidations to complete */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *base;

	if (stage1) {
		base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
			       base + ARM_SMMU_CB_S1_TLBIASID);
	} else {
		base = ARM_SMMU_GR0(smmu);
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
			       base + ARM_SMMU_GR0_TLBIVMID);
	}

	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	void __iomem *reg;

	if (stage1) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;

		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
			iova &= ~12UL;
			iova |= ARM_SMMU_CB_ASID(smmu, cfg);
			do {
				writel_relaxed(iova, reg);
				iova += granule;
			} while (size -= granule);
		} else {
			iova >>= 12;
			iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
			do {
				writeq_relaxed(iova, reg);
				iova += granule >> 12;
			} while (size -= granule);
		}
	} else if (smmu->version == ARM_SMMU_V2) {
		reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
			      ARM_SMMU_CB_S2_TLBIIPAS2;
		iova >>= 12;
		do {
			smmu_write_atomic_lq(iova, reg);
			iova += granule >> 12;
		} while (size -= granule);
	} else {
		reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
		writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
	}
}

static const struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};
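/*
 * These callbacks are handed to the io-pgtable code (via the
 * io_pgtable_cfg built in arm_smmu_init_domain_context() below):
 * tlb_add_flush issues per-page invalidations as entries are unmapped,
 * tlb_sync makes them visible by polling sTLBGSTATUS, and tlb_flush_all
 * invalidates the whole context by ASID or VMID.
 */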

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	u32 fsr, fsynr;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);

	dev_err_ratelimited(smmu->dev,
	"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cb=%d\n",
			    fsr, iova, fsynr, cfg->cbndx);

	writel(fsr, cb_base + ARM_SMMU_CB_FSR);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	if (!gfsr)
		return IRQ_NONE;

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	u32 reg, reg2;
	u64 reg64;
	bool stage1;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	void __iomem *cb_base, *gr1_base;

	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	if (smmu->version > ARM_SMMU_V1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
			reg = CBA2R_RW64_64BIT;
		else
			reg = CBA2R_RW64_32BIT;
		/* 16-bit VMIDs live in CBA2R */
		if (smmu->features & ARM_SMMU_FEAT_VMID16)
			reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;

		writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
	}

	/* CBAR */
	reg = cfg->cbar;
	if (smmu->version < ARM_SMMU_V2)
		reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/*
	 * Use the weakest shareability/memory types, so they are
	 * overridden by the ttbcr/pte.
	 */
	if (stage1) {
		reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
			(CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	} else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
		/* 8-bit VMIDs live in CBAR */
		reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
	}
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));

	/* TTBRs */
	if (stage1) {
		u16 asid = ARM_SMMU_CB_ASID(smmu, cfg);

		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[0];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0);
			reg = pgtbl_cfg->arm_v7s_cfg.ttbr[1];
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1);
			writel_relaxed(asid, cb_base + ARM_SMMU_CB_CONTEXTIDR);
		} else {
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
			reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
			reg64 |= (u64)asid << TTBRn_ASID_SHIFT;
			writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
		}
	} else {
		reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
		writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
	}

	/* TTBCR */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.tcr;
			reg2 = 0;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
			reg2 |= TTBCR2_SEP_UPSTREAM;
			if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
				reg2 |= TTBCR2_AS;
		}
		if (smmu->version > ARM_SMMU_V1)
			writel_relaxed(reg2, cb_base + ARM_SMMU_CB_TTBCR2);
	} else {
		reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	}
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIRs (stage-1 only) */
	if (stage1) {
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) {
			reg = pgtbl_cfg->arm_v7s_cfg.prrr;
			reg2 = pgtbl_cfg->arm_v7s_cfg.nmrr;
		} else {
			reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
			reg2 = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
		}
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
		writel_relaxed(reg2, cb_base + ARM_SMMU_CB_S1_MAIR1);
	}

	/* SCTLR */
	reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE | SCTLR_M;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct arm_smmu_device *smmu)
{
	int irq, start, ret = 0;
	unsigned long ias, oas;
	struct io_pgtable_ops *pgtbl_ops;
	struct io_pgtable_cfg pgtbl_cfg;
	enum io_pgtable_fmt fmt;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;

	mutex_lock(&smmu_domain->init_mutex);
	if (smmu_domain->smmu)
		goto out_unlock;

	/*
	 * Mapping the requested stage onto what we support is surprisingly
	 * complicated, mainly because the spec allows S1+S2 SMMUs without
	 * support for nested translation. That means we end up with the
	 * following table:
	 *
	 * Requested        Supported        Actual
	 *     S1               N              S1
	 *     S1             S1+S2            S1
	 *     S1               S2             S2
	 *     S1               S1             S1
	 *     N                N              N
	 *     N              S1+S2            S2
	 *     N                S2             S2
	 *     N                S1             S1
	 *
	 * Note that you can't actually request stage-2 mappings.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	/*
	 * Choosing a suitable context format is even more fiddly. Until we
	 * grow some way for the caller to express a preference, and/or move
	 * the decision into the io-pgtable code where it arguably belongs,
	 * just aim for the closest thing to the rest of the system, and hope
	 * that the hardware isn't esoteric enough that we can't assume AArch64
	 * support to be a superset of AArch32 support...
	 */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
	if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
	    !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
	    (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
	    (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
	if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
	    (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
			       ARM_SMMU_FEAT_FMT_AARCH64_16K |
			       ARM_SMMU_FEAT_FMT_AARCH64_4K)))
		cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;

	if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
		ret = -EINVAL;
		goto out_unlock;
	}

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
		ias = smmu->va_size;
		oas = smmu->ipa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S1;
		} else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
			fmt = ARM_32_LPAE_S1;
			ias = min(ias, 32UL);
			oas = min(oas, 40UL);
		} else {
			fmt = ARM_V7S;
			ias = min(ias, 32UL);
			oas = min(oas, 32UL);
		}
		break;
	case ARM_SMMU_DOMAIN_NESTED:
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
	case ARM_SMMU_DOMAIN_S2:
		cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
		ias = smmu->ipa_size;
		oas = smmu->pa_size;
		if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
			fmt = ARM_64_LPAE_S2;
		} else {
			fmt = ARM_32_LPAE_S2;
			ias = min(ias, 40UL);
			oas = min(oas, 40UL);
		}
		break;
	default:
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (ret < 0)
		goto out_unlock;

	cfg->cbndx = ret;
	if (smmu->version < ARM_SMMU_V2) {
		cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		cfg->irptndx %= smmu->num_context_irqs;
	} else {
		cfg->irptndx = cfg->cbndx;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	smmu_domain->smmu = smmu;
	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops) {
		ret = -ENOMEM;
		goto out_clear_smmu;
	}

	/* Update the domain's page sizes to reflect the page table format */
	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;

	/* Initialise the context bank with our page table cfg */
	arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);

	/*
	 * Request context fault interrupt. Do this last to avoid the
	 * handler seeing a half-initialised domain state.
	 */
	irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
	ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
			       IRQF_SHARED, "arm-smmu-context-fault", domain);
	if (ret < 0) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			cfg->irptndx, irq);
		cfg->irptndx = INVALID_IRPTNDX;
	}

	mutex_unlock(&smmu_domain->init_mutex);

	/* Publish page table ops for map/unmap */
	smmu_domain->pgtbl_ops = pgtbl_ops;
	return 0;

out_clear_smmu:
	smmu_domain->smmu = NULL;
out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	void __iomem *cb_base;
	int irq;

	if (!smmu)
		return;

	/*
	 * Disable the context bank and free the page tables before freeing
	 * it.
	 */
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
	writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);

	if (cfg->irptndx != INVALID_IRPTNDX) {
		irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
		devm_free_irq(smmu->dev, irq, domain);
	}

	free_io_pgtable_ops(smmu_domain->pgtbl_ops);
	__arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;
	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA && (using_legacy_binding ||
	    iommu_get_dma_cookie(&smmu_domain->domain))) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);

	return &smmu_domain->domain;
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}

static void arm_smmu_write_smr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_smr *smr = smmu->smrs + idx;
	u32 reg = smr->id << SMR_ID_SHIFT | smr->mask << SMR_MASK_SHIFT;

	if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid)
		reg |= SMR_VALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_SMR(idx));
}

static void arm_smmu_write_s2cr(struct arm_smmu_device *smmu, int idx)
{
	struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx;
	u32 reg = (s2cr->type & S2CR_TYPE_MASK) << S2CR_TYPE_SHIFT |
		  (s2cr->cbndx & S2CR_CBNDX_MASK) << S2CR_CBNDX_SHIFT |
		  (s2cr->privcfg & S2CR_PRIVCFG_MASK) << S2CR_PRIVCFG_SHIFT;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs &&
	    smmu->smrs[idx].valid)
		reg |= S2CR_EXIDVALID;
	writel_relaxed(reg, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_S2CR(idx));
}

static void arm_smmu_write_sme(struct arm_smmu_device *smmu, int idx)
{
	arm_smmu_write_s2cr(smmu, idx);
	if (smmu->smrs)
		arm_smmu_write_smr(smmu, idx);
}

/*
 * The width of SMR's mask field depends on sCR0_EXIDENABLE, so this function
 * should be called after sCR0 is written.
 */
static void arm_smmu_test_smr_masks(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 smr;

	if (!smmu->smrs)
		return;

	/*
	 * SMR.ID bits may not be preserved if the corresponding MASK
	 * bits are set, so check each one separately. We can reject
	 * masters later if they try to claim IDs outside these masks.
	 */
	smr = smmu->streamid_mask << SMR_ID_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->streamid_mask = smr >> SMR_ID_SHIFT;

	smr = smmu->streamid_mask << SMR_MASK_SHIFT;
	writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
	smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
	smmu->smr_mask_mask = smr >> SMR_MASK_SHIFT;
}

static int arm_smmu_find_sme(struct arm_smmu_device *smmu, u16 id, u16 mask)
{
	struct arm_smmu_smr *smrs = smmu->smrs;
	int i, free_idx = -ENOSPC;

	/* Stream indexing is blissfully easy */
	if (!smrs)
		return id;

	/* Validating SMRs is... less so */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		if (!smrs[i].valid) {
			/*
			 * Note the first free entry we come across, which
			 * we'll claim in the end if nothing else matches.
			 */
			if (free_idx < 0)
				free_idx = i;
			continue;
		}
		/*
		 * If the new entry is _entirely_ matched by an existing entry,
		 * then reuse that, with the guarantee that there also cannot
		 * be any subsequent conflicting entries. In normal use we'd
		 * expect simply identical entries for this case, but there's
		 * no harm in accommodating the generalisation.
		 */
		if ((mask & smrs[i].mask) == mask &&
		    !((id ^ smrs[i].id) & ~smrs[i].mask))
			return i;
		/*
		 * If the new entry has any other overlap with an existing one,
		 * though, then there always exists at least one stream ID
		 * which would cause a conflict, and we can't allow that risk.
		 */
		if (!((id ^ smrs[i].id) & ~(smrs[i].mask | mask)))
			return -EINVAL;
	}

	return free_idx;
}
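/*
 * A worked example of the matching above (illustrative values only): an
 * existing SMR {id = 0x400, mask = 0xff} already matches streams
 * 0x400-0x4ff. A new request {id = 0x420, mask = 0x0f} is entirely
 * contained ((0x0f & 0xff) == 0x0f and (0x420 ^ 0x400) & ~0xff == 0), so
 * that index is reused. A request {id = 0x500, mask = 0x1ff} overlaps
 * without containment ((0x500 ^ 0x400) & ~(0xff | 0x1ff) == 0), so it is
 * rejected with -EINVAL.
 */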

static bool arm_smmu_free_sme(struct arm_smmu_device *smmu, int idx)
{
	if (--smmu->s2crs[idx].count)
		return false;

	smmu->s2crs[idx] = s2cr_init_val;
	if (smmu->smrs)
		smmu->smrs[idx].valid = false;

	return true;
}

static int arm_smmu_master_alloc_smes(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = cfg->smmu;
	struct arm_smmu_smr *smrs = smmu->smrs;
	struct iommu_group *group;
	int i, idx, ret;

	mutex_lock(&smmu->stream_map_mutex);
	/* Figure out a viable stream map entry allocation */
	for_each_cfg_sme(fwspec, i, idx) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (idx != INVALID_SMENDX) {
			ret = -EEXIST;
			goto out_err;
		}

		ret = arm_smmu_find_sme(smmu, sid, mask);
		if (ret < 0)
			goto out_err;

		idx = ret;
		if (smrs && smmu->s2crs[idx].count == 0) {
			smrs[idx].id = sid;
			smrs[idx].mask = mask;
			smrs[idx].valid = true;
		}
		smmu->s2crs[idx].count++;
		cfg->smendx[i] = (s16)idx;
	}

	group = iommu_group_get_for_dev(dev);
	if (!group)
		group = ERR_PTR(-ENOMEM);
	if (IS_ERR(group)) {
		ret = PTR_ERR(group);
		goto out_err;
	}
	iommu_group_put(group);

	/* It worked! Now, poke the actual hardware */
	for_each_cfg_sme(fwspec, i, idx) {
		arm_smmu_write_sme(smmu, idx);
		smmu->s2crs[idx].group = group;
	}

	mutex_unlock(&smmu->stream_map_mutex);
	return 0;

out_err:
	while (i--) {
		arm_smmu_free_sme(smmu, cfg->smendx[i]);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
	return ret;
}

static void arm_smmu_master_free_smes(struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct arm_smmu_master_cfg *cfg = fwspec->iommu_priv;
	int i, idx;

	mutex_lock(&smmu->stream_map_mutex);
	for_each_cfg_sme(fwspec, i, idx) {
		if (arm_smmu_free_sme(smmu, idx))
			arm_smmu_write_sme(smmu, idx);
		cfg->smendx[i] = INVALID_SMENDX;
	}
	mutex_unlock(&smmu->stream_map_mutex);
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct iommu_fwspec *fwspec)
{
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2cr *s2cr = smmu->s2crs;
	enum arm_smmu_s2cr_type type = S2CR_TYPE_TRANS;
	u8 cbndx = smmu_domain->cfg.cbndx;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (type == s2cr[idx].type && cbndx == s2cr[idx].cbndx)
			continue;

		s2cr[idx].type = type;
		s2cr[idx].privcfg = S2CR_PRIVCFG_UNPRIV;
		s2cr[idx].cbndx = cbndx;
		arm_smmu_write_s2cr(smmu, idx);
	}
	return 0;
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	if (!fwspec || fwspec->ops != &arm_smmu_ops) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * FIXME: The arch/arm DMA API code tries to attach devices to its own
	 * domains between of_xlate() and add_device() - we have no way to cope
	 * with that, so until ARM gets converted to rely on groups and default
	 * domains, just say no (but more politely than by dereferencing NULL).
	 * This should be at least a WARN_ON once that's sorted.
	 */
	if (!fwspec->iommu_priv)
		return -ENODEV;

	smmu = fwspec_smmu(fwspec);
	/* Ensure that the domain is finalised */
	ret = arm_smmu_init_domain_context(domain, smmu);
	if (ret < 0)
		return ret;

	/*
	 * Sanity check the domain. We don't support domains across
	 * different SMMUs.
	 */
	if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
		return -EINVAL;
	}

	/* Looks ok, so add the device to the domain */
	return arm_smmu_domain_add_master(smmu_domain, fwspec);
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
					      dma_addr_t iova)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
	struct device *dev = smmu->dev;
	void __iomem *cb_base;
	u32 tmp;
	u64 phys;
	unsigned long va;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);

	/* ATS1 registers can only be written atomically */
	va = iova & ~0xfffUL;
	if (smmu->version == ARM_SMMU_V2)
		smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
	else /* Register is only 32-bit in v1 */
		writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);

	if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
				      !(tmp & ATSR_ACTIVE), 5, 50)) {
		dev_err(dev,
			"iova to phys timed out on %pad. Falling back to software table walk.\n",
			&iova);
		return ops->iova_to_phys(ops, iova);
	}

	phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
	if (phys & CB_PAR_F) {
		dev_err(dev, "translation fault!\n");
		dev_err(dev, "PAR = 0x%llx\n", phys);
		return 0;
	}

	return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
	    smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ret = arm_smmu_iova_to_phys_hard(domain, iova);
	} else {
		ret = ops->iova_to_phys(ops, iova);
	}

	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		/*
		 * Return true here as the SMMU can always send out coherent
		 * requests.
		 */
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_cfg *cfg;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	int i, ret;

	if (using_legacy_binding) {
		ret = arm_smmu_register_legacy_master(dev, &smmu);
		fwspec = dev->iommu_fwspec;
		if (ret)
			goto out_free;
	} else if (fwspec && fwspec->ops == &arm_smmu_ops) {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
	} else {
		return -ENODEV;
	}

	ret = -EINVAL;
	for (i = 0; i < fwspec->num_ids; i++) {
		u16 sid = fwspec->ids[i];
		u16 mask = fwspec->ids[i] >> SMR_MASK_SHIFT;

		if (sid & ~smmu->streamid_mask) {
			dev_err(dev, "stream ID 0x%x out of range for SMMU (0x%x)\n",
				sid, smmu->streamid_mask);
			goto out_free;
		}
		if (mask & ~smmu->smr_mask_mask) {
			dev_err(dev, "SMR mask 0x%x out of range for SMMU (0x%x)\n",
				mask, smmu->smr_mask_mask);
			goto out_free;
		}
	}

	ret = -ENOMEM;
	cfg = kzalloc(offsetof(struct arm_smmu_master_cfg, smendx[i]),
		      GFP_KERNEL);
	if (!cfg)
		goto out_free;

	cfg->smmu = smmu;
	fwspec->iommu_priv = cfg;
	while (i--)
		cfg->smendx[i] = INVALID_SMENDX;

	ret = arm_smmu_master_alloc_smes(dev);
	if (ret)
		goto out_free;

	return 0;

out_free:
	if (fwspec)
		kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
	return ret;
}

static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	arm_smmu_master_free_smes(fwspec);
	iommu_group_remove_device(dev);
	kfree(fwspec->iommu_priv);
	iommu_fwspec_free(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_device *smmu = fwspec_smmu(fwspec);
	struct iommu_group *group = NULL;
	int i, idx;

	for_each_cfg_sme(fwspec, i, idx) {
		if (group && smmu->s2crs[idx].group &&
		    group != smmu->s2crs[idx].group)
			return ERR_PTR(-EINVAL);

		group = smmu->s2crs[idx].group;
	}

	if (group)
		return iommu_group_ref_get(group);

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	u32 fwid = 0;

	if (args->args_count > 0)
		fwid |= (u16)args->args[0];

	if (args->args_count > 1)
		fwid |= (u16)args->args[1] << SMR_MASK_SHIFT;

	return iommu_fwspec_add_ids(dev, &fwid, 1);
}

static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.of_xlate		= arm_smmu_of_xlate,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	void __iomem *cb_base;
	int i;
	u32 reg, major;

	/* clear global FSR */
	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);

	/*
	 * Reset stream mapping groups: Initial values mark all SMRn as
	 * invalid and all S2CRn as bypass unless overridden.
	 */
	for (i = 0; i < smmu->num_mapping_groups; ++i)
		arm_smmu_write_sme(smmu, i);

	if (smmu->model == ARM_MMU500) {
		/*
		 * Before clearing ARM_MMU500_ACTLR_CPRE, need to
		 * clear CACHE_LOCK bit of ACR first. And, CACHE_LOCK
		 * bit is only present in MMU-500r2 onwards.
		 */
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID7);
		major = (reg >> ID7_MAJOR_SHIFT) & ID7_MAJOR_MASK;
		reg = readl_relaxed(gr0_base + ARM_SMMU_GR0_sACR);
		if (major >= 2)
			reg &= ~ARM_MMU500_ACR_CACHE_LOCK;
		/*
		 * Allow unmatched Stream IDs to allocate bypass
		 * TLB entries for reduced latency.
		 */
		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
	}

	/* Make sure all context banks are disabled and clear CB_FSR */
	for (i = 0; i < smmu->num_context_banks; ++i) {
		cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
		writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
		writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
		/*
		 * Disable MMU-500's not-particularly-beneficial next-page
		 * prefetcher for the sake of errata #841119 and #826419.
		 */
		if (smmu->model == ARM_MMU500) {
			reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
			reg &= ~ARM_MMU500_ACTLR_CPRE;
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
		}
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);

	/* Enable fault reporting */
	reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	reg |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, handling unmatched streams as appropriate */
	reg &= ~sCR0_CLIENTPD;
	if (disable_bypass)
		reg |= sCR0_USFCFG;
	else
		reg &= ~sCR0_USFCFG;

	/* Disable forced broadcasting */
	reg &= ~sCR0_FB;

	/* Don't upgrade barriers */
	reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	if (smmu->features & ARM_SMMU_FEAT_VMID16)
		reg |= sCR0_VMID16EN;

	if (smmu->features & ARM_SMMU_FEAT_EXIDS)
		reg |= sCR0_EXIDENABLE;

	/* Push the button */
	__arm_smmu_tlb_sync(smmu);
	writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
}

static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;
	bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK;
	int i;

	dev_notice(smmu->dev, "probing hardware configuration...\n");
	dev_notice(smmu->dev, "SMMUv%d with:\n",
		   smmu->version == ARM_SMMU_V2 ? 2 : 1);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);

	/* Restrict available stages based on module parameter */
	if (force_stage == 1)
		id &= ~(ID0_S2TS | ID0_NTS);
	else if (force_stage == 2)
		id &= ~(ID0_S1TS | ID0_NTS);

	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if ((id & ID0_S1TS) &&
	    ((smmu->version < ARM_SMMU_V2) || !(id & ID0_ATOSNS))) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
		dev_notice(smmu->dev, "\taddress translation ops\n");
	}

	/*
	 * In order for DMA API calls to work properly, we must defer to what
	 * the FW says about coherency, regardless of what the hardware claims.
	 * Fortunately, this also opens up a workaround for systems where the
	 * ID register value has ended up configured incorrectly.
	 */
	cttw_reg = !!(id & ID0_CTTW);
	if (cttw_fw || cttw_reg)
		dev_notice(smmu->dev, "\t%scoherent table walk\n",
			   cttw_fw ? "" : "non-");
	if (cttw_fw != cttw_reg)
		dev_notice(smmu->dev,
			   "\t(IDR0.CTTW overridden by FW configuration)\n");

	/* Max. number of entries we have for stream matching/indexing */
	if (smmu->version == ARM_SMMU_V2 && id & ID0_EXIDS) {
		smmu->features |= ARM_SMMU_FEAT_EXIDS;
		size = 1 << 16;
	} else {
		size = 1 << ((id >> ID0_NUMSIDB_SHIFT) & ID0_NUMSIDB_MASK);
	}
	smmu->streamid_mask = size - 1;
	if (id & ID0_SMS) {
		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		size = (id >> ID0_NUMSMRG_SHIFT) & ID0_NUMSMRG_MASK;
		if (size == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		/* Zero-initialised to mark as invalid */
		smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs),
					  GFP_KERNEL);
		if (!smmu->smrs)
			return -ENOMEM;

		dev_notice(smmu->dev,
			   "\tstream matching with %lu register groups\n", size);
1804 }
1805 /* s2cr->type == 0 means translation, so initialise explicitly */
1806 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs),
1807 GFP_KERNEL);
1808 if (!smmu->s2crs)
1809 return -ENOMEM;
1810 for (i = 0; i < size; i++)
1811 smmu->s2crs[i] = s2cr_init_val;
1812
1813 smmu->num_mapping_groups = size;
1814 mutex_init(&smmu->stream_map_mutex);
1815
1816 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1817 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1818 if (!(id & ID0_PTFS_NO_AARCH32S))
1819 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1820 }
1821
1822 /* ID1 */
1823 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
1824 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
1825
1826 /* Check for size mismatch of SMMU address space from mapped region */
1827 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
1828 size *= 2 << smmu->pgshift;
1829 if (smmu->size != size)
1830 dev_warn(smmu->dev,
1831 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1832 size, smmu->size);
1833
1834 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
1835 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1836 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1837 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1838 return -ENODEV;
1839 }
1840 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1841 smmu->num_context_banks, smmu->num_s2_context_banks);
1842 /*
1843 * Cavium CN88xx erratum #27704.
1844 * Ensure ASID and VMID allocation is unique across all SMMUs in
1845 * the system.
1846 */
1847 if (smmu->model == CAVIUM_SMMUV2) {
1848 smmu->cavium_id_base =
1849 atomic_add_return(smmu->num_context_banks,
1850 &cavium_smmu_context_count);
1851 smmu->cavium_id_base -= smmu->num_context_banks;
1852 }

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
	smmu->ipa_size = size;

	/* The output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->pa_size = size;

	if (id & ID2_VMID16)
		smmu->features |= ARM_SMMU_FEAT_VMID16;

	/*
	 * What the page table walker can address actually depends on which
	 * descriptor format is in use, but since a) we don't know that yet,
	 * and b) it can vary per context bank, this will have to do...
	 */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	if (smmu->version < ARM_SMMU_V2) {
		smmu->va_size = smmu->ipa_size;
		if (smmu->version == ARM_SMMU_V1_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	} else {
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		smmu->va_size = arm_smmu_id_size_to_bits(size);
		if (id & ID2_PTFS_4K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
		if (id & ID2_PTFS_16K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
		if (id & ID2_PTFS_64K)
			smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
	}

	/* Now we've corralled the various formats, what'll it do? */
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
		smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
	if (smmu->features &
	    (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
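	/*
	 * For example, an implementation supporting only the AArch64 4K
	 * and 64K granules ends up with pgsize_bitmap =
	 * SZ_4K | SZ_2M | SZ_1G | SZ_64K | SZ_512M = 0x60211000.
	 */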

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
	dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n",
		   smmu->pgsize_bitmap);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
		dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
			   smmu->va_size, smmu->ipa_size);

	if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
		dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
			   smmu->ipa_size, smmu->pa_size);

	return 0;
}

struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

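/*
 * Each invocation below expands to a static instance, e.g.
 * ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500) becomes:
 *
 *	static struct arm_smmu_match_data arm_mmu500 =
 *		{ .version = ARM_SMMU_V2, .model = ARM_MMU500 };
 */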
ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu401, ARM_SMMU_V1_64K, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);

static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &arm_mmu401 },
	{ .compatible = "arm,mmu-500", .data = &arm_mmu500 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
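
/*
 * An illustrative (made-up) DT node that the table above would match;
 * the reg and interrupt values here are not from any real platform:
 *
 *	smmu@2b400000 {
 *		compatible = "arm,mmu-500";
 *		reg = <0x2b400000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 229 4>, <0 230 4>;
 *		#iommu-cells = <1>;
 *	};
 */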

#ifdef CONFIG_ACPI
static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu)
{
	int ret = 0;

	switch (model) {
	case ACPI_IORT_SMMU_V1:
	case ACPI_IORT_SMMU_CORELINK_MMU400:
		smmu->version = ARM_SMMU_V1;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_V2:
		smmu->version = ARM_SMMU_V2;
		smmu->model = GENERIC_SMMU;
		break;
	case ACPI_IORT_SMMU_CORELINK_MMU500:
		smmu->version = ARM_SMMU_V2;
		smmu->model = ARM_MMU500;
		break;
	default:
		ret = -ENODEV;
	}

	return ret;
}

static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node =
		*(struct acpi_iort_node **)dev_get_platdata(dev);
	struct acpi_iort_smmu *iort_smmu;
	int ret;

	/* Retrieve SMMU1/2 specific data */
	iort_smmu = (struct acpi_iort_smmu *)node->node_data;

	ret = acpi_smmu_get_data(iort_smmu->model, smmu);
	if (ret < 0)
		return ret;

	/* Ignore the configuration access interrupt */
	smmu->num_global_irqs = 1;

	if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK)
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif

static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	const struct arm_smmu_match_data *data;
	struct device *dev = &pdev->dev;
	bool legacy_binding;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	data = of_device_get_match_data(dev);
	smmu->version = data->version;
	smmu->model = data->model;

	parse_driver_options(smmu);

	legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
	if (legacy_binding && !using_generic_binding) {
		if (!using_legacy_binding)
			pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
		using_legacy_binding = true;
	} else if (!legacy_binding && !using_legacy_binding) {
		using_generic_binding = true;
	} else {
		dev_err(dev, "not probing due to mismatched DT properties\n");
		return -ENODEV;
	}
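	/*
	 * Note that whichever SMMU probes first decides the binding style
	 * for the whole system: using_legacy_binding/using_generic_binding
	 * are global, so a mixture of "mmu-masters" (legacy) and "iommus"
	 * (generic) descriptions cannot be honoured at the same time,
	 * hence the -ENODEV above.
	 */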

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;

	return 0;
}

static int arm_smmu_device_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	if (dev->of_node)
		err = arm_smmu_device_dt_probe(pdev, smmu);
	else
		err = arm_smmu_device_acpi_probe(pdev, smmu);

	if (err)
		return err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);
	smmu->size = resource_size(res);

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}
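	/*
	 * IRQ resources are expected global-first: everything beyond the
	 * first num_global_irqs entries is treated as a per-context-bank
	 * interrupt, which is what the count above relies on.
	 */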

	if (!smmu->num_context_irqs) {
		dev_err(dev, "found %d interrupts but expected at least %d\n",
			num_irqs, smmu->num_global_irqs + 1);
		return -ENODEV;
	}

	smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs),
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		return err;

	if (smmu->version == ARM_SMMU_V2 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		return -ENODEV;
	}

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = devm_request_irq(smmu->dev, smmu->irqs[i],
				       arm_smmu_global_fault,
				       IRQF_SHARED,
				       "arm-smmu global fault",
				       smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			return err;
		}
	}

	iommu_register_instance(dev->fwnode, &arm_smmu_ops);
	platform_set_drvdata(pdev, smmu);
	arm_smmu_device_reset(smmu);
	arm_smmu_test_smr_masks(smmu);

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
#ifdef CONFIG_ARM_AMBA
	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);
#endif
#ifdef CONFIG_PCI
	if (!iommu_present(&pci_bus_type)) {
		pci_request_acs();
		bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
	}
#endif
	return 0;
}

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	if (!smmu)
		return -ENODEV;

	if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
		dev_err(&pdev->dev, "removing device with active domains!\n");

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
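	/*
	 * With sCR0.CLIENTPD set, the client ports are disabled and
	 * incoming transactions bypass translation rather than fault, so
	 * any devices left running keep working, just without isolation.
	 */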
	return 0;
}

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
};

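/*
 * Driver registration is reachable both from the initcall below and,
 * on DT systems, from the IOMMU_OF_DECLARE() early-init hooks at the
 * bottom of this file, hence the one-shot guard in arm_smmu_init().
 */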
static int __init arm_smmu_init(void)
{
	static bool registered;
	int ret = 0;

	if (!registered) {
		ret = platform_driver_register(&arm_smmu_driver);
		registered = !ret;
	}
	return ret;
}

static void __exit arm_smmu_exit(void)
{
	platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv1, "arm,smmu-v1", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_smmuv2, "arm,smmu-v2", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu400, "arm,mmu-400", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu401, "arm,mmu-401", arm_smmu_of_init);
IOMMU_OF_DECLARE(arm_mmu500, "arm,mmu-500", arm_smmu_of_init);
IOMMU_OF_DECLARE(cavium_smmuv2, "cavium,smmu-v2", arm_smmu_of_init);

#ifdef CONFIG_ACPI
static int __init arm_smmu_acpi_init(struct acpi_table_header *table)
{
	if (iort_node_match(ACPI_IORT_NODE_SMMU))
		return arm_smmu_init();

	return 0;
}
IORT_ACPI_DECLARE(arm_smmu, ACPI_SIG_IORT, arm_smmu_acpi_init);
#endif

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");