2 * IOMMU API for ARM architected SMMU implementations.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 * Copyright (C) 2013 ARM Limited
19 * Author: Will Deacon <will.deacon@arm.com>
21 * This driver currently supports:
22 * - SMMUv1 and v2 implementations
23 * - Stream-matching and stream-indexing
24 * - v7/v8 long-descriptor format
25 * - Non-secure access to the SMMU
26 * - Context fault reporting
29 #define pr_fmt(fmt) "arm-smmu: " fmt
31 #include <linux/delay.h>
32 #include <linux/dma-iommu.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/err.h>
35 #include <linux/interrupt.h>
37 #include <linux/io-64-nonatomic-hi-lo.h>
38 #include <linux/iommu.h>
39 #include <linux/iopoll.h>
40 #include <linux/module.h>
42 #include <linux/of_address.h>
43 #include <linux/pci.h>
44 #include <linux/platform_device.h>
45 #include <linux/slab.h>
46 #include <linux/spinlock.h>
48 #include <linux/amba/bus.h>
50 #include "io-pgtable.h"
52 /* Maximum number of stream IDs assigned to a single device */
53 #define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS
55 /* Maximum number of context banks per SMMU */
56 #define ARM_SMMU_MAX_CBS 128
58 /* Maximum number of mapping groups per SMMU */
59 #define ARM_SMMU_MAX_SMRS 128
61 /* SMMU global address space */
62 #define ARM_SMMU_GR0(smmu) ((smmu)->base)
63 #define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
66 * SMMU global address space with conditional offset to access secure
67 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
70 #define ARM_SMMU_GR0_NS(smmu) \
72 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
76 * Some 64-bit registers only make sense to write atomically, but in such
77 * cases all the data relevant to AArch32 formats lies within the lower word,
78 * therefore this actually makes more sense than it might first appear.
81 #define smmu_write_atomic_lq writeq_relaxed
83 #define smmu_write_atomic_lq writel_relaxed
86 /* Configuration registers */
87 #define ARM_SMMU_GR0_sCR0 0x0
88 #define sCR0_CLIENTPD (1 << 0)
89 #define sCR0_GFRE (1 << 1)
90 #define sCR0_GFIE (1 << 2)
91 #define sCR0_GCFGFRE (1 << 4)
92 #define sCR0_GCFGFIE (1 << 5)
93 #define sCR0_USFCFG (1 << 10)
94 #define sCR0_VMIDPNE (1 << 11)
95 #define sCR0_PTM (1 << 12)
96 #define sCR0_FB (1 << 13)
97 #define sCR0_VMID16EN (1 << 31)
98 #define sCR0_BSU_SHIFT 14
99 #define sCR0_BSU_MASK 0x3
101 /* Identification registers */
102 #define ARM_SMMU_GR0_ID0 0x20
103 #define ARM_SMMU_GR0_ID1 0x24
104 #define ARM_SMMU_GR0_ID2 0x28
105 #define ARM_SMMU_GR0_ID3 0x2c
106 #define ARM_SMMU_GR0_ID4 0x30
107 #define ARM_SMMU_GR0_ID5 0x34
108 #define ARM_SMMU_GR0_ID6 0x38
109 #define ARM_SMMU_GR0_ID7 0x3c
110 #define ARM_SMMU_GR0_sGFSR 0x48
111 #define ARM_SMMU_GR0_sGFSYNR0 0x50
112 #define ARM_SMMU_GR0_sGFSYNR1 0x54
113 #define ARM_SMMU_GR0_sGFSYNR2 0x58
115 #define ID0_S1TS (1 << 30)
116 #define ID0_S2TS (1 << 29)
117 #define ID0_NTS (1 << 28)
118 #define ID0_SMS (1 << 27)
119 #define ID0_ATOSNS (1 << 26)
120 #define ID0_CTTW (1 << 14)
121 #define ID0_NUMIRPT_SHIFT 16
122 #define ID0_NUMIRPT_MASK 0xff
123 #define ID0_NUMSIDB_SHIFT 9
124 #define ID0_NUMSIDB_MASK 0xf
125 #define ID0_NUMSMRG_SHIFT 0
126 #define ID0_NUMSMRG_MASK 0xff
128 #define ID1_PAGESIZE (1 << 31)
129 #define ID1_NUMPAGENDXB_SHIFT 28
130 #define ID1_NUMPAGENDXB_MASK 7
131 #define ID1_NUMS2CB_SHIFT 16
132 #define ID1_NUMS2CB_MASK 0xff
133 #define ID1_NUMCB_SHIFT 0
134 #define ID1_NUMCB_MASK 0xff
136 #define ID2_OAS_SHIFT 4
137 #define ID2_OAS_MASK 0xf
138 #define ID2_IAS_SHIFT 0
139 #define ID2_IAS_MASK 0xf
140 #define ID2_UBS_SHIFT 8
141 #define ID2_UBS_MASK 0xf
142 #define ID2_PTFS_4K (1 << 12)
143 #define ID2_PTFS_16K (1 << 13)
144 #define ID2_PTFS_64K (1 << 14)
145 #define ID2_VMID16 (1 << 15)
147 /* Global TLB invalidation */
148 #define ARM_SMMU_GR0_TLBIVMID 0x64
149 #define ARM_SMMU_GR0_TLBIALLNSNH 0x68
150 #define ARM_SMMU_GR0_TLBIALLH 0x6c
151 #define ARM_SMMU_GR0_sTLBGSYNC 0x70
152 #define ARM_SMMU_GR0_sTLBGSTATUS 0x74
153 #define sTLBGSTATUS_GSACTIVE (1 << 0)
154 #define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
156 /* Stream mapping registers */
157 #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
158 #define SMR_VALID (1 << 31)
159 #define SMR_MASK_SHIFT 16
160 #define SMR_MASK_MASK 0x7fff
161 #define SMR_ID_SHIFT 0
162 #define SMR_ID_MASK 0x7fff
164 #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
165 #define S2CR_CBNDX_SHIFT 0
166 #define S2CR_CBNDX_MASK 0xff
167 #define S2CR_TYPE_SHIFT 16
168 #define S2CR_TYPE_MASK 0x3
169 #define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT)
170 #define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT)
171 #define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT)
173 #define S2CR_PRIVCFG_SHIFT 24
174 #define S2CR_PRIVCFG_UNPRIV (2 << S2CR_PRIVCFG_SHIFT)
176 /* Context bank attribute registers */
177 #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
178 #define CBAR_VMID_SHIFT 0
179 #define CBAR_VMID_MASK 0xff
180 #define CBAR_S1_BPSHCFG_SHIFT 8
181 #define CBAR_S1_BPSHCFG_MASK 3
182 #define CBAR_S1_BPSHCFG_NSH 3
183 #define CBAR_S1_MEMATTR_SHIFT 12
184 #define CBAR_S1_MEMATTR_MASK 0xf
185 #define CBAR_S1_MEMATTR_WB 0xf
186 #define CBAR_TYPE_SHIFT 16
187 #define CBAR_TYPE_MASK 0x3
188 #define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
189 #define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
190 #define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
191 #define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
192 #define CBAR_IRPTNDX_SHIFT 24
193 #define CBAR_IRPTNDX_MASK 0xff
195 #define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
196 #define CBA2R_RW64_32BIT (0 << 0)
197 #define CBA2R_RW64_64BIT (1 << 0)
198 #define CBA2R_VMID_SHIFT 16
199 #define CBA2R_VMID_MASK 0xffff
201 /* Translation context bank */
202 #define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
203 #define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
205 #define ARM_SMMU_CB_SCTLR 0x0
206 #define ARM_SMMU_CB_ACTLR 0x4
207 #define ARM_SMMU_CB_RESUME 0x8
208 #define ARM_SMMU_CB_TTBCR2 0x10
209 #define ARM_SMMU_CB_TTBR0 0x20
210 #define ARM_SMMU_CB_TTBR1 0x28
211 #define ARM_SMMU_CB_TTBCR 0x30
212 #define ARM_SMMU_CB_S1_MAIR0 0x38
213 #define ARM_SMMU_CB_S1_MAIR1 0x3c
214 #define ARM_SMMU_CB_PAR 0x50
215 #define ARM_SMMU_CB_FSR 0x58
216 #define ARM_SMMU_CB_FAR 0x60
217 #define ARM_SMMU_CB_FSYNR0 0x68
218 #define ARM_SMMU_CB_S1_TLBIVA 0x600
219 #define ARM_SMMU_CB_S1_TLBIASID 0x610
220 #define ARM_SMMU_CB_S1_TLBIVAL 0x620
221 #define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
222 #define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
223 #define ARM_SMMU_CB_ATS1PR 0x800
224 #define ARM_SMMU_CB_ATSR 0x8f0
226 #define SCTLR_S1_ASIDPNE (1 << 12)
227 #define SCTLR_CFCFG (1 << 7)
228 #define SCTLR_CFIE (1 << 6)
229 #define SCTLR_CFRE (1 << 5)
230 #define SCTLR_E (1 << 4)
231 #define SCTLR_AFE (1 << 2)
232 #define SCTLR_TRE (1 << 1)
233 #define SCTLR_M (1 << 0)
234 #define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)
236 #define ARM_MMU500_ACTLR_CPRE (1 << 1)
238 #define CB_PAR_F (1 << 0)
240 #define ATSR_ACTIVE (1 << 0)
242 #define RESUME_RETRY (0 << 0)
243 #define RESUME_TERMINATE (1 << 0)
245 #define TTBCR2_SEP_SHIFT 15
246 #define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
248 #define TTBRn_ASID_SHIFT 48
250 #define FSR_MULTI (1 << 31)
251 #define FSR_SS (1 << 30)
252 #define FSR_UUT (1 << 8)
253 #define FSR_ASF (1 << 7)
254 #define FSR_TLBLKF (1 << 6)
255 #define FSR_TLBMCF (1 << 5)
256 #define FSR_EF (1 << 4)
257 #define FSR_PF (1 << 3)
258 #define FSR_AFF (1 << 2)
259 #define FSR_TF (1 << 1)
261 #define FSR_IGN (FSR_AFF | FSR_ASF | \
262 FSR_TLBMCF | FSR_TLBLKF)
263 #define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
264 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
266 #define FSYNR0_WNR (1 << 4)
268 static int force_stage
;
269 module_param(force_stage
, int, S_IRUGO
);
270 MODULE_PARM_DESC(force_stage
,
271 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
272 static bool disable_bypass
;
273 module_param(disable_bypass
, bool, S_IRUGO
);
274 MODULE_PARM_DESC(disable_bypass
,
275 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
/* Architecture revision reported by the hardware ID registers. */
enum arm_smmu_arch_version {
	ARM_SMMU_V1,
	ARM_SMMU_V2,
};
/* Vendor implementation, used for model-specific errata/quirks. */
enum arm_smmu_implementation {
	GENERIC_SMMU,
	ARM_MMU500,
	CAVIUM_SMMUV2,
};
288 struct arm_smmu_smr
{
294 struct arm_smmu_master_cfg
{
296 u16 streamids
[MAX_MASTER_STREAMIDS
];
297 struct arm_smmu_smr
*smrs
;
300 struct arm_smmu_master
{
301 struct device_node
*of_node
;
303 struct arm_smmu_master_cfg cfg
;
306 struct arm_smmu_device
{
311 unsigned long pgshift
;
313 #define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
314 #define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
315 #define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
316 #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
317 #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
318 #define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
319 #define ARM_SMMU_FEAT_VMID16 (1 << 6)
322 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
324 enum arm_smmu_arch_version version
;
325 enum arm_smmu_implementation model
;
327 u32 num_context_banks
;
328 u32 num_s2_context_banks
;
329 DECLARE_BITMAP(context_map
, ARM_SMMU_MAX_CBS
);
332 u32 num_mapping_groups
;
333 DECLARE_BITMAP(smr_map
, ARM_SMMU_MAX_SMRS
);
335 unsigned long va_size
;
336 unsigned long ipa_size
;
337 unsigned long pa_size
;
340 u32 num_context_irqs
;
343 struct list_head list
;
344 struct rb_root masters
;
346 u32 cavium_id_base
; /* Specific to Cavium */
349 struct arm_smmu_cfg
{
354 #define INVALID_IRPTNDX 0xff
356 #define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
357 #define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
/* Requested translation stage for a domain. */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};
365 struct arm_smmu_domain
{
366 struct arm_smmu_device
*smmu
;
367 struct io_pgtable_ops
*pgtbl_ops
;
368 spinlock_t pgtbl_lock
;
369 struct arm_smmu_cfg cfg
;
370 enum arm_smmu_domain_stage stage
;
371 struct mutex init_mutex
; /* Protects smmu pointer */
372 struct iommu_domain domain
;
375 static struct iommu_ops arm_smmu_ops
;
377 static DEFINE_SPINLOCK(arm_smmu_devices_lock
);
378 static LIST_HEAD(arm_smmu_devices
);
380 struct arm_smmu_option_prop
{
385 static atomic_t cavium_smmu_context_count
= ATOMIC_INIT(0);
387 static struct arm_smmu_option_prop arm_smmu_options
[] = {
388 { ARM_SMMU_OPT_SECURE_CFG_ACCESS
, "calxeda,smmu-secure-config-access" },
392 static struct arm_smmu_domain
*to_smmu_domain(struct iommu_domain
*dom
)
394 return container_of(dom
, struct arm_smmu_domain
, domain
);
397 static void parse_driver_options(struct arm_smmu_device
*smmu
)
402 if (of_property_read_bool(smmu
->dev
->of_node
,
403 arm_smmu_options
[i
].prop
)) {
404 smmu
->options
|= arm_smmu_options
[i
].opt
;
405 dev_notice(smmu
->dev
, "option %s\n",
406 arm_smmu_options
[i
].prop
);
408 } while (arm_smmu_options
[++i
].opt
);
411 static struct device_node
*dev_get_dev_node(struct device
*dev
)
413 if (dev_is_pci(dev
)) {
414 struct pci_bus
*bus
= to_pci_dev(dev
)->bus
;
416 while (!pci_is_root_bus(bus
))
418 return bus
->bridge
->parent
->of_node
;
424 static struct arm_smmu_master
*find_smmu_master(struct arm_smmu_device
*smmu
,
425 struct device_node
*dev_node
)
427 struct rb_node
*node
= smmu
->masters
.rb_node
;
430 struct arm_smmu_master
*master
;
432 master
= container_of(node
, struct arm_smmu_master
, node
);
434 if (dev_node
< master
->of_node
)
435 node
= node
->rb_left
;
436 else if (dev_node
> master
->of_node
)
437 node
= node
->rb_right
;
445 static struct arm_smmu_master_cfg
*
446 find_smmu_master_cfg(struct device
*dev
)
448 struct arm_smmu_master_cfg
*cfg
= NULL
;
449 struct iommu_group
*group
= iommu_group_get(dev
);
452 cfg
= iommu_group_get_iommudata(group
);
453 iommu_group_put(group
);
459 static int insert_smmu_master(struct arm_smmu_device
*smmu
,
460 struct arm_smmu_master
*master
)
462 struct rb_node
**new, *parent
;
464 new = &smmu
->masters
.rb_node
;
467 struct arm_smmu_master
*this
468 = container_of(*new, struct arm_smmu_master
, node
);
471 if (master
->of_node
< this->of_node
)
472 new = &((*new)->rb_left
);
473 else if (master
->of_node
> this->of_node
)
474 new = &((*new)->rb_right
);
479 rb_link_node(&master
->node
, parent
, new);
480 rb_insert_color(&master
->node
, &smmu
->masters
);
484 static int register_smmu_master(struct arm_smmu_device
*smmu
,
486 struct of_phandle_args
*masterspec
)
489 struct arm_smmu_master
*master
;
491 master
= find_smmu_master(smmu
, masterspec
->np
);
494 "rejecting multiple registrations for master device %s\n",
495 masterspec
->np
->name
);
499 if (masterspec
->args_count
> MAX_MASTER_STREAMIDS
) {
501 "reached maximum number (%d) of stream IDs for master device %s\n",
502 MAX_MASTER_STREAMIDS
, masterspec
->np
->name
);
506 master
= devm_kzalloc(dev
, sizeof(*master
), GFP_KERNEL
);
510 master
->of_node
= masterspec
->np
;
511 master
->cfg
.num_streamids
= masterspec
->args_count
;
513 for (i
= 0; i
< master
->cfg
.num_streamids
; ++i
) {
514 u16 streamid
= masterspec
->args
[i
];
516 if (!(smmu
->features
& ARM_SMMU_FEAT_STREAM_MATCH
) &&
517 (streamid
>= smmu
->num_mapping_groups
)) {
519 "stream ID for master device %s greater than maximum allowed (%d)\n",
520 masterspec
->np
->name
, smmu
->num_mapping_groups
);
523 master
->cfg
.streamids
[i
] = streamid
;
525 return insert_smmu_master(smmu
, master
);
528 static struct arm_smmu_device
*find_smmu_for_device(struct device
*dev
)
530 struct arm_smmu_device
*smmu
;
531 struct arm_smmu_master
*master
= NULL
;
532 struct device_node
*dev_node
= dev_get_dev_node(dev
);
534 spin_lock(&arm_smmu_devices_lock
);
535 list_for_each_entry(smmu
, &arm_smmu_devices
, list
) {
536 master
= find_smmu_master(smmu
, dev_node
);
540 spin_unlock(&arm_smmu_devices_lock
);
542 return master
? smmu
: NULL
;
545 static int __arm_smmu_alloc_bitmap(unsigned long *map
, int start
, int end
)
550 idx
= find_next_zero_bit(map
, end
, start
);
553 } while (test_and_set_bit(idx
, map
));
/* Release a bit previously claimed with __arm_smmu_alloc_bitmap(). */
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
563 /* Wait for any pending TLB invalidations to complete */
564 static void __arm_smmu_tlb_sync(struct arm_smmu_device
*smmu
)
567 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
569 writel_relaxed(0, gr0_base
+ ARM_SMMU_GR0_sTLBGSYNC
);
570 while (readl_relaxed(gr0_base
+ ARM_SMMU_GR0_sTLBGSTATUS
)
571 & sTLBGSTATUS_GSACTIVE
) {
573 if (++count
== TLB_LOOP_TIMEOUT
) {
574 dev_err_ratelimited(smmu
->dev
,
575 "TLB sync timed out -- SMMU may be deadlocked\n");
582 static void arm_smmu_tlb_sync(void *cookie
)
584 struct arm_smmu_domain
*smmu_domain
= cookie
;
585 __arm_smmu_tlb_sync(smmu_domain
->smmu
);
588 static void arm_smmu_tlb_inv_context(void *cookie
)
590 struct arm_smmu_domain
*smmu_domain
= cookie
;
591 struct arm_smmu_cfg
*cfg
= &smmu_domain
->cfg
;
592 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
593 bool stage1
= cfg
->cbar
!= CBAR_TYPE_S2_TRANS
;
597 base
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, cfg
->cbndx
);
598 writel_relaxed(ARM_SMMU_CB_ASID(smmu
, cfg
),
599 base
+ ARM_SMMU_CB_S1_TLBIASID
);
601 base
= ARM_SMMU_GR0(smmu
);
602 writel_relaxed(ARM_SMMU_CB_VMID(smmu
, cfg
),
603 base
+ ARM_SMMU_GR0_TLBIVMID
);
606 __arm_smmu_tlb_sync(smmu
);
609 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova
, size_t size
,
610 size_t granule
, bool leaf
, void *cookie
)
612 struct arm_smmu_domain
*smmu_domain
= cookie
;
613 struct arm_smmu_cfg
*cfg
= &smmu_domain
->cfg
;
614 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
615 bool stage1
= cfg
->cbar
!= CBAR_TYPE_S2_TRANS
;
619 reg
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, cfg
->cbndx
);
620 reg
+= leaf
? ARM_SMMU_CB_S1_TLBIVAL
: ARM_SMMU_CB_S1_TLBIVA
;
622 if (!IS_ENABLED(CONFIG_64BIT
) || smmu
->version
== ARM_SMMU_V1
) {
624 iova
|= ARM_SMMU_CB_ASID(smmu
, cfg
);
626 writel_relaxed(iova
, reg
);
628 } while (size
-= granule
);
632 iova
|= (u64
)ARM_SMMU_CB_ASID(smmu
, cfg
) << 48;
634 writeq_relaxed(iova
, reg
);
635 iova
+= granule
>> 12;
636 } while (size
-= granule
);
640 } else if (smmu
->version
== ARM_SMMU_V2
) {
641 reg
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, cfg
->cbndx
);
642 reg
+= leaf
? ARM_SMMU_CB_S2_TLBIIPAS2L
:
643 ARM_SMMU_CB_S2_TLBIIPAS2
;
646 smmu_write_atomic_lq(iova
, reg
);
647 iova
+= granule
>> 12;
648 } while (size
-= granule
);
651 reg
= ARM_SMMU_GR0(smmu
) + ARM_SMMU_GR0_TLBIVMID
;
652 writel_relaxed(ARM_SMMU_CB_VMID(smmu
, cfg
), reg
);
656 static struct iommu_gather_ops arm_smmu_gather_ops
= {
657 .tlb_flush_all
= arm_smmu_tlb_inv_context
,
658 .tlb_add_flush
= arm_smmu_tlb_inv_range_nosync
,
659 .tlb_sync
= arm_smmu_tlb_sync
,
662 static irqreturn_t
arm_smmu_context_fault(int irq
, void *dev
)
665 u32 fsr
, fsynr
, resume
;
667 struct iommu_domain
*domain
= dev
;
668 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
669 struct arm_smmu_cfg
*cfg
= &smmu_domain
->cfg
;
670 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
671 void __iomem
*cb_base
;
673 cb_base
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, cfg
->cbndx
);
674 fsr
= readl_relaxed(cb_base
+ ARM_SMMU_CB_FSR
);
676 if (!(fsr
& FSR_FAULT
))
680 dev_err_ratelimited(smmu
->dev
,
681 "Unexpected context fault (fsr 0x%x)\n",
684 fsynr
= readl_relaxed(cb_base
+ ARM_SMMU_CB_FSYNR0
);
685 flags
= fsynr
& FSYNR0_WNR
? IOMMU_FAULT_WRITE
: IOMMU_FAULT_READ
;
687 iova
= readq_relaxed(cb_base
+ ARM_SMMU_CB_FAR
);
688 if (!report_iommu_fault(domain
, smmu
->dev
, iova
, flags
)) {
690 resume
= RESUME_RETRY
;
692 dev_err_ratelimited(smmu
->dev
,
693 "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
694 iova
, fsynr
, cfg
->cbndx
);
696 resume
= RESUME_TERMINATE
;
699 /* Clear the faulting FSR */
700 writel(fsr
, cb_base
+ ARM_SMMU_CB_FSR
);
702 /* Retry or terminate any stalled transactions */
704 writel_relaxed(resume
, cb_base
+ ARM_SMMU_CB_RESUME
);
709 static irqreturn_t
arm_smmu_global_fault(int irq
, void *dev
)
711 u32 gfsr
, gfsynr0
, gfsynr1
, gfsynr2
;
712 struct arm_smmu_device
*smmu
= dev
;
713 void __iomem
*gr0_base
= ARM_SMMU_GR0_NS(smmu
);
715 gfsr
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_sGFSR
);
716 gfsynr0
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_sGFSYNR0
);
717 gfsynr1
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_sGFSYNR1
);
718 gfsynr2
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_sGFSYNR2
);
723 dev_err_ratelimited(smmu
->dev
,
724 "Unexpected global fault, this could be serious\n");
725 dev_err_ratelimited(smmu
->dev
,
726 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
727 gfsr
, gfsynr0
, gfsynr1
, gfsynr2
);
729 writel(gfsr
, gr0_base
+ ARM_SMMU_GR0_sGFSR
);
733 static void arm_smmu_init_context_bank(struct arm_smmu_domain
*smmu_domain
,
734 struct io_pgtable_cfg
*pgtbl_cfg
)
739 struct arm_smmu_cfg
*cfg
= &smmu_domain
->cfg
;
740 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
741 void __iomem
*cb_base
, *gr1_base
;
743 gr1_base
= ARM_SMMU_GR1(smmu
);
744 stage1
= cfg
->cbar
!= CBAR_TYPE_S2_TRANS
;
745 cb_base
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, cfg
->cbndx
);
747 if (smmu
->version
> ARM_SMMU_V1
) {
749 reg
= CBA2R_RW64_64BIT
;
751 reg
= CBA2R_RW64_32BIT
;
753 /* 16-bit VMIDs live in CBA2R */
754 if (smmu
->features
& ARM_SMMU_FEAT_VMID16
)
755 reg
|= ARM_SMMU_CB_VMID(smmu
, cfg
) << CBA2R_VMID_SHIFT
;
757 writel_relaxed(reg
, gr1_base
+ ARM_SMMU_GR1_CBA2R(cfg
->cbndx
));
762 if (smmu
->version
== ARM_SMMU_V1
)
763 reg
|= cfg
->irptndx
<< CBAR_IRPTNDX_SHIFT
;
766 * Use the weakest shareability/memory types, so they are
767 * overridden by the ttbcr/pte.
770 reg
|= (CBAR_S1_BPSHCFG_NSH
<< CBAR_S1_BPSHCFG_SHIFT
) |
771 (CBAR_S1_MEMATTR_WB
<< CBAR_S1_MEMATTR_SHIFT
);
772 } else if (!(smmu
->features
& ARM_SMMU_FEAT_VMID16
)) {
773 /* 8-bit VMIDs live in CBAR */
774 reg
|= ARM_SMMU_CB_VMID(smmu
, cfg
) << CBAR_VMID_SHIFT
;
776 writel_relaxed(reg
, gr1_base
+ ARM_SMMU_GR1_CBAR(cfg
->cbndx
));
780 reg64
= pgtbl_cfg
->arm_lpae_s1_cfg
.ttbr
[0];
782 reg64
|= ((u64
)ARM_SMMU_CB_ASID(smmu
, cfg
)) << TTBRn_ASID_SHIFT
;
783 writeq_relaxed(reg64
, cb_base
+ ARM_SMMU_CB_TTBR0
);
785 reg64
= pgtbl_cfg
->arm_lpae_s1_cfg
.ttbr
[1];
786 reg64
|= ((u64
)ARM_SMMU_CB_ASID(smmu
, cfg
)) << TTBRn_ASID_SHIFT
;
787 writeq_relaxed(reg64
, cb_base
+ ARM_SMMU_CB_TTBR1
);
789 reg64
= pgtbl_cfg
->arm_lpae_s2_cfg
.vttbr
;
790 writeq_relaxed(reg64
, cb_base
+ ARM_SMMU_CB_TTBR0
);
795 reg
= pgtbl_cfg
->arm_lpae_s1_cfg
.tcr
;
796 writel_relaxed(reg
, cb_base
+ ARM_SMMU_CB_TTBCR
);
797 if (smmu
->version
> ARM_SMMU_V1
) {
798 reg
= pgtbl_cfg
->arm_lpae_s1_cfg
.tcr
>> 32;
799 reg
|= TTBCR2_SEP_UPSTREAM
;
800 writel_relaxed(reg
, cb_base
+ ARM_SMMU_CB_TTBCR2
);
803 reg
= pgtbl_cfg
->arm_lpae_s2_cfg
.vtcr
;
804 writel_relaxed(reg
, cb_base
+ ARM_SMMU_CB_TTBCR
);
807 /* MAIRs (stage-1 only) */
809 reg
= pgtbl_cfg
->arm_lpae_s1_cfg
.mair
[0];
810 writel_relaxed(reg
, cb_base
+ ARM_SMMU_CB_S1_MAIR0
);
811 reg
= pgtbl_cfg
->arm_lpae_s1_cfg
.mair
[1];
812 writel_relaxed(reg
, cb_base
+ ARM_SMMU_CB_S1_MAIR1
);
816 reg
= SCTLR_CFCFG
| SCTLR_CFIE
| SCTLR_CFRE
| SCTLR_M
| SCTLR_EAE_SBOP
;
818 reg
|= SCTLR_S1_ASIDPNE
;
822 writel_relaxed(reg
, cb_base
+ ARM_SMMU_CB_SCTLR
);
825 static int arm_smmu_init_domain_context(struct iommu_domain
*domain
,
826 struct arm_smmu_device
*smmu
)
828 int irq
, start
, ret
= 0;
829 unsigned long ias
, oas
;
830 struct io_pgtable_ops
*pgtbl_ops
;
831 struct io_pgtable_cfg pgtbl_cfg
;
832 enum io_pgtable_fmt fmt
;
833 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
834 struct arm_smmu_cfg
*cfg
= &smmu_domain
->cfg
;
836 mutex_lock(&smmu_domain
->init_mutex
);
837 if (smmu_domain
->smmu
)
841 * Mapping the requested stage onto what we support is surprisingly
842 * complicated, mainly because the spec allows S1+S2 SMMUs without
843 * support for nested translation. That means we end up with the
846 * Requested Supported Actual
856 * Note that you can't actually request stage-2 mappings.
858 if (!(smmu
->features
& ARM_SMMU_FEAT_TRANS_S1
))
859 smmu_domain
->stage
= ARM_SMMU_DOMAIN_S2
;
860 if (!(smmu
->features
& ARM_SMMU_FEAT_TRANS_S2
))
861 smmu_domain
->stage
= ARM_SMMU_DOMAIN_S1
;
863 switch (smmu_domain
->stage
) {
864 case ARM_SMMU_DOMAIN_S1
:
865 cfg
->cbar
= CBAR_TYPE_S1_TRANS_S2_BYPASS
;
866 start
= smmu
->num_s2_context_banks
;
868 oas
= smmu
->ipa_size
;
869 if (IS_ENABLED(CONFIG_64BIT
))
870 fmt
= ARM_64_LPAE_S1
;
872 fmt
= ARM_32_LPAE_S1
;
874 case ARM_SMMU_DOMAIN_NESTED
:
876 * We will likely want to change this if/when KVM gets
879 case ARM_SMMU_DOMAIN_S2
:
880 cfg
->cbar
= CBAR_TYPE_S2_TRANS
;
882 ias
= smmu
->ipa_size
;
884 if (IS_ENABLED(CONFIG_64BIT
))
885 fmt
= ARM_64_LPAE_S2
;
887 fmt
= ARM_32_LPAE_S2
;
894 ret
= __arm_smmu_alloc_bitmap(smmu
->context_map
, start
,
895 smmu
->num_context_banks
);
896 if (IS_ERR_VALUE(ret
))
900 if (smmu
->version
== ARM_SMMU_V1
) {
901 cfg
->irptndx
= atomic_inc_return(&smmu
->irptndx
);
902 cfg
->irptndx
%= smmu
->num_context_irqs
;
904 cfg
->irptndx
= cfg
->cbndx
;
907 pgtbl_cfg
= (struct io_pgtable_cfg
) {
908 .pgsize_bitmap
= arm_smmu_ops
.pgsize_bitmap
,
911 .tlb
= &arm_smmu_gather_ops
,
912 .iommu_dev
= smmu
->dev
,
915 smmu_domain
->smmu
= smmu
;
916 pgtbl_ops
= alloc_io_pgtable_ops(fmt
, &pgtbl_cfg
, smmu_domain
);
922 /* Update our support page sizes to reflect the page table format */
923 arm_smmu_ops
.pgsize_bitmap
= pgtbl_cfg
.pgsize_bitmap
;
925 /* Initialise the context bank with our page table cfg */
926 arm_smmu_init_context_bank(smmu_domain
, &pgtbl_cfg
);
929 * Request context fault interrupt. Do this last to avoid the
930 * handler seeing a half-initialised domain state.
932 irq
= smmu
->irqs
[smmu
->num_global_irqs
+ cfg
->irptndx
];
933 ret
= request_irq(irq
, arm_smmu_context_fault
, IRQF_SHARED
,
934 "arm-smmu-context-fault", domain
);
935 if (IS_ERR_VALUE(ret
)) {
936 dev_err(smmu
->dev
, "failed to request context IRQ %d (%u)\n",
938 cfg
->irptndx
= INVALID_IRPTNDX
;
941 mutex_unlock(&smmu_domain
->init_mutex
);
943 /* Publish page table ops for map/unmap */
944 smmu_domain
->pgtbl_ops
= pgtbl_ops
;
948 smmu_domain
->smmu
= NULL
;
950 mutex_unlock(&smmu_domain
->init_mutex
);
954 static void arm_smmu_destroy_domain_context(struct iommu_domain
*domain
)
956 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
957 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
958 struct arm_smmu_cfg
*cfg
= &smmu_domain
->cfg
;
959 void __iomem
*cb_base
;
966 * Disable the context bank and free the page tables before freeing
969 cb_base
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, cfg
->cbndx
);
970 writel_relaxed(0, cb_base
+ ARM_SMMU_CB_SCTLR
);
972 if (cfg
->irptndx
!= INVALID_IRPTNDX
) {
973 irq
= smmu
->irqs
[smmu
->num_global_irqs
+ cfg
->irptndx
];
974 free_irq(irq
, domain
);
977 free_io_pgtable_ops(smmu_domain
->pgtbl_ops
);
978 __arm_smmu_free_bitmap(smmu
->context_map
, cfg
->cbndx
);
981 static struct iommu_domain
*arm_smmu_domain_alloc(unsigned type
)
983 struct arm_smmu_domain
*smmu_domain
;
985 if (type
!= IOMMU_DOMAIN_UNMANAGED
&& type
!= IOMMU_DOMAIN_DMA
)
988 * Allocate the domain and initialise some of its data structures.
989 * We can't really do anything meaningful until we've added a
992 smmu_domain
= kzalloc(sizeof(*smmu_domain
), GFP_KERNEL
);
996 if (type
== IOMMU_DOMAIN_DMA
&&
997 iommu_get_dma_cookie(&smmu_domain
->domain
)) {
1002 mutex_init(&smmu_domain
->init_mutex
);
1003 spin_lock_init(&smmu_domain
->pgtbl_lock
);
1005 return &smmu_domain
->domain
;
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}
1021 static int arm_smmu_master_configure_smrs(struct arm_smmu_device
*smmu
,
1022 struct arm_smmu_master_cfg
*cfg
)
1025 struct arm_smmu_smr
*smrs
;
1026 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
1028 if (!(smmu
->features
& ARM_SMMU_FEAT_STREAM_MATCH
))
1034 smrs
= kmalloc_array(cfg
->num_streamids
, sizeof(*smrs
), GFP_KERNEL
);
1036 dev_err(smmu
->dev
, "failed to allocate %d SMRs\n",
1037 cfg
->num_streamids
);
1041 /* Allocate the SMRs on the SMMU */
1042 for (i
= 0; i
< cfg
->num_streamids
; ++i
) {
1043 int idx
= __arm_smmu_alloc_bitmap(smmu
->smr_map
, 0,
1044 smmu
->num_mapping_groups
);
1045 if (IS_ERR_VALUE(idx
)) {
1046 dev_err(smmu
->dev
, "failed to allocate free SMR\n");
1050 smrs
[i
] = (struct arm_smmu_smr
) {
1052 .mask
= 0, /* We don't currently share SMRs */
1053 .id
= cfg
->streamids
[i
],
1057 /* It worked! Now, poke the actual hardware */
1058 for (i
= 0; i
< cfg
->num_streamids
; ++i
) {
1059 u32 reg
= SMR_VALID
| smrs
[i
].id
<< SMR_ID_SHIFT
|
1060 smrs
[i
].mask
<< SMR_MASK_SHIFT
;
1061 writel_relaxed(reg
, gr0_base
+ ARM_SMMU_GR0_SMR(smrs
[i
].idx
));
1069 __arm_smmu_free_bitmap(smmu
->smr_map
, smrs
[i
].idx
);
1074 static void arm_smmu_master_free_smrs(struct arm_smmu_device
*smmu
,
1075 struct arm_smmu_master_cfg
*cfg
)
1078 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
1079 struct arm_smmu_smr
*smrs
= cfg
->smrs
;
1084 /* Invalidate the SMRs before freeing back to the allocator */
1085 for (i
= 0; i
< cfg
->num_streamids
; ++i
) {
1086 u8 idx
= smrs
[i
].idx
;
1088 writel_relaxed(~SMR_VALID
, gr0_base
+ ARM_SMMU_GR0_SMR(idx
));
1089 __arm_smmu_free_bitmap(smmu
->smr_map
, idx
);
1096 static int arm_smmu_domain_add_master(struct arm_smmu_domain
*smmu_domain
,
1097 struct arm_smmu_master_cfg
*cfg
)
1100 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1101 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
1103 /* Devices in an IOMMU group may already be configured */
1104 ret
= arm_smmu_master_configure_smrs(smmu
, cfg
);
1106 return ret
== -EEXIST
? 0 : ret
;
1109 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1110 * for all devices behind the SMMU.
1112 if (smmu_domain
->domain
.type
== IOMMU_DOMAIN_DMA
)
1115 for (i
= 0; i
< cfg
->num_streamids
; ++i
) {
1118 idx
= cfg
->smrs
? cfg
->smrs
[i
].idx
: cfg
->streamids
[i
];
1119 s2cr
= S2CR_TYPE_TRANS
| S2CR_PRIVCFG_UNPRIV
|
1120 (smmu_domain
->cfg
.cbndx
<< S2CR_CBNDX_SHIFT
);
1121 writel_relaxed(s2cr
, gr0_base
+ ARM_SMMU_GR0_S2CR(idx
));
1127 static void arm_smmu_domain_remove_master(struct arm_smmu_domain
*smmu_domain
,
1128 struct arm_smmu_master_cfg
*cfg
)
1131 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1132 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
1134 /* An IOMMU group is torn down by the first device to be removed */
1135 if ((smmu
->features
& ARM_SMMU_FEAT_STREAM_MATCH
) && !cfg
->smrs
)
1139 * We *must* clear the S2CR first, because freeing the SMR means
1140 * that it can be re-allocated immediately.
1142 for (i
= 0; i
< cfg
->num_streamids
; ++i
) {
1143 u32 idx
= cfg
->smrs
? cfg
->smrs
[i
].idx
: cfg
->streamids
[i
];
1144 u32 reg
= disable_bypass
? S2CR_TYPE_FAULT
: S2CR_TYPE_BYPASS
;
1146 writel_relaxed(reg
, gr0_base
+ ARM_SMMU_GR0_S2CR(idx
));
1149 arm_smmu_master_free_smrs(smmu
, cfg
);
1152 static void arm_smmu_detach_dev(struct device
*dev
,
1153 struct arm_smmu_master_cfg
*cfg
)
1155 struct iommu_domain
*domain
= dev
->archdata
.iommu
;
1156 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1158 dev
->archdata
.iommu
= NULL
;
1159 arm_smmu_domain_remove_master(smmu_domain
, cfg
);
1162 static int arm_smmu_attach_dev(struct iommu_domain
*domain
, struct device
*dev
)
1165 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1166 struct arm_smmu_device
*smmu
;
1167 struct arm_smmu_master_cfg
*cfg
;
1169 smmu
= find_smmu_for_device(dev
);
1171 dev_err(dev
, "cannot attach to SMMU, is it on the same bus?\n");
1175 /* Ensure that the domain is finalised */
1176 ret
= arm_smmu_init_domain_context(domain
, smmu
);
1177 if (IS_ERR_VALUE(ret
))
1181 * Sanity check the domain. We don't support domains across
1184 if (smmu_domain
->smmu
!= smmu
) {
1186 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
1187 dev_name(smmu_domain
->smmu
->dev
), dev_name(smmu
->dev
));
1191 /* Looks ok, so add the device to the domain */
1192 cfg
= find_smmu_master_cfg(dev
);
1196 /* Detach the dev from its current domain */
1197 if (dev
->archdata
.iommu
)
1198 arm_smmu_detach_dev(dev
, cfg
);
1200 ret
= arm_smmu_domain_add_master(smmu_domain
, cfg
);
1202 dev
->archdata
.iommu
= domain
;
1206 static int arm_smmu_map(struct iommu_domain
*domain
, unsigned long iova
,
1207 phys_addr_t paddr
, size_t size
, int prot
)
1210 unsigned long flags
;
1211 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1212 struct io_pgtable_ops
*ops
= smmu_domain
->pgtbl_ops
;
1217 spin_lock_irqsave(&smmu_domain
->pgtbl_lock
, flags
);
1218 ret
= ops
->map(ops
, iova
, paddr
, size
, prot
);
1219 spin_unlock_irqrestore(&smmu_domain
->pgtbl_lock
, flags
);
1223 static size_t arm_smmu_unmap(struct iommu_domain
*domain
, unsigned long iova
,
1227 unsigned long flags
;
1228 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1229 struct io_pgtable_ops
*ops
= smmu_domain
->pgtbl_ops
;
1234 spin_lock_irqsave(&smmu_domain
->pgtbl_lock
, flags
);
1235 ret
= ops
->unmap(ops
, iova
, size
);
1236 spin_unlock_irqrestore(&smmu_domain
->pgtbl_lock
, flags
);
1240 static phys_addr_t
arm_smmu_iova_to_phys_hard(struct iommu_domain
*domain
,
1243 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1244 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1245 struct arm_smmu_cfg
*cfg
= &smmu_domain
->cfg
;
1246 struct io_pgtable_ops
*ops
= smmu_domain
->pgtbl_ops
;
1247 struct device
*dev
= smmu
->dev
;
1248 void __iomem
*cb_base
;
1253 cb_base
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, cfg
->cbndx
);
1255 /* ATS1 registers can only be written atomically */
1256 va
= iova
& ~0xfffUL
;
1257 if (smmu
->version
== ARM_SMMU_V2
)
1258 smmu_write_atomic_lq(va
, cb_base
+ ARM_SMMU_CB_ATS1PR
);
1259 else /* Register is only 32-bit in v1 */
1260 writel_relaxed(va
, cb_base
+ ARM_SMMU_CB_ATS1PR
);
1262 if (readl_poll_timeout_atomic(cb_base
+ ARM_SMMU_CB_ATSR
, tmp
,
1263 !(tmp
& ATSR_ACTIVE
), 5, 50)) {
1265 "iova to phys timed out on %pad. Falling back to software table walk.\n",
1267 return ops
->iova_to_phys(ops
, iova
);
1270 phys
= readq_relaxed(cb_base
+ ARM_SMMU_CB_PAR
);
1271 if (phys
& CB_PAR_F
) {
1272 dev_err(dev
, "translation fault!\n");
1273 dev_err(dev
, "PAR = 0x%llx\n", phys
);
1277 return (phys
& GENMASK_ULL(39, 12)) | (iova
& 0xfff);
1280 static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain
*domain
,
1284 unsigned long flags
;
1285 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1286 struct io_pgtable_ops
*ops
= smmu_domain
->pgtbl_ops
;
1291 spin_lock_irqsave(&smmu_domain
->pgtbl_lock
, flags
);
1292 if (smmu_domain
->smmu
->features
& ARM_SMMU_FEAT_TRANS_OPS
&&
1293 smmu_domain
->stage
== ARM_SMMU_DOMAIN_S1
) {
1294 ret
= arm_smmu_iova_to_phys_hard(domain
, iova
);
1296 ret
= ops
->iova_to_phys(ops
, iova
);
1299 spin_unlock_irqrestore(&smmu_domain
->pgtbl_lock
, flags
);
1304 static bool arm_smmu_capable(enum iommu_cap cap
)
1307 case IOMMU_CAP_CACHE_COHERENCY
:
1309 * Return true here as the SMMU can always send out coherent
1313 case IOMMU_CAP_INTR_REMAP
:
1314 return true; /* MSIs are just memory writes */
1315 case IOMMU_CAP_NOEXEC
:
1322 static int __arm_smmu_get_pci_sid(struct pci_dev
*pdev
, u16 alias
, void *data
)
1324 *((u16
*)data
) = alias
;
1325 return 0; /* Continue walking */
/*
 * Release callback for iommu-group data allocated in
 * arm_smmu_init_pci_device(); frees the kzalloc'd master cfg.
 */
static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}
1333 static int arm_smmu_init_pci_device(struct pci_dev
*pdev
,
1334 struct iommu_group
*group
)
1336 struct arm_smmu_master_cfg
*cfg
;
1340 cfg
= iommu_group_get_iommudata(group
);
1342 cfg
= kzalloc(sizeof(*cfg
), GFP_KERNEL
);
1346 iommu_group_set_iommudata(group
, cfg
,
1347 __arm_smmu_release_pci_iommudata
);
1350 if (cfg
->num_streamids
>= MAX_MASTER_STREAMIDS
)
1354 * Assume Stream ID == Requester ID for now.
1355 * We need a way to describe the ID mappings in FDT.
1357 pci_for_each_dma_alias(pdev
, __arm_smmu_get_pci_sid
, &sid
);
1358 for (i
= 0; i
< cfg
->num_streamids
; ++i
)
1359 if (cfg
->streamids
[i
] == sid
)
1362 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1363 if (i
== cfg
->num_streamids
)
1364 cfg
->streamids
[cfg
->num_streamids
++] = sid
;
1369 static int arm_smmu_init_platform_device(struct device
*dev
,
1370 struct iommu_group
*group
)
1372 struct arm_smmu_device
*smmu
= find_smmu_for_device(dev
);
1373 struct arm_smmu_master
*master
;
1378 master
= find_smmu_master(smmu
, dev
->of_node
);
1382 iommu_group_set_iommudata(group
, &master
->cfg
, NULL
);
/*
 * iommu_ops->add_device callback: ensure the device is placed in an
 * iommu group (arm_smmu_device_group does the per-bus cfg setup).
 */
static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	/* Drop the reference taken by iommu_group_get_for_dev() */
	iommu_group_put(group);
	return 0;
}
/* iommu_ops->remove_device callback: take the device out of its group. */
static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
/*
 * iommu_ops->device_group callback: pick the generic PCI or platform
 * group for @dev, then initialise its SMMU master data. On init
 * failure the group reference is dropped and an ERR_PTR is returned.
 */
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}
1430 static int arm_smmu_domain_get_attr(struct iommu_domain
*domain
,
1431 enum iommu_attr attr
, void *data
)
1433 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1436 case DOMAIN_ATTR_NESTING
:
1437 *(int *)data
= (smmu_domain
->stage
== ARM_SMMU_DOMAIN_NESTED
);
1444 static int arm_smmu_domain_set_attr(struct iommu_domain
*domain
,
1445 enum iommu_attr attr
, void *data
)
1448 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1450 mutex_lock(&smmu_domain
->init_mutex
);
1453 case DOMAIN_ATTR_NESTING
:
1454 if (smmu_domain
->smmu
) {
1460 smmu_domain
->stage
= ARM_SMMU_DOMAIN_NESTED
;
1462 smmu_domain
->stage
= ARM_SMMU_DOMAIN_S1
;
1470 mutex_unlock(&smmu_domain
->init_mutex
);
1474 static struct iommu_ops arm_smmu_ops
= {
1475 .capable
= arm_smmu_capable
,
1476 .domain_alloc
= arm_smmu_domain_alloc
,
1477 .domain_free
= arm_smmu_domain_free
,
1478 .attach_dev
= arm_smmu_attach_dev
,
1479 .map
= arm_smmu_map
,
1480 .unmap
= arm_smmu_unmap
,
1481 .map_sg
= default_iommu_map_sg
,
1482 .iova_to_phys
= arm_smmu_iova_to_phys
,
1483 .add_device
= arm_smmu_add_device
,
1484 .remove_device
= arm_smmu_remove_device
,
1485 .device_group
= arm_smmu_device_group
,
1486 .domain_get_attr
= arm_smmu_domain_get_attr
,
1487 .domain_set_attr
= arm_smmu_domain_set_attr
,
1488 .pgsize_bitmap
= -1UL, /* Restricted during device attach */
1491 static void arm_smmu_device_reset(struct arm_smmu_device
*smmu
)
1493 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
1494 void __iomem
*cb_base
;
1498 /* clear global FSR */
1499 reg
= readl_relaxed(ARM_SMMU_GR0_NS(smmu
) + ARM_SMMU_GR0_sGFSR
);
1500 writel(reg
, ARM_SMMU_GR0_NS(smmu
) + ARM_SMMU_GR0_sGFSR
);
1502 /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
1503 reg
= disable_bypass
? S2CR_TYPE_FAULT
: S2CR_TYPE_BYPASS
;
1504 for (i
= 0; i
< smmu
->num_mapping_groups
; ++i
) {
1505 writel_relaxed(0, gr0_base
+ ARM_SMMU_GR0_SMR(i
));
1506 writel_relaxed(reg
, gr0_base
+ ARM_SMMU_GR0_S2CR(i
));
1509 /* Make sure all context banks are disabled and clear CB_FSR */
1510 for (i
= 0; i
< smmu
->num_context_banks
; ++i
) {
1511 cb_base
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, i
);
1512 writel_relaxed(0, cb_base
+ ARM_SMMU_CB_SCTLR
);
1513 writel_relaxed(FSR_FAULT
, cb_base
+ ARM_SMMU_CB_FSR
);
1515 * Disable MMU-500's not-particularly-beneficial next-page
1516 * prefetcher for the sake of errata #841119 and #826419.
1518 if (smmu
->model
== ARM_MMU500
) {
1519 reg
= readl_relaxed(cb_base
+ ARM_SMMU_CB_ACTLR
);
1520 reg
&= ~ARM_MMU500_ACTLR_CPRE
;
1521 writel_relaxed(reg
, cb_base
+ ARM_SMMU_CB_ACTLR
);
1525 /* Invalidate the TLB, just in case */
1526 writel_relaxed(0, gr0_base
+ ARM_SMMU_GR0_TLBIALLH
);
1527 writel_relaxed(0, gr0_base
+ ARM_SMMU_GR0_TLBIALLNSNH
);
1529 reg
= readl_relaxed(ARM_SMMU_GR0_NS(smmu
) + ARM_SMMU_GR0_sCR0
);
1531 /* Enable fault reporting */
1532 reg
|= (sCR0_GFRE
| sCR0_GFIE
| sCR0_GCFGFRE
| sCR0_GCFGFIE
);
1534 /* Disable TLB broadcasting. */
1535 reg
|= (sCR0_VMIDPNE
| sCR0_PTM
);
1537 /* Enable client access, handling unmatched streams as appropriate */
1538 reg
&= ~sCR0_CLIENTPD
;
1542 reg
&= ~sCR0_USFCFG
;
1544 /* Disable forced broadcasting */
1547 /* Don't upgrade barriers */
1548 reg
&= ~(sCR0_BSU_MASK
<< sCR0_BSU_SHIFT
);
1550 if (smmu
->features
& ARM_SMMU_FEAT_VMID16
)
1551 reg
|= sCR0_VMID16EN
;
1553 /* Push the button */
1554 __arm_smmu_tlb_sync(smmu
);
1555 writel(reg
, ARM_SMMU_GR0_NS(smmu
) + ARM_SMMU_GR0_sCR0
);
/*
 * Decode an ID-register address-size field (IDR2.IAS/OAS/UBS encoding)
 * into a bit width. Values above the architected range saturate at 48.
 * NOTE(review): body was missing from this chunk; reconstructed from
 * the architected encoding — confirm against the original file.
 */
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
1577 static int arm_smmu_device_cfg_probe(struct arm_smmu_device
*smmu
)
1580 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
1582 bool cttw_dt
, cttw_reg
;
1584 dev_notice(smmu
->dev
, "probing hardware configuration...\n");
1585 dev_notice(smmu
->dev
, "SMMUv%d with:\n", smmu
->version
);
1588 id
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_ID0
);
1590 /* Restrict available stages based on module parameter */
1591 if (force_stage
== 1)
1592 id
&= ~(ID0_S2TS
| ID0_NTS
);
1593 else if (force_stage
== 2)
1594 id
&= ~(ID0_S1TS
| ID0_NTS
);
1596 if (id
& ID0_S1TS
) {
1597 smmu
->features
|= ARM_SMMU_FEAT_TRANS_S1
;
1598 dev_notice(smmu
->dev
, "\tstage 1 translation\n");
1601 if (id
& ID0_S2TS
) {
1602 smmu
->features
|= ARM_SMMU_FEAT_TRANS_S2
;
1603 dev_notice(smmu
->dev
, "\tstage 2 translation\n");
1607 smmu
->features
|= ARM_SMMU_FEAT_TRANS_NESTED
;
1608 dev_notice(smmu
->dev
, "\tnested translation\n");
1611 if (!(smmu
->features
&
1612 (ARM_SMMU_FEAT_TRANS_S1
| ARM_SMMU_FEAT_TRANS_S2
))) {
1613 dev_err(smmu
->dev
, "\tno translation support!\n");
1617 if ((id
& ID0_S1TS
) && ((smmu
->version
== 1) || !(id
& ID0_ATOSNS
))) {
1618 smmu
->features
|= ARM_SMMU_FEAT_TRANS_OPS
;
1619 dev_notice(smmu
->dev
, "\taddress translation ops\n");
1623 * In order for DMA API calls to work properly, we must defer to what
1624 * the DT says about coherency, regardless of what the hardware claims.
1625 * Fortunately, this also opens up a workaround for systems where the
1626 * ID register value has ended up configured incorrectly.
1628 cttw_dt
= of_dma_is_coherent(smmu
->dev
->of_node
);
1629 cttw_reg
= !!(id
& ID0_CTTW
);
1631 smmu
->features
|= ARM_SMMU_FEAT_COHERENT_WALK
;
1632 if (cttw_dt
|| cttw_reg
)
1633 dev_notice(smmu
->dev
, "\t%scoherent table walk\n",
1634 cttw_dt
? "" : "non-");
1635 if (cttw_dt
!= cttw_reg
)
1636 dev_notice(smmu
->dev
,
1637 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
1642 smmu
->features
|= ARM_SMMU_FEAT_STREAM_MATCH
;
1643 smmu
->num_mapping_groups
= (id
>> ID0_NUMSMRG_SHIFT
) &
1645 if (smmu
->num_mapping_groups
== 0) {
1647 "stream-matching supported, but no SMRs present!\n");
1651 smr
= SMR_MASK_MASK
<< SMR_MASK_SHIFT
;
1652 smr
|= (SMR_ID_MASK
<< SMR_ID_SHIFT
);
1653 writel_relaxed(smr
, gr0_base
+ ARM_SMMU_GR0_SMR(0));
1654 smr
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_SMR(0));
1656 mask
= (smr
>> SMR_MASK_SHIFT
) & SMR_MASK_MASK
;
1657 sid
= (smr
>> SMR_ID_SHIFT
) & SMR_ID_MASK
;
1658 if ((mask
& sid
) != sid
) {
1660 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
1665 dev_notice(smmu
->dev
,
1666 "\tstream matching with %u register groups, mask 0x%x",
1667 smmu
->num_mapping_groups
, mask
);
1669 smmu
->num_mapping_groups
= (id
>> ID0_NUMSIDB_SHIFT
) &
1674 id
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_ID1
);
1675 smmu
->pgshift
= (id
& ID1_PAGESIZE
) ? 16 : 12;
1677 /* Check for size mismatch of SMMU address space from mapped region */
1678 size
= 1 << (((id
>> ID1_NUMPAGENDXB_SHIFT
) & ID1_NUMPAGENDXB_MASK
) + 1);
1679 size
*= 2 << smmu
->pgshift
;
1680 if (smmu
->size
!= size
)
1682 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1685 smmu
->num_s2_context_banks
= (id
>> ID1_NUMS2CB_SHIFT
) & ID1_NUMS2CB_MASK
;
1686 smmu
->num_context_banks
= (id
>> ID1_NUMCB_SHIFT
) & ID1_NUMCB_MASK
;
1687 if (smmu
->num_s2_context_banks
> smmu
->num_context_banks
) {
1688 dev_err(smmu
->dev
, "impossible number of S2 context banks!\n");
1691 dev_notice(smmu
->dev
, "\t%u context banks (%u stage-2 only)\n",
1692 smmu
->num_context_banks
, smmu
->num_s2_context_banks
);
1694 * Cavium CN88xx erratum #27704.
1695 * Ensure ASID and VMID allocation is unique across all SMMUs in
1698 if (smmu
->model
== CAVIUM_SMMUV2
) {
1699 smmu
->cavium_id_base
=
1700 atomic_add_return(smmu
->num_context_banks
,
1701 &cavium_smmu_context_count
);
1702 smmu
->cavium_id_base
-= smmu
->num_context_banks
;
1706 id
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_ID2
);
1707 size
= arm_smmu_id_size_to_bits((id
>> ID2_IAS_SHIFT
) & ID2_IAS_MASK
);
1708 smmu
->ipa_size
= size
;
1710 /* The output mask is also applied for bypass */
1711 size
= arm_smmu_id_size_to_bits((id
>> ID2_OAS_SHIFT
) & ID2_OAS_MASK
);
1712 smmu
->pa_size
= size
;
1714 if (id
& ID2_VMID16
)
1715 smmu
->features
|= ARM_SMMU_FEAT_VMID16
;
1718 * What the page table walker can address actually depends on which
1719 * descriptor format is in use, but since a) we don't know that yet,
1720 * and b) it can vary per context bank, this will have to do...
1722 if (dma_set_mask_and_coherent(smmu
->dev
, DMA_BIT_MASK(size
)))
1724 "failed to set DMA mask for table walker\n");
1726 if (smmu
->version
== ARM_SMMU_V1
) {
1727 smmu
->va_size
= smmu
->ipa_size
;
1728 size
= SZ_4K
| SZ_2M
| SZ_1G
;
1730 size
= (id
>> ID2_UBS_SHIFT
) & ID2_UBS_MASK
;
1731 smmu
->va_size
= arm_smmu_id_size_to_bits(size
);
1732 #ifndef CONFIG_64BIT
1733 smmu
->va_size
= min(32UL, smmu
->va_size
);
1736 if (id
& ID2_PTFS_4K
)
1737 size
|= SZ_4K
| SZ_2M
| SZ_1G
;
1738 if (id
& ID2_PTFS_16K
)
1739 size
|= SZ_16K
| SZ_32M
;
1740 if (id
& ID2_PTFS_64K
)
1741 size
|= SZ_64K
| SZ_512M
;
1744 arm_smmu_ops
.pgsize_bitmap
&= size
;
1745 dev_notice(smmu
->dev
, "\tSupported page sizes: 0x%08lx\n", size
);
1747 if (smmu
->features
& ARM_SMMU_FEAT_TRANS_S1
)
1748 dev_notice(smmu
->dev
, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1749 smmu
->va_size
, smmu
->ipa_size
);
1751 if (smmu
->features
& ARM_SMMU_FEAT_TRANS_S2
)
1752 dev_notice(smmu
->dev
, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1753 smmu
->ipa_size
, smmu
->pa_size
);
1758 struct arm_smmu_match_data
{
1759 enum arm_smmu_arch_version version
;
1760 enum arm_smmu_implementation model
;
1763 #define ARM_SMMU_MATCH_DATA(name, ver, imp) \
1764 static struct arm_smmu_match_data name = { .version = ver, .model = imp }
1766 ARM_SMMU_MATCH_DATA(smmu_generic_v1
, ARM_SMMU_V1
, GENERIC_SMMU
);
1767 ARM_SMMU_MATCH_DATA(smmu_generic_v2
, ARM_SMMU_V2
, GENERIC_SMMU
);
1768 ARM_SMMU_MATCH_DATA(arm_mmu500
, ARM_SMMU_V2
, ARM_MMU500
);
1769 ARM_SMMU_MATCH_DATA(cavium_smmuv2
, ARM_SMMU_V2
, CAVIUM_SMMUV2
);
1771 static const struct of_device_id arm_smmu_of_match
[] = {
1772 { .compatible
= "arm,smmu-v1", .data
= &smmu_generic_v1
},
1773 { .compatible
= "arm,smmu-v2", .data
= &smmu_generic_v2
},
1774 { .compatible
= "arm,mmu-400", .data
= &smmu_generic_v1
},
1775 { .compatible
= "arm,mmu-401", .data
= &smmu_generic_v1
},
1776 { .compatible
= "arm,mmu-500", .data
= &arm_mmu500
},
1777 { .compatible
= "cavium,smmu-v2", .data
= &cavium_smmuv2
},
1780 MODULE_DEVICE_TABLE(of
, arm_smmu_of_match
);
1782 static int arm_smmu_device_dt_probe(struct platform_device
*pdev
)
1784 const struct of_device_id
*of_id
;
1785 const struct arm_smmu_match_data
*data
;
1786 struct resource
*res
;
1787 struct arm_smmu_device
*smmu
;
1788 struct device
*dev
= &pdev
->dev
;
1789 struct rb_node
*node
;
1790 struct of_phandle_args masterspec
;
1791 int num_irqs
, i
, err
;
1793 smmu
= devm_kzalloc(dev
, sizeof(*smmu
), GFP_KERNEL
);
1795 dev_err(dev
, "failed to allocate arm_smmu_device\n");
1800 of_id
= of_match_node(arm_smmu_of_match
, dev
->of_node
);
1802 smmu
->version
= data
->version
;
1803 smmu
->model
= data
->model
;
1805 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1806 smmu
->base
= devm_ioremap_resource(dev
, res
);
1807 if (IS_ERR(smmu
->base
))
1808 return PTR_ERR(smmu
->base
);
1809 smmu
->size
= resource_size(res
);
1811 if (of_property_read_u32(dev
->of_node
, "#global-interrupts",
1812 &smmu
->num_global_irqs
)) {
1813 dev_err(dev
, "missing #global-interrupts property\n");
1818 while ((res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, num_irqs
))) {
1820 if (num_irqs
> smmu
->num_global_irqs
)
1821 smmu
->num_context_irqs
++;
1824 if (!smmu
->num_context_irqs
) {
1825 dev_err(dev
, "found %d interrupts but expected at least %d\n",
1826 num_irqs
, smmu
->num_global_irqs
+ 1);
1830 smmu
->irqs
= devm_kzalloc(dev
, sizeof(*smmu
->irqs
) * num_irqs
,
1833 dev_err(dev
, "failed to allocate %d irqs\n", num_irqs
);
1837 for (i
= 0; i
< num_irqs
; ++i
) {
1838 int irq
= platform_get_irq(pdev
, i
);
1841 dev_err(dev
, "failed to get irq index %d\n", i
);
1844 smmu
->irqs
[i
] = irq
;
1847 err
= arm_smmu_device_cfg_probe(smmu
);
1852 smmu
->masters
= RB_ROOT
;
1853 while (!of_parse_phandle_with_args(dev
->of_node
, "mmu-masters",
1854 "#stream-id-cells", i
,
1856 err
= register_smmu_master(smmu
, dev
, &masterspec
);
1858 dev_err(dev
, "failed to add master %s\n",
1859 masterspec
.np
->name
);
1860 goto out_put_masters
;
1865 dev_notice(dev
, "registered %d master devices\n", i
);
1867 parse_driver_options(smmu
);
1869 if (smmu
->version
> ARM_SMMU_V1
&&
1870 smmu
->num_context_banks
!= smmu
->num_context_irqs
) {
1872 "found only %d context interrupt(s) but %d required\n",
1873 smmu
->num_context_irqs
, smmu
->num_context_banks
);
1875 goto out_put_masters
;
1878 for (i
= 0; i
< smmu
->num_global_irqs
; ++i
) {
1879 err
= request_irq(smmu
->irqs
[i
],
1880 arm_smmu_global_fault
,
1882 "arm-smmu global fault",
1885 dev_err(dev
, "failed to request global IRQ %d (%u)\n",
1891 INIT_LIST_HEAD(&smmu
->list
);
1892 spin_lock(&arm_smmu_devices_lock
);
1893 list_add(&smmu
->list
, &arm_smmu_devices
);
1894 spin_unlock(&arm_smmu_devices_lock
);
1896 arm_smmu_device_reset(smmu
);
1901 free_irq(smmu
->irqs
[i
], smmu
);
1904 for (node
= rb_first(&smmu
->masters
); node
; node
= rb_next(node
)) {
1905 struct arm_smmu_master
*master
1906 = container_of(node
, struct arm_smmu_master
, node
);
1907 of_node_put(master
->of_node
);
1913 static int arm_smmu_device_remove(struct platform_device
*pdev
)
1916 struct device
*dev
= &pdev
->dev
;
1917 struct arm_smmu_device
*curr
, *smmu
= NULL
;
1918 struct rb_node
*node
;
1920 spin_lock(&arm_smmu_devices_lock
);
1921 list_for_each_entry(curr
, &arm_smmu_devices
, list
) {
1922 if (curr
->dev
== dev
) {
1924 list_del(&smmu
->list
);
1928 spin_unlock(&arm_smmu_devices_lock
);
1933 for (node
= rb_first(&smmu
->masters
); node
; node
= rb_next(node
)) {
1934 struct arm_smmu_master
*master
1935 = container_of(node
, struct arm_smmu_master
, node
);
1936 of_node_put(master
->of_node
);
1939 if (!bitmap_empty(smmu
->context_map
, ARM_SMMU_MAX_CBS
))
1940 dev_err(dev
, "removing device with active domains!\n");
1942 for (i
= 0; i
< smmu
->num_global_irqs
; ++i
)
1943 free_irq(smmu
->irqs
[i
], smmu
);
1945 /* Turn the thing off */
1946 writel(sCR0_CLIENTPD
, ARM_SMMU_GR0_NS(smmu
) + ARM_SMMU_GR0_sCR0
);
1950 static struct platform_driver arm_smmu_driver
= {
1953 .of_match_table
= of_match_ptr(arm_smmu_of_match
),
1955 .probe
= arm_smmu_device_dt_probe
,
1956 .remove
= arm_smmu_device_remove
,
1959 static int __init
arm_smmu_init(void)
1961 struct device_node
*np
;
1965 * Play nice with systems that don't have an ARM SMMU by checking that
1966 * an ARM SMMU exists in the system before proceeding with the driver
1967 * and IOMMU bus operation registration.
1969 np
= of_find_matching_node(NULL
, arm_smmu_of_match
);
1975 ret
= platform_driver_register(&arm_smmu_driver
);
1979 /* Oh, for a proper bus abstraction */
1980 if (!iommu_present(&platform_bus_type
))
1981 bus_set_iommu(&platform_bus_type
, &arm_smmu_ops
);
1983 #ifdef CONFIG_ARM_AMBA
1984 if (!iommu_present(&amba_bustype
))
1985 bus_set_iommu(&amba_bustype
, &arm_smmu_ops
);
1989 if (!iommu_present(&pci_bus_type
))
1990 bus_set_iommu(&pci_bus_type
, &arm_smmu_ops
);
1996 static void __exit
arm_smmu_exit(void)
1998 return platform_driver_unregister(&arm_smmu_driver
);
2001 subsys_initcall(arm_smmu_init
);
2002 module_exit(arm_smmu_exit
);
2004 MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2005 MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2006 MODULE_LICENSE("GPL v2");