/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- Context fault reporting
 */
29 #define pr_fmt(fmt) "arm-smmu: " fmt
31 #include <linux/delay.h>
32 #include <linux/dma-iommu.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/err.h>
35 #include <linux/interrupt.h>
37 #include <linux/iommu.h>
38 #include <linux/iopoll.h>
39 #include <linux/module.h>
41 #include <linux/of_address.h>
42 #include <linux/pci.h>
43 #include <linux/platform_device.h>
44 #include <linux/slab.h>
45 #include <linux/spinlock.h>
47 #include <linux/amba/bus.h>
49 #include "io-pgtable.h"
51 /* Maximum number of stream IDs assigned to a single device */
52 #define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS
54 /* Maximum number of context banks per SMMU */
55 #define ARM_SMMU_MAX_CBS 128
57 /* Maximum number of mapping groups per SMMU */
58 #define ARM_SMMU_MAX_SMRS 128
60 /* SMMU global address space */
61 #define ARM_SMMU_GR0(smmu) ((smmu)->base)
62 #define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
65 * SMMU global address space with conditional offset to access secure
66 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
69 #define ARM_SMMU_GR0_NS(smmu) \
71 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
75 #define smmu_writeq writeq_relaxed
77 #define smmu_writeq(reg64, addr) \
79 u64 __val = (reg64); \
80 void __iomem *__addr = (addr); \
81 writel_relaxed(__val >> 32, __addr + 4); \
82 writel_relaxed(__val, __addr); \
86 /* Configuration registers */
87 #define ARM_SMMU_GR0_sCR0 0x0
88 #define sCR0_CLIENTPD (1 << 0)
89 #define sCR0_GFRE (1 << 1)
90 #define sCR0_GFIE (1 << 2)
91 #define sCR0_GCFGFRE (1 << 4)
92 #define sCR0_GCFGFIE (1 << 5)
93 #define sCR0_USFCFG (1 << 10)
94 #define sCR0_VMIDPNE (1 << 11)
95 #define sCR0_PTM (1 << 12)
96 #define sCR0_FB (1 << 13)
97 #define sCR0_VMID16EN (1 << 31)
98 #define sCR0_BSU_SHIFT 14
99 #define sCR0_BSU_MASK 0x3
101 /* Identification registers */
102 #define ARM_SMMU_GR0_ID0 0x20
103 #define ARM_SMMU_GR0_ID1 0x24
104 #define ARM_SMMU_GR0_ID2 0x28
105 #define ARM_SMMU_GR0_ID3 0x2c
106 #define ARM_SMMU_GR0_ID4 0x30
107 #define ARM_SMMU_GR0_ID5 0x34
108 #define ARM_SMMU_GR0_ID6 0x38
109 #define ARM_SMMU_GR0_ID7 0x3c
110 #define ARM_SMMU_GR0_sGFSR 0x48
111 #define ARM_SMMU_GR0_sGFSYNR0 0x50
112 #define ARM_SMMU_GR0_sGFSYNR1 0x54
113 #define ARM_SMMU_GR0_sGFSYNR2 0x58
115 #define ID0_S1TS (1 << 30)
116 #define ID0_S2TS (1 << 29)
117 #define ID0_NTS (1 << 28)
118 #define ID0_SMS (1 << 27)
119 #define ID0_ATOSNS (1 << 26)
120 #define ID0_CTTW (1 << 14)
121 #define ID0_NUMIRPT_SHIFT 16
122 #define ID0_NUMIRPT_MASK 0xff
123 #define ID0_NUMSIDB_SHIFT 9
124 #define ID0_NUMSIDB_MASK 0xf
125 #define ID0_NUMSMRG_SHIFT 0
126 #define ID0_NUMSMRG_MASK 0xff
128 #define ID1_PAGESIZE (1 << 31)
129 #define ID1_NUMPAGENDXB_SHIFT 28
130 #define ID1_NUMPAGENDXB_MASK 7
131 #define ID1_NUMS2CB_SHIFT 16
132 #define ID1_NUMS2CB_MASK 0xff
133 #define ID1_NUMCB_SHIFT 0
134 #define ID1_NUMCB_MASK 0xff
136 #define ID2_OAS_SHIFT 4
137 #define ID2_OAS_MASK 0xf
138 #define ID2_IAS_SHIFT 0
139 #define ID2_IAS_MASK 0xf
140 #define ID2_UBS_SHIFT 8
141 #define ID2_UBS_MASK 0xf
142 #define ID2_PTFS_4K (1 << 12)
143 #define ID2_PTFS_16K (1 << 13)
144 #define ID2_PTFS_64K (1 << 14)
145 #define ID2_VMID16 (1 << 15)
147 /* Global TLB invalidation */
148 #define ARM_SMMU_GR0_TLBIVMID 0x64
149 #define ARM_SMMU_GR0_TLBIALLNSNH 0x68
150 #define ARM_SMMU_GR0_TLBIALLH 0x6c
151 #define ARM_SMMU_GR0_sTLBGSYNC 0x70
152 #define ARM_SMMU_GR0_sTLBGSTATUS 0x74
153 #define sTLBGSTATUS_GSACTIVE (1 << 0)
154 #define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
156 /* Stream mapping registers */
157 #define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
158 #define SMR_VALID (1 << 31)
159 #define SMR_MASK_SHIFT 16
160 #define SMR_MASK_MASK 0x7fff
161 #define SMR_ID_SHIFT 0
162 #define SMR_ID_MASK 0x7fff
164 #define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
165 #define S2CR_CBNDX_SHIFT 0
166 #define S2CR_CBNDX_MASK 0xff
167 #define S2CR_TYPE_SHIFT 16
168 #define S2CR_TYPE_MASK 0x3
169 #define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT)
170 #define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT)
171 #define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT)
173 #define S2CR_PRIVCFG_SHIFT 24
174 #define S2CR_PRIVCFG_UNPRIV (2 << S2CR_PRIVCFG_SHIFT)
176 /* Context bank attribute registers */
177 #define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
178 #define CBAR_VMID_SHIFT 0
179 #define CBAR_VMID_MASK 0xff
180 #define CBAR_S1_BPSHCFG_SHIFT 8
181 #define CBAR_S1_BPSHCFG_MASK 3
182 #define CBAR_S1_BPSHCFG_NSH 3
183 #define CBAR_S1_MEMATTR_SHIFT 12
184 #define CBAR_S1_MEMATTR_MASK 0xf
185 #define CBAR_S1_MEMATTR_WB 0xf
186 #define CBAR_TYPE_SHIFT 16
187 #define CBAR_TYPE_MASK 0x3
188 #define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
189 #define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
190 #define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
191 #define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
192 #define CBAR_IRPTNDX_SHIFT 24
193 #define CBAR_IRPTNDX_MASK 0xff
195 #define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
196 #define CBA2R_RW64_32BIT (0 << 0)
197 #define CBA2R_RW64_64BIT (1 << 0)
198 #define CBA2R_VMID_SHIFT 16
199 #define CBA2R_VMID_MASK 0xffff
201 /* Translation context bank */
202 #define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
203 #define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
205 #define ARM_SMMU_CB_SCTLR 0x0
206 #define ARM_SMMU_CB_RESUME 0x8
207 #define ARM_SMMU_CB_TTBCR2 0x10
208 #define ARM_SMMU_CB_TTBR0 0x20
209 #define ARM_SMMU_CB_TTBR1 0x28
210 #define ARM_SMMU_CB_TTBCR 0x30
211 #define ARM_SMMU_CB_S1_MAIR0 0x38
212 #define ARM_SMMU_CB_S1_MAIR1 0x3c
213 #define ARM_SMMU_CB_PAR_LO 0x50
214 #define ARM_SMMU_CB_PAR_HI 0x54
215 #define ARM_SMMU_CB_FSR 0x58
216 #define ARM_SMMU_CB_FAR_LO 0x60
217 #define ARM_SMMU_CB_FAR_HI 0x64
218 #define ARM_SMMU_CB_FSYNR0 0x68
219 #define ARM_SMMU_CB_S1_TLBIVA 0x600
220 #define ARM_SMMU_CB_S1_TLBIASID 0x610
221 #define ARM_SMMU_CB_S1_TLBIVAL 0x620
222 #define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
223 #define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
224 #define ARM_SMMU_CB_ATS1PR 0x800
225 #define ARM_SMMU_CB_ATSR 0x8f0
227 #define SCTLR_S1_ASIDPNE (1 << 12)
228 #define SCTLR_CFCFG (1 << 7)
229 #define SCTLR_CFIE (1 << 6)
230 #define SCTLR_CFRE (1 << 5)
231 #define SCTLR_E (1 << 4)
232 #define SCTLR_AFE (1 << 2)
233 #define SCTLR_TRE (1 << 1)
234 #define SCTLR_M (1 << 0)
235 #define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)
237 #define CB_PAR_F (1 << 0)
239 #define ATSR_ACTIVE (1 << 0)
241 #define RESUME_RETRY (0 << 0)
242 #define RESUME_TERMINATE (1 << 0)
244 #define TTBCR2_SEP_SHIFT 15
245 #define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
247 #define TTBRn_ASID_SHIFT 48
249 #define FSR_MULTI (1 << 31)
250 #define FSR_SS (1 << 30)
251 #define FSR_UUT (1 << 8)
252 #define FSR_ASF (1 << 7)
253 #define FSR_TLBLKF (1 << 6)
254 #define FSR_TLBMCF (1 << 5)
255 #define FSR_EF (1 << 4)
256 #define FSR_PF (1 << 3)
257 #define FSR_AFF (1 << 2)
258 #define FSR_TF (1 << 1)
260 #define FSR_IGN (FSR_AFF | FSR_ASF | \
261 FSR_TLBMCF | FSR_TLBLKF)
262 #define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
263 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
265 #define FSYNR0_WNR (1 << 4)
267 static int force_stage
;
268 module_param(force_stage
, int, S_IRUGO
);
269 MODULE_PARM_DESC(force_stage
,
270 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
271 static bool disable_bypass
;
272 module_param(disable_bypass
, bool, S_IRUGO
);
273 MODULE_PARM_DESC(disable_bypass
,
274 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
276 enum arm_smmu_arch_version
{
281 enum arm_smmu_implementation
{
286 struct arm_smmu_smr
{
292 struct arm_smmu_master_cfg
{
294 u16 streamids
[MAX_MASTER_STREAMIDS
];
295 struct arm_smmu_smr
*smrs
;
298 struct arm_smmu_master
{
299 struct device_node
*of_node
;
301 struct arm_smmu_master_cfg cfg
;
304 struct arm_smmu_device
{
309 unsigned long pgshift
;
311 #define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
312 #define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
313 #define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
314 #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
315 #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
316 #define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
317 #define ARM_SMMU_FEAT_VMID16 (1 << 6)
320 #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
322 enum arm_smmu_arch_version version
;
323 enum arm_smmu_implementation model
;
325 u32 num_context_banks
;
326 u32 num_s2_context_banks
;
327 DECLARE_BITMAP(context_map
, ARM_SMMU_MAX_CBS
);
330 u32 num_mapping_groups
;
331 DECLARE_BITMAP(smr_map
, ARM_SMMU_MAX_SMRS
);
333 unsigned long va_size
;
334 unsigned long ipa_size
;
335 unsigned long pa_size
;
338 u32 num_context_irqs
;
341 struct list_head list
;
342 struct rb_root masters
;
344 u32 cavium_id_base
; /* Specific to Cavium */
347 struct arm_smmu_cfg
{
352 #define INVALID_IRPTNDX 0xff
354 #define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
355 #define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
357 enum arm_smmu_domain_stage
{
358 ARM_SMMU_DOMAIN_S1
= 0,
360 ARM_SMMU_DOMAIN_NESTED
,
363 struct arm_smmu_domain
{
364 struct arm_smmu_device
*smmu
;
365 struct io_pgtable_ops
*pgtbl_ops
;
366 spinlock_t pgtbl_lock
;
367 struct arm_smmu_cfg cfg
;
368 enum arm_smmu_domain_stage stage
;
369 struct mutex init_mutex
; /* Protects smmu pointer */
370 struct iommu_domain domain
;
373 static struct iommu_ops arm_smmu_ops
;
375 static DEFINE_SPINLOCK(arm_smmu_devices_lock
);
376 static LIST_HEAD(arm_smmu_devices
);
378 struct arm_smmu_option_prop
{
383 static atomic_t cavium_smmu_context_count
= ATOMIC_INIT(0);
385 static struct arm_smmu_option_prop arm_smmu_options
[] = {
386 { ARM_SMMU_OPT_SECURE_CFG_ACCESS
, "calxeda,smmu-secure-config-access" },
390 static struct arm_smmu_domain
*to_smmu_domain(struct iommu_domain
*dom
)
392 return container_of(dom
, struct arm_smmu_domain
, domain
);
395 static void parse_driver_options(struct arm_smmu_device
*smmu
)
400 if (of_property_read_bool(smmu
->dev
->of_node
,
401 arm_smmu_options
[i
].prop
)) {
402 smmu
->options
|= arm_smmu_options
[i
].opt
;
403 dev_notice(smmu
->dev
, "option %s\n",
404 arm_smmu_options
[i
].prop
);
406 } while (arm_smmu_options
[++i
].opt
);
409 static struct device_node
*dev_get_dev_node(struct device
*dev
)
411 if (dev_is_pci(dev
)) {
412 struct pci_bus
*bus
= to_pci_dev(dev
)->bus
;
414 while (!pci_is_root_bus(bus
))
416 return bus
->bridge
->parent
->of_node
;
422 static struct arm_smmu_master
*find_smmu_master(struct arm_smmu_device
*smmu
,
423 struct device_node
*dev_node
)
425 struct rb_node
*node
= smmu
->masters
.rb_node
;
428 struct arm_smmu_master
*master
;
430 master
= container_of(node
, struct arm_smmu_master
, node
);
432 if (dev_node
< master
->of_node
)
433 node
= node
->rb_left
;
434 else if (dev_node
> master
->of_node
)
435 node
= node
->rb_right
;
443 static struct arm_smmu_master_cfg
*
444 find_smmu_master_cfg(struct device
*dev
)
446 struct arm_smmu_master_cfg
*cfg
= NULL
;
447 struct iommu_group
*group
= iommu_group_get(dev
);
450 cfg
= iommu_group_get_iommudata(group
);
451 iommu_group_put(group
);
457 static int insert_smmu_master(struct arm_smmu_device
*smmu
,
458 struct arm_smmu_master
*master
)
460 struct rb_node
**new, *parent
;
462 new = &smmu
->masters
.rb_node
;
465 struct arm_smmu_master
*this
466 = container_of(*new, struct arm_smmu_master
, node
);
469 if (master
->of_node
< this->of_node
)
470 new = &((*new)->rb_left
);
471 else if (master
->of_node
> this->of_node
)
472 new = &((*new)->rb_right
);
477 rb_link_node(&master
->node
, parent
, new);
478 rb_insert_color(&master
->node
, &smmu
->masters
);
482 static int register_smmu_master(struct arm_smmu_device
*smmu
,
484 struct of_phandle_args
*masterspec
)
487 struct arm_smmu_master
*master
;
489 master
= find_smmu_master(smmu
, masterspec
->np
);
492 "rejecting multiple registrations for master device %s\n",
493 masterspec
->np
->name
);
497 if (masterspec
->args_count
> MAX_MASTER_STREAMIDS
) {
499 "reached maximum number (%d) of stream IDs for master device %s\n",
500 MAX_MASTER_STREAMIDS
, masterspec
->np
->name
);
504 master
= devm_kzalloc(dev
, sizeof(*master
), GFP_KERNEL
);
508 master
->of_node
= masterspec
->np
;
509 master
->cfg
.num_streamids
= masterspec
->args_count
;
511 for (i
= 0; i
< master
->cfg
.num_streamids
; ++i
) {
512 u16 streamid
= masterspec
->args
[i
];
514 if (!(smmu
->features
& ARM_SMMU_FEAT_STREAM_MATCH
) &&
515 (streamid
>= smmu
->num_mapping_groups
)) {
517 "stream ID for master device %s greater than maximum allowed (%d)\n",
518 masterspec
->np
->name
, smmu
->num_mapping_groups
);
521 master
->cfg
.streamids
[i
] = streamid
;
523 return insert_smmu_master(smmu
, master
);
526 static struct arm_smmu_device
*find_smmu_for_device(struct device
*dev
)
528 struct arm_smmu_device
*smmu
;
529 struct arm_smmu_master
*master
= NULL
;
530 struct device_node
*dev_node
= dev_get_dev_node(dev
);
532 spin_lock(&arm_smmu_devices_lock
);
533 list_for_each_entry(smmu
, &arm_smmu_devices
, list
) {
534 master
= find_smmu_master(smmu
, dev_node
);
538 spin_unlock(&arm_smmu_devices_lock
);
540 return master
? smmu
: NULL
;
543 static int __arm_smmu_alloc_bitmap(unsigned long *map
, int start
, int end
)
548 idx
= find_next_zero_bit(map
, end
, start
);
551 } while (test_and_set_bit(idx
, map
));
/* Release an index previously claimed by __arm_smmu_alloc_bitmap(). */
static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
561 /* Wait for any pending TLB invalidations to complete */
562 static void __arm_smmu_tlb_sync(struct arm_smmu_device
*smmu
)
565 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
567 writel_relaxed(0, gr0_base
+ ARM_SMMU_GR0_sTLBGSYNC
);
568 while (readl_relaxed(gr0_base
+ ARM_SMMU_GR0_sTLBGSTATUS
)
569 & sTLBGSTATUS_GSACTIVE
) {
571 if (++count
== TLB_LOOP_TIMEOUT
) {
572 dev_err_ratelimited(smmu
->dev
,
573 "TLB sync timed out -- SMMU may be deadlocked\n");
580 static void arm_smmu_tlb_sync(void *cookie
)
582 struct arm_smmu_domain
*smmu_domain
= cookie
;
583 __arm_smmu_tlb_sync(smmu_domain
->smmu
);
586 static void arm_smmu_tlb_inv_context(void *cookie
)
588 struct arm_smmu_domain
*smmu_domain
= cookie
;
589 struct arm_smmu_cfg
*cfg
= &smmu_domain
->cfg
;
590 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
591 bool stage1
= cfg
->cbar
!= CBAR_TYPE_S2_TRANS
;
595 base
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, cfg
->cbndx
);
596 writel_relaxed(ARM_SMMU_CB_ASID(smmu
, cfg
),
597 base
+ ARM_SMMU_CB_S1_TLBIASID
);
599 base
= ARM_SMMU_GR0(smmu
);
600 writel_relaxed(ARM_SMMU_CB_VMID(smmu
, cfg
),
601 base
+ ARM_SMMU_GR0_TLBIVMID
);
604 __arm_smmu_tlb_sync(smmu
);
607 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova
, size_t size
,
608 size_t granule
, bool leaf
, void *cookie
)
610 struct arm_smmu_domain
*smmu_domain
= cookie
;
611 struct arm_smmu_cfg
*cfg
= &smmu_domain
->cfg
;
612 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
613 bool stage1
= cfg
->cbar
!= CBAR_TYPE_S2_TRANS
;
617 reg
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, cfg
->cbndx
);
618 reg
+= leaf
? ARM_SMMU_CB_S1_TLBIVAL
: ARM_SMMU_CB_S1_TLBIVA
;
620 if (!IS_ENABLED(CONFIG_64BIT
) || smmu
->version
== ARM_SMMU_V1
) {
622 iova
|= ARM_SMMU_CB_ASID(smmu
, cfg
);
624 writel_relaxed(iova
, reg
);
626 } while (size
-= granule
);
630 iova
|= (u64
)ARM_SMMU_CB_ASID(smmu
, cfg
) << 48;
632 writeq_relaxed(iova
, reg
);
633 iova
+= granule
>> 12;
634 } while (size
-= granule
);
638 } else if (smmu
->version
== ARM_SMMU_V2
) {
639 reg
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, cfg
->cbndx
);
640 reg
+= leaf
? ARM_SMMU_CB_S2_TLBIIPAS2L
:
641 ARM_SMMU_CB_S2_TLBIIPAS2
;
644 writeq_relaxed(iova
, reg
);
645 iova
+= granule
>> 12;
646 } while (size
-= granule
);
649 reg
= ARM_SMMU_GR0(smmu
) + ARM_SMMU_GR0_TLBIVMID
;
650 writel_relaxed(ARM_SMMU_CB_VMID(smmu
, cfg
), reg
);
654 static struct iommu_gather_ops arm_smmu_gather_ops
= {
655 .tlb_flush_all
= arm_smmu_tlb_inv_context
,
656 .tlb_add_flush
= arm_smmu_tlb_inv_range_nosync
,
657 .tlb_sync
= arm_smmu_tlb_sync
,
660 static irqreturn_t
arm_smmu_context_fault(int irq
, void *dev
)
663 u32 fsr
, far
, fsynr
, resume
;
665 struct iommu_domain
*domain
= dev
;
666 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
667 struct arm_smmu_cfg
*cfg
= &smmu_domain
->cfg
;
668 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
669 void __iomem
*cb_base
;
671 cb_base
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, cfg
->cbndx
);
672 fsr
= readl_relaxed(cb_base
+ ARM_SMMU_CB_FSR
);
674 if (!(fsr
& FSR_FAULT
))
678 dev_err_ratelimited(smmu
->dev
,
679 "Unexpected context fault (fsr 0x%x)\n",
682 fsynr
= readl_relaxed(cb_base
+ ARM_SMMU_CB_FSYNR0
);
683 flags
= fsynr
& FSYNR0_WNR
? IOMMU_FAULT_WRITE
: IOMMU_FAULT_READ
;
685 far
= readl_relaxed(cb_base
+ ARM_SMMU_CB_FAR_LO
);
688 far
= readl_relaxed(cb_base
+ ARM_SMMU_CB_FAR_HI
);
689 iova
|= ((unsigned long)far
<< 32);
692 if (!report_iommu_fault(domain
, smmu
->dev
, iova
, flags
)) {
694 resume
= RESUME_RETRY
;
696 dev_err_ratelimited(smmu
->dev
,
697 "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
698 iova
, fsynr
, cfg
->cbndx
);
700 resume
= RESUME_TERMINATE
;
703 /* Clear the faulting FSR */
704 writel(fsr
, cb_base
+ ARM_SMMU_CB_FSR
);
706 /* Retry or terminate any stalled transactions */
708 writel_relaxed(resume
, cb_base
+ ARM_SMMU_CB_RESUME
);
713 static irqreturn_t
arm_smmu_global_fault(int irq
, void *dev
)
715 u32 gfsr
, gfsynr0
, gfsynr1
, gfsynr2
;
716 struct arm_smmu_device
*smmu
= dev
;
717 void __iomem
*gr0_base
= ARM_SMMU_GR0_NS(smmu
);
719 gfsr
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_sGFSR
);
720 gfsynr0
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_sGFSYNR0
);
721 gfsynr1
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_sGFSYNR1
);
722 gfsynr2
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_sGFSYNR2
);
727 dev_err_ratelimited(smmu
->dev
,
728 "Unexpected global fault, this could be serious\n");
729 dev_err_ratelimited(smmu
->dev
,
730 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
731 gfsr
, gfsynr0
, gfsynr1
, gfsynr2
);
733 writel(gfsr
, gr0_base
+ ARM_SMMU_GR0_sGFSR
);
737 static void arm_smmu_init_context_bank(struct arm_smmu_domain
*smmu_domain
,
738 struct io_pgtable_cfg
*pgtbl_cfg
)
743 struct arm_smmu_cfg
*cfg
= &smmu_domain
->cfg
;
744 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
745 void __iomem
*cb_base
, *gr1_base
;
747 gr1_base
= ARM_SMMU_GR1(smmu
);
748 stage1
= cfg
->cbar
!= CBAR_TYPE_S2_TRANS
;
749 cb_base
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, cfg
->cbndx
);
751 if (smmu
->version
> ARM_SMMU_V1
) {
753 reg
= CBA2R_RW64_64BIT
;
755 reg
= CBA2R_RW64_32BIT
;
757 /* 16-bit VMIDs live in CBA2R */
758 if (smmu
->features
& ARM_SMMU_FEAT_VMID16
)
759 reg
|= ARM_SMMU_CB_VMID(smmu
, cfg
) << CBA2R_VMID_SHIFT
;
761 writel_relaxed(reg
, gr1_base
+ ARM_SMMU_GR1_CBA2R(cfg
->cbndx
));
766 if (smmu
->version
== ARM_SMMU_V1
)
767 reg
|= cfg
->irptndx
<< CBAR_IRPTNDX_SHIFT
;
770 * Use the weakest shareability/memory types, so they are
771 * overridden by the ttbcr/pte.
774 reg
|= (CBAR_S1_BPSHCFG_NSH
<< CBAR_S1_BPSHCFG_SHIFT
) |
775 (CBAR_S1_MEMATTR_WB
<< CBAR_S1_MEMATTR_SHIFT
);
776 } else if (!(smmu
->features
& ARM_SMMU_FEAT_VMID16
)) {
777 /* 8-bit VMIDs live in CBAR */
778 reg
|= ARM_SMMU_CB_VMID(smmu
, cfg
) << CBAR_VMID_SHIFT
;
780 writel_relaxed(reg
, gr1_base
+ ARM_SMMU_GR1_CBAR(cfg
->cbndx
));
784 reg64
= pgtbl_cfg
->arm_lpae_s1_cfg
.ttbr
[0];
786 reg64
|= ((u64
)ARM_SMMU_CB_ASID(smmu
, cfg
)) << TTBRn_ASID_SHIFT
;
787 smmu_writeq(reg64
, cb_base
+ ARM_SMMU_CB_TTBR0
);
789 reg64
= pgtbl_cfg
->arm_lpae_s1_cfg
.ttbr
[1];
790 reg64
|= ((u64
)ARM_SMMU_CB_ASID(smmu
, cfg
)) << TTBRn_ASID_SHIFT
;
791 smmu_writeq(reg64
, cb_base
+ ARM_SMMU_CB_TTBR1
);
793 reg64
= pgtbl_cfg
->arm_lpae_s2_cfg
.vttbr
;
794 smmu_writeq(reg64
, cb_base
+ ARM_SMMU_CB_TTBR0
);
799 reg
= pgtbl_cfg
->arm_lpae_s1_cfg
.tcr
;
800 writel_relaxed(reg
, cb_base
+ ARM_SMMU_CB_TTBCR
);
801 if (smmu
->version
> ARM_SMMU_V1
) {
802 reg
= pgtbl_cfg
->arm_lpae_s1_cfg
.tcr
>> 32;
803 reg
|= TTBCR2_SEP_UPSTREAM
;
804 writel_relaxed(reg
, cb_base
+ ARM_SMMU_CB_TTBCR2
);
807 reg
= pgtbl_cfg
->arm_lpae_s2_cfg
.vtcr
;
808 writel_relaxed(reg
, cb_base
+ ARM_SMMU_CB_TTBCR
);
811 /* MAIRs (stage-1 only) */
813 reg
= pgtbl_cfg
->arm_lpae_s1_cfg
.mair
[0];
814 writel_relaxed(reg
, cb_base
+ ARM_SMMU_CB_S1_MAIR0
);
815 reg
= pgtbl_cfg
->arm_lpae_s1_cfg
.mair
[1];
816 writel_relaxed(reg
, cb_base
+ ARM_SMMU_CB_S1_MAIR1
);
820 reg
= SCTLR_CFCFG
| SCTLR_CFIE
| SCTLR_CFRE
| SCTLR_M
| SCTLR_EAE_SBOP
;
822 reg
|= SCTLR_S1_ASIDPNE
;
826 writel_relaxed(reg
, cb_base
+ ARM_SMMU_CB_SCTLR
);
829 static int arm_smmu_init_domain_context(struct iommu_domain
*domain
,
830 struct arm_smmu_device
*smmu
)
832 int irq
, start
, ret
= 0;
833 unsigned long ias
, oas
;
834 struct io_pgtable_ops
*pgtbl_ops
;
835 struct io_pgtable_cfg pgtbl_cfg
;
836 enum io_pgtable_fmt fmt
;
837 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
838 struct arm_smmu_cfg
*cfg
= &smmu_domain
->cfg
;
840 mutex_lock(&smmu_domain
->init_mutex
);
841 if (smmu_domain
->smmu
)
845 * Mapping the requested stage onto what we support is surprisingly
846 * complicated, mainly because the spec allows S1+S2 SMMUs without
847 * support for nested translation. That means we end up with the
850 * Requested Supported Actual
860 * Note that you can't actually request stage-2 mappings.
862 if (!(smmu
->features
& ARM_SMMU_FEAT_TRANS_S1
))
863 smmu_domain
->stage
= ARM_SMMU_DOMAIN_S2
;
864 if (!(smmu
->features
& ARM_SMMU_FEAT_TRANS_S2
))
865 smmu_domain
->stage
= ARM_SMMU_DOMAIN_S1
;
867 switch (smmu_domain
->stage
) {
868 case ARM_SMMU_DOMAIN_S1
:
869 cfg
->cbar
= CBAR_TYPE_S1_TRANS_S2_BYPASS
;
870 start
= smmu
->num_s2_context_banks
;
872 oas
= smmu
->ipa_size
;
873 if (IS_ENABLED(CONFIG_64BIT
))
874 fmt
= ARM_64_LPAE_S1
;
876 fmt
= ARM_32_LPAE_S1
;
878 case ARM_SMMU_DOMAIN_NESTED
:
880 * We will likely want to change this if/when KVM gets
883 case ARM_SMMU_DOMAIN_S2
:
884 cfg
->cbar
= CBAR_TYPE_S2_TRANS
;
886 ias
= smmu
->ipa_size
;
888 if (IS_ENABLED(CONFIG_64BIT
))
889 fmt
= ARM_64_LPAE_S2
;
891 fmt
= ARM_32_LPAE_S2
;
898 ret
= __arm_smmu_alloc_bitmap(smmu
->context_map
, start
,
899 smmu
->num_context_banks
);
900 if (IS_ERR_VALUE(ret
))
904 if (smmu
->version
== ARM_SMMU_V1
) {
905 cfg
->irptndx
= atomic_inc_return(&smmu
->irptndx
);
906 cfg
->irptndx
%= smmu
->num_context_irqs
;
908 cfg
->irptndx
= cfg
->cbndx
;
911 pgtbl_cfg
= (struct io_pgtable_cfg
) {
912 .pgsize_bitmap
= arm_smmu_ops
.pgsize_bitmap
,
915 .tlb
= &arm_smmu_gather_ops
,
916 .iommu_dev
= smmu
->dev
,
919 smmu_domain
->smmu
= smmu
;
920 pgtbl_ops
= alloc_io_pgtable_ops(fmt
, &pgtbl_cfg
, smmu_domain
);
926 /* Update our support page sizes to reflect the page table format */
927 arm_smmu_ops
.pgsize_bitmap
= pgtbl_cfg
.pgsize_bitmap
;
929 /* Initialise the context bank with our page table cfg */
930 arm_smmu_init_context_bank(smmu_domain
, &pgtbl_cfg
);
933 * Request context fault interrupt. Do this last to avoid the
934 * handler seeing a half-initialised domain state.
936 irq
= smmu
->irqs
[smmu
->num_global_irqs
+ cfg
->irptndx
];
937 ret
= request_irq(irq
, arm_smmu_context_fault
, IRQF_SHARED
,
938 "arm-smmu-context-fault", domain
);
939 if (IS_ERR_VALUE(ret
)) {
940 dev_err(smmu
->dev
, "failed to request context IRQ %d (%u)\n",
942 cfg
->irptndx
= INVALID_IRPTNDX
;
945 mutex_unlock(&smmu_domain
->init_mutex
);
947 /* Publish page table ops for map/unmap */
948 smmu_domain
->pgtbl_ops
= pgtbl_ops
;
952 smmu_domain
->smmu
= NULL
;
954 mutex_unlock(&smmu_domain
->init_mutex
);
958 static void arm_smmu_destroy_domain_context(struct iommu_domain
*domain
)
960 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
961 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
962 struct arm_smmu_cfg
*cfg
= &smmu_domain
->cfg
;
963 void __iomem
*cb_base
;
970 * Disable the context bank and free the page tables before freeing
973 cb_base
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, cfg
->cbndx
);
974 writel_relaxed(0, cb_base
+ ARM_SMMU_CB_SCTLR
);
976 if (cfg
->irptndx
!= INVALID_IRPTNDX
) {
977 irq
= smmu
->irqs
[smmu
->num_global_irqs
+ cfg
->irptndx
];
978 free_irq(irq
, domain
);
981 free_io_pgtable_ops(smmu_domain
->pgtbl_ops
);
982 __arm_smmu_free_bitmap(smmu
->context_map
, cfg
->cbndx
);
985 static struct iommu_domain
*arm_smmu_domain_alloc(unsigned type
)
987 struct arm_smmu_domain
*smmu_domain
;
989 if (type
!= IOMMU_DOMAIN_UNMANAGED
&& type
!= IOMMU_DOMAIN_DMA
)
992 * Allocate the domain and initialise some of its data structures.
993 * We can't really do anything meaningful until we've added a
996 smmu_domain
= kzalloc(sizeof(*smmu_domain
), GFP_KERNEL
);
1000 if (type
== IOMMU_DOMAIN_DMA
&&
1001 iommu_get_dma_cookie(&smmu_domain
->domain
)) {
1006 mutex_init(&smmu_domain
->init_mutex
);
1007 spin_lock_init(&smmu_domain
->pgtbl_lock
);
1009 return &smmu_domain
->domain
;
static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	/*
	 * Free the domain resources. We assume that all devices have
	 * already been detached.
	 */
	iommu_put_dma_cookie(domain);
	arm_smmu_destroy_domain_context(domain);
	kfree(smmu_domain);
}
1025 static int arm_smmu_master_configure_smrs(struct arm_smmu_device
*smmu
,
1026 struct arm_smmu_master_cfg
*cfg
)
1029 struct arm_smmu_smr
*smrs
;
1030 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
1032 if (!(smmu
->features
& ARM_SMMU_FEAT_STREAM_MATCH
))
1038 smrs
= kmalloc_array(cfg
->num_streamids
, sizeof(*smrs
), GFP_KERNEL
);
1040 dev_err(smmu
->dev
, "failed to allocate %d SMRs\n",
1041 cfg
->num_streamids
);
1045 /* Allocate the SMRs on the SMMU */
1046 for (i
= 0; i
< cfg
->num_streamids
; ++i
) {
1047 int idx
= __arm_smmu_alloc_bitmap(smmu
->smr_map
, 0,
1048 smmu
->num_mapping_groups
);
1049 if (IS_ERR_VALUE(idx
)) {
1050 dev_err(smmu
->dev
, "failed to allocate free SMR\n");
1054 smrs
[i
] = (struct arm_smmu_smr
) {
1056 .mask
= 0, /* We don't currently share SMRs */
1057 .id
= cfg
->streamids
[i
],
1061 /* It worked! Now, poke the actual hardware */
1062 for (i
= 0; i
< cfg
->num_streamids
; ++i
) {
1063 u32 reg
= SMR_VALID
| smrs
[i
].id
<< SMR_ID_SHIFT
|
1064 smrs
[i
].mask
<< SMR_MASK_SHIFT
;
1065 writel_relaxed(reg
, gr0_base
+ ARM_SMMU_GR0_SMR(smrs
[i
].idx
));
1073 __arm_smmu_free_bitmap(smmu
->smr_map
, smrs
[i
].idx
);
1078 static void arm_smmu_master_free_smrs(struct arm_smmu_device
*smmu
,
1079 struct arm_smmu_master_cfg
*cfg
)
1082 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
1083 struct arm_smmu_smr
*smrs
= cfg
->smrs
;
1088 /* Invalidate the SMRs before freeing back to the allocator */
1089 for (i
= 0; i
< cfg
->num_streamids
; ++i
) {
1090 u8 idx
= smrs
[i
].idx
;
1092 writel_relaxed(~SMR_VALID
, gr0_base
+ ARM_SMMU_GR0_SMR(idx
));
1093 __arm_smmu_free_bitmap(smmu
->smr_map
, idx
);
1100 static int arm_smmu_domain_add_master(struct arm_smmu_domain
*smmu_domain
,
1101 struct arm_smmu_master_cfg
*cfg
)
1104 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1105 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
1107 /* Devices in an IOMMU group may already be configured */
1108 ret
= arm_smmu_master_configure_smrs(smmu
, cfg
);
1110 return ret
== -EEXIST
? 0 : ret
;
1113 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1114 * for all devices behind the SMMU.
1116 if (smmu_domain
->domain
.type
== IOMMU_DOMAIN_DMA
)
1119 for (i
= 0; i
< cfg
->num_streamids
; ++i
) {
1122 idx
= cfg
->smrs
? cfg
->smrs
[i
].idx
: cfg
->streamids
[i
];
1123 s2cr
= S2CR_TYPE_TRANS
| S2CR_PRIVCFG_UNPRIV
|
1124 (smmu_domain
->cfg
.cbndx
<< S2CR_CBNDX_SHIFT
);
1125 writel_relaxed(s2cr
, gr0_base
+ ARM_SMMU_GR0_S2CR(idx
));
1131 static void arm_smmu_domain_remove_master(struct arm_smmu_domain
*smmu_domain
,
1132 struct arm_smmu_master_cfg
*cfg
)
1135 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1136 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
1138 /* An IOMMU group is torn down by the first device to be removed */
1139 if ((smmu
->features
& ARM_SMMU_FEAT_STREAM_MATCH
) && !cfg
->smrs
)
1143 * We *must* clear the S2CR first, because freeing the SMR means
1144 * that it can be re-allocated immediately.
1146 for (i
= 0; i
< cfg
->num_streamids
; ++i
) {
1147 u32 idx
= cfg
->smrs
? cfg
->smrs
[i
].idx
: cfg
->streamids
[i
];
1148 u32 reg
= disable_bypass
? S2CR_TYPE_FAULT
: S2CR_TYPE_BYPASS
;
1150 writel_relaxed(reg
, gr0_base
+ ARM_SMMU_GR0_S2CR(idx
));
1153 arm_smmu_master_free_smrs(smmu
, cfg
);
1156 static void arm_smmu_detach_dev(struct device
*dev
,
1157 struct arm_smmu_master_cfg
*cfg
)
1159 struct iommu_domain
*domain
= dev
->archdata
.iommu
;
1160 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1162 dev
->archdata
.iommu
= NULL
;
1163 arm_smmu_domain_remove_master(smmu_domain
, cfg
);
1166 static int arm_smmu_attach_dev(struct iommu_domain
*domain
, struct device
*dev
)
1169 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1170 struct arm_smmu_device
*smmu
;
1171 struct arm_smmu_master_cfg
*cfg
;
1173 smmu
= find_smmu_for_device(dev
);
1175 dev_err(dev
, "cannot attach to SMMU, is it on the same bus?\n");
1179 /* Ensure that the domain is finalised */
1180 ret
= arm_smmu_init_domain_context(domain
, smmu
);
1181 if (IS_ERR_VALUE(ret
))
1185 * Sanity check the domain. We don't support domains across
1188 if (smmu_domain
->smmu
!= smmu
) {
1190 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
1191 dev_name(smmu_domain
->smmu
->dev
), dev_name(smmu
->dev
));
1195 /* Looks ok, so add the device to the domain */
1196 cfg
= find_smmu_master_cfg(dev
);
1200 /* Detach the dev from its current domain */
1201 if (dev
->archdata
.iommu
)
1202 arm_smmu_detach_dev(dev
, cfg
);
1204 ret
= arm_smmu_domain_add_master(smmu_domain
, cfg
);
1206 dev
->archdata
.iommu
= domain
;
1210 static int arm_smmu_map(struct iommu_domain
*domain
, unsigned long iova
,
1211 phys_addr_t paddr
, size_t size
, int prot
)
1214 unsigned long flags
;
1215 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1216 struct io_pgtable_ops
*ops
= smmu_domain
->pgtbl_ops
;
1221 spin_lock_irqsave(&smmu_domain
->pgtbl_lock
, flags
);
1222 ret
= ops
->map(ops
, iova
, paddr
, size
, prot
);
1223 spin_unlock_irqrestore(&smmu_domain
->pgtbl_lock
, flags
);
1227 static size_t arm_smmu_unmap(struct iommu_domain
*domain
, unsigned long iova
,
1231 unsigned long flags
;
1232 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1233 struct io_pgtable_ops
*ops
= smmu_domain
->pgtbl_ops
;
1238 spin_lock_irqsave(&smmu_domain
->pgtbl_lock
, flags
);
1239 ret
= ops
->unmap(ops
, iova
, size
);
1240 spin_unlock_irqrestore(&smmu_domain
->pgtbl_lock
, flags
);
1244 static phys_addr_t
arm_smmu_iova_to_phys_hard(struct iommu_domain
*domain
,
1247 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1248 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1249 struct arm_smmu_cfg
*cfg
= &smmu_domain
->cfg
;
1250 struct io_pgtable_ops
*ops
= smmu_domain
->pgtbl_ops
;
1251 struct device
*dev
= smmu
->dev
;
1252 void __iomem
*cb_base
;
1257 cb_base
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, cfg
->cbndx
);
1259 /* ATS1 registers can only be written atomically */
1260 va
= iova
& ~0xfffUL
;
1261 if (smmu
->version
== ARM_SMMU_V2
)
1262 smmu_writeq(va
, cb_base
+ ARM_SMMU_CB_ATS1PR
);
1264 writel_relaxed(va
, cb_base
+ ARM_SMMU_CB_ATS1PR
);
1266 if (readl_poll_timeout_atomic(cb_base
+ ARM_SMMU_CB_ATSR
, tmp
,
1267 !(tmp
& ATSR_ACTIVE
), 5, 50)) {
1269 "iova to phys timed out on %pad. Falling back to software table walk.\n",
1271 return ops
->iova_to_phys(ops
, iova
);
1274 phys
= readl_relaxed(cb_base
+ ARM_SMMU_CB_PAR_LO
);
1275 phys
|= ((u64
)readl_relaxed(cb_base
+ ARM_SMMU_CB_PAR_HI
)) << 32;
1277 if (phys
& CB_PAR_F
) {
1278 dev_err(dev
, "translation fault!\n");
1279 dev_err(dev
, "PAR = 0x%llx\n", phys
);
1283 return (phys
& GENMASK_ULL(39, 12)) | (iova
& 0xfff);
1286 static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain
*domain
,
1290 unsigned long flags
;
1291 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1292 struct io_pgtable_ops
*ops
= smmu_domain
->pgtbl_ops
;
1297 spin_lock_irqsave(&smmu_domain
->pgtbl_lock
, flags
);
1298 if (smmu_domain
->smmu
->features
& ARM_SMMU_FEAT_TRANS_OPS
&&
1299 smmu_domain
->stage
== ARM_SMMU_DOMAIN_S1
) {
1300 ret
= arm_smmu_iova_to_phys_hard(domain
, iova
);
1302 ret
= ops
->iova_to_phys(ops
, iova
);
1305 spin_unlock_irqrestore(&smmu_domain
->pgtbl_lock
, flags
);
1310 static bool arm_smmu_capable(enum iommu_cap cap
)
1313 case IOMMU_CAP_CACHE_COHERENCY
:
1315 * Return true here as the SMMU can always send out coherent
1319 case IOMMU_CAP_INTR_REMAP
:
1320 return true; /* MSIs are just memory writes */
1321 case IOMMU_CAP_NOEXEC
:
1328 static int __arm_smmu_get_pci_sid(struct pci_dev
*pdev
, u16 alias
, void *data
)
1330 *((u16
*)data
) = alias
;
1331 return 0; /* Continue walking */
/* Group-data destructor for PCI groups: free the kzalloc'd master cfg. */
static void __arm_smmu_release_pci_iommudata(void *data)
{
	kfree(data);
}
1339 static int arm_smmu_init_pci_device(struct pci_dev
*pdev
,
1340 struct iommu_group
*group
)
1342 struct arm_smmu_master_cfg
*cfg
;
1346 cfg
= iommu_group_get_iommudata(group
);
1348 cfg
= kzalloc(sizeof(*cfg
), GFP_KERNEL
);
1352 iommu_group_set_iommudata(group
, cfg
,
1353 __arm_smmu_release_pci_iommudata
);
1356 if (cfg
->num_streamids
>= MAX_MASTER_STREAMIDS
)
1360 * Assume Stream ID == Requester ID for now.
1361 * We need a way to describe the ID mappings in FDT.
1363 pci_for_each_dma_alias(pdev
, __arm_smmu_get_pci_sid
, &sid
);
1364 for (i
= 0; i
< cfg
->num_streamids
; ++i
)
1365 if (cfg
->streamids
[i
] == sid
)
1368 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1369 if (i
== cfg
->num_streamids
)
1370 cfg
->streamids
[cfg
->num_streamids
++] = sid
;
1375 static int arm_smmu_init_platform_device(struct device
*dev
,
1376 struct iommu_group
*group
)
1378 struct arm_smmu_device
*smmu
= find_smmu_for_device(dev
);
1379 struct arm_smmu_master
*master
;
1384 master
= find_smmu_master(smmu
, dev
->of_node
);
1388 iommu_group_set_iommudata(group
, &master
->cfg
, NULL
);
/*
 * iommu_ops::add_device callback: resolve (and thereby initialise) the
 * device's group, then drop the temporary reference.
 */
static int arm_smmu_add_device(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get_for_dev(dev);
	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);
	return 0;
}
/* iommu_ops::remove_device callback: detach the device from its group. */
static void arm_smmu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
/*
 * iommu_ops::device_group callback: pick the generic PCI or platform
 * grouping policy, then run the bus-specific SMMU initialisation on the
 * resulting group.  On init failure the group reference is dropped and
 * an ERR_PTR is returned instead.
 */
static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	if (IS_ERR(group))
		return group;

	if (dev_is_pci(dev))
		ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
	else
		ret = arm_smmu_init_platform_device(dev, group);

	if (ret) {
		iommu_group_put(group);
		group = ERR_PTR(ret);
	}

	return group;
}
1436 static int arm_smmu_domain_get_attr(struct iommu_domain
*domain
,
1437 enum iommu_attr attr
, void *data
)
1439 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1442 case DOMAIN_ATTR_NESTING
:
1443 *(int *)data
= (smmu_domain
->stage
== ARM_SMMU_DOMAIN_NESTED
);
1450 static int arm_smmu_domain_set_attr(struct iommu_domain
*domain
,
1451 enum iommu_attr attr
, void *data
)
1454 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1456 mutex_lock(&smmu_domain
->init_mutex
);
1459 case DOMAIN_ATTR_NESTING
:
1460 if (smmu_domain
->smmu
) {
1466 smmu_domain
->stage
= ARM_SMMU_DOMAIN_NESTED
;
1468 smmu_domain
->stage
= ARM_SMMU_DOMAIN_S1
;
1476 mutex_unlock(&smmu_domain
->init_mutex
);
/* IOMMU API callback table registered with each supported bus type. */
static struct iommu_ops arm_smmu_ops = {
	.capable		= arm_smmu_capable,
	.domain_alloc		= arm_smmu_domain_alloc,
	.domain_free		= arm_smmu_domain_free,
	.attach_dev		= arm_smmu_attach_dev,
	.map			= arm_smmu_map,
	.unmap			= arm_smmu_unmap,
	.map_sg			= default_iommu_map_sg,
	.iova_to_phys		= arm_smmu_iova_to_phys,
	.add_device		= arm_smmu_add_device,
	.remove_device		= arm_smmu_remove_device,
	.device_group		= arm_smmu_device_group,
	.domain_get_attr	= arm_smmu_domain_get_attr,
	.domain_set_attr	= arm_smmu_domain_set_attr,
	.pgsize_bitmap		= -1UL, /* Restricted during device attach */
};
1497 static void arm_smmu_device_reset(struct arm_smmu_device
*smmu
)
1499 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
1500 void __iomem
*cb_base
;
1504 /* clear global FSR */
1505 reg
= readl_relaxed(ARM_SMMU_GR0_NS(smmu
) + ARM_SMMU_GR0_sGFSR
);
1506 writel(reg
, ARM_SMMU_GR0_NS(smmu
) + ARM_SMMU_GR0_sGFSR
);
1508 /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
1509 reg
= disable_bypass
? S2CR_TYPE_FAULT
: S2CR_TYPE_BYPASS
;
1510 for (i
= 0; i
< smmu
->num_mapping_groups
; ++i
) {
1511 writel_relaxed(0, gr0_base
+ ARM_SMMU_GR0_SMR(i
));
1512 writel_relaxed(reg
, gr0_base
+ ARM_SMMU_GR0_S2CR(i
));
1515 /* Make sure all context banks are disabled and clear CB_FSR */
1516 for (i
= 0; i
< smmu
->num_context_banks
; ++i
) {
1517 cb_base
= ARM_SMMU_CB_BASE(smmu
) + ARM_SMMU_CB(smmu
, i
);
1518 writel_relaxed(0, cb_base
+ ARM_SMMU_CB_SCTLR
);
1519 writel_relaxed(FSR_FAULT
, cb_base
+ ARM_SMMU_CB_FSR
);
1522 /* Invalidate the TLB, just in case */
1523 writel_relaxed(0, gr0_base
+ ARM_SMMU_GR0_TLBIALLH
);
1524 writel_relaxed(0, gr0_base
+ ARM_SMMU_GR0_TLBIALLNSNH
);
1526 reg
= readl_relaxed(ARM_SMMU_GR0_NS(smmu
) + ARM_SMMU_GR0_sCR0
);
1528 /* Enable fault reporting */
1529 reg
|= (sCR0_GFRE
| sCR0_GFIE
| sCR0_GCFGFRE
| sCR0_GCFGFIE
);
1531 /* Disable TLB broadcasting. */
1532 reg
|= (sCR0_VMIDPNE
| sCR0_PTM
);
1534 /* Enable client access, handling unmatched streams as appropriate */
1535 reg
&= ~sCR0_CLIENTPD
;
1539 reg
&= ~sCR0_USFCFG
;
1541 /* Disable forced broadcasting */
1544 /* Don't upgrade barriers */
1545 reg
&= ~(sCR0_BSU_MASK
<< sCR0_BSU_SHIFT
);
1547 if (smmu
->features
& ARM_SMMU_FEAT_VMID16
)
1548 reg
|= sCR0_VMID16EN
;
1550 /* Push the button */
1551 __arm_smmu_tlb_sync(smmu
);
1552 writel(reg
, ARM_SMMU_GR0_NS(smmu
) + ARM_SMMU_GR0_sCR0
);
/*
 * Decode a 3-bit IDR size field (IAS/OAS/UBS encodings) into an address
 * width in bits.  Unknown/reserved encodings are treated as 48 bits.
 * NOTE(review): function body was not visible in the mangled source;
 * reconstructed from the standard SMMUv2 ID2 field encoding — verify
 * against the original file.
 */
static int arm_smmu_id_size_to_bits(int size)
{
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}
1574 static int arm_smmu_device_cfg_probe(struct arm_smmu_device
*smmu
)
1577 void __iomem
*gr0_base
= ARM_SMMU_GR0(smmu
);
1579 bool cttw_dt
, cttw_reg
;
1581 dev_notice(smmu
->dev
, "probing hardware configuration...\n");
1582 dev_notice(smmu
->dev
, "SMMUv%d with:\n", smmu
->version
);
1585 id
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_ID0
);
1587 /* Restrict available stages based on module parameter */
1588 if (force_stage
== 1)
1589 id
&= ~(ID0_S2TS
| ID0_NTS
);
1590 else if (force_stage
== 2)
1591 id
&= ~(ID0_S1TS
| ID0_NTS
);
1593 if (id
& ID0_S1TS
) {
1594 smmu
->features
|= ARM_SMMU_FEAT_TRANS_S1
;
1595 dev_notice(smmu
->dev
, "\tstage 1 translation\n");
1598 if (id
& ID0_S2TS
) {
1599 smmu
->features
|= ARM_SMMU_FEAT_TRANS_S2
;
1600 dev_notice(smmu
->dev
, "\tstage 2 translation\n");
1604 smmu
->features
|= ARM_SMMU_FEAT_TRANS_NESTED
;
1605 dev_notice(smmu
->dev
, "\tnested translation\n");
1608 if (!(smmu
->features
&
1609 (ARM_SMMU_FEAT_TRANS_S1
| ARM_SMMU_FEAT_TRANS_S2
))) {
1610 dev_err(smmu
->dev
, "\tno translation support!\n");
1614 if ((id
& ID0_S1TS
) && ((smmu
->version
== 1) || !(id
& ID0_ATOSNS
))) {
1615 smmu
->features
|= ARM_SMMU_FEAT_TRANS_OPS
;
1616 dev_notice(smmu
->dev
, "\taddress translation ops\n");
1620 * In order for DMA API calls to work properly, we must defer to what
1621 * the DT says about coherency, regardless of what the hardware claims.
1622 * Fortunately, this also opens up a workaround for systems where the
1623 * ID register value has ended up configured incorrectly.
1625 cttw_dt
= of_dma_is_coherent(smmu
->dev
->of_node
);
1626 cttw_reg
= !!(id
& ID0_CTTW
);
1628 smmu
->features
|= ARM_SMMU_FEAT_COHERENT_WALK
;
1629 if (cttw_dt
|| cttw_reg
)
1630 dev_notice(smmu
->dev
, "\t%scoherent table walk\n",
1631 cttw_dt
? "" : "non-");
1632 if (cttw_dt
!= cttw_reg
)
1633 dev_notice(smmu
->dev
,
1634 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
1639 smmu
->features
|= ARM_SMMU_FEAT_STREAM_MATCH
;
1640 smmu
->num_mapping_groups
= (id
>> ID0_NUMSMRG_SHIFT
) &
1642 if (smmu
->num_mapping_groups
== 0) {
1644 "stream-matching supported, but no SMRs present!\n");
1648 smr
= SMR_MASK_MASK
<< SMR_MASK_SHIFT
;
1649 smr
|= (SMR_ID_MASK
<< SMR_ID_SHIFT
);
1650 writel_relaxed(smr
, gr0_base
+ ARM_SMMU_GR0_SMR(0));
1651 smr
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_SMR(0));
1653 mask
= (smr
>> SMR_MASK_SHIFT
) & SMR_MASK_MASK
;
1654 sid
= (smr
>> SMR_ID_SHIFT
) & SMR_ID_MASK
;
1655 if ((mask
& sid
) != sid
) {
1657 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
1662 dev_notice(smmu
->dev
,
1663 "\tstream matching with %u register groups, mask 0x%x",
1664 smmu
->num_mapping_groups
, mask
);
1666 smmu
->num_mapping_groups
= (id
>> ID0_NUMSIDB_SHIFT
) &
1671 id
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_ID1
);
1672 smmu
->pgshift
= (id
& ID1_PAGESIZE
) ? 16 : 12;
1674 /* Check for size mismatch of SMMU address space from mapped region */
1675 size
= 1 << (((id
>> ID1_NUMPAGENDXB_SHIFT
) & ID1_NUMPAGENDXB_MASK
) + 1);
1676 size
*= 2 << smmu
->pgshift
;
1677 if (smmu
->size
!= size
)
1679 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1682 smmu
->num_s2_context_banks
= (id
>> ID1_NUMS2CB_SHIFT
) & ID1_NUMS2CB_MASK
;
1683 smmu
->num_context_banks
= (id
>> ID1_NUMCB_SHIFT
) & ID1_NUMCB_MASK
;
1684 if (smmu
->num_s2_context_banks
> smmu
->num_context_banks
) {
1685 dev_err(smmu
->dev
, "impossible number of S2 context banks!\n");
1688 dev_notice(smmu
->dev
, "\t%u context banks (%u stage-2 only)\n",
1689 smmu
->num_context_banks
, smmu
->num_s2_context_banks
);
1691 * Cavium CN88xx erratum #27704.
1692 * Ensure ASID and VMID allocation is unique across all SMMUs in
1695 if (smmu
->model
== CAVIUM_SMMUV2
) {
1696 smmu
->cavium_id_base
=
1697 atomic_add_return(smmu
->num_context_banks
,
1698 &cavium_smmu_context_count
);
1699 smmu
->cavium_id_base
-= smmu
->num_context_banks
;
1703 id
= readl_relaxed(gr0_base
+ ARM_SMMU_GR0_ID2
);
1704 size
= arm_smmu_id_size_to_bits((id
>> ID2_IAS_SHIFT
) & ID2_IAS_MASK
);
1705 smmu
->ipa_size
= size
;
1707 /* The output mask is also applied for bypass */
1708 size
= arm_smmu_id_size_to_bits((id
>> ID2_OAS_SHIFT
) & ID2_OAS_MASK
);
1709 smmu
->pa_size
= size
;
1711 if (id
& ID2_VMID16
)
1712 smmu
->features
|= ARM_SMMU_FEAT_VMID16
;
1715 * What the page table walker can address actually depends on which
1716 * descriptor format is in use, but since a) we don't know that yet,
1717 * and b) it can vary per context bank, this will have to do...
1719 if (dma_set_mask_and_coherent(smmu
->dev
, DMA_BIT_MASK(size
)))
1721 "failed to set DMA mask for table walker\n");
1723 if (smmu
->version
== ARM_SMMU_V1
) {
1724 smmu
->va_size
= smmu
->ipa_size
;
1725 size
= SZ_4K
| SZ_2M
| SZ_1G
;
1727 size
= (id
>> ID2_UBS_SHIFT
) & ID2_UBS_MASK
;
1728 smmu
->va_size
= arm_smmu_id_size_to_bits(size
);
1729 #ifndef CONFIG_64BIT
1730 smmu
->va_size
= min(32UL, smmu
->va_size
);
1733 if (id
& ID2_PTFS_4K
)
1734 size
|= SZ_4K
| SZ_2M
| SZ_1G
;
1735 if (id
& ID2_PTFS_16K
)
1736 size
|= SZ_16K
| SZ_32M
;
1737 if (id
& ID2_PTFS_64K
)
1738 size
|= SZ_64K
| SZ_512M
;
1741 arm_smmu_ops
.pgsize_bitmap
&= size
;
1742 dev_notice(smmu
->dev
, "\tSupported page sizes: 0x%08lx\n", size
);
1744 if (smmu
->features
& ARM_SMMU_FEAT_TRANS_S1
)
1745 dev_notice(smmu
->dev
, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
1746 smmu
->va_size
, smmu
->ipa_size
);
1748 if (smmu
->features
& ARM_SMMU_FEAT_TRANS_S2
)
1749 dev_notice(smmu
->dev
, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
1750 smmu
->ipa_size
, smmu
->pa_size
);
/* Per-compatible match data: architecture version plus implementation. */
struct arm_smmu_match_data {
	enum arm_smmu_arch_version version;
	enum arm_smmu_implementation model;
};

/* Declare a named match-data instance for use in the OF match table. */
#define ARM_SMMU_MATCH_DATA(name, ver, imp)	\
static struct arm_smmu_match_data name = { .version = ver, .model = imp }

ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
/* Device-tree compatible strings and their associated match data. */
static const struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
	{ .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
	{ .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-401", .data = &smmu_generic_v1 },
	{ .compatible = "arm,mmu-500", .data = &smmu_generic_v2 },
	{ .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
1778 static int arm_smmu_device_dt_probe(struct platform_device
*pdev
)
1780 const struct of_device_id
*of_id
;
1781 const struct arm_smmu_match_data
*data
;
1782 struct resource
*res
;
1783 struct arm_smmu_device
*smmu
;
1784 struct device
*dev
= &pdev
->dev
;
1785 struct rb_node
*node
;
1786 struct of_phandle_args masterspec
;
1787 int num_irqs
, i
, err
;
1789 smmu
= devm_kzalloc(dev
, sizeof(*smmu
), GFP_KERNEL
);
1791 dev_err(dev
, "failed to allocate arm_smmu_device\n");
1796 of_id
= of_match_node(arm_smmu_of_match
, dev
->of_node
);
1798 smmu
->version
= data
->version
;
1799 smmu
->model
= data
->model
;
1801 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1802 smmu
->base
= devm_ioremap_resource(dev
, res
);
1803 if (IS_ERR(smmu
->base
))
1804 return PTR_ERR(smmu
->base
);
1805 smmu
->size
= resource_size(res
);
1807 if (of_property_read_u32(dev
->of_node
, "#global-interrupts",
1808 &smmu
->num_global_irqs
)) {
1809 dev_err(dev
, "missing #global-interrupts property\n");
1814 while ((res
= platform_get_resource(pdev
, IORESOURCE_IRQ
, num_irqs
))) {
1816 if (num_irqs
> smmu
->num_global_irqs
)
1817 smmu
->num_context_irqs
++;
1820 if (!smmu
->num_context_irqs
) {
1821 dev_err(dev
, "found %d interrupts but expected at least %d\n",
1822 num_irqs
, smmu
->num_global_irqs
+ 1);
1826 smmu
->irqs
= devm_kzalloc(dev
, sizeof(*smmu
->irqs
) * num_irqs
,
1829 dev_err(dev
, "failed to allocate %d irqs\n", num_irqs
);
1833 for (i
= 0; i
< num_irqs
; ++i
) {
1834 int irq
= platform_get_irq(pdev
, i
);
1837 dev_err(dev
, "failed to get irq index %d\n", i
);
1840 smmu
->irqs
[i
] = irq
;
1843 err
= arm_smmu_device_cfg_probe(smmu
);
1848 smmu
->masters
= RB_ROOT
;
1849 while (!of_parse_phandle_with_args(dev
->of_node
, "mmu-masters",
1850 "#stream-id-cells", i
,
1852 err
= register_smmu_master(smmu
, dev
, &masterspec
);
1854 dev_err(dev
, "failed to add master %s\n",
1855 masterspec
.np
->name
);
1856 goto out_put_masters
;
1861 dev_notice(dev
, "registered %d master devices\n", i
);
1863 parse_driver_options(smmu
);
1865 if (smmu
->version
> ARM_SMMU_V1
&&
1866 smmu
->num_context_banks
!= smmu
->num_context_irqs
) {
1868 "found only %d context interrupt(s) but %d required\n",
1869 smmu
->num_context_irqs
, smmu
->num_context_banks
);
1871 goto out_put_masters
;
1874 for (i
= 0; i
< smmu
->num_global_irqs
; ++i
) {
1875 err
= request_irq(smmu
->irqs
[i
],
1876 arm_smmu_global_fault
,
1878 "arm-smmu global fault",
1881 dev_err(dev
, "failed to request global IRQ %d (%u)\n",
1887 INIT_LIST_HEAD(&smmu
->list
);
1888 spin_lock(&arm_smmu_devices_lock
);
1889 list_add(&smmu
->list
, &arm_smmu_devices
);
1890 spin_unlock(&arm_smmu_devices_lock
);
1892 arm_smmu_device_reset(smmu
);
1897 free_irq(smmu
->irqs
[i
], smmu
);
1900 for (node
= rb_first(&smmu
->masters
); node
; node
= rb_next(node
)) {
1901 struct arm_smmu_master
*master
1902 = container_of(node
, struct arm_smmu_master
, node
);
1903 of_node_put(master
->of_node
);
1909 static int arm_smmu_device_remove(struct platform_device
*pdev
)
1912 struct device
*dev
= &pdev
->dev
;
1913 struct arm_smmu_device
*curr
, *smmu
= NULL
;
1914 struct rb_node
*node
;
1916 spin_lock(&arm_smmu_devices_lock
);
1917 list_for_each_entry(curr
, &arm_smmu_devices
, list
) {
1918 if (curr
->dev
== dev
) {
1920 list_del(&smmu
->list
);
1924 spin_unlock(&arm_smmu_devices_lock
);
1929 for (node
= rb_first(&smmu
->masters
); node
; node
= rb_next(node
)) {
1930 struct arm_smmu_master
*master
1931 = container_of(node
, struct arm_smmu_master
, node
);
1932 of_node_put(master
->of_node
);
1935 if (!bitmap_empty(smmu
->context_map
, ARM_SMMU_MAX_CBS
))
1936 dev_err(dev
, "removing device with active domains!\n");
1938 for (i
= 0; i
< smmu
->num_global_irqs
; ++i
)
1939 free_irq(smmu
->irqs
[i
], smmu
);
1941 /* Turn the thing off */
1942 writel(sCR0_CLIENTPD
, ARM_SMMU_GR0_NS(smmu
) + ARM_SMMU_GR0_sCR0
);
/* Platform-bus driver glue, matched against arm_smmu_of_match. */
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};
1955 static int __init
arm_smmu_init(void)
1957 struct device_node
*np
;
1961 * Play nice with systems that don't have an ARM SMMU by checking that
1962 * an ARM SMMU exists in the system before proceeding with the driver
1963 * and IOMMU bus operation registration.
1965 np
= of_find_matching_node(NULL
, arm_smmu_of_match
);
1971 ret
= platform_driver_register(&arm_smmu_driver
);
1975 /* Oh, for a proper bus abstraction */
1976 if (!iommu_present(&platform_bus_type
))
1977 bus_set_iommu(&platform_bus_type
, &arm_smmu_ops
);
1979 #ifdef CONFIG_ARM_AMBA
1980 if (!iommu_present(&amba_bustype
))
1981 bus_set_iommu(&amba_bustype
, &arm_smmu_ops
);
1985 if (!iommu_present(&pci_bus_type
))
1986 bus_set_iommu(&pci_bus_type
, &arm_smmu_ops
);
1992 static void __exit
arm_smmu_exit(void)
1994 return platform_driver_unregister(&arm_smmu_driver
);
/* Init early (subsys level) so the SMMU is up before client drivers probe. */
subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");