/*
 * IOMMU API for ARM architected SMMU implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2013 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver currently supports:
 *	- SMMUv1 and v2 implementations
 *	- Stream-matching and stream-indexing
 *	- v7/v8 long-descriptor format
 *	- Non-secure access to the SMMU
 *	- 4k and 64k pages, with contiguous pte hints.
 *	- Up to 39-bit addressing
 *	- Context fault reporting
 */
#define pr_fmt(fmt) "arm-smmu: " fmt
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/amba/bus.h>

#include <asm/pgalloc.h>

/* Maximum number of stream IDs assigned to a single device */
#define MAX_MASTER_STREAMIDS		8

/* Maximum number of context banks per SMMU */
#define ARM_SMMU_MAX_CBS		128

/* Maximum number of mapping groups per SMMU */
#define ARM_SMMU_MAX_SMRS		128

/* Number of VMIDs per SMMU */
#define ARM_SMMU_NUM_VMIDS		256

/* SMMU global address space */
#define ARM_SMMU_GR0(smmu)		((smmu)->base)
#define ARM_SMMU_GR1(smmu)		((smmu)->base + (smmu)->pagesize)

/* Page table bits */
#define ARM_SMMU_PTE_PAGE		(((pteval_t)3) << 0)
#define ARM_SMMU_PTE_CONT		(((pteval_t)1) << 52)
#define ARM_SMMU_PTE_AF			(((pteval_t)1) << 10)
#define ARM_SMMU_PTE_SH_NS		(((pteval_t)0) << 8)
#define ARM_SMMU_PTE_SH_OS		(((pteval_t)2) << 8)
#define ARM_SMMU_PTE_SH_IS		(((pteval_t)3) << 8)

#if PAGE_SIZE == SZ_4K
#define ARM_SMMU_PTE_CONT_ENTRIES	16
#elif PAGE_SIZE == SZ_64K
#define ARM_SMMU_PTE_CONT_ENTRIES	32
#else
#define ARM_SMMU_PTE_CONT_ENTRIES	1
#endif

#define ARM_SMMU_PTE_CONT_SIZE		(PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
#define ARM_SMMU_PTE_CONT_MASK		(~(ARM_SMMU_PTE_CONT_SIZE - 1))
#define ARM_SMMU_PTE_HWTABLE_SIZE	(PTRS_PER_PTE * sizeof(pte_t))
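
/*
 * Worked example, illustrative only (not used by the driver): with 4k pages
 * the contiguous hint spans 16 entries, so ARM_SMMU_PTE_CONT_SIZE is 64k and
 * the hint may only be used for ranges starting on a 64k boundary. The helper
 * below is a sketch of that alignment test.
 */
static inline bool __maybe_unused arm_smmu_example_cont_aligned(unsigned long addr)
{
	return !(addr & ~ARM_SMMU_PTE_CONT_MASK);
}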

/* Stage 1 PTE bits */
#define ARM_SMMU_PTE_AP_UNPRIV		(((pteval_t)1) << 6)
#define ARM_SMMU_PTE_AP_RDONLY		(((pteval_t)2) << 6)
#define ARM_SMMU_PTE_ATTRINDX_SHIFT	2

/* Stage 2 PTE bits */
#define ARM_SMMU_PTE_HAP_FAULT		(((pteval_t)0) << 6)
#define ARM_SMMU_PTE_HAP_READ		(((pteval_t)1) << 6)
#define ARM_SMMU_PTE_HAP_WRITE		(((pteval_t)2) << 6)
#define ARM_SMMU_PTE_MEMATTR_OIWB	(((pteval_t)0xf) << 2)
#define ARM_SMMU_PTE_MEMATTR_NC		(((pteval_t)0x5) << 2)
#define ARM_SMMU_PTE_MEMATTR_DEV	(((pteval_t)0x1) << 2)

/* Configuration registers */
#define ARM_SMMU_GR0_sCR0		0x0
#define sCR0_CLIENTPD			(1 << 0)
#define sCR0_GFRE			(1 << 1)
#define sCR0_GFIE			(1 << 2)
#define sCR0_GCFGFRE			(1 << 4)
#define sCR0_GCFGFIE			(1 << 5)
#define sCR0_USFCFG			(1 << 10)
#define sCR0_VMIDPNE			(1 << 11)
#define sCR0_PTM			(1 << 12)
#define sCR0_FB				(1 << 13)
#define sCR0_BSU_SHIFT			14
#define sCR0_BSU_MASK			0x3

/* Identification registers */
#define ARM_SMMU_GR0_ID0		0x20
#define ARM_SMMU_GR0_ID1		0x24
#define ARM_SMMU_GR0_ID2		0x28
#define ARM_SMMU_GR0_ID3		0x2c
#define ARM_SMMU_GR0_ID4		0x30
#define ARM_SMMU_GR0_ID5		0x34
#define ARM_SMMU_GR0_ID6		0x38
#define ARM_SMMU_GR0_ID7		0x3c
#define ARM_SMMU_GR0_sGFSR		0x48
#define ARM_SMMU_GR0_sGFSYNR0		0x50
#define ARM_SMMU_GR0_sGFSYNR1		0x54
#define ARM_SMMU_GR0_sGFSYNR2		0x58
#define ARM_SMMU_GR0_PIDR0		0xfe0
#define ARM_SMMU_GR0_PIDR1		0xfe4
#define ARM_SMMU_GR0_PIDR2		0xfe8

#define ID0_S1TS			(1 << 30)
#define ID0_S2TS			(1 << 29)
#define ID0_NTS				(1 << 28)
#define ID0_SMS				(1 << 27)
#define ID0_PTFS_SHIFT			24
#define ID0_PTFS_MASK			0x2
#define ID0_PTFS_V8_ONLY		0x2
#define ID0_CTTW			(1 << 14)
#define ID0_NUMIRPT_SHIFT		16
#define ID0_NUMIRPT_MASK		0xff
#define ID0_NUMSMRG_SHIFT		0
#define ID0_NUMSMRG_MASK		0xff

#define ID1_PAGESIZE			(1 << 31)
#define ID1_NUMPAGENDXB_SHIFT		28
#define ID1_NUMPAGENDXB_MASK		7
#define ID1_NUMS2CB_SHIFT		16
#define ID1_NUMS2CB_MASK		0xff
#define ID1_NUMCB_SHIFT			0
#define ID1_NUMCB_MASK			0xff

#define ID2_OAS_SHIFT			4
#define ID2_OAS_MASK			0xf
#define ID2_IAS_SHIFT			0
#define ID2_IAS_MASK			0xf
#define ID2_UBS_SHIFT			8
#define ID2_UBS_MASK			0xf
#define ID2_PTFS_4K			(1 << 12)
#define ID2_PTFS_16K			(1 << 13)
#define ID2_PTFS_64K			(1 << 14)

#define PIDR2_ARCH_SHIFT		4
#define PIDR2_ARCH_MASK			0xf

/* Global TLB invalidation */
#define ARM_SMMU_GR0_STLBIALL		0x60
#define ARM_SMMU_GR0_TLBIVMID		0x64
#define ARM_SMMU_GR0_TLBIALLNSNH	0x68
#define ARM_SMMU_GR0_TLBIALLH		0x6c
#define ARM_SMMU_GR0_sTLBGSYNC		0x70
#define ARM_SMMU_GR0_sTLBGSTATUS	0x74
#define sTLBGSTATUS_GSACTIVE		(1 << 0)
#define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
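
/*
 * Illustrative only: a global invalidation is a write to one of the TLBI
 * registers above, followed by a sync. With a hypothetical gr0_base:
 *
 *	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
 *	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
 *
 * and then poll ARM_SMMU_GR0_sTLBGSTATUS until GSACTIVE clears, as
 * arm_smmu_tlb_sync() below does.
 */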

/* Stream mapping registers */
#define ARM_SMMU_GR0_SMR(n)		(0x800 + ((n) << 2))
#define SMR_VALID			(1 << 31)
#define SMR_MASK_SHIFT			16
#define SMR_MASK_MASK			0x7fff
#define SMR_ID_SHIFT			0
#define SMR_ID_MASK			0x7fff
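
/*
 * Illustrative only (not part of the driver): composing a valid SMR that
 * matches a single stream ID exactly; a zero mask means no ID bits are
 * ignored during matching.
 */
static inline u32 __maybe_unused arm_smmu_example_smr(u16 id, u16 mask)
{
	return SMR_VALID | (id << SMR_ID_SHIFT) | (mask << SMR_MASK_SHIFT);
}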

#define ARM_SMMU_GR0_S2CR(n)		(0xc00 + ((n) << 2))
#define S2CR_CBNDX_SHIFT		0
#define S2CR_CBNDX_MASK			0xff
#define S2CR_TYPE_SHIFT			16
#define S2CR_TYPE_MASK			0x3
#define S2CR_TYPE_TRANS			(0 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_BYPASS		(1 << S2CR_TYPE_SHIFT)
#define S2CR_TYPE_FAULT			(2 << S2CR_TYPE_SHIFT)
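
/*
 * Illustrative only (not part of the driver): an S2CR that sends matching
 * transactions through a translation context bank; the bank index here is
 * hypothetical. Note the TYPE values above already carry their shift.
 */
static inline u32 __maybe_unused arm_smmu_example_s2cr(u8 cbndx)
{
	return S2CR_TYPE_TRANS | (cbndx << S2CR_CBNDX_SHIFT);
}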

/* Context bank attribute registers */
#define ARM_SMMU_GR1_CBAR(n)		(0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT			0
#define CBAR_VMID_MASK			0xff
#define CBAR_S1_MEMATTR_SHIFT		12
#define CBAR_S1_MEMATTR_MASK		0xf
#define CBAR_S1_MEMATTR_WB		0xf
#define CBAR_TYPE_SHIFT			16
#define CBAR_TYPE_MASK			0x3
#define CBAR_TYPE_S2_TRANS		(0 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_BYPASS	(1 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_FAULT	(2 << CBAR_TYPE_SHIFT)
#define CBAR_TYPE_S1_TRANS_S2_TRANS	(3 << CBAR_TYPE_SHIFT)
#define CBAR_IRPTNDX_SHIFT		24
#define CBAR_IRPTNDX_MASK		0xff

#define ARM_SMMU_GR1_CBA2R(n)		(0x800 + ((n) << 2))
#define CBA2R_RW64_32BIT		(0 << 0)
#define CBA2R_RW64_64BIT		(1 << 0)

/* Translation context bank */
#define ARM_SMMU_CB_BASE(smmu)		((smmu)->base + ((smmu)->size >> 1))
#define ARM_SMMU_CB(smmu, n)		((n) * (smmu)->pagesize)

#define ARM_SMMU_CB_SCTLR		0x0
#define ARM_SMMU_CB_RESUME		0x8
#define ARM_SMMU_CB_TTBCR2		0x10
#define ARM_SMMU_CB_TTBR0_LO		0x20
#define ARM_SMMU_CB_TTBR0_HI		0x24
#define ARM_SMMU_CB_TTBCR		0x30
#define ARM_SMMU_CB_S1_MAIR0		0x38
#define ARM_SMMU_CB_FSR			0x58
#define ARM_SMMU_CB_FAR_LO		0x60
#define ARM_SMMU_CB_FAR_HI		0x64
#define ARM_SMMU_CB_FSYNR0		0x68

#define SCTLR_S1_ASIDPNE		(1 << 12)
#define SCTLR_CFCFG			(1 << 7)
#define SCTLR_CFIE			(1 << 6)
#define SCTLR_CFRE			(1 << 5)
#define SCTLR_E				(1 << 4)
#define SCTLR_AFE			(1 << 2)
#define SCTLR_TRE			(1 << 1)
#define SCTLR_M				(1 << 0)
#define SCTLR_EAE_SBOP			(SCTLR_AFE | SCTLR_TRE)

#define RESUME_RETRY			(0 << 0)
#define RESUME_TERMINATE		(1 << 0)

#define TTBCR_EAE			(1 << 31)

#define TTBCR_PASIZE_SHIFT		16
#define TTBCR_PASIZE_MASK		0x7

#define TTBCR_TG0_4K			(0 << 14)
#define TTBCR_TG0_64K			(1 << 14)

#define TTBCR_SH0_SHIFT			12
#define TTBCR_SH0_MASK			0x3
#define TTBCR_SH_NS			0
#define TTBCR_SH_OS			2
#define TTBCR_SH_IS			3

#define TTBCR_ORGN0_SHIFT		10
#define TTBCR_IRGN0_SHIFT		8
#define TTBCR_RGN_MASK			0x3
#define TTBCR_RGN_NC			0
#define TTBCR_RGN_WBWA			1
#define TTBCR_RGN_WT			2
#define TTBCR_RGN_WB			3

#define TTBCR_SL0_SHIFT			6
#define TTBCR_SL0_MASK			0x3
#define TTBCR_SL0_LVL_2			0
#define TTBCR_SL0_LVL_1			1

#define TTBCR_T1SZ_SHIFT		16
#define TTBCR_T0SZ_SHIFT		0
#define TTBCR_SZ_MASK			0xf

#define TTBCR2_SEP_SHIFT		15
#define TTBCR2_SEP_MASK			0x7

#define TTBCR2_PASIZE_SHIFT		0
#define TTBCR2_PASIZE_MASK		0x7

/* Common definitions for PASize and SEP fields */
#define TTBCR2_ADDR_32			0
#define TTBCR2_ADDR_36			1
#define TTBCR2_ADDR_40			2
#define TTBCR2_ADDR_42			3
#define TTBCR2_ADDR_44			4
#define TTBCR2_ADDR_48			5
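
/*
 * Worked example, illustrative only: the PASize/SEP encodings above map an
 * address-space size to a 3-bit field, so a 40-bit output size is programmed
 * as:
 *
 *	reg |= TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT;
 */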

#define MAIR_ATTR_SHIFT(n)		((n) << 3)
#define MAIR_ATTR_MASK			0xff
#define MAIR_ATTR_DEVICE		0x04
#define MAIR_ATTR_NC			0x44
#define MAIR_ATTR_WBRWA			0xff
#define MAIR_ATTR_IDX_NC		0
#define MAIR_ATTR_IDX_CACHE		1
#define MAIR_ATTR_IDX_DEV		2
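
/*
 * Illustrative only (not used directly): the three attribute indexes above
 * pack into MAIR0 one byte per index, exactly as arm_smmu_init_context_bank()
 * programs it for stage-1 contexts.
 */
static inline u32 __maybe_unused arm_smmu_example_mair0(void)
{
	return (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
	       (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
	       (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
}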

#define FSR_MULTI			(1 << 31)
#define FSR_SS				(1 << 30)
#define FSR_UUT				(1 << 8)
#define FSR_ASF				(1 << 7)
#define FSR_TLBLKF			(1 << 6)
#define FSR_TLBMCF			(1 << 5)
#define FSR_EF				(1 << 4)
#define FSR_PF				(1 << 3)
#define FSR_AFF				(1 << 2)
#define FSR_TF				(1 << 1)

#define FSR_IGN				(FSR_AFF | FSR_ASF | FSR_TLBMCF |	\
					 FSR_TLBLKF)
#define FSR_FAULT			(FSR_MULTI | FSR_SS | FSR_UUT |		\
					 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)

#define FSYNR0_WNR			(1 << 4)
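
/*
 * Illustrative only: FSYNR0.WNR distinguishes a faulting write from a read,
 * which is all the context fault handler below needs from that register.
 */
static inline bool __maybe_unused arm_smmu_example_fault_is_write(u32 fsynr)
{
	return !!(fsynr & FSYNR0_WNR);
}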

struct arm_smmu_smr {
	u8				idx;
	u16				mask;
	u16				id;
};

struct arm_smmu_master {
	struct device_node		*of_node;

	/*
	 * The following is specific to the master's position in the
	 * SMMU chain.
	 */
	struct rb_node			node;
	int				num_streamids;
	u16				streamids[MAX_MASTER_STREAMIDS];

	/*
	 * We only need to allocate these on the root SMMU, as we
	 * configure unmatched streams to bypass translation.
	 */
	struct arm_smmu_smr		*smrs;
};

struct arm_smmu_device {
	struct device			*dev;
	struct device_node		*parent_of_node;

	void __iomem			*base;
	unsigned long			size;
	unsigned long			pagesize;

#define ARM_SMMU_FEAT_COHERENT_WALK	(1 << 0)
#define ARM_SMMU_FEAT_STREAM_MATCH	(1 << 1)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 2)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 3)
#define ARM_SMMU_FEAT_TRANS_NESTED	(1 << 4)
	u32				features;
	int				version;

	u32				num_context_banks;
	u32				num_s2_context_banks;
	DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
	atomic_t			irptndx;

	u32				num_mapping_groups;
	DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);

	unsigned long			input_size;
	unsigned long			s1_output_size;
	unsigned long			s2_output_size;

	u32				num_global_irqs;
	u32				num_context_irqs;
	unsigned int			*irqs;

	DECLARE_BITMAP(vmid_map, ARM_SMMU_NUM_VMIDS);

	struct list_head		list;
	struct rb_root			masters;
};

struct arm_smmu_cfg {
	struct arm_smmu_device		*smmu;
	u8				cbndx;
	int				irptndx;	/* -1 flags "no IRQ claimed" */
	u32				cbar;
	u32				vmid;
	pgd_t				*pgd;
};

struct arm_smmu_domain {
	/*
	 * A domain can span across multiple, chained SMMUs and requires
	 * all devices within the domain to follow the same translation
	 * path.
	 */
	struct arm_smmu_device		*leaf_smmu;
	struct arm_smmu_cfg		root_cfg;
	phys_addr_t			output_mask;

	spinlock_t			lock;
};
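
/*
 * A minimal sketch (not used by the driver) of how a context bank's register
 * page is located: banks occupy the upper half of the SMMU address space,
 * one SMMU page per bank. The bank index n is hypothetical.
 */
static inline void __iomem * __maybe_unused
arm_smmu_example_cb_addr(struct arm_smmu_device *smmu, int n)
{
	return ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, n);
}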

static DEFINE_SPINLOCK(arm_smmu_devices_lock);
static LIST_HEAD(arm_smmu_devices);

static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
						struct device_node *dev_node)
{
	struct rb_node *node = smmu->masters.rb_node;

	while (node) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);

		if (dev_node < master->of_node)
			node = node->rb_left;
		else if (dev_node > master->of_node)
			node = node->rb_right;
		else
			return master;
	}

	return NULL;
}

static int insert_smmu_master(struct arm_smmu_device *smmu,
			      struct arm_smmu_master *master)
{
	struct rb_node **new, *parent;

	new = &smmu->masters.rb_node;
	parent = NULL;
	while (*new) {
		struct arm_smmu_master *this;
		this = container_of(*new, struct arm_smmu_master, node);

		parent = *new;
		if (master->of_node < this->of_node)
			new = &((*new)->rb_left);
		else if (master->of_node > this->of_node)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	rb_link_node(&master->node, parent, new);
	rb_insert_color(&master->node, &smmu->masters);
	return 0;
}

static int register_smmu_master(struct arm_smmu_device *smmu,
				struct device *dev,
				struct of_phandle_args *masterspec)
{
	int i;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu, masterspec->np);
	if (master) {
		dev_err(dev,
			"rejecting multiple registrations for master device %s\n",
			masterspec->np->name);
		return -EBUSY;
	}

	if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
		dev_err(dev,
			"reached maximum number (%d) of stream IDs for master device %s\n",
			MAX_MASTER_STREAMIDS, masterspec->np->name);
		return -ENOSPC;
	}

	master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
	if (!master)
		return -ENOMEM;

	master->of_node		= masterspec->np;
	master->num_streamids	= masterspec->args_count;

	for (i = 0; i < master->num_streamids; ++i)
		master->streamids[i] = masterspec->args[i];

	return insert_smmu_master(smmu, master);
}

static struct arm_smmu_device *find_parent_smmu(struct arm_smmu_device *smmu)
{
	struct arm_smmu_device *parent;

	if (!smmu->parent_of_node)
		return NULL;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(parent, &arm_smmu_devices, list)
		if (parent->dev->of_node == smmu->parent_of_node)
			goto out_unlock;

	parent = NULL;
	dev_warn(smmu->dev,
		 "Failed to find SMMU parent despite parent in DT\n");
out_unlock:
	spin_unlock(&arm_smmu_devices_lock);
	return parent;
}

static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{
	int idx;

	do {
		idx = find_next_zero_bit(map, end, start);
		if (idx == end)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}
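
/*
 * Illustrative usage of the allocator above, assuming a hypothetical smmu
 * pointer: claim a free VMID, then release it. The domain init/destroy
 * paths below perform the same pairing for VMIDs and context banks.
 */
static void __maybe_unused arm_smmu_example_vmid_cycle(struct arm_smmu_device *smmu)
{
	int vmid = __arm_smmu_alloc_bitmap(smmu->vmid_map, 0,
					   ARM_SMMU_NUM_VMIDS);

	if (!IS_ERR_VALUE(vmid))
		__arm_smmu_free_bitmap(smmu->vmid_map, vmid);
}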

/* Wait for any pending TLB invalidations to complete */
static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	int count = 0;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
	while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
	       & sTLBGSTATUS_GSACTIVE) {
		cpu_relax();
		if (++count == TLB_LOOP_TIMEOUT) {
			dev_err_ratelimited(smmu->dev,
			"TLB sync timed out -- SMMU may be deadlocked\n");
			return;
		}
		udelay(1);
	}
}

static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{
	int flags, ret;
	u32 fsr, far, fsynr, resume;
	unsigned long iova;
	struct iommu_domain *domain = dev;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	void __iomem *cb_base;

	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);
	fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);

	if (!(fsr & FSR_FAULT))
		return IRQ_NONE;

	if (fsr & FSR_IGN)
		dev_err_ratelimited(smmu->dev,
				    "Unexpected context fault (fsr 0x%x)\n",
				    fsr);

	fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
	flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_LO);
	iova = far;
#ifdef CONFIG_64BIT
	far = readl_relaxed(cb_base + ARM_SMMU_CB_FAR_HI);
	iova |= ((unsigned long)far << 32);
#endif

	if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
		ret = IRQ_HANDLED;
		resume = RESUME_RETRY;
	} else {
		ret = IRQ_NONE;
		resume = RESUME_TERMINATE;
	}

	/* Clear the faulting FSR */
	writel(fsr, cb_base + ARM_SMMU_CB_FSR);

	/* Retry or terminate any stalled transactions */
	if (fsr & FSR_SS)
		writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);

	return ret;
}

static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
{
	u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
	struct arm_smmu_device *smmu = dev;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
	if (!gfsr)
		return IRQ_NONE;

	gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
	gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
	gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);

	dev_err_ratelimited(smmu->dev,
		"Unexpected global fault, this could be serious\n");
	dev_err_ratelimited(smmu->dev,
		"\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
		gfsr, gfsynr0, gfsynr1, gfsynr2);

	writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
	return IRQ_HANDLED;
}

static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
{
	u32 reg;
	bool stage1;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	void __iomem *cb_base, *gr0_base, *gr1_base;

	gr0_base = ARM_SMMU_GR0(smmu);
	gr1_base = ARM_SMMU_GR1(smmu);
	stage1 = root_cfg->cbar != CBAR_TYPE_S2_TRANS;
	cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, root_cfg->cbndx);

	/* CBAR */
	reg = root_cfg->cbar |
	      (root_cfg->vmid << CBAR_VMID_SHIFT);
	if (smmu->version == 1)
		reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;

	/* Use the weakest memory type, so it is overridden by the pte */
	if (stage1)
		reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
	writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));

	if (smmu->version > 1) {
		/* CBA2R */
#ifdef CONFIG_64BIT
		reg = CBA2R_RW64_64BIT;
#else
		reg = CBA2R_RW64_32BIT;
#endif
		writel_relaxed(reg,
			       gr1_base + ARM_SMMU_GR1_CBA2R(root_cfg->cbndx));

		/* TTBCR2 (39-bit sizes must use the 40-bit encoding) */
		switch (smmu->input_size) {
		case 32:
			reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
			break;
		case 36:
			reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
			break;
		case 39:
			reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
			break;
		case 42:
			reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
			break;
		case 44:
			reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
			break;
		case 48:
			reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
			break;
		}

		switch (smmu->s1_output_size) {
		case 32:
			reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT);
			break;
		case 36:
			reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT);
			break;
		case 39:
			reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT);
			break;
		case 42:
			reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT);
			break;
		case 44:
			reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT);
			break;
		case 48:
			reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT);
			break;
		}

		if (stage1)
			writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
	}

	/* TTBR0 */
	reg = __pa(root_cfg->pgd);
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
	reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI);

	/*
	 * TTBCR
	 * We use long descriptors, with inner-shareable WBWA tables in TTBR0.
	 */
	if (smmu->version > 1) {
		if (PAGE_SIZE == SZ_4K)
			reg = TTBCR_TG0_4K;
		else
			reg = TTBCR_TG0_64K;

		if (!stage1) {
			switch (smmu->s2_output_size) {
			case 32:
				reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT);
				break;
			case 36:
				reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT);
				break;
			case 40:
				reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT);
				break;
			case 42:
				reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT);
				break;
			case 44:
				reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT);
				break;
			case 48:
				reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT);
				break;
			}
		} else {
			reg |= (64 - smmu->s1_output_size) << TTBCR_T0SZ_SHIFT;
		}
	} else {
		reg = 0;
	}

	reg |= TTBCR_EAE |
	      (TTBCR_SH_IS << TTBCR_SH0_SHIFT) |
	      (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) |
	      (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT) |
	      (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT);
	writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);

	/* MAIR0 (stage-1 only) */
	if (stage1) {
		reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) |
		      (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) |
		      (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV));
		writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
	}

	/* Nuke the TLB */
	writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync(smmu);

	/* SCTLR */
	reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
	if (stage1)
		reg |= SCTLR_S1_ASIDPNE;
#ifdef __BIG_ENDIAN
	reg |= SCTLR_E;
#endif
	writel(reg, cb_base + ARM_SMMU_CB_SCTLR);
}

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
					struct device *dev)
{
	int irq, ret, start;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu, *parent;

	/*
	 * Walk the SMMU chain to find the root device for this chain.
	 * We assume that no masters have translations which terminate
	 * early, and therefore check that the root SMMU does indeed have
	 * a StreamID for the master in question.
	 */
	parent = dev->archdata.iommu;
	smmu_domain->output_mask = -1;
	do {
		smmu = parent;
		smmu_domain->output_mask &= (1ULL << smmu->s2_output_size) - 1;
	} while ((parent = find_parent_smmu(smmu)));

	if (!find_smmu_master(smmu, dev->of_node)) {
		dev_err(dev, "unable to find root SMMU for device\n");
		return -ENODEV;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->vmid_map, 0, ARM_SMMU_NUM_VMIDS);
	if (IS_ERR_VALUE(ret))
		return ret;

	root_cfg->vmid = ret;
	if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
		/*
		 * We will likely want to change this if/when KVM gets
		 * involved.
		 */
		root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
	} else if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) {
		root_cfg->cbar = CBAR_TYPE_S2_TRANS;
		start = 0;
	} else {
		root_cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
		start = smmu->num_s2_context_banks;
	}

	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
				      smmu->num_context_banks);
	if (IS_ERR_VALUE(ret))
		goto out_free_vmid;

	root_cfg->cbndx = ret;

	if (smmu->version == 1) {
		root_cfg->irptndx = atomic_inc_return(&smmu->irptndx);
		root_cfg->irptndx %= smmu->num_context_irqs;
	} else {
		root_cfg->irptndx = root_cfg->cbndx;
	}

	irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
	ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
			  "arm-smmu-context-fault", domain);
	if (IS_ERR_VALUE(ret)) {
		dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
			root_cfg->irptndx, irq);
		root_cfg->irptndx = -1;
		goto out_free_context;
	}

	root_cfg->smmu = smmu;
	arm_smmu_init_context_bank(smmu_domain);
	return ret;

out_free_context:
	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
out_free_vmid:
	__arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
	return ret;
}

static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	int irq;

	if (!smmu)
		return;

	if (root_cfg->irptndx != -1) {
		irq = smmu->irqs[smmu->num_global_irqs + root_cfg->irptndx];
		free_irq(irq, domain);
	}

	__arm_smmu_free_bitmap(smmu->vmid_map, root_cfg->vmid);
	__arm_smmu_free_bitmap(smmu->context_map, root_cfg->cbndx);
}

static int arm_smmu_domain_init(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain;
	pgd_t *pgd;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return -ENOMEM;

	pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
	if (!pgd)
		goto out_free_domain;
	smmu_domain->root_cfg.pgd = pgd;

	spin_lock_init(&smmu_domain->lock);
	domain->priv = smmu_domain;
	return 0;

out_free_domain:
	kfree(smmu_domain);
	return -ENOMEM;
}
*pmd
)
894 pgtable_t table
= pmd_pgtable(*pmd
);
895 pgtable_page_dtor(table
);
899 static void arm_smmu_free_pmds(pud_t
*pud
)
902 pmd_t
*pmd
, *pmd_base
= pmd_offset(pud
, 0);
905 for (i
= 0; i
< PTRS_PER_PMD
; ++i
) {
909 arm_smmu_free_ptes(pmd
);
913 pmd_free(NULL
, pmd_base
);

static void arm_smmu_free_puds(pgd_t *pgd)
{
	int i;
	pud_t *pud, *pud_base = pud_offset(pgd, 0);

	pud = pud_base;
	for (i = 0; i < PTRS_PER_PUD; ++i, ++pud) {
		if (pud_none(*pud))
			continue;

		arm_smmu_free_pmds(pud);
	}

	pud_free(NULL, pud_base);
}

static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain)
{
	int i;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	pgd_t *pgd, *pgd_base = root_cfg->pgd;

	/*
	 * Recursively free the page tables for this domain. We don't
	 * care about speculative TLB filling, because the TLB will be
	 * nuked next time this context bank is re-allocated and no devices
	 * currently map to these tables.
	 */
	pgd = pgd_base;
	for (i = 0; i < PTRS_PER_PGD; ++i, ++pgd) {
		if (pgd_none(*pgd))
			continue;

		arm_smmu_free_puds(pgd);
	}

	kfree(pgd_base);
}

static void arm_smmu_domain_destroy(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;

	arm_smmu_destroy_domain_context(domain);
	arm_smmu_free_pgtables(smmu_domain);
	kfree(smmu_domain);
}

static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
					  struct arm_smmu_master *master)
{
	int i;
	struct arm_smmu_smr *smrs;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
		return 0;

	if (master->smrs)
		return -EEXIST;

	smrs = kmalloc(sizeof(*smrs) * master->num_streamids, GFP_KERNEL);
	if (!smrs) {
		dev_err(smmu->dev, "failed to allocate %d SMRs for master %s\n",
			master->num_streamids, master->of_node->name);
		return -ENOMEM;
	}

	/* Allocate the SMRs on the root SMMU */
	for (i = 0; i < master->num_streamids; ++i) {
		int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
						  smmu->num_mapping_groups);
		if (IS_ERR_VALUE(idx)) {
			dev_err(smmu->dev, "failed to allocate free SMR\n");
			goto err_free_smrs;
		}

		smrs[i] = (struct arm_smmu_smr) {
			.idx	= idx,
			.mask	= 0, /* We don't currently share SMRs */
			.id	= master->streamids[i],
		};
	}

	/* It worked! Now, poke the actual hardware */
	for (i = 0; i < master->num_streamids; ++i) {
		u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
			  smrs[i].mask << SMR_MASK_SHIFT;
		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
	}

	master->smrs = smrs;
	return 0;

err_free_smrs:
	while (--i >= 0)
		__arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
	kfree(smrs);
	return -ENOSPC;
}

static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
				      struct arm_smmu_master *master)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	struct arm_smmu_smr *smrs = master->smrs;

	/* Invalidate the SMRs before freeing back to the allocator */
	for (i = 0; i < master->num_streamids; ++i) {
		u8 idx = smrs[i].idx;
		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
		__arm_smmu_free_bitmap(smmu->smr_map, idx);
	}

	master->smrs = NULL;
	kfree(smrs);
}

static void arm_smmu_bypass_stream_mapping(struct arm_smmu_device *smmu,
					   struct arm_smmu_master *master)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	for (i = 0; i < master->num_streamids; ++i) {
		u16 sid = master->streamids[i];
		writel_relaxed(S2CR_TYPE_BYPASS,
			       gr0_base + ARM_SMMU_GR0_S2CR(sid));
	}
}

static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
				      struct arm_smmu_master *master)
{
	int i, ret;
	struct arm_smmu_device *parent, *smmu = smmu_domain->root_cfg.smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	ret = arm_smmu_master_configure_smrs(smmu, master);
	if (ret)
		return ret;

	/* Bypass the leaves */
	smmu = smmu_domain->leaf_smmu;
	while ((parent = find_parent_smmu(smmu))) {
		/*
		 * We won't have a StreamID match for anything but the root
		 * smmu, so we only need to worry about StreamID indexing,
		 * where we must install bypass entries in the S2CRs.
		 */
		if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
			arm_smmu_bypass_stream_mapping(smmu, master);

		smmu = parent;
	}

	/* Now we're at the root, time to point at our context bank */
	for (i = 0; i < master->num_streamids; ++i) {
		u32 idx, s2cr;
		idx = master->smrs ? master->smrs[i].idx : master->streamids[i];
		s2cr = S2CR_TYPE_TRANS |
		       (smmu_domain->root_cfg.cbndx << S2CR_CBNDX_SHIFT);
		writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
	}

	return 0;
}

static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
					  struct arm_smmu_master *master)
{
	struct arm_smmu_device *smmu = smmu_domain->root_cfg.smmu;

	/*
	 * We *must* clear the S2CR first, because freeing the SMR means
	 * that it can be re-allocated immediately.
	 */
	arm_smmu_bypass_stream_mapping(smmu, master);
	arm_smmu_master_free_smrs(smmu, master);
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = -EINVAL;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_device *device_smmu = dev->archdata.iommu;
	struct arm_smmu_master *master;

	if (!device_smmu) {
		dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
		return -ENXIO;
	}

	/*
	 * Sanity check the domain. We don't currently support domains
	 * that cross between different SMMU chains.
	 */
	spin_lock(&smmu_domain->lock);
	if (!smmu_domain->leaf_smmu) {
		/* Now that we have a master, we can finalise the domain */
		ret = arm_smmu_init_domain_context(domain, dev);
		if (IS_ERR_VALUE(ret))
			goto err_unlock;

		smmu_domain->leaf_smmu = device_smmu;
	} else if (smmu_domain->leaf_smmu != device_smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
			dev_name(smmu_domain->leaf_smmu->dev),
			dev_name(device_smmu->dev));
		goto err_unlock;
	}
	spin_unlock(&smmu_domain->lock);

	/* Looks ok, so add the device to the domain */
	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
	if (!master)
		return -ENODEV;

	return arm_smmu_domain_add_master(smmu_domain, master);

err_unlock:
	spin_unlock(&smmu_domain->lock);
	return ret;
}

static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_master *master;

	master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
	if (master)
		arm_smmu_domain_remove_master(smmu_domain, master);
}

static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
				   size_t size)
{
	unsigned long offset = (unsigned long)addr & ~PAGE_MASK;

	/*
	 * If the SMMU can't walk tables in the CPU caches, treat them
	 * like non-coherent DMA since we need to flush the new entries
	 * all the way out to memory. There's no possibility of recursion
	 * here as the SMMU table walker will not be wired through another
	 * SMMU.
	 */
	if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK))
		dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
			     DMA_TO_DEVICE);
}

static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
					     unsigned long end)
{
	return !(addr & ~ARM_SMMU_PTE_CONT_MASK) &&
		(addr + ARM_SMMU_PTE_CONT_SIZE <= end);
}

static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
				   unsigned long addr, unsigned long end,
				   unsigned long pfn, int flags, int stage)
{
	pte_t *pte, *start;
	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;

	if (pmd_none(*pmd)) {
		/* Allocate a new set of tables */
		pgtable_t table = alloc_page(PGALLOC_GFP);
		if (!table)
			return -ENOMEM;

		arm_smmu_flush_pgtable(smmu, page_address(table),
				       ARM_SMMU_PTE_HWTABLE_SIZE);
		pgtable_page_ctor(table);
		pmd_populate(NULL, pmd, table);
		arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd));
	}

	if (stage == 1) {
		pteval |= ARM_SMMU_PTE_AP_UNPRIV;
		if (!(flags & IOMMU_WRITE) && (flags & IOMMU_READ))
			pteval |= ARM_SMMU_PTE_AP_RDONLY;

		if (flags & IOMMU_CACHE)
			pteval |= (MAIR_ATTR_IDX_CACHE <<
				   ARM_SMMU_PTE_ATTRINDX_SHIFT);
	} else {
		pteval |= ARM_SMMU_PTE_HAP_FAULT;
		if (flags & IOMMU_READ)
			pteval |= ARM_SMMU_PTE_HAP_READ;
		if (flags & IOMMU_WRITE)
			pteval |= ARM_SMMU_PTE_HAP_WRITE;
		if (flags & IOMMU_CACHE)
			pteval |= ARM_SMMU_PTE_MEMATTR_OIWB;
		else
			pteval |= ARM_SMMU_PTE_MEMATTR_NC;
	}

	/* If no access, create a faulting entry to avoid TLB fills */
	if (!(flags & (IOMMU_READ | IOMMU_WRITE)))
		pteval &= ~ARM_SMMU_PTE_PAGE;

	pteval |= ARM_SMMU_PTE_SH_IS;
	start = pmd_page_vaddr(*pmd) + pte_index(addr);
	pte = start;

	/*
	 * Install the page table entries. This is fairly complicated
	 * since we attempt to make use of the contiguous hint in the
	 * ptes where possible. The contiguous hint indicates a series
	 * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically
	 * contiguous region with the following constraints:
	 *
	 *   - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE
	 *   - Each pte in the region has the contiguous hint bit set
	 *
	 * This complicates unmapping (also handled by this code, when
	 * neither IOMMU_READ or IOMMU_WRITE are set) because it is
	 * possible, yet highly unlikely, that a client may unmap only
	 * part of a contiguous range. This requires clearing of the
	 * contiguous hint bits in the range before installing the new
	 * faulting entries.
	 *
	 * Note that re-mapping an address range without first unmapping
	 * it is not supported, so TLB invalidation is not required here
	 * and is instead performed at unmap and domain-init time.
	 */
	do {
		int i = 1;
		pteval &= ~ARM_SMMU_PTE_CONT;

		if (arm_smmu_pte_is_contiguous_range(addr, end)) {
			i = ARM_SMMU_PTE_CONT_ENTRIES;
			pteval |= ARM_SMMU_PTE_CONT;
		} else if (pte_val(*pte) &
			   (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) {
			int j;
			pte_t *cont_start;
			unsigned long idx = pte_index(addr);

			idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1);
			cont_start = pmd_page_vaddr(*pmd) + idx;
			for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j)
				pte_val(*(cont_start + j)) &= ~ARM_SMMU_PTE_CONT;

			arm_smmu_flush_pgtable(smmu, cont_start,
					       sizeof(*pte) *
					       ARM_SMMU_PTE_CONT_ENTRIES);
		}

		do {
			*pte = pfn_pte(pfn, __pgprot(pteval));
		} while (pte++, pfn++, addr += PAGE_SIZE, --i);
	} while (addr != end);

	arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start));
	return 0;
}

static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   phys_addr_t phys, int flags, int stage)
{
	int ret;
	pmd_t *pmd;
	unsigned long next, pfn = __phys_to_pfn(phys);

#ifndef __PAGETABLE_PMD_FOLDED
	if (pud_none(*pud)) {
		pmd = pmd_alloc_one(NULL, addr);
		if (!pmd)
			return -ENOMEM;

		pud_populate(NULL, pud, pmd);
		arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));

		pmd += pmd_index(addr);
	} else
#endif
		pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);
		ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn,
					      flags, stage);
		phys += next - addr;
		pfn = __phys_to_pfn(phys);
	} while (pmd++, addr = next, addr < end);

	return ret;
}

static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
				   unsigned long addr, unsigned long end,
				   phys_addr_t phys, int flags, int stage)
{
	int ret = 0;
	pud_t *pud;
	unsigned long next;

#ifndef __PAGETABLE_PUD_FOLDED
	if (pgd_none(*pgd)) {
		pud = pud_alloc_one(NULL, addr);
		if (!pud)
			return -ENOMEM;

		pgd_populate(NULL, pgd, pud);
		arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));

		pud += pud_index(addr);
	} else
#endif
		pud = pud_offset(pgd, addr);

	do {
		next = pud_addr_end(addr, end);
		ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
					      flags, stage);
		phys += next - addr;
	} while (pud++, addr = next, addr < end);

	return ret;
}

static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
				   unsigned long iova, phys_addr_t paddr,
				   size_t size, int flags)
{
	int ret, stage;
	unsigned long end;
	phys_addr_t input_mask, output_mask;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	pgd_t *pgd = root_cfg->pgd;
	struct arm_smmu_device *smmu = root_cfg->smmu;

	if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
		stage = 2;
		output_mask = (1ULL << smmu->s2_output_size) - 1;
	} else {
		stage = 1;
		output_mask = (1ULL << smmu->s1_output_size) - 1;
	}

	if (!pgd)
		return -EINVAL;

	if (size & ~PAGE_MASK)
		return -EINVAL;

	input_mask = (1ULL << smmu->input_size) - 1;
	if ((phys_addr_t)iova & ~input_mask)
		return -ERANGE;

	if (paddr & ~output_mask)
		return -ERANGE;

	spin_lock(&smmu_domain->lock);
	pgd += pgd_index(iova);
	end = iova + size;
	do {
		unsigned long next = pgd_addr_end(iova, end);

		ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr,
					      flags, stage);
		if (ret)
			goto out_unlock;

		paddr += next - iova;
		iova = next;
	} while (pgd++, iova != end);

out_unlock:
	spin_unlock(&smmu_domain->lock);

	/* Ensure new page tables are visible to the hardware walker */
	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		dsb();

	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int flags)
{
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_device *smmu = smmu_domain->leaf_smmu;

	if (!smmu_domain || !smmu)
		return -ENODEV;

	/* Check for silent address truncation up the SMMU chain. */
	if ((phys_addr_t)iova & ~smmu_domain->output_mask)
		return -ERANGE;

	return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, flags);
}

static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
			     size_t size)
{
	int ret;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);

	ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0);
	writel_relaxed(root_cfg->vmid, gr0_base + ARM_SMMU_GR0_TLBIVMID);
	arm_smmu_tlb_sync(smmu);

	/* An unmap callback returns the number of bytes unmapped */
	return ret ? 0 : size;
}

static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
					 dma_addr_t iova)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct arm_smmu_domain *smmu_domain = domain->priv;
	struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
	struct arm_smmu_device *smmu = root_cfg->smmu;

	spin_lock(&smmu_domain->lock);
	pgd = root_cfg->pgd;
	if (!pgd)
		goto err_unlock;

	pgd += pgd_index(iova);
	if (pgd_none_or_clear_bad(pgd))
		goto err_unlock;

	pud = pud_offset(pgd, iova);
	if (pud_none_or_clear_bad(pud))
		goto err_unlock;

	pmd = pmd_offset(pud, iova);
	if (pmd_none_or_clear_bad(pmd))
		goto err_unlock;

	pte = pmd_page_vaddr(*pmd) + pte_index(iova);
	if (pte_none(*pte))
		goto err_unlock;

	spin_unlock(&smmu_domain->lock);
	return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);

err_unlock:
	spin_unlock(&smmu_domain->lock);
	dev_warn(smmu->dev,
		 "invalid (corrupt?) page tables detected for iova 0x%llx\n",
		 (unsigned long long)iova);
	return -EINVAL;
}

static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
				   unsigned long cap)
{
	unsigned long caps = 0;
	struct arm_smmu_domain *smmu_domain = domain->priv;

	if (smmu_domain->root_cfg.smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
		caps |= IOMMU_CAP_CACHE_COHERENCY;

	return !!(cap & caps);
}

static int arm_smmu_add_device(struct device *dev)
{
	struct arm_smmu_device *child, *parent, *smmu;
	struct arm_smmu_master *master = NULL;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(parent, &arm_smmu_devices, list) {
		smmu = parent;

		/* Try to find a child of the current SMMU. */
		list_for_each_entry(child, &arm_smmu_devices, list) {
			if (child->parent_of_node == parent->dev->of_node) {
				/* Does the child sit above our master? */
				master = find_smmu_master(child, dev->of_node);
				if (master) {
					smmu = NULL;
					break;
				}
			}
		}

		/* We found some children, so keep searching. */
		if (!smmu) {
			master = NULL;
			continue;
		}

		master = find_smmu_master(smmu, dev->of_node);
		if (master)
			break;
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!master)
		return -ENODEV;

	dev->archdata.iommu = smmu;
	return 0;
}

static void arm_smmu_remove_device(struct device *dev)
{
	dev->archdata.iommu = NULL;
}

static struct iommu_ops arm_smmu_ops = {
	.domain_init	= arm_smmu_domain_init,
	.domain_destroy	= arm_smmu_domain_destroy,
	.attach_dev	= arm_smmu_attach_dev,
	.detach_dev	= arm_smmu_detach_dev,
	.map		= arm_smmu_map,
	.unmap		= arm_smmu_unmap,
	.iova_to_phys	= arm_smmu_iova_to_phys,
	.domain_has_cap	= arm_smmu_domain_has_cap,
	.add_device	= arm_smmu_add_device,
	.remove_device	= arm_smmu_remove_device,
	.pgsize_bitmap	= (SECTION_SIZE |
			   ARM_SMMU_PTE_CONT_SIZE |
			   PAGE_SIZE),
};
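
/*
 * A minimal sketch of how a client driver would exercise these ops through
 * the generic IOMMU API of this era; the device pointer, IOVA and physical
 * address are hypothetical.
 */
static int __maybe_unused arm_smmu_example_client(struct device *client)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(&platform_bus_type);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, client);
	if (!ret) {
		/* Map one page read/write at an arbitrary IOVA */
		ret = iommu_map(domain, 0x10000, 0x80000000, PAGE_SIZE,
				IOMMU_READ | IOMMU_WRITE);
		if (!ret)
			iommu_unmap(domain, 0x10000, PAGE_SIZE);

		iommu_detach_device(domain, client);
	}

	iommu_domain_free(domain);
	return ret;
}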

static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
{
	int i;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 scr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sCR0);

	/* Mark all SMRn as invalid and all S2CRn as bypass */
	for (i = 0; i < smmu->num_mapping_groups; ++i) {
		writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(i));
		writel_relaxed(S2CR_TYPE_BYPASS, gr0_base + ARM_SMMU_GR0_S2CR(i));
	}

	/* Invalidate the TLB, just in case */
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);

	/* Enable fault reporting */
	scr0 |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);

	/* Disable TLB broadcasting. */
	scr0 |= (sCR0_VMIDPNE | sCR0_PTM);

	/* Enable client access, but bypass when no mapping is found */
	scr0 &= ~(sCR0_CLIENTPD | sCR0_USFCFG);

	/* Disable forced broadcasting */
	scr0 &= ~sCR0_FB;

	/* Don't upgrade barriers */
	scr0 &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);

	/* Push the button */
	arm_smmu_tlb_sync(smmu);
	writel(scr0, gr0_base + ARM_SMMU_GR0_sCR0);
}

static int arm_smmu_id_size_to_bits(int size)
{
	/* IAS/OAS/UBS fields encode 32/36/40/42/44/48-bit widths */
	switch (size) {
	case 0:
		return 32;
	case 1:
		return 36;
	case 2:
		return 40;
	case 3:
		return 42;
	case 4:
		return 44;
	case 5:
	default:
		return 48;
	}
}

static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
{
	unsigned long size;
	void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
	u32 id;

	dev_notice(smmu->dev, "probing hardware configuration...\n");

	/* Primecell ID */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_PIDR2);
	smmu->version = ((id >> PIDR2_ARCH_SHIFT) & PIDR2_ARCH_MASK) + 1;
	dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);

	/* ID0 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
#ifndef CONFIG_64BIT
	if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) {
		dev_err(smmu->dev, "\tno v7 descriptor support!\n");
		return -ENODEV;
	}
#endif
	if (id & ID0_S1TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
		dev_notice(smmu->dev, "\tstage 1 translation\n");
	}

	if (id & ID0_S2TS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
		dev_notice(smmu->dev, "\tstage 2 translation\n");
	}

	if (id & ID0_NTS) {
		smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
		dev_notice(smmu->dev, "\tnested translation\n");
	}

	if (!(smmu->features &
	      (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2 |
	       ARM_SMMU_FEAT_TRANS_NESTED))) {
		dev_err(smmu->dev, "\tno translation support!\n");
		return -ENODEV;
	}

	if (id & ID0_CTTW) {
		smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
		dev_notice(smmu->dev, "\tcoherent table walk\n");
	}

	if (id & ID0_SMS) {
		u32 smr, sid, mask;

		smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
		smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
					   ID0_NUMSMRG_MASK;
		if (smmu->num_mapping_groups == 0) {
			dev_err(smmu->dev,
				"stream-matching supported, but no SMRs present!\n");
			return -ENODEV;
		}

		smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
		smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
		writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
		smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));

		mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
		sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
		if ((mask & sid) != sid) {
			dev_err(smmu->dev,
				"SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
				mask, sid);
			return -ENODEV;
		}

		dev_notice(smmu->dev,
			   "\tstream matching with %u register groups, mask 0x%x",
			   smmu->num_mapping_groups, mask);
	}

	/* ID1 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
	smmu->pagesize = (id & ID1_PAGESIZE) ? SZ_64K : SZ_4K;

	/* Check that we ioremapped enough */
	size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
	size *= (smmu->pagesize << 1);
	if (smmu->size < size)
		dev_warn(smmu->dev,
			 "device is 0x%lx bytes but only mapped 0x%lx!\n",
			 size, smmu->size);

	smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) &
				     ID1_NUMS2CB_MASK;
	smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
	if (smmu->num_s2_context_banks > smmu->num_context_banks) {
		dev_err(smmu->dev, "impossible number of S2 context banks!\n");
		return -ENODEV;
	}
	dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
		   smmu->num_context_banks, smmu->num_s2_context_banks);

	/* ID2 */
	id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
	size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);

	/*
	 * Stage-1 output limited by stage-2 input size due to pgd
	 * allocation (PTRS_PER_PGD).
	 */
#ifdef CONFIG_64BIT
	/* Current maximum output size of 39 bits */
	smmu->s1_output_size = min(39UL, size);
#else
	smmu->s1_output_size = min(32UL, size);
#endif

	/* The stage-2 output mask is also applied for bypass */
	size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
	smmu->s2_output_size = min((unsigned long)PHYS_MASK_SHIFT, size);

	if (smmu->version == 1) {
		smmu->input_size = 32;
	} else {
#ifdef CONFIG_64BIT
		size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
		size = min(39, arm_smmu_id_size_to_bits(size));
#else
		size = 32;
#endif
		smmu->input_size = size;

		if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) ||
		    (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) ||
		    (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) {
			dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n",
				PAGE_SIZE);
			return -ENODEV;
		}
	}

	dev_notice(smmu->dev,
		   "\t%lu-bit VA, %lu-bit IPA, %lu-bit PA\n",
		   smmu->input_size, smmu->s1_output_size, smmu->s2_output_size);
	return 0;
}

static int arm_smmu_device_dt_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct arm_smmu_device *smmu;
	struct device_node *dev_node;
	struct device *dev = &pdev->dev;
	struct rb_node *node;
	struct of_phandle_args masterspec;
	int num_irqs, i, err;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(dev, "missing base address/size\n");
		return -ENODEV;
	}

	smmu->size = resource_size(res);
	smmu->base = devm_request_and_ioremap(dev, res);
	if (!smmu->base)
		return -EADDRNOTAVAIL;

	if (of_property_read_u32(dev->of_node, "#global-interrupts",
				 &smmu->num_global_irqs)) {
		dev_err(dev, "missing #global-interrupts property\n");
		return -ENODEV;
	}

	num_irqs = 0;
	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
		num_irqs++;
		if (num_irqs > smmu->num_global_irqs)
			smmu->num_context_irqs++;
	}

	if (num_irqs < smmu->num_global_irqs) {
		dev_warn(dev, "found %d interrupts but expected at least %d\n",
			 num_irqs, smmu->num_global_irqs);
		smmu->num_global_irqs = num_irqs;
	}
	smmu->num_context_irqs = num_irqs - smmu->num_global_irqs;

	smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
				  GFP_KERNEL);
	if (!smmu->irqs) {
		dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
		return -ENOMEM;
	}

	for (i = 0; i < num_irqs; ++i) {
		int irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			dev_err(dev, "failed to get irq index %d\n", i);
			return -ENODEV;
		}
		smmu->irqs[i] = irq;
	}

	i = 0;
	smmu->masters = RB_ROOT;
	while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
					   "#stream-id-cells", i,
					   &masterspec)) {
		err = register_smmu_master(smmu, dev, &masterspec);
		if (err) {
			dev_err(dev, "failed to add master %s\n",
				masterspec.np->name);
			goto out_put_masters;
		}

		i++;
	}
	dev_notice(dev, "registered %d master devices\n", i);

	if ((dev_node = of_parse_phandle(dev->of_node, "smmu-parent", 0)))
		smmu->parent_of_node = dev_node;

	err = arm_smmu_device_cfg_probe(smmu);
	if (err)
		goto out_put_parent;

	if (smmu->version > 1 &&
	    smmu->num_context_banks != smmu->num_context_irqs) {
		dev_err(dev,
			"found only %d context interrupt(s) but %d required\n",
			smmu->num_context_irqs, smmu->num_context_banks);
		err = -ENODEV;
		goto out_put_parent;
	}

	arm_smmu_device_reset(smmu);

	for (i = 0; i < smmu->num_global_irqs; ++i) {
		err = request_irq(smmu->irqs[i],
				  arm_smmu_global_fault,
				  IRQF_SHARED,
				  "arm-smmu global fault",
				  smmu);
		if (err) {
			dev_err(dev, "failed to request global IRQ %d (%u)\n",
				i, smmu->irqs[i]);
			goto out_free_irqs;
		}
	}

	INIT_LIST_HEAD(&smmu->list);
	spin_lock(&arm_smmu_devices_lock);
	list_add(&smmu->list, &arm_smmu_devices);
	spin_unlock(&arm_smmu_devices_lock);
	return 0;

out_free_irqs:
	while (i--)
		free_irq(smmu->irqs[i], smmu);

out_put_parent:
	if (smmu->parent_of_node)
		of_node_put(smmu->parent_of_node);

out_put_masters:
	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	return err;
}
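
/*
 * For reference, a hypothetical device-tree fragment matching what the probe
 * routine above parses; all names and values here are illustrative only:
 *
 *	dma0: dma@40000000 {
 *		...
 *		#stream-id-cells = <1>;
 *	};
 *
 *	smmu@41000000 {
 *		compatible = "arm,smmu-v1";
 *		reg = <0x41000000 0x10000>;
 *		#global-interrupts = <1>;
 *		interrupts = <0 32 4>, <0 33 4>;
 *		mmu-masters = <&dma0 0xd01d>;
 *	};
 */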

static int arm_smmu_device_remove(struct platform_device *pdev)
{
	int i;
	struct device *dev = &pdev->dev;
	struct arm_smmu_device *curr, *smmu = NULL;
	struct rb_node *node;

	spin_lock(&arm_smmu_devices_lock);
	list_for_each_entry(curr, &arm_smmu_devices, list) {
		if (curr->dev == dev) {
			smmu = curr;
			list_del(&smmu->list);
			break;
		}
	}
	spin_unlock(&arm_smmu_devices_lock);

	if (!smmu)
		return -ENODEV;

	if (smmu->parent_of_node)
		of_node_put(smmu->parent_of_node);

	for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
		struct arm_smmu_master *master;
		master = container_of(node, struct arm_smmu_master, node);
		of_node_put(master->of_node);
	}

	if (!bitmap_empty(smmu->vmid_map, ARM_SMMU_NUM_VMIDS))
		dev_err(dev, "removing device with active domains!\n");

	for (i = 0; i < smmu->num_global_irqs; ++i)
		free_irq(smmu->irqs[i], smmu);

	/* Turn the thing off */
	writel(sCR0_CLIENTPD, ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_sCR0);
	return 0;
}

#ifdef CONFIG_OF
static struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v1", },
	{ .compatible = "arm,smmu-v2", },
	{ .compatible = "arm,mmu-400", },
	{ .compatible = "arm,mmu-500", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
#endif

static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.owner		= THIS_MODULE,
		.name		= "arm-smmu",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_dt_probe,
	.remove	= arm_smmu_device_remove,
};

static int __init arm_smmu_init(void)
{
	int ret;

	ret = platform_driver_register(&arm_smmu_driver);
	if (ret)
		return ret;

	/* Oh, for a proper bus abstraction */
	if (!iommu_present(&platform_bus_type))
		bus_set_iommu(&platform_bus_type, &arm_smmu_ops);

	if (!iommu_present(&amba_bustype))
		bus_set_iommu(&amba_bustype, &arm_smmu_ops);

	return 0;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

module_init(arm_smmu_init);
module_exit(arm_smmu_exit);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");