2 * IOMMU API for ARM architected SMMUv3 implementations.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 * Copyright (C) 2015 ARM Limited
18 * Author: Will Deacon <will.deacon@arm.com>
20 * This driver is powered by bad coffee and bombay mix.
23 #include <linux/acpi.h>
24 #include <linux/acpi_iort.h>
25 #include <linux/delay.h>
26 #include <linux/dma-iommu.h>
27 #include <linux/err.h>
28 #include <linux/interrupt.h>
29 #include <linux/iommu.h>
30 #include <linux/iopoll.h>
31 #include <linux/module.h>
32 #include <linux/msi.h>
34 #include <linux/of_address.h>
35 #include <linux/of_iommu.h>
36 #include <linux/of_platform.h>
37 #include <linux/pci.h>
38 #include <linux/platform_device.h>
40 #include <linux/amba/bus.h>
42 #include "io-pgtable.h"
45 #define ARM_SMMU_IDR0 0x0
46 #define IDR0_ST_LVL_SHIFT 27
47 #define IDR0_ST_LVL_MASK 0x3
48 #define IDR0_ST_LVL_2LVL (1 << IDR0_ST_LVL_SHIFT)
49 #define IDR0_STALL_MODEL_SHIFT 24
50 #define IDR0_STALL_MODEL_MASK 0x3
51 #define IDR0_STALL_MODEL_STALL (0 << IDR0_STALL_MODEL_SHIFT)
52 #define IDR0_STALL_MODEL_FORCE (2 << IDR0_STALL_MODEL_SHIFT)
53 #define IDR0_TTENDIAN_SHIFT 21
54 #define IDR0_TTENDIAN_MASK 0x3
55 #define IDR0_TTENDIAN_LE (2 << IDR0_TTENDIAN_SHIFT)
56 #define IDR0_TTENDIAN_BE (3 << IDR0_TTENDIAN_SHIFT)
57 #define IDR0_TTENDIAN_MIXED (0 << IDR0_TTENDIAN_SHIFT)
58 #define IDR0_CD2L (1 << 19)
59 #define IDR0_VMID16 (1 << 18)
60 #define IDR0_PRI (1 << 16)
61 #define IDR0_SEV (1 << 14)
62 #define IDR0_MSI (1 << 13)
63 #define IDR0_ASID16 (1 << 12)
64 #define IDR0_ATS (1 << 10)
65 #define IDR0_HYP (1 << 9)
66 #define IDR0_COHACC (1 << 4)
67 #define IDR0_TTF_SHIFT 2
68 #define IDR0_TTF_MASK 0x3
69 #define IDR0_TTF_AARCH64 (2 << IDR0_TTF_SHIFT)
70 #define IDR0_TTF_AARCH32_64 (3 << IDR0_TTF_SHIFT)
71 #define IDR0_S1P (1 << 1)
72 #define IDR0_S2P (1 << 0)
74 #define ARM_SMMU_IDR1 0x4
75 #define IDR1_TABLES_PRESET (1 << 30)
76 #define IDR1_QUEUES_PRESET (1 << 29)
77 #define IDR1_REL (1 << 28)
78 #define IDR1_CMDQ_SHIFT 21
79 #define IDR1_CMDQ_MASK 0x1f
80 #define IDR1_EVTQ_SHIFT 16
81 #define IDR1_EVTQ_MASK 0x1f
82 #define IDR1_PRIQ_SHIFT 11
83 #define IDR1_PRIQ_MASK 0x1f
84 #define IDR1_SSID_SHIFT 6
85 #define IDR1_SSID_MASK 0x1f
86 #define IDR1_SID_SHIFT 0
87 #define IDR1_SID_MASK 0x3f
89 #define ARM_SMMU_IDR5 0x14
90 #define IDR5_STALL_MAX_SHIFT 16
91 #define IDR5_STALL_MAX_MASK 0xffff
92 #define IDR5_GRAN64K (1 << 6)
93 #define IDR5_GRAN16K (1 << 5)
94 #define IDR5_GRAN4K (1 << 4)
95 #define IDR5_OAS_SHIFT 0
96 #define IDR5_OAS_MASK 0x7
97 #define IDR5_OAS_32_BIT (0 << IDR5_OAS_SHIFT)
98 #define IDR5_OAS_36_BIT (1 << IDR5_OAS_SHIFT)
99 #define IDR5_OAS_40_BIT (2 << IDR5_OAS_SHIFT)
100 #define IDR5_OAS_42_BIT (3 << IDR5_OAS_SHIFT)
101 #define IDR5_OAS_44_BIT (4 << IDR5_OAS_SHIFT)
102 #define IDR5_OAS_48_BIT (5 << IDR5_OAS_SHIFT)
104 #define ARM_SMMU_CR0 0x20
105 #define CR0_CMDQEN (1 << 3)
106 #define CR0_EVTQEN (1 << 2)
107 #define CR0_PRIQEN (1 << 1)
108 #define CR0_SMMUEN (1 << 0)
110 #define ARM_SMMU_CR0ACK 0x24
112 #define ARM_SMMU_CR1 0x28
116 #define CR1_CACHE_NC 0
117 #define CR1_CACHE_WB 1
118 #define CR1_CACHE_WT 2
119 #define CR1_TABLE_SH_SHIFT 10
120 #define CR1_TABLE_OC_SHIFT 8
121 #define CR1_TABLE_IC_SHIFT 6
122 #define CR1_QUEUE_SH_SHIFT 4
123 #define CR1_QUEUE_OC_SHIFT 2
124 #define CR1_QUEUE_IC_SHIFT 0
126 #define ARM_SMMU_CR2 0x2c
127 #define CR2_PTM (1 << 2)
128 #define CR2_RECINVSID (1 << 1)
129 #define CR2_E2H (1 << 0)
131 #define ARM_SMMU_GBPA 0x44
132 #define GBPA_ABORT (1 << 20)
133 #define GBPA_UPDATE (1 << 31)
135 #define ARM_SMMU_IRQ_CTRL 0x50
136 #define IRQ_CTRL_EVTQ_IRQEN (1 << 2)
137 #define IRQ_CTRL_PRIQ_IRQEN (1 << 1)
138 #define IRQ_CTRL_GERROR_IRQEN (1 << 0)
140 #define ARM_SMMU_IRQ_CTRLACK 0x54
142 #define ARM_SMMU_GERROR 0x60
143 #define GERROR_SFM_ERR (1 << 8)
144 #define GERROR_MSI_GERROR_ABT_ERR (1 << 7)
145 #define GERROR_MSI_PRIQ_ABT_ERR (1 << 6)
146 #define GERROR_MSI_EVTQ_ABT_ERR (1 << 5)
147 #define GERROR_MSI_CMDQ_ABT_ERR (1 << 4)
148 #define GERROR_PRIQ_ABT_ERR (1 << 3)
149 #define GERROR_EVTQ_ABT_ERR (1 << 2)
150 #define GERROR_CMDQ_ERR (1 << 0)
151 #define GERROR_ERR_MASK 0xfd
153 #define ARM_SMMU_GERRORN 0x64
155 #define ARM_SMMU_GERROR_IRQ_CFG0 0x68
156 #define ARM_SMMU_GERROR_IRQ_CFG1 0x70
157 #define ARM_SMMU_GERROR_IRQ_CFG2 0x74
159 #define ARM_SMMU_STRTAB_BASE 0x80
160 #define STRTAB_BASE_RA (1UL << 62)
161 #define STRTAB_BASE_ADDR_SHIFT 6
162 #define STRTAB_BASE_ADDR_MASK 0x3ffffffffffUL
164 #define ARM_SMMU_STRTAB_BASE_CFG 0x88
165 #define STRTAB_BASE_CFG_LOG2SIZE_SHIFT 0
166 #define STRTAB_BASE_CFG_LOG2SIZE_MASK 0x3f
167 #define STRTAB_BASE_CFG_SPLIT_SHIFT 6
168 #define STRTAB_BASE_CFG_SPLIT_MASK 0x1f
169 #define STRTAB_BASE_CFG_FMT_SHIFT 16
170 #define STRTAB_BASE_CFG_FMT_MASK 0x3
171 #define STRTAB_BASE_CFG_FMT_LINEAR (0 << STRTAB_BASE_CFG_FMT_SHIFT)
172 #define STRTAB_BASE_CFG_FMT_2LVL (1 << STRTAB_BASE_CFG_FMT_SHIFT)
174 #define ARM_SMMU_CMDQ_BASE 0x90
175 #define ARM_SMMU_CMDQ_PROD 0x98
176 #define ARM_SMMU_CMDQ_CONS 0x9c
178 #define ARM_SMMU_EVTQ_BASE 0xa0
179 #define ARM_SMMU_EVTQ_PROD 0x100a8
180 #define ARM_SMMU_EVTQ_CONS 0x100ac
181 #define ARM_SMMU_EVTQ_IRQ_CFG0 0xb0
182 #define ARM_SMMU_EVTQ_IRQ_CFG1 0xb8
183 #define ARM_SMMU_EVTQ_IRQ_CFG2 0xbc
185 #define ARM_SMMU_PRIQ_BASE 0xc0
186 #define ARM_SMMU_PRIQ_PROD 0x100c8
187 #define ARM_SMMU_PRIQ_CONS 0x100cc
188 #define ARM_SMMU_PRIQ_IRQ_CFG0 0xd0
189 #define ARM_SMMU_PRIQ_IRQ_CFG1 0xd8
190 #define ARM_SMMU_PRIQ_IRQ_CFG2 0xdc
192 /* Common MSI config fields */
193 #define MSI_CFG0_ADDR_SHIFT 2
194 #define MSI_CFG0_ADDR_MASK 0x3fffffffffffUL
195 #define MSI_CFG2_SH_SHIFT 4
196 #define MSI_CFG2_SH_NSH (0UL << MSI_CFG2_SH_SHIFT)
197 #define MSI_CFG2_SH_OSH (2UL << MSI_CFG2_SH_SHIFT)
198 #define MSI_CFG2_SH_ISH (3UL << MSI_CFG2_SH_SHIFT)
199 #define MSI_CFG2_MEMATTR_SHIFT 0
200 #define MSI_CFG2_MEMATTR_DEVICE_nGnRE (0x1 << MSI_CFG2_MEMATTR_SHIFT)
202 #define Q_IDX(q, p) ((p) & ((1 << (q)->max_n_shift) - 1))
203 #define Q_WRP(q, p) ((p) & (1 << (q)->max_n_shift))
204 #define Q_OVERFLOW_FLAG (1 << 31)
205 #define Q_OVF(q, p) ((p) & Q_OVERFLOW_FLAG)
206 #define Q_ENT(q, p) ((q)->base + \
207 Q_IDX(q, p) * (q)->ent_dwords)
209 #define Q_BASE_RWA (1UL << 62)
210 #define Q_BASE_ADDR_SHIFT 5
211 #define Q_BASE_ADDR_MASK 0xfffffffffffUL
212 #define Q_BASE_LOG2SIZE_SHIFT 0
213 #define Q_BASE_LOG2SIZE_MASK 0x1fUL
218 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
219 * 2lvl: 128k L1 entries,
220 * 256 lazy entries per table (each table covers a PCI bus)
222 #define STRTAB_L1_SZ_SHIFT 20
223 #define STRTAB_SPLIT 8
225 #define STRTAB_L1_DESC_DWORDS 1
226 #define STRTAB_L1_DESC_SPAN_SHIFT 0
227 #define STRTAB_L1_DESC_SPAN_MASK 0x1fUL
228 #define STRTAB_L1_DESC_L2PTR_SHIFT 6
229 #define STRTAB_L1_DESC_L2PTR_MASK 0x3ffffffffffUL
231 #define STRTAB_STE_DWORDS 8
232 #define STRTAB_STE_0_V (1UL << 0)
233 #define STRTAB_STE_0_CFG_SHIFT 1
234 #define STRTAB_STE_0_CFG_MASK 0x7UL
235 #define STRTAB_STE_0_CFG_ABORT (0UL << STRTAB_STE_0_CFG_SHIFT)
236 #define STRTAB_STE_0_CFG_BYPASS (4UL << STRTAB_STE_0_CFG_SHIFT)
237 #define STRTAB_STE_0_CFG_S1_TRANS (5UL << STRTAB_STE_0_CFG_SHIFT)
238 #define STRTAB_STE_0_CFG_S2_TRANS (6UL << STRTAB_STE_0_CFG_SHIFT)
240 #define STRTAB_STE_0_S1FMT_SHIFT 4
241 #define STRTAB_STE_0_S1FMT_LINEAR (0UL << STRTAB_STE_0_S1FMT_SHIFT)
242 #define STRTAB_STE_0_S1CTXPTR_SHIFT 6
243 #define STRTAB_STE_0_S1CTXPTR_MASK 0x3ffffffffffUL
244 #define STRTAB_STE_0_S1CDMAX_SHIFT 59
245 #define STRTAB_STE_0_S1CDMAX_MASK 0x1fUL
247 #define STRTAB_STE_1_S1C_CACHE_NC 0UL
248 #define STRTAB_STE_1_S1C_CACHE_WBRA 1UL
249 #define STRTAB_STE_1_S1C_CACHE_WT 2UL
250 #define STRTAB_STE_1_S1C_CACHE_WB 3UL
251 #define STRTAB_STE_1_S1C_SH_NSH 0UL
252 #define STRTAB_STE_1_S1C_SH_OSH 2UL
253 #define STRTAB_STE_1_S1C_SH_ISH 3UL
254 #define STRTAB_STE_1_S1CIR_SHIFT 2
255 #define STRTAB_STE_1_S1COR_SHIFT 4
256 #define STRTAB_STE_1_S1CSH_SHIFT 6
258 #define STRTAB_STE_1_S1STALLD (1UL << 27)
260 #define STRTAB_STE_1_EATS_ABT 0UL
261 #define STRTAB_STE_1_EATS_TRANS 1UL
262 #define STRTAB_STE_1_EATS_S1CHK 2UL
263 #define STRTAB_STE_1_EATS_SHIFT 28
265 #define STRTAB_STE_1_STRW_NSEL1 0UL
266 #define STRTAB_STE_1_STRW_EL2 2UL
267 #define STRTAB_STE_1_STRW_SHIFT 30
269 #define STRTAB_STE_1_SHCFG_INCOMING 1UL
270 #define STRTAB_STE_1_SHCFG_SHIFT 44
272 #define STRTAB_STE_1_PRIVCFG_UNPRIV 2UL
273 #define STRTAB_STE_1_PRIVCFG_SHIFT 48
275 #define STRTAB_STE_2_S2VMID_SHIFT 0
276 #define STRTAB_STE_2_S2VMID_MASK 0xffffUL
277 #define STRTAB_STE_2_VTCR_SHIFT 32
278 #define STRTAB_STE_2_VTCR_MASK 0x7ffffUL
279 #define STRTAB_STE_2_S2AA64 (1UL << 51)
280 #define STRTAB_STE_2_S2ENDI (1UL << 52)
281 #define STRTAB_STE_2_S2PTW (1UL << 54)
282 #define STRTAB_STE_2_S2R (1UL << 58)
284 #define STRTAB_STE_3_S2TTB_SHIFT 4
285 #define STRTAB_STE_3_S2TTB_MASK 0xfffffffffffUL
287 /* Context descriptor (stage-1 only) */
288 #define CTXDESC_CD_DWORDS 8
289 #define CTXDESC_CD_0_TCR_T0SZ_SHIFT 0
290 #define ARM64_TCR_T0SZ_SHIFT 0
291 #define ARM64_TCR_T0SZ_MASK 0x1fUL
292 #define CTXDESC_CD_0_TCR_TG0_SHIFT 6
293 #define ARM64_TCR_TG0_SHIFT 14
294 #define ARM64_TCR_TG0_MASK 0x3UL
295 #define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8
296 #define ARM64_TCR_IRGN0_SHIFT 8
297 #define ARM64_TCR_IRGN0_MASK 0x3UL
298 #define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10
299 #define ARM64_TCR_ORGN0_SHIFT 10
300 #define ARM64_TCR_ORGN0_MASK 0x3UL
301 #define CTXDESC_CD_0_TCR_SH0_SHIFT 12
302 #define ARM64_TCR_SH0_SHIFT 12
303 #define ARM64_TCR_SH0_MASK 0x3UL
304 #define CTXDESC_CD_0_TCR_EPD0_SHIFT 14
305 #define ARM64_TCR_EPD0_SHIFT 7
306 #define ARM64_TCR_EPD0_MASK 0x1UL
307 #define CTXDESC_CD_0_TCR_EPD1_SHIFT 30
308 #define ARM64_TCR_EPD1_SHIFT 23
309 #define ARM64_TCR_EPD1_MASK 0x1UL
311 #define CTXDESC_CD_0_ENDI (1UL << 15)
312 #define CTXDESC_CD_0_V (1UL << 31)
314 #define CTXDESC_CD_0_TCR_IPS_SHIFT 32
315 #define ARM64_TCR_IPS_SHIFT 32
316 #define ARM64_TCR_IPS_MASK 0x7UL
317 #define CTXDESC_CD_0_TCR_TBI0_SHIFT 38
318 #define ARM64_TCR_TBI0_SHIFT 37
319 #define ARM64_TCR_TBI0_MASK 0x1UL
321 #define CTXDESC_CD_0_AA64 (1UL << 41)
322 #define CTXDESC_CD_0_R (1UL << 45)
323 #define CTXDESC_CD_0_A (1UL << 46)
324 #define CTXDESC_CD_0_ASET_SHIFT 47
325 #define CTXDESC_CD_0_ASET_SHARED (0UL << CTXDESC_CD_0_ASET_SHIFT)
326 #define CTXDESC_CD_0_ASET_PRIVATE (1UL << CTXDESC_CD_0_ASET_SHIFT)
327 #define CTXDESC_CD_0_ASID_SHIFT 48
328 #define CTXDESC_CD_0_ASID_MASK 0xffffUL
330 #define CTXDESC_CD_1_TTB0_SHIFT 4
331 #define CTXDESC_CD_1_TTB0_MASK 0xfffffffffffUL
333 #define CTXDESC_CD_3_MAIR_SHIFT 0
335 /* Convert between AArch64 (CPU) TCR format and SMMU CD format */
336 #define ARM_SMMU_TCR2CD(tcr, fld) \
337 (((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK) \
338 << CTXDESC_CD_0_TCR_##fld##_SHIFT)
341 #define CMDQ_ENT_DWORDS 2
342 #define CMDQ_MAX_SZ_SHIFT 8
344 #define CMDQ_ERR_SHIFT 24
345 #define CMDQ_ERR_MASK 0x7f
346 #define CMDQ_ERR_CERROR_NONE_IDX 0
347 #define CMDQ_ERR_CERROR_ILL_IDX 1
348 #define CMDQ_ERR_CERROR_ABT_IDX 2
350 #define CMDQ_0_OP_SHIFT 0
351 #define CMDQ_0_OP_MASK 0xffUL
352 #define CMDQ_0_SSV (1UL << 11)
354 #define CMDQ_PREFETCH_0_SID_SHIFT 32
355 #define CMDQ_PREFETCH_1_SIZE_SHIFT 0
356 #define CMDQ_PREFETCH_1_ADDR_MASK ~0xfffUL
358 #define CMDQ_CFGI_0_SID_SHIFT 32
359 #define CMDQ_CFGI_0_SID_MASK 0xffffffffUL
360 #define CMDQ_CFGI_1_LEAF (1UL << 0)
361 #define CMDQ_CFGI_1_RANGE_SHIFT 0
362 #define CMDQ_CFGI_1_RANGE_MASK 0x1fUL
364 #define CMDQ_TLBI_0_VMID_SHIFT 32
365 #define CMDQ_TLBI_0_ASID_SHIFT 48
366 #define CMDQ_TLBI_1_LEAF (1UL << 0)
367 #define CMDQ_TLBI_1_VA_MASK ~0xfffUL
368 #define CMDQ_TLBI_1_IPA_MASK 0xfffffffff000UL
370 #define CMDQ_PRI_0_SSID_SHIFT 12
371 #define CMDQ_PRI_0_SSID_MASK 0xfffffUL
372 #define CMDQ_PRI_0_SID_SHIFT 32
373 #define CMDQ_PRI_0_SID_MASK 0xffffffffUL
374 #define CMDQ_PRI_1_GRPID_SHIFT 0
375 #define CMDQ_PRI_1_GRPID_MASK 0x1ffUL
376 #define CMDQ_PRI_1_RESP_SHIFT 12
377 #define CMDQ_PRI_1_RESP_DENY (0UL << CMDQ_PRI_1_RESP_SHIFT)
378 #define CMDQ_PRI_1_RESP_FAIL (1UL << CMDQ_PRI_1_RESP_SHIFT)
379 #define CMDQ_PRI_1_RESP_SUCC (2UL << CMDQ_PRI_1_RESP_SHIFT)
381 #define CMDQ_SYNC_0_CS_SHIFT 12
382 #define CMDQ_SYNC_0_CS_NONE (0UL << CMDQ_SYNC_0_CS_SHIFT)
383 #define CMDQ_SYNC_0_CS_SEV (2UL << CMDQ_SYNC_0_CS_SHIFT)
386 #define EVTQ_ENT_DWORDS 4
387 #define EVTQ_MAX_SZ_SHIFT 7
389 #define EVTQ_0_ID_SHIFT 0
390 #define EVTQ_0_ID_MASK 0xffUL
393 #define PRIQ_ENT_DWORDS 2
394 #define PRIQ_MAX_SZ_SHIFT 8
396 #define PRIQ_0_SID_SHIFT 0
397 #define PRIQ_0_SID_MASK 0xffffffffUL
398 #define PRIQ_0_SSID_SHIFT 32
399 #define PRIQ_0_SSID_MASK 0xfffffUL
400 #define PRIQ_0_PERM_PRIV (1UL << 58)
401 #define PRIQ_0_PERM_EXEC (1UL << 59)
402 #define PRIQ_0_PERM_READ (1UL << 60)
403 #define PRIQ_0_PERM_WRITE (1UL << 61)
404 #define PRIQ_0_PRG_LAST (1UL << 62)
405 #define PRIQ_0_SSID_V (1UL << 63)
407 #define PRIQ_1_PRG_IDX_SHIFT 0
408 #define PRIQ_1_PRG_IDX_MASK 0x1ffUL
409 #define PRIQ_1_ADDR_SHIFT 12
410 #define PRIQ_1_ADDR_MASK 0xfffffffffffffUL
412 /* High-level queue structures */
413 #define ARM_SMMU_POLL_TIMEOUT_US 100
415 #define MSI_IOVA_BASE 0x8000000
416 #define MSI_IOVA_LENGTH 0x100000
418 static bool disable_bypass
;
419 module_param_named(disable_bypass
, disable_bypass
, bool, S_IRUGO
);
420 MODULE_PARM_DESC(disable_bypass
,
421 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
429 enum arm_smmu_msi_index
{
436 static phys_addr_t arm_smmu_msi_cfg
[ARM_SMMU_MAX_MSIS
][3] = {
438 ARM_SMMU_EVTQ_IRQ_CFG0
,
439 ARM_SMMU_EVTQ_IRQ_CFG1
,
440 ARM_SMMU_EVTQ_IRQ_CFG2
,
442 [GERROR_MSI_INDEX
] = {
443 ARM_SMMU_GERROR_IRQ_CFG0
,
444 ARM_SMMU_GERROR_IRQ_CFG1
,
445 ARM_SMMU_GERROR_IRQ_CFG2
,
448 ARM_SMMU_PRIQ_IRQ_CFG0
,
449 ARM_SMMU_PRIQ_IRQ_CFG1
,
450 ARM_SMMU_PRIQ_IRQ_CFG2
,
454 struct arm_smmu_cmdq_ent
{
457 bool substream_valid
;
459 /* Command-specific fields */
461 #define CMDQ_OP_PREFETCH_CFG 0x1
468 #define CMDQ_OP_CFGI_STE 0x3
469 #define CMDQ_OP_CFGI_ALL 0x4
478 #define CMDQ_OP_TLBI_NH_ASID 0x11
479 #define CMDQ_OP_TLBI_NH_VA 0x12
480 #define CMDQ_OP_TLBI_EL2_ALL 0x20
481 #define CMDQ_OP_TLBI_S12_VMALL 0x28
482 #define CMDQ_OP_TLBI_S2_IPA 0x2a
483 #define CMDQ_OP_TLBI_NSNH_ALL 0x30
491 #define CMDQ_OP_PRI_RESP 0x41
499 #define CMDQ_OP_CMD_SYNC 0x46
503 struct arm_smmu_queue
{
504 int irq
; /* Wired interrupt */
515 u32 __iomem
*prod_reg
;
516 u32 __iomem
*cons_reg
;
519 struct arm_smmu_cmdq
{
520 struct arm_smmu_queue q
;
524 struct arm_smmu_evtq
{
525 struct arm_smmu_queue q
;
529 struct arm_smmu_priq
{
530 struct arm_smmu_queue q
;
533 /* High-level stream table and context descriptor structures */
534 struct arm_smmu_strtab_l1_desc
{
538 dma_addr_t l2ptr_dma
;
541 struct arm_smmu_s1_cfg
{
543 dma_addr_t cdptr_dma
;
545 struct arm_smmu_ctx_desc
{
553 struct arm_smmu_s2_cfg
{
559 struct arm_smmu_strtab_ent
{
561 * An STE is "assigned" if the master emitting the corresponding SID
562 * is attached to a domain. The behaviour of an unassigned STE is
563 * determined by the disable_bypass parameter, whereas an assigned
564 * STE behaves according to s1_cfg/s2_cfg, which themselves are
565 * configured according to the domain type.
568 struct arm_smmu_s1_cfg
*s1_cfg
;
569 struct arm_smmu_s2_cfg
*s2_cfg
;
572 struct arm_smmu_strtab_cfg
{
574 dma_addr_t strtab_dma
;
575 struct arm_smmu_strtab_l1_desc
*l1_desc
;
576 unsigned int num_l1_ents
;
582 /* An SMMUv3 instance */
583 struct arm_smmu_device
{
587 #define ARM_SMMU_FEAT_2_LVL_STRTAB (1 << 0)
588 #define ARM_SMMU_FEAT_2_LVL_CDTAB (1 << 1)
589 #define ARM_SMMU_FEAT_TT_LE (1 << 2)
590 #define ARM_SMMU_FEAT_TT_BE (1 << 3)
591 #define ARM_SMMU_FEAT_PRI (1 << 4)
592 #define ARM_SMMU_FEAT_ATS (1 << 5)
593 #define ARM_SMMU_FEAT_SEV (1 << 6)
594 #define ARM_SMMU_FEAT_MSI (1 << 7)
595 #define ARM_SMMU_FEAT_COHERENCY (1 << 8)
596 #define ARM_SMMU_FEAT_TRANS_S1 (1 << 9)
597 #define ARM_SMMU_FEAT_TRANS_S2 (1 << 10)
598 #define ARM_SMMU_FEAT_STALLS (1 << 11)
599 #define ARM_SMMU_FEAT_HYP (1 << 12)
602 #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
605 struct arm_smmu_cmdq cmdq
;
606 struct arm_smmu_evtq evtq
;
607 struct arm_smmu_priq priq
;
611 unsigned long ias
; /* IPA */
612 unsigned long oas
; /* PA */
613 unsigned long pgsize_bitmap
;
615 #define ARM_SMMU_MAX_ASIDS (1 << 16)
616 unsigned int asid_bits
;
617 DECLARE_BITMAP(asid_map
, ARM_SMMU_MAX_ASIDS
);
619 #define ARM_SMMU_MAX_VMIDS (1 << 16)
620 unsigned int vmid_bits
;
621 DECLARE_BITMAP(vmid_map
, ARM_SMMU_MAX_VMIDS
);
623 unsigned int ssid_bits
;
624 unsigned int sid_bits
;
626 struct arm_smmu_strtab_cfg strtab_cfg
;
628 /* IOMMU core code handle */
629 struct iommu_device iommu
;
632 /* SMMU private data for each master */
633 struct arm_smmu_master_data
{
634 struct arm_smmu_device
*smmu
;
635 struct arm_smmu_strtab_ent ste
;
638 /* SMMU private data for an IOMMU domain */
639 enum arm_smmu_domain_stage
{
640 ARM_SMMU_DOMAIN_S1
= 0,
642 ARM_SMMU_DOMAIN_NESTED
,
643 ARM_SMMU_DOMAIN_BYPASS
,
646 struct arm_smmu_domain
{
647 struct arm_smmu_device
*smmu
;
648 struct mutex init_mutex
; /* Protects smmu pointer */
650 struct io_pgtable_ops
*pgtbl_ops
;
651 spinlock_t pgtbl_lock
;
653 enum arm_smmu_domain_stage stage
;
655 struct arm_smmu_s1_cfg s1_cfg
;
656 struct arm_smmu_s2_cfg s2_cfg
;
659 struct iommu_domain domain
;
662 struct arm_smmu_option_prop
{
667 static struct arm_smmu_option_prop arm_smmu_options
[] = {
668 { ARM_SMMU_OPT_SKIP_PREFETCH
, "hisilicon,broken-prefetch-cmd" },
672 static struct arm_smmu_domain
*to_smmu_domain(struct iommu_domain
*dom
)
674 return container_of(dom
, struct arm_smmu_domain
, domain
);
677 static void parse_driver_options(struct arm_smmu_device
*smmu
)
682 if (of_property_read_bool(smmu
->dev
->of_node
,
683 arm_smmu_options
[i
].prop
)) {
684 smmu
->options
|= arm_smmu_options
[i
].opt
;
685 dev_notice(smmu
->dev
, "option %s\n",
686 arm_smmu_options
[i
].prop
);
688 } while (arm_smmu_options
[++i
].opt
);
691 /* Low-level queue manipulation functions */
692 static bool queue_full(struct arm_smmu_queue
*q
)
694 return Q_IDX(q
, q
->prod
) == Q_IDX(q
, q
->cons
) &&
695 Q_WRP(q
, q
->prod
) != Q_WRP(q
, q
->cons
);
698 static bool queue_empty(struct arm_smmu_queue
*q
)
700 return Q_IDX(q
, q
->prod
) == Q_IDX(q
, q
->cons
) &&
701 Q_WRP(q
, q
->prod
) == Q_WRP(q
, q
->cons
);
704 static void queue_sync_cons(struct arm_smmu_queue
*q
)
706 q
->cons
= readl_relaxed(q
->cons_reg
);
709 static void queue_inc_cons(struct arm_smmu_queue
*q
)
711 u32 cons
= (Q_WRP(q
, q
->cons
) | Q_IDX(q
, q
->cons
)) + 1;
713 q
->cons
= Q_OVF(q
, q
->cons
) | Q_WRP(q
, cons
) | Q_IDX(q
, cons
);
714 writel(q
->cons
, q
->cons_reg
);
717 static int queue_sync_prod(struct arm_smmu_queue
*q
)
720 u32 prod
= readl_relaxed(q
->prod_reg
);
722 if (Q_OVF(q
, prod
) != Q_OVF(q
, q
->prod
))
729 static void queue_inc_prod(struct arm_smmu_queue
*q
)
731 u32 prod
= (Q_WRP(q
, q
->prod
) | Q_IDX(q
, q
->prod
)) + 1;
733 q
->prod
= Q_OVF(q
, q
->prod
) | Q_WRP(q
, prod
) | Q_IDX(q
, prod
);
734 writel(q
->prod
, q
->prod_reg
);
738 * Wait for the SMMU to consume items. If drain is true, wait until the queue
739 * is empty. Otherwise, wait until there is at least one free slot.
741 static int queue_poll_cons(struct arm_smmu_queue
*q
, bool drain
, bool wfe
)
743 ktime_t timeout
= ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US
);
745 while (queue_sync_cons(q
), (drain
? !queue_empty(q
) : queue_full(q
))) {
746 if (ktime_compare(ktime_get(), timeout
) > 0)
760 static void queue_write(__le64
*dst
, u64
*src
, size_t n_dwords
)
764 for (i
= 0; i
< n_dwords
; ++i
)
765 *dst
++ = cpu_to_le64(*src
++);
768 static int queue_insert_raw(struct arm_smmu_queue
*q
, u64
*ent
)
773 queue_write(Q_ENT(q
, q
->prod
), ent
, q
->ent_dwords
);
778 static void queue_read(__le64
*dst
, u64
*src
, size_t n_dwords
)
782 for (i
= 0; i
< n_dwords
; ++i
)
783 *dst
++ = le64_to_cpu(*src
++);
786 static int queue_remove_raw(struct arm_smmu_queue
*q
, u64
*ent
)
791 queue_read(ent
, Q_ENT(q
, q
->cons
), q
->ent_dwords
);
796 /* High-level queue accessors */
797 static int arm_smmu_cmdq_build_cmd(u64
*cmd
, struct arm_smmu_cmdq_ent
*ent
)
799 memset(cmd
, 0, CMDQ_ENT_DWORDS
<< 3);
800 cmd
[0] |= (ent
->opcode
& CMDQ_0_OP_MASK
) << CMDQ_0_OP_SHIFT
;
802 switch (ent
->opcode
) {
803 case CMDQ_OP_TLBI_EL2_ALL
:
804 case CMDQ_OP_TLBI_NSNH_ALL
:
806 case CMDQ_OP_PREFETCH_CFG
:
807 cmd
[0] |= (u64
)ent
->prefetch
.sid
<< CMDQ_PREFETCH_0_SID_SHIFT
;
808 cmd
[1] |= ent
->prefetch
.size
<< CMDQ_PREFETCH_1_SIZE_SHIFT
;
809 cmd
[1] |= ent
->prefetch
.addr
& CMDQ_PREFETCH_1_ADDR_MASK
;
811 case CMDQ_OP_CFGI_STE
:
812 cmd
[0] |= (u64
)ent
->cfgi
.sid
<< CMDQ_CFGI_0_SID_SHIFT
;
813 cmd
[1] |= ent
->cfgi
.leaf
? CMDQ_CFGI_1_LEAF
: 0;
815 case CMDQ_OP_CFGI_ALL
:
816 /* Cover the entire SID range */
817 cmd
[1] |= CMDQ_CFGI_1_RANGE_MASK
<< CMDQ_CFGI_1_RANGE_SHIFT
;
819 case CMDQ_OP_TLBI_NH_VA
:
820 cmd
[0] |= (u64
)ent
->tlbi
.asid
<< CMDQ_TLBI_0_ASID_SHIFT
;
821 cmd
[1] |= ent
->tlbi
.leaf
? CMDQ_TLBI_1_LEAF
: 0;
822 cmd
[1] |= ent
->tlbi
.addr
& CMDQ_TLBI_1_VA_MASK
;
824 case CMDQ_OP_TLBI_S2_IPA
:
825 cmd
[0] |= (u64
)ent
->tlbi
.vmid
<< CMDQ_TLBI_0_VMID_SHIFT
;
826 cmd
[1] |= ent
->tlbi
.leaf
? CMDQ_TLBI_1_LEAF
: 0;
827 cmd
[1] |= ent
->tlbi
.addr
& CMDQ_TLBI_1_IPA_MASK
;
829 case CMDQ_OP_TLBI_NH_ASID
:
830 cmd
[0] |= (u64
)ent
->tlbi
.asid
<< CMDQ_TLBI_0_ASID_SHIFT
;
832 case CMDQ_OP_TLBI_S12_VMALL
:
833 cmd
[0] |= (u64
)ent
->tlbi
.vmid
<< CMDQ_TLBI_0_VMID_SHIFT
;
835 case CMDQ_OP_PRI_RESP
:
836 cmd
[0] |= ent
->substream_valid
? CMDQ_0_SSV
: 0;
837 cmd
[0] |= ent
->pri
.ssid
<< CMDQ_PRI_0_SSID_SHIFT
;
838 cmd
[0] |= (u64
)ent
->pri
.sid
<< CMDQ_PRI_0_SID_SHIFT
;
839 cmd
[1] |= ent
->pri
.grpid
<< CMDQ_PRI_1_GRPID_SHIFT
;
840 switch (ent
->pri
.resp
) {
842 cmd
[1] |= CMDQ_PRI_1_RESP_DENY
;
845 cmd
[1] |= CMDQ_PRI_1_RESP_FAIL
;
848 cmd
[1] |= CMDQ_PRI_1_RESP_SUCC
;
854 case CMDQ_OP_CMD_SYNC
:
855 cmd
[0] |= CMDQ_SYNC_0_CS_SEV
;
864 static void arm_smmu_cmdq_skip_err(struct arm_smmu_device
*smmu
)
866 static const char *cerror_str
[] = {
867 [CMDQ_ERR_CERROR_NONE_IDX
] = "No error",
868 [CMDQ_ERR_CERROR_ILL_IDX
] = "Illegal command",
869 [CMDQ_ERR_CERROR_ABT_IDX
] = "Abort on command fetch",
873 u64 cmd
[CMDQ_ENT_DWORDS
];
874 struct arm_smmu_queue
*q
= &smmu
->cmdq
.q
;
875 u32 cons
= readl_relaxed(q
->cons_reg
);
876 u32 idx
= cons
>> CMDQ_ERR_SHIFT
& CMDQ_ERR_MASK
;
877 struct arm_smmu_cmdq_ent cmd_sync
= {
878 .opcode
= CMDQ_OP_CMD_SYNC
,
881 dev_err(smmu
->dev
, "CMDQ error (cons 0x%08x): %s\n", cons
,
882 idx
< ARRAY_SIZE(cerror_str
) ? cerror_str
[idx
] : "Unknown");
885 case CMDQ_ERR_CERROR_ABT_IDX
:
886 dev_err(smmu
->dev
, "retrying command fetch\n");
887 case CMDQ_ERR_CERROR_NONE_IDX
:
889 case CMDQ_ERR_CERROR_ILL_IDX
:
896 * We may have concurrent producers, so we need to be careful
897 * not to touch any of the shadow cmdq state.
899 queue_read(cmd
, Q_ENT(q
, cons
), q
->ent_dwords
);
900 dev_err(smmu
->dev
, "skipping command in error state:\n");
901 for (i
= 0; i
< ARRAY_SIZE(cmd
); ++i
)
902 dev_err(smmu
->dev
, "\t0x%016llx\n", (unsigned long long)cmd
[i
]);
904 /* Convert the erroneous command into a CMD_SYNC */
905 if (arm_smmu_cmdq_build_cmd(cmd
, &cmd_sync
)) {
906 dev_err(smmu
->dev
, "failed to convert to CMD_SYNC\n");
910 queue_write(Q_ENT(q
, cons
), cmd
, q
->ent_dwords
);
913 static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device
*smmu
,
914 struct arm_smmu_cmdq_ent
*ent
)
916 u64 cmd
[CMDQ_ENT_DWORDS
];
918 bool wfe
= !!(smmu
->features
& ARM_SMMU_FEAT_SEV
);
919 struct arm_smmu_queue
*q
= &smmu
->cmdq
.q
;
921 if (arm_smmu_cmdq_build_cmd(cmd
, ent
)) {
922 dev_warn(smmu
->dev
, "ignoring unknown CMDQ opcode 0x%x\n",
927 spin_lock_irqsave(&smmu
->cmdq
.lock
, flags
);
928 while (queue_insert_raw(q
, cmd
) == -ENOSPC
) {
929 if (queue_poll_cons(q
, false, wfe
))
930 dev_err_ratelimited(smmu
->dev
, "CMDQ timeout\n");
933 if (ent
->opcode
== CMDQ_OP_CMD_SYNC
&& queue_poll_cons(q
, true, wfe
))
934 dev_err_ratelimited(smmu
->dev
, "CMD_SYNC timeout\n");
935 spin_unlock_irqrestore(&smmu
->cmdq
.lock
, flags
);
938 /* Context descriptor manipulation functions */
939 static u64
arm_smmu_cpu_tcr_to_cd(u64 tcr
)
943 /* Repack the TCR. Just care about TTBR0 for now */
944 val
|= ARM_SMMU_TCR2CD(tcr
, T0SZ
);
945 val
|= ARM_SMMU_TCR2CD(tcr
, TG0
);
946 val
|= ARM_SMMU_TCR2CD(tcr
, IRGN0
);
947 val
|= ARM_SMMU_TCR2CD(tcr
, ORGN0
);
948 val
|= ARM_SMMU_TCR2CD(tcr
, SH0
);
949 val
|= ARM_SMMU_TCR2CD(tcr
, EPD0
);
950 val
|= ARM_SMMU_TCR2CD(tcr
, EPD1
);
951 val
|= ARM_SMMU_TCR2CD(tcr
, IPS
);
952 val
|= ARM_SMMU_TCR2CD(tcr
, TBI0
);
957 static void arm_smmu_write_ctx_desc(struct arm_smmu_device
*smmu
,
958 struct arm_smmu_s1_cfg
*cfg
)
963 * We don't need to issue any invalidation here, as we'll invalidate
964 * the STE when installing the new entry anyway.
966 val
= arm_smmu_cpu_tcr_to_cd(cfg
->cd
.tcr
) |
970 CTXDESC_CD_0_R
| CTXDESC_CD_0_A
| CTXDESC_CD_0_ASET_PRIVATE
|
971 CTXDESC_CD_0_AA64
| (u64
)cfg
->cd
.asid
<< CTXDESC_CD_0_ASID_SHIFT
|
973 cfg
->cdptr
[0] = cpu_to_le64(val
);
975 val
= cfg
->cd
.ttbr
& CTXDESC_CD_1_TTB0_MASK
<< CTXDESC_CD_1_TTB0_SHIFT
;
976 cfg
->cdptr
[1] = cpu_to_le64(val
);
978 cfg
->cdptr
[3] = cpu_to_le64(cfg
->cd
.mair
<< CTXDESC_CD_3_MAIR_SHIFT
);
981 /* Stream table manipulation functions */
983 arm_smmu_write_strtab_l1_desc(__le64
*dst
, struct arm_smmu_strtab_l1_desc
*desc
)
987 val
|= (desc
->span
& STRTAB_L1_DESC_SPAN_MASK
)
988 << STRTAB_L1_DESC_SPAN_SHIFT
;
989 val
|= desc
->l2ptr_dma
&
990 STRTAB_L1_DESC_L2PTR_MASK
<< STRTAB_L1_DESC_L2PTR_SHIFT
;
992 *dst
= cpu_to_le64(val
);
995 static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device
*smmu
, u32 sid
)
997 struct arm_smmu_cmdq_ent cmd
= {
998 .opcode
= CMDQ_OP_CFGI_STE
,
1005 arm_smmu_cmdq_issue_cmd(smmu
, &cmd
);
1006 cmd
.opcode
= CMDQ_OP_CMD_SYNC
;
1007 arm_smmu_cmdq_issue_cmd(smmu
, &cmd
);
1010 static void arm_smmu_write_strtab_ent(struct arm_smmu_device
*smmu
, u32 sid
,
1011 __le64
*dst
, struct arm_smmu_strtab_ent
*ste
)
1014 * This is hideously complicated, but we only really care about
1015 * three cases at the moment:
1017 * 1. Invalid (all zero) -> bypass/fault (init)
1018 * 2. Bypass/fault -> translation/bypass (attach)
1019 * 3. Translation/bypass -> bypass/fault (detach)
1021 * Given that we can't update the STE atomically and the SMMU
1022 * doesn't read the thing in a defined order, that leaves us
1023 * with the following maintenance requirements:
1025 * 1. Update Config, return (init time STEs aren't live)
1026 * 2. Write everything apart from dword 0, sync, write dword 0, sync
1027 * 3. Update Config, sync
1029 u64 val
= le64_to_cpu(dst
[0]);
1030 bool ste_live
= false;
1031 struct arm_smmu_cmdq_ent prefetch_cmd
= {
1032 .opcode
= CMDQ_OP_PREFETCH_CFG
,
1038 if (val
& STRTAB_STE_0_V
) {
1041 cfg
= val
& STRTAB_STE_0_CFG_MASK
<< STRTAB_STE_0_CFG_SHIFT
;
1043 case STRTAB_STE_0_CFG_BYPASS
:
1045 case STRTAB_STE_0_CFG_S1_TRANS
:
1046 case STRTAB_STE_0_CFG_S2_TRANS
:
1049 case STRTAB_STE_0_CFG_ABORT
:
1053 BUG(); /* STE corruption */
1057 /* Nuke the existing STE_0 value, as we're going to rewrite it */
1058 val
= STRTAB_STE_0_V
;
1061 if (!ste
->assigned
|| !(ste
->s1_cfg
|| ste
->s2_cfg
)) {
1062 if (!ste
->assigned
&& disable_bypass
)
1063 val
|= STRTAB_STE_0_CFG_ABORT
;
1065 val
|= STRTAB_STE_0_CFG_BYPASS
;
1067 dst
[0] = cpu_to_le64(val
);
1068 dst
[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
1069 << STRTAB_STE_1_SHCFG_SHIFT
);
1070 dst
[2] = 0; /* Nuke the VMID */
1072 arm_smmu_sync_ste_for_sid(smmu
, sid
);
1078 dst
[1] = cpu_to_le64(
1079 STRTAB_STE_1_S1C_CACHE_WBRA
1080 << STRTAB_STE_1_S1CIR_SHIFT
|
1081 STRTAB_STE_1_S1C_CACHE_WBRA
1082 << STRTAB_STE_1_S1COR_SHIFT
|
1083 STRTAB_STE_1_S1C_SH_ISH
<< STRTAB_STE_1_S1CSH_SHIFT
|
1084 #ifdef CONFIG_PCI_ATS
1085 STRTAB_STE_1_EATS_TRANS
<< STRTAB_STE_1_EATS_SHIFT
|
1087 STRTAB_STE_1_STRW_NSEL1
<< STRTAB_STE_1_STRW_SHIFT
|
1088 STRTAB_STE_1_PRIVCFG_UNPRIV
<<
1089 STRTAB_STE_1_PRIVCFG_SHIFT
);
1091 if (smmu
->features
& ARM_SMMU_FEAT_STALLS
)
1092 dst
[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD
);
1094 val
|= (ste
->s1_cfg
->cdptr_dma
& STRTAB_STE_0_S1CTXPTR_MASK
1095 << STRTAB_STE_0_S1CTXPTR_SHIFT
) |
1096 STRTAB_STE_0_CFG_S1_TRANS
;
1101 dst
[2] = cpu_to_le64(
1102 ste
->s2_cfg
->vmid
<< STRTAB_STE_2_S2VMID_SHIFT
|
1103 (ste
->s2_cfg
->vtcr
& STRTAB_STE_2_VTCR_MASK
)
1104 << STRTAB_STE_2_VTCR_SHIFT
|
1106 STRTAB_STE_2_S2ENDI
|
1108 STRTAB_STE_2_S2PTW
| STRTAB_STE_2_S2AA64
|
1111 dst
[3] = cpu_to_le64(ste
->s2_cfg
->vttbr
&
1112 STRTAB_STE_3_S2TTB_MASK
<< STRTAB_STE_3_S2TTB_SHIFT
);
1114 val
|= STRTAB_STE_0_CFG_S2_TRANS
;
1117 arm_smmu_sync_ste_for_sid(smmu
, sid
);
1118 dst
[0] = cpu_to_le64(val
);
1119 arm_smmu_sync_ste_for_sid(smmu
, sid
);
1121 /* It's likely that we'll want to use the new STE soon */
1122 if (!(smmu
->options
& ARM_SMMU_OPT_SKIP_PREFETCH
))
1123 arm_smmu_cmdq_issue_cmd(smmu
, &prefetch_cmd
);
1126 static void arm_smmu_init_bypass_stes(u64
*strtab
, unsigned int nent
)
1129 struct arm_smmu_strtab_ent ste
= { .assigned
= false };
1131 for (i
= 0; i
< nent
; ++i
) {
1132 arm_smmu_write_strtab_ent(NULL
, -1, strtab
, &ste
);
1133 strtab
+= STRTAB_STE_DWORDS
;
1137 static int arm_smmu_init_l2_strtab(struct arm_smmu_device
*smmu
, u32 sid
)
1141 struct arm_smmu_strtab_cfg
*cfg
= &smmu
->strtab_cfg
;
1142 struct arm_smmu_strtab_l1_desc
*desc
= &cfg
->l1_desc
[sid
>> STRTAB_SPLIT
];
1147 size
= 1 << (STRTAB_SPLIT
+ ilog2(STRTAB_STE_DWORDS
) + 3);
1148 strtab
= &cfg
->strtab
[(sid
>> STRTAB_SPLIT
) * STRTAB_L1_DESC_DWORDS
];
1150 desc
->span
= STRTAB_SPLIT
+ 1;
1151 desc
->l2ptr
= dmam_alloc_coherent(smmu
->dev
, size
, &desc
->l2ptr_dma
,
1152 GFP_KERNEL
| __GFP_ZERO
);
1155 "failed to allocate l2 stream table for SID %u\n",
1160 arm_smmu_init_bypass_stes(desc
->l2ptr
, 1 << STRTAB_SPLIT
);
1161 arm_smmu_write_strtab_l1_desc(strtab
, desc
);
1165 /* IRQ and event handlers */
1166 static irqreturn_t
arm_smmu_evtq_thread(int irq
, void *dev
)
1169 struct arm_smmu_device
*smmu
= dev
;
1170 struct arm_smmu_queue
*q
= &smmu
->evtq
.q
;
1171 u64 evt
[EVTQ_ENT_DWORDS
];
1174 while (!queue_remove_raw(q
, evt
)) {
1175 u8 id
= evt
[0] >> EVTQ_0_ID_SHIFT
& EVTQ_0_ID_MASK
;
1177 dev_info(smmu
->dev
, "event 0x%02x received:\n", id
);
1178 for (i
= 0; i
< ARRAY_SIZE(evt
); ++i
)
1179 dev_info(smmu
->dev
, "\t0x%016llx\n",
1180 (unsigned long long)evt
[i
]);
1185 * Not much we can do on overflow, so scream and pretend we're
1188 if (queue_sync_prod(q
) == -EOVERFLOW
)
1189 dev_err(smmu
->dev
, "EVTQ overflow detected -- events lost\n");
1190 } while (!queue_empty(q
));
1192 /* Sync our overflow flag, as we believe we're up to speed */
1193 q
->cons
= Q_OVF(q
, q
->prod
) | Q_WRP(q
, q
->cons
) | Q_IDX(q
, q
->cons
);
1197 static void arm_smmu_handle_ppr(struct arm_smmu_device
*smmu
, u64
*evt
)
1203 sid
= evt
[0] >> PRIQ_0_SID_SHIFT
& PRIQ_0_SID_MASK
;
1204 ssv
= evt
[0] & PRIQ_0_SSID_V
;
1205 ssid
= ssv
? evt
[0] >> PRIQ_0_SSID_SHIFT
& PRIQ_0_SSID_MASK
: 0;
1206 last
= evt
[0] & PRIQ_0_PRG_LAST
;
1207 grpid
= evt
[1] >> PRIQ_1_PRG_IDX_SHIFT
& PRIQ_1_PRG_IDX_MASK
;
1209 dev_info(smmu
->dev
, "unexpected PRI request received:\n");
1211 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
1212 sid
, ssid
, grpid
, last
? "L" : "",
1213 evt
[0] & PRIQ_0_PERM_PRIV
? "" : "un",
1214 evt
[0] & PRIQ_0_PERM_READ
? "R" : "",
1215 evt
[0] & PRIQ_0_PERM_WRITE
? "W" : "",
1216 evt
[0] & PRIQ_0_PERM_EXEC
? "X" : "",
1217 evt
[1] & PRIQ_1_ADDR_MASK
<< PRIQ_1_ADDR_SHIFT
);
1220 struct arm_smmu_cmdq_ent cmd
= {
1221 .opcode
= CMDQ_OP_PRI_RESP
,
1222 .substream_valid
= ssv
,
1227 .resp
= PRI_RESP_DENY
,
1231 arm_smmu_cmdq_issue_cmd(smmu
, &cmd
);
1235 static irqreturn_t
arm_smmu_priq_thread(int irq
, void *dev
)
1237 struct arm_smmu_device
*smmu
= dev
;
1238 struct arm_smmu_queue
*q
= &smmu
->priq
.q
;
1239 u64 evt
[PRIQ_ENT_DWORDS
];
1242 while (!queue_remove_raw(q
, evt
))
1243 arm_smmu_handle_ppr(smmu
, evt
);
1245 if (queue_sync_prod(q
) == -EOVERFLOW
)
1246 dev_err(smmu
->dev
, "PRIQ overflow detected -- requests lost\n");
1247 } while (!queue_empty(q
));
1249 /* Sync our overflow flag, as we believe we're up to speed */
1250 q
->cons
= Q_OVF(q
, q
->prod
) | Q_WRP(q
, q
->cons
) | Q_IDX(q
, q
->cons
);
1254 static irqreturn_t
arm_smmu_cmdq_sync_handler(int irq
, void *dev
)
1256 /* We don't actually use CMD_SYNC interrupts for anything */
1260 static int arm_smmu_device_disable(struct arm_smmu_device
*smmu
);
1262 static irqreturn_t
arm_smmu_gerror_handler(int irq
, void *dev
)
1264 u32 gerror
, gerrorn
, active
;
1265 struct arm_smmu_device
*smmu
= dev
;
1267 gerror
= readl_relaxed(smmu
->base
+ ARM_SMMU_GERROR
);
1268 gerrorn
= readl_relaxed(smmu
->base
+ ARM_SMMU_GERRORN
);
1270 active
= gerror
^ gerrorn
;
1271 if (!(active
& GERROR_ERR_MASK
))
1272 return IRQ_NONE
; /* No errors pending */
1275 "unexpected global error reported (0x%08x), this could be serious\n",
1278 if (active
& GERROR_SFM_ERR
) {
1279 dev_err(smmu
->dev
, "device has entered Service Failure Mode!\n");
1280 arm_smmu_device_disable(smmu
);
1283 if (active
& GERROR_MSI_GERROR_ABT_ERR
)
1284 dev_warn(smmu
->dev
, "GERROR MSI write aborted\n");
1286 if (active
& GERROR_MSI_PRIQ_ABT_ERR
)
1287 dev_warn(smmu
->dev
, "PRIQ MSI write aborted\n");
1289 if (active
& GERROR_MSI_EVTQ_ABT_ERR
)
1290 dev_warn(smmu
->dev
, "EVTQ MSI write aborted\n");
1292 if (active
& GERROR_MSI_CMDQ_ABT_ERR
) {
1293 dev_warn(smmu
->dev
, "CMDQ MSI write aborted\n");
1294 arm_smmu_cmdq_sync_handler(irq
, smmu
->dev
);
1297 if (active
& GERROR_PRIQ_ABT_ERR
)
1298 dev_err(smmu
->dev
, "PRIQ write aborted -- events may have been lost\n");
1300 if (active
& GERROR_EVTQ_ABT_ERR
)
1301 dev_err(smmu
->dev
, "EVTQ write aborted -- events may have been lost\n");
1303 if (active
& GERROR_CMDQ_ERR
)
1304 arm_smmu_cmdq_skip_err(smmu
);
1306 writel(gerror
, smmu
->base
+ ARM_SMMU_GERRORN
);
1310 /* IO_PGTABLE API */
1311 static void __arm_smmu_tlb_sync(struct arm_smmu_device
*smmu
)
1313 struct arm_smmu_cmdq_ent cmd
;
1315 cmd
.opcode
= CMDQ_OP_CMD_SYNC
;
1316 arm_smmu_cmdq_issue_cmd(smmu
, &cmd
);
1319 static void arm_smmu_tlb_sync(void *cookie
)
1321 struct arm_smmu_domain
*smmu_domain
= cookie
;
1322 __arm_smmu_tlb_sync(smmu_domain
->smmu
);
1325 static void arm_smmu_tlb_inv_context(void *cookie
)
1327 struct arm_smmu_domain
*smmu_domain
= cookie
;
1328 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1329 struct arm_smmu_cmdq_ent cmd
;
1331 if (smmu_domain
->stage
== ARM_SMMU_DOMAIN_S1
) {
1332 cmd
.opcode
= CMDQ_OP_TLBI_NH_ASID
;
1333 cmd
.tlbi
.asid
= smmu_domain
->s1_cfg
.cd
.asid
;
1336 cmd
.opcode
= CMDQ_OP_TLBI_S12_VMALL
;
1337 cmd
.tlbi
.vmid
= smmu_domain
->s2_cfg
.vmid
;
1340 arm_smmu_cmdq_issue_cmd(smmu
, &cmd
);
1341 __arm_smmu_tlb_sync(smmu
);
1344 static void arm_smmu_tlb_inv_range_nosync(unsigned long iova
, size_t size
,
1345 size_t granule
, bool leaf
, void *cookie
)
1347 struct arm_smmu_domain
*smmu_domain
= cookie
;
1348 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1349 struct arm_smmu_cmdq_ent cmd
= {
1356 if (smmu_domain
->stage
== ARM_SMMU_DOMAIN_S1
) {
1357 cmd
.opcode
= CMDQ_OP_TLBI_NH_VA
;
1358 cmd
.tlbi
.asid
= smmu_domain
->s1_cfg
.cd
.asid
;
1360 cmd
.opcode
= CMDQ_OP_TLBI_S2_IPA
;
1361 cmd
.tlbi
.vmid
= smmu_domain
->s2_cfg
.vmid
;
1365 arm_smmu_cmdq_issue_cmd(smmu
, &cmd
);
1366 cmd
.tlbi
.addr
+= granule
;
1367 } while (size
-= granule
);
1370 static const struct iommu_gather_ops arm_smmu_gather_ops
= {
1371 .tlb_flush_all
= arm_smmu_tlb_inv_context
,
1372 .tlb_add_flush
= arm_smmu_tlb_inv_range_nosync
,
1373 .tlb_sync
= arm_smmu_tlb_sync
,
1377 static bool arm_smmu_capable(enum iommu_cap cap
)
1380 case IOMMU_CAP_CACHE_COHERENCY
:
1382 case IOMMU_CAP_NOEXEC
:
1389 static struct iommu_domain
*arm_smmu_domain_alloc(unsigned type
)
1391 struct arm_smmu_domain
*smmu_domain
;
1393 if (type
!= IOMMU_DOMAIN_UNMANAGED
&&
1394 type
!= IOMMU_DOMAIN_DMA
&&
1395 type
!= IOMMU_DOMAIN_IDENTITY
)
1399 * Allocate the domain and initialise some of its data structures.
1400 * We can't really do anything meaningful until we've added a
1403 smmu_domain
= kzalloc(sizeof(*smmu_domain
), GFP_KERNEL
);
1407 if (type
== IOMMU_DOMAIN_DMA
&&
1408 iommu_get_dma_cookie(&smmu_domain
->domain
)) {
1413 mutex_init(&smmu_domain
->init_mutex
);
1414 spin_lock_init(&smmu_domain
->pgtbl_lock
);
1415 return &smmu_domain
->domain
;
1418 static int arm_smmu_bitmap_alloc(unsigned long *map
, int span
)
1420 int idx
, size
= 1 << span
;
1423 idx
= find_first_zero_bit(map
, size
);
1426 } while (test_and_set_bit(idx
, map
));
1431 static void arm_smmu_bitmap_free(unsigned long *map
, int idx
)
1433 clear_bit(idx
, map
);
1436 static void arm_smmu_domain_free(struct iommu_domain
*domain
)
1438 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1439 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1441 iommu_put_dma_cookie(domain
);
1442 free_io_pgtable_ops(smmu_domain
->pgtbl_ops
);
1444 /* Free the CD and ASID, if we allocated them */
1445 if (smmu_domain
->stage
== ARM_SMMU_DOMAIN_S1
) {
1446 struct arm_smmu_s1_cfg
*cfg
= &smmu_domain
->s1_cfg
;
1449 dmam_free_coherent(smmu_domain
->smmu
->dev
,
1450 CTXDESC_CD_DWORDS
<< 3,
1454 arm_smmu_bitmap_free(smmu
->asid_map
, cfg
->cd
.asid
);
1457 struct arm_smmu_s2_cfg
*cfg
= &smmu_domain
->s2_cfg
;
1459 arm_smmu_bitmap_free(smmu
->vmid_map
, cfg
->vmid
);
1465 static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain
*smmu_domain
,
1466 struct io_pgtable_cfg
*pgtbl_cfg
)
1470 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1471 struct arm_smmu_s1_cfg
*cfg
= &smmu_domain
->s1_cfg
;
1473 asid
= arm_smmu_bitmap_alloc(smmu
->asid_map
, smmu
->asid_bits
);
1477 cfg
->cdptr
= dmam_alloc_coherent(smmu
->dev
, CTXDESC_CD_DWORDS
<< 3,
1479 GFP_KERNEL
| __GFP_ZERO
);
1481 dev_warn(smmu
->dev
, "failed to allocate context descriptor\n");
1486 cfg
->cd
.asid
= (u16
)asid
;
1487 cfg
->cd
.ttbr
= pgtbl_cfg
->arm_lpae_s1_cfg
.ttbr
[0];
1488 cfg
->cd
.tcr
= pgtbl_cfg
->arm_lpae_s1_cfg
.tcr
;
1489 cfg
->cd
.mair
= pgtbl_cfg
->arm_lpae_s1_cfg
.mair
[0];
1493 arm_smmu_bitmap_free(smmu
->asid_map
, asid
);
1497 static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain
*smmu_domain
,
1498 struct io_pgtable_cfg
*pgtbl_cfg
)
1501 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1502 struct arm_smmu_s2_cfg
*cfg
= &smmu_domain
->s2_cfg
;
1504 vmid
= arm_smmu_bitmap_alloc(smmu
->vmid_map
, smmu
->vmid_bits
);
1508 cfg
->vmid
= (u16
)vmid
;
1509 cfg
->vttbr
= pgtbl_cfg
->arm_lpae_s2_cfg
.vttbr
;
1510 cfg
->vtcr
= pgtbl_cfg
->arm_lpae_s2_cfg
.vtcr
;
1514 static int arm_smmu_domain_finalise(struct iommu_domain
*domain
)
1517 unsigned long ias
, oas
;
1518 enum io_pgtable_fmt fmt
;
1519 struct io_pgtable_cfg pgtbl_cfg
;
1520 struct io_pgtable_ops
*pgtbl_ops
;
1521 int (*finalise_stage_fn
)(struct arm_smmu_domain
*,
1522 struct io_pgtable_cfg
*);
1523 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1524 struct arm_smmu_device
*smmu
= smmu_domain
->smmu
;
1526 if (domain
->type
== IOMMU_DOMAIN_IDENTITY
) {
1527 smmu_domain
->stage
= ARM_SMMU_DOMAIN_BYPASS
;
1531 /* Restrict the stage to what we can actually support */
1532 if (!(smmu
->features
& ARM_SMMU_FEAT_TRANS_S1
))
1533 smmu_domain
->stage
= ARM_SMMU_DOMAIN_S2
;
1534 if (!(smmu
->features
& ARM_SMMU_FEAT_TRANS_S2
))
1535 smmu_domain
->stage
= ARM_SMMU_DOMAIN_S1
;
1537 switch (smmu_domain
->stage
) {
1538 case ARM_SMMU_DOMAIN_S1
:
1541 fmt
= ARM_64_LPAE_S1
;
1542 finalise_stage_fn
= arm_smmu_domain_finalise_s1
;
1544 case ARM_SMMU_DOMAIN_NESTED
:
1545 case ARM_SMMU_DOMAIN_S2
:
1548 fmt
= ARM_64_LPAE_S2
;
1549 finalise_stage_fn
= arm_smmu_domain_finalise_s2
;
1555 pgtbl_cfg
= (struct io_pgtable_cfg
) {
1556 .pgsize_bitmap
= smmu
->pgsize_bitmap
,
1559 .tlb
= &arm_smmu_gather_ops
,
1560 .iommu_dev
= smmu
->dev
,
1563 pgtbl_ops
= alloc_io_pgtable_ops(fmt
, &pgtbl_cfg
, smmu_domain
);
1567 domain
->pgsize_bitmap
= pgtbl_cfg
.pgsize_bitmap
;
1568 domain
->geometry
.aperture_end
= (1UL << ias
) - 1;
1569 domain
->geometry
.force_aperture
= true;
1570 smmu_domain
->pgtbl_ops
= pgtbl_ops
;
1572 ret
= finalise_stage_fn(smmu_domain
, &pgtbl_cfg
);
1574 free_io_pgtable_ops(pgtbl_ops
);
1579 static __le64
*arm_smmu_get_step_for_sid(struct arm_smmu_device
*smmu
, u32 sid
)
1582 struct arm_smmu_strtab_cfg
*cfg
= &smmu
->strtab_cfg
;
1584 if (smmu
->features
& ARM_SMMU_FEAT_2_LVL_STRTAB
) {
1585 struct arm_smmu_strtab_l1_desc
*l1_desc
;
1588 /* Two-level walk */
1589 idx
= (sid
>> STRTAB_SPLIT
) * STRTAB_L1_DESC_DWORDS
;
1590 l1_desc
= &cfg
->l1_desc
[idx
];
1591 idx
= (sid
& ((1 << STRTAB_SPLIT
) - 1)) * STRTAB_STE_DWORDS
;
1592 step
= &l1_desc
->l2ptr
[idx
];
1594 /* Simple linear lookup */
1595 step
= &cfg
->strtab
[sid
* STRTAB_STE_DWORDS
];
1601 static void arm_smmu_install_ste_for_dev(struct iommu_fwspec
*fwspec
)
1604 struct arm_smmu_master_data
*master
= fwspec
->iommu_priv
;
1605 struct arm_smmu_device
*smmu
= master
->smmu
;
1607 for (i
= 0; i
< fwspec
->num_ids
; ++i
) {
1608 u32 sid
= fwspec
->ids
[i
];
1609 __le64
*step
= arm_smmu_get_step_for_sid(smmu
, sid
);
1611 arm_smmu_write_strtab_ent(smmu
, sid
, step
, &master
->ste
);
1615 static void arm_smmu_detach_dev(struct device
*dev
)
1617 struct arm_smmu_master_data
*master
= dev
->iommu_fwspec
->iommu_priv
;
1619 master
->ste
.assigned
= false;
1620 arm_smmu_install_ste_for_dev(dev
->iommu_fwspec
);
1623 static int arm_smmu_attach_dev(struct iommu_domain
*domain
, struct device
*dev
)
1626 struct arm_smmu_device
*smmu
;
1627 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1628 struct arm_smmu_master_data
*master
;
1629 struct arm_smmu_strtab_ent
*ste
;
1631 if (!dev
->iommu_fwspec
)
1634 master
= dev
->iommu_fwspec
->iommu_priv
;
1635 smmu
= master
->smmu
;
1638 /* Already attached to a different domain? */
1640 arm_smmu_detach_dev(dev
);
1642 mutex_lock(&smmu_domain
->init_mutex
);
1644 if (!smmu_domain
->smmu
) {
1645 smmu_domain
->smmu
= smmu
;
1646 ret
= arm_smmu_domain_finalise(domain
);
1648 smmu_domain
->smmu
= NULL
;
1651 } else if (smmu_domain
->smmu
!= smmu
) {
1653 "cannot attach to SMMU %s (upstream of %s)\n",
1654 dev_name(smmu_domain
->smmu
->dev
),
1655 dev_name(smmu
->dev
));
1660 ste
->assigned
= true;
1662 if (smmu_domain
->stage
== ARM_SMMU_DOMAIN_BYPASS
) {
1665 } else if (smmu_domain
->stage
== ARM_SMMU_DOMAIN_S1
) {
1666 ste
->s1_cfg
= &smmu_domain
->s1_cfg
;
1668 arm_smmu_write_ctx_desc(smmu
, ste
->s1_cfg
);
1671 ste
->s2_cfg
= &smmu_domain
->s2_cfg
;
1674 arm_smmu_install_ste_for_dev(dev
->iommu_fwspec
);
1676 mutex_unlock(&smmu_domain
->init_mutex
);
1680 static int arm_smmu_map(struct iommu_domain
*domain
, unsigned long iova
,
1681 phys_addr_t paddr
, size_t size
, int prot
)
1684 unsigned long flags
;
1685 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1686 struct io_pgtable_ops
*ops
= smmu_domain
->pgtbl_ops
;
1691 spin_lock_irqsave(&smmu_domain
->pgtbl_lock
, flags
);
1692 ret
= ops
->map(ops
, iova
, paddr
, size
, prot
);
1693 spin_unlock_irqrestore(&smmu_domain
->pgtbl_lock
, flags
);
1698 arm_smmu_unmap(struct iommu_domain
*domain
, unsigned long iova
, size_t size
)
1701 unsigned long flags
;
1702 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1703 struct io_pgtable_ops
*ops
= smmu_domain
->pgtbl_ops
;
1708 spin_lock_irqsave(&smmu_domain
->pgtbl_lock
, flags
);
1709 ret
= ops
->unmap(ops
, iova
, size
);
1710 spin_unlock_irqrestore(&smmu_domain
->pgtbl_lock
, flags
);
1715 arm_smmu_iova_to_phys(struct iommu_domain
*domain
, dma_addr_t iova
)
1718 unsigned long flags
;
1719 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1720 struct io_pgtable_ops
*ops
= smmu_domain
->pgtbl_ops
;
1725 spin_lock_irqsave(&smmu_domain
->pgtbl_lock
, flags
);
1726 ret
= ops
->iova_to_phys(ops
, iova
);
1727 spin_unlock_irqrestore(&smmu_domain
->pgtbl_lock
, flags
);
1732 static struct platform_driver arm_smmu_driver
;
1734 static int arm_smmu_match_node(struct device
*dev
, void *data
)
1736 return dev
->fwnode
== data
;
1740 struct arm_smmu_device
*arm_smmu_get_by_fwnode(struct fwnode_handle
*fwnode
)
1742 struct device
*dev
= driver_find_device(&arm_smmu_driver
.driver
, NULL
,
1743 fwnode
, arm_smmu_match_node
);
1745 return dev
? dev_get_drvdata(dev
) : NULL
;
1748 static bool arm_smmu_sid_in_range(struct arm_smmu_device
*smmu
, u32 sid
)
1750 unsigned long limit
= smmu
->strtab_cfg
.num_l1_ents
;
1752 if (smmu
->features
& ARM_SMMU_FEAT_2_LVL_STRTAB
)
1753 limit
*= 1UL << STRTAB_SPLIT
;
1758 static struct iommu_ops arm_smmu_ops
;
1760 static int arm_smmu_add_device(struct device
*dev
)
1763 struct arm_smmu_device
*smmu
;
1764 struct arm_smmu_master_data
*master
;
1765 struct iommu_fwspec
*fwspec
= dev
->iommu_fwspec
;
1766 struct iommu_group
*group
;
1768 if (!fwspec
|| fwspec
->ops
!= &arm_smmu_ops
)
1771 * We _can_ actually withstand dodgy bus code re-calling add_device()
1772 * without an intervening remove_device()/of_xlate() sequence, but
1773 * we're not going to do so quietly...
1775 if (WARN_ON_ONCE(fwspec
->iommu_priv
)) {
1776 master
= fwspec
->iommu_priv
;
1777 smmu
= master
->smmu
;
1779 smmu
= arm_smmu_get_by_fwnode(fwspec
->iommu_fwnode
);
1782 master
= kzalloc(sizeof(*master
), GFP_KERNEL
);
1786 master
->smmu
= smmu
;
1787 fwspec
->iommu_priv
= master
;
1790 /* Check the SIDs are in range of the SMMU and our stream table */
1791 for (i
= 0; i
< fwspec
->num_ids
; i
++) {
1792 u32 sid
= fwspec
->ids
[i
];
1794 if (!arm_smmu_sid_in_range(smmu
, sid
))
1797 /* Ensure l2 strtab is initialised */
1798 if (smmu
->features
& ARM_SMMU_FEAT_2_LVL_STRTAB
) {
1799 ret
= arm_smmu_init_l2_strtab(smmu
, sid
);
1805 group
= iommu_group_get_for_dev(dev
);
1806 if (!IS_ERR(group
)) {
1807 iommu_group_put(group
);
1808 iommu_device_link(&smmu
->iommu
, dev
);
1811 return PTR_ERR_OR_ZERO(group
);
1814 static void arm_smmu_remove_device(struct device
*dev
)
1816 struct iommu_fwspec
*fwspec
= dev
->iommu_fwspec
;
1817 struct arm_smmu_master_data
*master
;
1818 struct arm_smmu_device
*smmu
;
1820 if (!fwspec
|| fwspec
->ops
!= &arm_smmu_ops
)
1823 master
= fwspec
->iommu_priv
;
1824 smmu
= master
->smmu
;
1825 if (master
&& master
->ste
.assigned
)
1826 arm_smmu_detach_dev(dev
);
1827 iommu_group_remove_device(dev
);
1828 iommu_device_unlink(&smmu
->iommu
, dev
);
1830 iommu_fwspec_free(dev
);
1833 static struct iommu_group
*arm_smmu_device_group(struct device
*dev
)
1835 struct iommu_group
*group
;
1838 * We don't support devices sharing stream IDs other than PCI RID
1839 * aliases, since the necessary ID-to-device lookup becomes rather
1840 * impractical given a potential sparse 32-bit stream ID space.
1842 if (dev_is_pci(dev
))
1843 group
= pci_device_group(dev
);
1845 group
= generic_device_group(dev
);
1850 static int arm_smmu_domain_get_attr(struct iommu_domain
*domain
,
1851 enum iommu_attr attr
, void *data
)
1853 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1855 if (domain
->type
!= IOMMU_DOMAIN_UNMANAGED
)
1859 case DOMAIN_ATTR_NESTING
:
1860 *(int *)data
= (smmu_domain
->stage
== ARM_SMMU_DOMAIN_NESTED
);
1867 static int arm_smmu_domain_set_attr(struct iommu_domain
*domain
,
1868 enum iommu_attr attr
, void *data
)
1871 struct arm_smmu_domain
*smmu_domain
= to_smmu_domain(domain
);
1873 if (domain
->type
!= IOMMU_DOMAIN_UNMANAGED
)
1876 mutex_lock(&smmu_domain
->init_mutex
);
1879 case DOMAIN_ATTR_NESTING
:
1880 if (smmu_domain
->smmu
) {
1886 smmu_domain
->stage
= ARM_SMMU_DOMAIN_NESTED
;
1888 smmu_domain
->stage
= ARM_SMMU_DOMAIN_S1
;
1896 mutex_unlock(&smmu_domain
->init_mutex
);
1900 static int arm_smmu_of_xlate(struct device
*dev
, struct of_phandle_args
*args
)
1902 return iommu_fwspec_add_ids(dev
, args
->args
, 1);
1905 static void arm_smmu_get_resv_regions(struct device
*dev
,
1906 struct list_head
*head
)
1908 struct iommu_resv_region
*region
;
1909 int prot
= IOMMU_WRITE
| IOMMU_NOEXEC
| IOMMU_MMIO
;
1911 region
= iommu_alloc_resv_region(MSI_IOVA_BASE
, MSI_IOVA_LENGTH
,
1912 prot
, IOMMU_RESV_MSI
);
1916 list_add_tail(®ion
->list
, head
);
1919 static void arm_smmu_put_resv_regions(struct device
*dev
,
1920 struct list_head
*head
)
1922 struct iommu_resv_region
*entry
, *next
;
1924 list_for_each_entry_safe(entry
, next
, head
, list
)
1928 static struct iommu_ops arm_smmu_ops
= {
1929 .capable
= arm_smmu_capable
,
1930 .domain_alloc
= arm_smmu_domain_alloc
,
1931 .domain_free
= arm_smmu_domain_free
,
1932 .attach_dev
= arm_smmu_attach_dev
,
1933 .map
= arm_smmu_map
,
1934 .unmap
= arm_smmu_unmap
,
1935 .map_sg
= default_iommu_map_sg
,
1936 .iova_to_phys
= arm_smmu_iova_to_phys
,
1937 .add_device
= arm_smmu_add_device
,
1938 .remove_device
= arm_smmu_remove_device
,
1939 .device_group
= arm_smmu_device_group
,
1940 .domain_get_attr
= arm_smmu_domain_get_attr
,
1941 .domain_set_attr
= arm_smmu_domain_set_attr
,
1942 .of_xlate
= arm_smmu_of_xlate
,
1943 .get_resv_regions
= arm_smmu_get_resv_regions
,
1944 .put_resv_regions
= arm_smmu_put_resv_regions
,
1945 .pgsize_bitmap
= -1UL, /* Restricted during device attach */
1948 /* Probing and initialisation functions */
1949 static int arm_smmu_init_one_queue(struct arm_smmu_device
*smmu
,
1950 struct arm_smmu_queue
*q
,
1951 unsigned long prod_off
,
1952 unsigned long cons_off
,
1955 size_t qsz
= ((1 << q
->max_n_shift
) * dwords
) << 3;
1957 q
->base
= dmam_alloc_coherent(smmu
->dev
, qsz
, &q
->base_dma
, GFP_KERNEL
);
1959 dev_err(smmu
->dev
, "failed to allocate queue (0x%zx bytes)\n",
1964 q
->prod_reg
= smmu
->base
+ prod_off
;
1965 q
->cons_reg
= smmu
->base
+ cons_off
;
1966 q
->ent_dwords
= dwords
;
1968 q
->q_base
= Q_BASE_RWA
;
1969 q
->q_base
|= q
->base_dma
& Q_BASE_ADDR_MASK
<< Q_BASE_ADDR_SHIFT
;
1970 q
->q_base
|= (q
->max_n_shift
& Q_BASE_LOG2SIZE_MASK
)
1971 << Q_BASE_LOG2SIZE_SHIFT
;
1973 q
->prod
= q
->cons
= 0;
1977 static int arm_smmu_init_queues(struct arm_smmu_device
*smmu
)
1982 spin_lock_init(&smmu
->cmdq
.lock
);
1983 ret
= arm_smmu_init_one_queue(smmu
, &smmu
->cmdq
.q
, ARM_SMMU_CMDQ_PROD
,
1984 ARM_SMMU_CMDQ_CONS
, CMDQ_ENT_DWORDS
);
1989 ret
= arm_smmu_init_one_queue(smmu
, &smmu
->evtq
.q
, ARM_SMMU_EVTQ_PROD
,
1990 ARM_SMMU_EVTQ_CONS
, EVTQ_ENT_DWORDS
);
1995 if (!(smmu
->features
& ARM_SMMU_FEAT_PRI
))
1998 return arm_smmu_init_one_queue(smmu
, &smmu
->priq
.q
, ARM_SMMU_PRIQ_PROD
,
1999 ARM_SMMU_PRIQ_CONS
, PRIQ_ENT_DWORDS
);
2002 static int arm_smmu_init_l1_strtab(struct arm_smmu_device
*smmu
)
2005 struct arm_smmu_strtab_cfg
*cfg
= &smmu
->strtab_cfg
;
2006 size_t size
= sizeof(*cfg
->l1_desc
) * cfg
->num_l1_ents
;
2007 void *strtab
= smmu
->strtab_cfg
.strtab
;
2009 cfg
->l1_desc
= devm_kzalloc(smmu
->dev
, size
, GFP_KERNEL
);
2010 if (!cfg
->l1_desc
) {
2011 dev_err(smmu
->dev
, "failed to allocate l1 stream table desc\n");
2015 for (i
= 0; i
< cfg
->num_l1_ents
; ++i
) {
2016 arm_smmu_write_strtab_l1_desc(strtab
, &cfg
->l1_desc
[i
]);
2017 strtab
+= STRTAB_L1_DESC_DWORDS
<< 3;
2023 static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device
*smmu
)
2028 struct arm_smmu_strtab_cfg
*cfg
= &smmu
->strtab_cfg
;
2030 /* Calculate the L1 size, capped to the SIDSIZE. */
2031 size
= STRTAB_L1_SZ_SHIFT
- (ilog2(STRTAB_L1_DESC_DWORDS
) + 3);
2032 size
= min(size
, smmu
->sid_bits
- STRTAB_SPLIT
);
2033 cfg
->num_l1_ents
= 1 << size
;
2035 size
+= STRTAB_SPLIT
;
2036 if (size
< smmu
->sid_bits
)
2038 "2-level strtab only covers %u/%u bits of SID\n",
2039 size
, smmu
->sid_bits
);
2041 l1size
= cfg
->num_l1_ents
* (STRTAB_L1_DESC_DWORDS
<< 3);
2042 strtab
= dmam_alloc_coherent(smmu
->dev
, l1size
, &cfg
->strtab_dma
,
2043 GFP_KERNEL
| __GFP_ZERO
);
2046 "failed to allocate l1 stream table (%u bytes)\n",
2050 cfg
->strtab
= strtab
;
2052 /* Configure strtab_base_cfg for 2 levels */
2053 reg
= STRTAB_BASE_CFG_FMT_2LVL
;
2054 reg
|= (size
& STRTAB_BASE_CFG_LOG2SIZE_MASK
)
2055 << STRTAB_BASE_CFG_LOG2SIZE_SHIFT
;
2056 reg
|= (STRTAB_SPLIT
& STRTAB_BASE_CFG_SPLIT_MASK
)
2057 << STRTAB_BASE_CFG_SPLIT_SHIFT
;
2058 cfg
->strtab_base_cfg
= reg
;
2060 return arm_smmu_init_l1_strtab(smmu
);
2063 static int arm_smmu_init_strtab_linear(struct arm_smmu_device
*smmu
)
2068 struct arm_smmu_strtab_cfg
*cfg
= &smmu
->strtab_cfg
;
2070 size
= (1 << smmu
->sid_bits
) * (STRTAB_STE_DWORDS
<< 3);
2071 strtab
= dmam_alloc_coherent(smmu
->dev
, size
, &cfg
->strtab_dma
,
2072 GFP_KERNEL
| __GFP_ZERO
);
2075 "failed to allocate linear stream table (%u bytes)\n",
2079 cfg
->strtab
= strtab
;
2080 cfg
->num_l1_ents
= 1 << smmu
->sid_bits
;
2082 /* Configure strtab_base_cfg for a linear table covering all SIDs */
2083 reg
= STRTAB_BASE_CFG_FMT_LINEAR
;
2084 reg
|= (smmu
->sid_bits
& STRTAB_BASE_CFG_LOG2SIZE_MASK
)
2085 << STRTAB_BASE_CFG_LOG2SIZE_SHIFT
;
2086 cfg
->strtab_base_cfg
= reg
;
2088 arm_smmu_init_bypass_stes(strtab
, cfg
->num_l1_ents
);
2092 static int arm_smmu_init_strtab(struct arm_smmu_device
*smmu
)
2097 if (smmu
->features
& ARM_SMMU_FEAT_2_LVL_STRTAB
)
2098 ret
= arm_smmu_init_strtab_2lvl(smmu
);
2100 ret
= arm_smmu_init_strtab_linear(smmu
);
2105 /* Set the strtab base address */
2106 reg
= smmu
->strtab_cfg
.strtab_dma
&
2107 STRTAB_BASE_ADDR_MASK
<< STRTAB_BASE_ADDR_SHIFT
;
2108 reg
|= STRTAB_BASE_RA
;
2109 smmu
->strtab_cfg
.strtab_base
= reg
;
2111 /* Allocate the first VMID for stage-2 bypass STEs */
2112 set_bit(0, smmu
->vmid_map
);
2116 static int arm_smmu_init_structures(struct arm_smmu_device
*smmu
)
2120 ret
= arm_smmu_init_queues(smmu
);
2124 return arm_smmu_init_strtab(smmu
);
2127 static int arm_smmu_write_reg_sync(struct arm_smmu_device
*smmu
, u32 val
,
2128 unsigned int reg_off
, unsigned int ack_off
)
2132 writel_relaxed(val
, smmu
->base
+ reg_off
);
2133 return readl_relaxed_poll_timeout(smmu
->base
+ ack_off
, reg
, reg
== val
,
2134 1, ARM_SMMU_POLL_TIMEOUT_US
);
2137 /* GBPA is "special" */
2138 static int arm_smmu_update_gbpa(struct arm_smmu_device
*smmu
, u32 set
, u32 clr
)
2141 u32 reg
, __iomem
*gbpa
= smmu
->base
+ ARM_SMMU_GBPA
;
2143 ret
= readl_relaxed_poll_timeout(gbpa
, reg
, !(reg
& GBPA_UPDATE
),
2144 1, ARM_SMMU_POLL_TIMEOUT_US
);
2150 writel_relaxed(reg
| GBPA_UPDATE
, gbpa
);
2151 return readl_relaxed_poll_timeout(gbpa
, reg
, !(reg
& GBPA_UPDATE
),
2152 1, ARM_SMMU_POLL_TIMEOUT_US
);
2155 static void arm_smmu_free_msis(void *data
)
2157 struct device
*dev
= data
;
2158 platform_msi_domain_free_irqs(dev
);
2161 static void arm_smmu_write_msi_msg(struct msi_desc
*desc
, struct msi_msg
*msg
)
2163 phys_addr_t doorbell
;
2164 struct device
*dev
= msi_desc_to_dev(desc
);
2165 struct arm_smmu_device
*smmu
= dev_get_drvdata(dev
);
2166 phys_addr_t
*cfg
= arm_smmu_msi_cfg
[desc
->platform
.msi_index
];
2168 doorbell
= (((u64
)msg
->address_hi
) << 32) | msg
->address_lo
;
2169 doorbell
&= MSI_CFG0_ADDR_MASK
<< MSI_CFG0_ADDR_SHIFT
;
2171 writeq_relaxed(doorbell
, smmu
->base
+ cfg
[0]);
2172 writel_relaxed(msg
->data
, smmu
->base
+ cfg
[1]);
2173 writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE
, smmu
->base
+ cfg
[2]);
2176 static void arm_smmu_setup_msis(struct arm_smmu_device
*smmu
)
2178 struct msi_desc
*desc
;
2179 int ret
, nvec
= ARM_SMMU_MAX_MSIS
;
2180 struct device
*dev
= smmu
->dev
;
2182 /* Clear the MSI address regs */
2183 writeq_relaxed(0, smmu
->base
+ ARM_SMMU_GERROR_IRQ_CFG0
);
2184 writeq_relaxed(0, smmu
->base
+ ARM_SMMU_EVTQ_IRQ_CFG0
);
2186 if (smmu
->features
& ARM_SMMU_FEAT_PRI
)
2187 writeq_relaxed(0, smmu
->base
+ ARM_SMMU_PRIQ_IRQ_CFG0
);
2191 if (!(smmu
->features
& ARM_SMMU_FEAT_MSI
))
2194 /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
2195 ret
= platform_msi_domain_alloc_irqs(dev
, nvec
, arm_smmu_write_msi_msg
);
2197 dev_warn(dev
, "failed to allocate MSIs\n");
2201 for_each_msi_entry(desc
, dev
) {
2202 switch (desc
->platform
.msi_index
) {
2203 case EVTQ_MSI_INDEX
:
2204 smmu
->evtq
.q
.irq
= desc
->irq
;
2206 case GERROR_MSI_INDEX
:
2207 smmu
->gerr_irq
= desc
->irq
;
2209 case PRIQ_MSI_INDEX
:
2210 smmu
->priq
.q
.irq
= desc
->irq
;
2212 default: /* Unknown */
2217 /* Add callback to free MSIs on teardown */
2218 devm_add_action(dev
, arm_smmu_free_msis
, dev
);
static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
{
	int ret, irq;
	u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;

	/* Disable IRQs first */
	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
				      ARM_SMMU_IRQ_CTRLACK);
	if (ret) {
		dev_err(smmu->dev, "failed to disable irqs\n");
		return ret;
	}

	arm_smmu_setup_msis(smmu);

	/* Request interrupt lines */
	irq = smmu->evtq.q.irq;
	if (irq) {
		ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
						arm_smmu_evtq_thread,
						IRQF_ONESHOT,
						"arm-smmu-v3-evtq", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable evtq irq\n");
	}

	irq = smmu->cmdq.q.irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq,
				       arm_smmu_cmdq_sync_handler, 0,
				       "arm-smmu-v3-cmdq-sync", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
	}

	irq = smmu->gerr_irq;
	if (irq) {
		ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
				       0, "arm-smmu-v3-gerror", smmu);
		if (ret < 0)
			dev_warn(smmu->dev, "failed to enable gerror irq\n");
	}

	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		irq = smmu->priq.q.irq;
		if (irq) {
			ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
							arm_smmu_priq_thread,
							IRQF_ONESHOT,
							"arm-smmu-v3-priq",
							smmu);
			if (ret < 0)
				dev_warn(smmu->dev,
					 "failed to enable priq irq\n");
			else
				irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
		}
	}

	/* Enable interrupt generation on the SMMU */
	ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
				      ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
	if (ret)
		dev_warn(smmu->dev, "failed to enable irqs\n");

	return 0;
}
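/*
 * Design note: the event and PRI queues are drained from thread context
 * (devm_request_threaded_irq() with IRQF_ONESHOT) since each entry can
 * take real work to consume, while cmdq-sync and gerror use plain
 * handlers.
 */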
static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
{
	int ret;

	ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
	if (ret)
		dev_err(smmu->dev, "failed to clear cr0\n");

	return ret;
}
static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
{
	int ret;
	u32 reg, enables;
	struct arm_smmu_cmdq_ent cmd;

	/* Clear CR0 and sync (disables SMMU and queue processing) */
	reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
	if (reg & CR0_SMMUEN)
		dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");

	ret = arm_smmu_device_disable(smmu);
	if (ret)
		return ret;

	/* CR1 (table and queue memory attributes) */
	reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
	      (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
	      (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);

	/* CR2 (random crap) */
	reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
	writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);

	/* Stream table */
	writeq_relaxed(smmu->strtab_cfg.strtab_base,
		       smmu->base + ARM_SMMU_STRTAB_BASE);
	writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
		       smmu->base + ARM_SMMU_STRTAB_BASE_CFG);

	/* Command queue */
	writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
	writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
	writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);

	enables = CR0_CMDQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable command queue\n");
		return ret;
	}

	/* Invalidate any cached configuration */
	cmd.opcode = CMDQ_OP_CFGI_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Invalidate any stale TLB entries */
	if (smmu->features & ARM_SMMU_FEAT_HYP) {
		cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}

	cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);

	/* Event queue */
	writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
	writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
	writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);

	enables |= CR0_EVTQEN;
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable event queue\n");
		return ret;
	}

	/* PRI queue */
	if (smmu->features & ARM_SMMU_FEAT_PRI) {
		writeq_relaxed(smmu->priq.q.q_base,
			       smmu->base + ARM_SMMU_PRIQ_BASE);
		writel_relaxed(smmu->priq.q.prod,
			       smmu->base + ARM_SMMU_PRIQ_PROD);
		writel_relaxed(smmu->priq.q.cons,
			       smmu->base + ARM_SMMU_PRIQ_CONS);

		enables |= CR0_PRIQEN;
		ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
					      ARM_SMMU_CR0ACK);
		if (ret) {
			dev_err(smmu->dev, "failed to enable PRI queue\n");
			return ret;
		}
	}

	ret = arm_smmu_setup_irqs(smmu);
	if (ret) {
		dev_err(smmu->dev, "failed to setup irqs\n");
		return ret;
	}

	/* Enable the SMMU interface, or ensure bypass */
	if (!bypass || disable_bypass) {
		enables |= CR0_SMMUEN;
	} else {
		ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
		if (ret) {
			dev_err(smmu->dev, "GBPA not responding to update\n");
			return ret;
		}
	}
	ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
				      ARM_SMMU_CR0ACK);
	if (ret) {
		dev_err(smmu->dev, "failed to enable SMMU interface\n");
		return ret;
	}

	return 0;
}
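/*
 * Bring-up order recap: the command queue is enabled first so that the
 * CFGI/TLBI invalidations can be issued, the event/PRI queues and IRQs
 * follow, and only then is SMMUEN set. When firmware asked for bypass
 * (and disable_bypass is not set), GBPA is updated instead and the SMMU
 * itself stays disabled.
 */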
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{
	u32 reg;
	bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;

	/* IDR0 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);

	/* 2-level structures */
	if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;

	if (reg & IDR0_CD2L)
		smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;

	/*
	 * Translation table endianness.
	 * We currently require the same endianness as the CPU, but this
	 * could be changed later by adding a new IO_PGTABLE_QUIRK.
	 */
	switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
	case IDR0_TTENDIAN_MIXED:
		smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
		break;
#ifdef __BIG_ENDIAN
	case IDR0_TTENDIAN_BE:
		smmu->features |= ARM_SMMU_FEAT_TT_BE;
		break;
#else
	case IDR0_TTENDIAN_LE:
		smmu->features |= ARM_SMMU_FEAT_TT_LE;
		break;
#endif
	default:
		dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
		return -ENXIO;
	}

	/* Boolean feature flags */
	if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
		smmu->features |= ARM_SMMU_FEAT_PRI;

	if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
		smmu->features |= ARM_SMMU_FEAT_ATS;

	if (reg & IDR0_SEV)
		smmu->features |= ARM_SMMU_FEAT_SEV;

	if (reg & IDR0_MSI)
		smmu->features |= ARM_SMMU_FEAT_MSI;

	if (reg & IDR0_HYP)
		smmu->features |= ARM_SMMU_FEAT_HYP;

	/*
	 * The coherency feature as set by FW is used in preference to the ID
	 * register, but warn on mismatch.
	 */
	if (!!(reg & IDR0_COHACC) != coherent)
		dev_warn(smmu->dev, "IDR0.COHACC overridden by dma-coherent property (%s)\n",
			 coherent ? "true" : "false");

	switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
	case IDR0_STALL_MODEL_STALL:
		/* Fallthrough */
	case IDR0_STALL_MODEL_FORCE:
		smmu->features |= ARM_SMMU_FEAT_STALLS;
	}

	if (reg & IDR0_S1P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S1;

	if (reg & IDR0_S2P)
		smmu->features |= ARM_SMMU_FEAT_TRANS_S2;

	if (!(reg & (IDR0_S1P | IDR0_S2P))) {
		dev_err(smmu->dev, "no translation support!\n");
		return -ENXIO;
	}

	/* We only support the AArch64 table format at present */
	switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
	case IDR0_TTF_AARCH32_64:
		smmu->ias = 40;
		/* Fallthrough */
	case IDR0_TTF_AARCH64:
		break;
	default:
		dev_err(smmu->dev, "AArch64 table format not supported!\n");
		return -ENXIO;
	}

	/* ASID/VMID sizes */
	smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
	smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;

	/* IDR1 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
	if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
		dev_err(smmu->dev, "embedded implementation not supported\n");
		return -ENXIO;
	}

	/* Queue sizes, capped at 4k */
	smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
				       reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
	if (!smmu->cmdq.q.max_n_shift) {
		/* Odd alignment restrictions on the base, so ignore for now */
		dev_err(smmu->dev, "unit-length command queue not supported\n");
		return -ENXIO;
	}

	smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
				       reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
	smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
				       reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);

	/* SID/SSID sizes */
	smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
	smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;

	/*
	 * If the SMMU supports fewer bits than would fill a single L2 stream
	 * table, use a linear table instead.
	 */
	if (smmu->sid_bits <= STRTAB_SPLIT)
		smmu->features &= ~ARM_SMMU_FEAT_2_LVL_STRTAB;

	/* IDR5 */
	reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);

	/* Maximum number of outstanding stalls */
	smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
				& IDR5_STALL_MAX_MASK;

	/* Page sizes */
	if (reg & IDR5_GRAN64K)
		smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
	if (reg & IDR5_GRAN16K)
		smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
	if (reg & IDR5_GRAN4K)
		smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;

	if (arm_smmu_ops.pgsize_bitmap == -1UL)
		arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
	else
		arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;

	/* Output address size */
	switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
	case IDR5_OAS_32_BIT:
		smmu->oas = 32;
		break;
	case IDR5_OAS_36_BIT:
		smmu->oas = 36;
		break;
	case IDR5_OAS_40_BIT:
		smmu->oas = 40;
		break;
	case IDR5_OAS_42_BIT:
		smmu->oas = 42;
		break;
	case IDR5_OAS_44_BIT:
		smmu->oas = 44;
		break;
	default:
		dev_info(smmu->dev,
			"unknown output address size. Truncating to 48-bit\n");
		/* Fallthrough */
	case IDR5_OAS_48_BIT:
		smmu->oas = 48;
	}

	/* Set the DMA mask for our table walker */
	if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
		dev_warn(smmu->dev,
			 "failed to set DMA mask for table walker\n");

	smmu->ias = max(smmu->ias, smmu->oas);

	dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
		 smmu->ias, smmu->oas, smmu->features);
	return 0;
}
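/*
 * Decode pattern (illustrative, hypothetical register value): ID fields
 * are pulled out as "reg >> FIELD_SHIFT & FIELD_MASK", relying on shift
 * binding tighter than bitwise AND. E.g. an IDR1 value of 0x105 yields
 *
 *	sid_bits  = 0x105 >> IDR1_SID_SHIFT & IDR1_SID_MASK;	// 0x05
 *	ssid_bits = 0x105 >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;	// 0x04
 */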
#ifdef CONFIG_ACPI
static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
				      struct arm_smmu_device *smmu)
{
	struct acpi_iort_smmu_v3 *iort_smmu;
	struct device *dev = smmu->dev;
	struct acpi_iort_node *node;

	node = *(struct acpi_iort_node **)dev_get_platdata(dev);

	/* Retrieve SMMUv3 specific data */
	iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;

	if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return 0;
}
#else
static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
					     struct arm_smmu_device *smmu)
{
	return -ENODEV;
}
#endif
static int arm_smmu_device_dt_probe(struct platform_device *pdev,
				    struct arm_smmu_device *smmu)
{
	struct device *dev = &pdev->dev;
	u32 cells;
	int ret = -EINVAL;

	if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
		dev_err(dev, "missing #iommu-cells property\n");
	else if (cells != 1)
		dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
	else
		ret = 0;

	parse_driver_options(smmu);

	if (of_dma_is_coherent(dev->of_node))
		smmu->features |= ARM_SMMU_FEAT_COHERENCY;

	return ret;
}
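/*
 * Illustrative DT node (values hypothetical); the property and interrupt
 * names are the ones this function and arm_smmu_device_probe() look up:
 *
 *	smmu@2b400000 {
 *		compatible = "arm,smmu-v3";
 *		reg = <0x0 0x2b400000 0x0 0x20000>;
 *		interrupt-names = "eventq", "priq", "cmdq-sync", "gerror";
 *		#iommu-cells = <1>;
 *		dma-coherent;
 *	};
 *
 * The "interrupts-extended" specifiers are omitted here for brevity.
 */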
static int arm_smmu_device_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct resource *res;
	resource_size_t ioaddr;
	struct arm_smmu_device *smmu;
	struct device *dev = &pdev->dev;
	bool bypass;

	smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
	if (!smmu) {
		dev_err(dev, "failed to allocate arm_smmu_device\n");
		return -ENOMEM;
	}
	smmu->dev = dev;

	/* Base address */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (resource_size(res) + 1 < SZ_128K) {
		dev_err(dev, "MMIO region too small (%pr)\n", res);
		return -EINVAL;
	}
	ioaddr = res->start;

	smmu->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(smmu->base))
		return PTR_ERR(smmu->base);

	/* Interrupt lines */
	irq = platform_get_irq_byname(pdev, "eventq");
	if (irq > 0)
		smmu->evtq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "priq");
	if (irq > 0)
		smmu->priq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "cmdq-sync");
	if (irq > 0)
		smmu->cmdq.q.irq = irq;

	irq = platform_get_irq_byname(pdev, "gerror");
	if (irq > 0)
		smmu->gerr_irq = irq;

	if (dev->of_node) {
		ret = arm_smmu_device_dt_probe(pdev, smmu);
	} else {
		ret = arm_smmu_device_acpi_probe(pdev, smmu);
		if (ret == -ENODEV)
			return ret;
	}

	/* Set bypass mode according to firmware probing result */
	bypass = !!ret;

	/* Probe the h/w */
	ret = arm_smmu_device_hw_probe(smmu);
	if (ret)
		return ret;

	/* Initialise in-memory data structures */
	ret = arm_smmu_init_structures(smmu);
	if (ret)
		return ret;

	/* Record our private device structure */
	platform_set_drvdata(pdev, smmu);

	/* Reset the device */
	ret = arm_smmu_device_reset(smmu, bypass);
	if (ret)
		return ret;

	/* And we're up. Go go go! */
	ret = iommu_device_sysfs_add(&smmu->iommu, dev, NULL,
				     "smmu3.%pa", &ioaddr);
	if (ret)
		return ret;

	iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);
	iommu_device_set_fwnode(&smmu->iommu, dev->fwnode);

	ret = iommu_device_register(&smmu->iommu);
	if (ret)
		return ret;

	iommu_register_instance(dev->fwnode, &arm_smmu_ops);

#ifdef CONFIG_PCI
	if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
		pci_request_acs();
		ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
#ifdef CONFIG_ARM_AMBA
	if (amba_bustype.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
		if (ret)
			return ret;
	}
#endif
	if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
		ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
		if (ret)
			return ret;
	}
	return 0;
}
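/*
 * bus_set_iommu() installs ops system-wide rather than per-instance,
 * hence the "iommu_ops != &arm_smmu_ops" guards above: only the first
 * SMMUv3 instance to probe hooks up each bus type.
 */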
static int arm_smmu_device_remove(struct platform_device *pdev)
{
	struct arm_smmu_device *smmu = platform_get_drvdata(pdev);

	arm_smmu_device_disable(smmu);

	return 0;
}
static struct of_device_id arm_smmu_of_match[] = {
	{ .compatible = "arm,smmu-v3", },
	{ },
};
MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
static struct platform_driver arm_smmu_driver = {
	.driver	= {
		.name		= "arm-smmu-v3",
		.of_match_table	= of_match_ptr(arm_smmu_of_match),
	},
	.probe	= arm_smmu_device_probe,
	.remove	= arm_smmu_device_remove,
};
static int __init arm_smmu_init(void)
{
	static bool registered;
	int ret = 0;

	if (!registered) {
		ret = platform_driver_register(&arm_smmu_driver);
		registered = !ret;
	}
	return ret;
}

static void __exit arm_smmu_exit(void)
{
	return platform_driver_unregister(&arm_smmu_driver);
}

subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit);
static int __init arm_smmu_of_init(struct device_node *np)
{
	int ret = arm_smmu_init();

	if (ret)
		return ret;

	if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
		return -ENODEV;

	return 0;
}
IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);
static int __init acpi_smmu_v3_init(struct acpi_table_header *table)
{
	if (iort_node_match(ACPI_IORT_NODE_SMMU_V3))
		return arm_smmu_init();

	return 0;
}
IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init);
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2");