/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2015 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This driver is powered by bad coffee and bombay mix.
 */

#include <linux/acpi.h>
#include <linux/acpi_iort.h>
#include <linux/delay.h>
#include <linux/dma-iommu.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>

#include <linux/amba/bus.h>

#include "io-pgtable.h"

/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL_SHIFT		27
#define IDR0_ST_LVL_MASK		0x3
#define IDR0_ST_LVL_2LVL		(1 << IDR0_ST_LVL_SHIFT)
#define IDR0_STALL_MODEL_SHIFT		24
#define IDR0_STALL_MODEL_MASK		0x3
#define IDR0_STALL_MODEL_STALL		(0 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_STALL_MODEL_FORCE		(2 << IDR0_STALL_MODEL_SHIFT)
#define IDR0_TTENDIAN_SHIFT		21
#define IDR0_TTENDIAN_MASK		0x3
#define IDR0_TTENDIAN_LE		(2 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_BE		(3 << IDR0_TTENDIAN_SHIFT)
#define IDR0_TTENDIAN_MIXED		(0 << IDR0_TTENDIAN_SHIFT)
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF_SHIFT			2
#define IDR0_TTF_MASK			0x3
#define IDR0_TTF_AARCH64		(2 << IDR0_TTF_SHIFT)
#define IDR0_TTF_AARCH32_64		(3 << IDR0_TTF_SHIFT)
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_CMDQ_SHIFT			21
#define IDR1_CMDQ_MASK			0x1f
#define IDR1_EVTQ_SHIFT			16
#define IDR1_EVTQ_MASK			0x1f
#define IDR1_PRIQ_SHIFT			11
#define IDR1_PRIQ_MASK			0x1f
#define IDR1_SSID_SHIFT			6
#define IDR1_SSID_MASK			0x1f
#define IDR1_SID_SHIFT			0
#define IDR1_SID_MASK			0x3f

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX_SHIFT		16
#define IDR5_STALL_MAX_MASK		0xffff
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS_SHIFT			0
#define IDR5_OAS_MASK			0x7
#define IDR5_OAS_32_BIT			(0 << IDR5_OAS_SHIFT)
#define IDR5_OAS_36_BIT			(1 << IDR5_OAS_SHIFT)
#define IDR5_OAS_40_BIT			(2 << IDR5_OAS_SHIFT)
#define IDR5_OAS_42_BIT			(3 << IDR5_OAS_SHIFT)
#define IDR5_OAS_44_BIT			(4 << IDR5_OAS_SHIFT)
#define IDR5_OAS_48_BIT			(5 << IDR5_OAS_SHIFT)

#define ARM_SMMU_CR0			0x20
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_SH_NSH			0
#define CR1_SH_OSH			2
#define CR1_SH_ISH			3
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
#define CR1_TABLE_SH_SHIFT		10
#define CR1_TABLE_OC_SHIFT		8
#define CR1_TABLE_IC_SHIFT		6
#define CR1_QUEUE_SH_SHIFT		4
#define CR1_QUEUE_OC_SHIFT		2
#define CR1_QUEUE_IC_SHIFT		0

#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_GBPA			0x44
#define GBPA_ABORT			(1 << 20)
#define GBPA_UPDATE			(1 << 31)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0xfd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74

#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_SHIFT		6
#define STRTAB_BASE_ADDR_MASK		0x3ffffffffffUL

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_LOG2SIZE_SHIFT	0
#define STRTAB_BASE_CFG_LOG2SIZE_MASK	0x3f
#define STRTAB_BASE_CFG_SPLIT_SHIFT	6
#define STRTAB_BASE_CFG_SPLIT_MASK	0x1f
#define STRTAB_BASE_CFG_FMT_SHIFT	16
#define STRTAB_BASE_CFG_FMT_MASK	0x3
#define STRTAB_BASE_CFG_FMT_LINEAR	(0 << STRTAB_BASE_CFG_FMT_SHIFT)
#define STRTAB_BASE_CFG_FMT_2LVL	(1 << STRTAB_BASE_CFG_FMT_SHIFT)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0x100a8
#define ARM_SMMU_EVTQ_CONS		0x100ac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0x100c8
#define ARM_SMMU_PRIQ_CONS		0x100cc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc

/* Common MSI config fields */
#define MSI_CFG0_ADDR_SHIFT		2
#define MSI_CFG0_ADDR_MASK		0x3fffffffffffUL
#define MSI_CFG2_SH_SHIFT		4
#define MSI_CFG2_SH_NSH			(0UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_OSH			(2UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_SH_ISH			(3UL << MSI_CFG2_SH_SHIFT)
#define MSI_CFG2_MEMATTR_SHIFT		0
#define MSI_CFG2_MEMATTR_DEVICE_nGnRE	(0x1 << MSI_CFG2_MEMATTR_SHIFT)

#define Q_IDX(q, p)			((p) & ((1 << (q)->max_n_shift) - 1))
#define Q_WRP(q, p)			((p) & (1 << (q)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1 << 31)
#define Q_OVF(q, p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(q, p) * (q)->ent_dwords)

#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_SHIFT		5
#define Q_BASE_ADDR_MASK		0xfffffffffffUL
#define Q_BASE_LOG2SIZE_SHIFT		0
#define Q_BASE_LOG2SIZE_MASK		0x1fUL

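/*
 * A queue pointer value (prod or cons) packs three fields: bits
 * [max_n_shift-1:0] are the ring index (Q_IDX), bit max_n_shift is the wrap
 * flag (Q_WRP) and bit 31 is the overflow flag (Q_OVF). With max_n_shift == 8,
 * for example, a value of 0x100 is index 0 with the wrap bit set, which is
 * what lets queue_full() and queue_empty() below tell a full ring from an
 * empty one even though the two indices compare equal.
 */
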
/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_L1_SZ_SHIFT		20
#define STRTAB_SPLIT			8

#define STRTAB_L1_DESC_DWORDS		1
#define STRTAB_L1_DESC_SPAN_SHIFT	0
#define STRTAB_L1_DESC_SPAN_MASK	0x1fUL
#define STRTAB_L1_DESC_L2PTR_SHIFT	6
#define STRTAB_L1_DESC_L2PTR_MASK	0x3ffffffffffUL

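/*
 * With these values the 2lvl format gives up to 128k L1 descriptors of 8
 * bytes each (1 << STRTAB_L1_SZ_SHIFT bytes in total), each pointing at a
 * lazily allocated L2 table of 1 << STRTAB_SPLIT == 256 STEs: sid >> 8
 * selects the L1 descriptor and sid & 0xff selects the STE within it.
 */
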
#define STRTAB_STE_DWORDS		8
#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG_SHIFT		1
#define STRTAB_STE_0_CFG_MASK		0x7UL
#define STRTAB_STE_0_CFG_ABORT		(0UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_BYPASS		(4UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S1_TRANS	(5UL << STRTAB_STE_0_CFG_SHIFT)
#define STRTAB_STE_0_CFG_S2_TRANS	(6UL << STRTAB_STE_0_CFG_SHIFT)

#define STRTAB_STE_0_S1FMT_SHIFT	4
#define STRTAB_STE_0_S1FMT_LINEAR	(0UL << STRTAB_STE_0_S1FMT_SHIFT)
#define STRTAB_STE_0_S1CTXPTR_SHIFT	6
#define STRTAB_STE_0_S1CTXPTR_MASK	0x3ffffffffffUL
#define STRTAB_STE_0_S1CDMAX_SHIFT	59
#define STRTAB_STE_0_S1CDMAX_MASK	0x1fUL

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1C_SH_NSH		0UL
#define STRTAB_STE_1_S1C_SH_OSH		2UL
#define STRTAB_STE_1_S1C_SH_ISH		3UL
#define STRTAB_STE_1_S1CIR_SHIFT	2
#define STRTAB_STE_1_S1COR_SHIFT	4
#define STRTAB_STE_1_S1CSH_SHIFT	6

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL
#define STRTAB_STE_1_EATS_SHIFT		28

#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL
#define STRTAB_STE_1_STRW_SHIFT		30

#define STRTAB_STE_1_SHCFG_INCOMING	1UL
#define STRTAB_STE_1_SHCFG_SHIFT	44

#define STRTAB_STE_1_PRIVCFG_UNPRIV	2UL
#define STRTAB_STE_1_PRIVCFG_SHIFT	48

#define STRTAB_STE_2_S2VMID_SHIFT	0
#define STRTAB_STE_2_S2VMID_MASK	0xffffUL
#define STRTAB_STE_2_VTCR_SHIFT		32
#define STRTAB_STE_2_VTCR_MASK		0x7ffffUL
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_SHIFT	4
#define STRTAB_STE_3_S2TTB_MASK		0xfffffffffffUL

/* Context descriptor (stage-1 only) */
#define CTXDESC_CD_DWORDS		8
#define CTXDESC_CD_0_TCR_T0SZ_SHIFT	0
#define ARM64_TCR_T0SZ_SHIFT		0
#define ARM64_TCR_T0SZ_MASK		0x1fUL
#define CTXDESC_CD_0_TCR_TG0_SHIFT	6
#define ARM64_TCR_TG0_SHIFT		14
#define ARM64_TCR_TG0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_IRGN0_SHIFT	8
#define ARM64_TCR_IRGN0_SHIFT		8
#define ARM64_TCR_IRGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_ORGN0_SHIFT	10
#define ARM64_TCR_ORGN0_SHIFT		10
#define ARM64_TCR_ORGN0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_SH0_SHIFT	12
#define ARM64_TCR_SH0_SHIFT		12
#define ARM64_TCR_SH0_MASK		0x3UL
#define CTXDESC_CD_0_TCR_EPD0_SHIFT	14
#define ARM64_TCR_EPD0_SHIFT		7
#define ARM64_TCR_EPD0_MASK		0x1UL
#define CTXDESC_CD_0_TCR_EPD1_SHIFT	30
#define ARM64_TCR_EPD1_SHIFT		23
#define ARM64_TCR_EPD1_MASK		0x1UL

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS_SHIFT	32
#define ARM64_TCR_IPS_SHIFT		32
#define ARM64_TCR_IPS_MASK		0x7UL
#define CTXDESC_CD_0_TCR_TBI0_SHIFT	38
#define ARM64_TCR_TBI0_SHIFT		37
#define ARM64_TCR_TBI0_MASK		0x1UL

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET_SHIFT		47
#define CTXDESC_CD_0_ASET_SHARED	(0UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASET_PRIVATE	(1UL << CTXDESC_CD_0_ASET_SHIFT)
#define CTXDESC_CD_0_ASID_SHIFT		48
#define CTXDESC_CD_0_ASID_MASK		0xffffUL

#define CTXDESC_CD_1_TTB0_SHIFT		4
#define CTXDESC_CD_1_TTB0_MASK		0xfffffffffffUL

#define CTXDESC_CD_3_MAIR_SHIFT		0

/* Convert between AArch64 (CPU) TCR format and SMMU CD format */
#define ARM_SMMU_TCR2CD(tcr, fld)					\
	(((tcr) >> ARM64_TCR_##fld##_SHIFT & ARM64_TCR_##fld##_MASK)	\
	 << CTXDESC_CD_0_TCR_##fld##_SHIFT)

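/*
 * For example, ARM_SMMU_TCR2CD(tcr, TG0) extracts bits [15:14] (TG0) from the
 * CPU TCR value and re-packs them at bits [7:6] of CD word 0, where the SMMU
 * expects the granule-size field; arm_smmu_cpu_tcr_to_cd() applies the same
 * repacking to each field it copies into the context descriptor.
 */
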
/* Command queue */
#define CMDQ_ENT_DWORDS			2
#define CMDQ_MAX_SZ_SHIFT		8

#define CMDQ_ERR_SHIFT			24
#define CMDQ_ERR_MASK			0x7f
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2

#define CMDQ_0_OP_SHIFT			0
#define CMDQ_0_OP_MASK			0xffUL
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID_SHIFT	32
#define CMDQ_PREFETCH_1_SIZE_SHIFT	0
#define CMDQ_PREFETCH_1_ADDR_MASK	~0xfffUL

#define CMDQ_CFGI_0_SID_SHIFT		32
#define CMDQ_CFGI_0_SID_MASK		0xffffffffUL
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE_SHIFT		0
#define CMDQ_CFGI_1_RANGE_MASK		0x1fUL

#define CMDQ_TLBI_0_VMID_SHIFT		32
#define CMDQ_TLBI_0_ASID_SHIFT		48
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_VA_MASK		~0xfffUL
#define CMDQ_TLBI_1_IPA_MASK		0xfffffffff000UL

#define CMDQ_PRI_0_SSID_SHIFT		12
#define CMDQ_PRI_0_SSID_MASK		0xfffffUL
#define CMDQ_PRI_0_SID_SHIFT		32
#define CMDQ_PRI_0_SID_MASK		0xffffffffUL
#define CMDQ_PRI_1_GRPID_SHIFT		0
#define CMDQ_PRI_1_GRPID_MASK		0x1ffUL
#define CMDQ_PRI_1_RESP_SHIFT		12
#define CMDQ_PRI_1_RESP_DENY		(0UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_FAIL		(1UL << CMDQ_PRI_1_RESP_SHIFT)
#define CMDQ_PRI_1_RESP_SUCC		(2UL << CMDQ_PRI_1_RESP_SHIFT)

#define CMDQ_SYNC_0_CS_SHIFT		12
#define CMDQ_SYNC_0_CS_NONE		(0UL << CMDQ_SYNC_0_CS_SHIFT)
#define CMDQ_SYNC_0_CS_SEV		(2UL << CMDQ_SYNC_0_CS_SHIFT)

/* Event queue */
#define EVTQ_ENT_DWORDS			4
#define EVTQ_MAX_SZ_SHIFT		7

#define EVTQ_0_ID_SHIFT			0
#define EVTQ_0_ID_MASK			0xffUL

/* PRI queue */
#define PRIQ_ENT_DWORDS			2
#define PRIQ_MAX_SZ_SHIFT		8

#define PRIQ_0_SID_SHIFT		0
#define PRIQ_0_SID_MASK			0xffffffffUL
#define PRIQ_0_SSID_SHIFT		32
#define PRIQ_0_SSID_MASK		0xfffffUL
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX_SHIFT		0
#define PRIQ_1_PRG_IDX_MASK		0x1ffUL
#define PRIQ_1_ADDR_SHIFT		12
#define PRIQ_1_ADDR_MASK		0xfffffffffffffUL

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	100

static bool disable_bypass;
module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO);
MODULE_PARM_DESC(disable_bypass,
	"Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");

enum pri_resp {
	PRI_RESP_DENY,
	PRI_RESP_FAIL,
	PRI_RESP_SUCC,
};

enum arm_smmu_msi_index {
	EVTQ_MSI_INDEX,
	GERROR_MSI_INDEX,
	PRIQ_MSI_INDEX,
	ARM_SMMU_MAX_MSIS,
};

static phys_addr_t arm_smmu_msi_cfg[ARM_SMMU_MAX_MSIS][3] = {
	[EVTQ_MSI_INDEX] = {
		ARM_SMMU_EVTQ_IRQ_CFG0,
		ARM_SMMU_EVTQ_IRQ_CFG1,
		ARM_SMMU_EVTQ_IRQ_CFG2,
	},
	[GERROR_MSI_INDEX] = {
		ARM_SMMU_GERROR_IRQ_CFG0,
		ARM_SMMU_GERROR_IRQ_CFG1,
		ARM_SMMU_GERROR_IRQ_CFG2,
	},
	[PRIQ_MSI_INDEX] = {
		ARM_SMMU_PRIQ_IRQ_CFG0,
		ARM_SMMU_PRIQ_IRQ_CFG1,
		ARM_SMMU_PRIQ_IRQ_CFG2,
	},
};

struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32			sid;
			u8			size;
			u64			addr;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		struct {
			u32			sid;
			union {
				bool		leaf;
				u8		span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u16			asid;
			u16			vmid;
			bool			leaf;
			u64			addr;
		} tlbi;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32			sid;
			u32			ssid;
			u16			grpid;
			enum pri_resp		resp;
		} pri;

		#define CMDQ_OP_CMD_SYNC	0x46
	};
};

struct arm_smmu_queue {
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;
	u32				max_n_shift;
	u32				prod;
	u32				cons;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	spinlock_t			lock;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};

/* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc {
	u8				span;

	__le64				*l2ptr;
	dma_addr_t			l2ptr_dma;
};

struct arm_smmu_s1_cfg {
	__le64				*cdptr;
	dma_addr_t			cdptr_dma;

	struct arm_smmu_ctx_desc {
		u16	asid;
		u64	ttbr;
		u64	tcr;
		u64	mair;
	} cd;
};

struct arm_smmu_s2_cfg {
	u16				vmid;
	u64				vttbr;
	u64				vtcr;
};

struct arm_smmu_strtab_ent {
	bool				valid;

	bool				bypass;	/* Overrides s1/s2 config */
	struct arm_smmu_s1_cfg		*s1_cfg;
	struct arm_smmu_s2_cfg		*s2_cfg;
};

struct arm_smmu_strtab_cfg {
	__le64				*strtab;
	dma_addr_t			strtab_dma;
	struct arm_smmu_strtab_l1_desc	*l1_desc;
	unsigned int			num_l1_ents;

	u64				strtab_base;
	u32				strtab_base_cfg;
};

/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	void __iomem			*base;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */
	unsigned long			pgsize_bitmap;

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;
	DECLARE_BITMAP(asid_map, ARM_SMMU_MAX_ASIDS);

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	DECLARE_BITMAP(vmid_map, ARM_SMMU_MAX_VMIDS);

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;
};

/* SMMU private data for each master */
struct arm_smmu_master_data {
	struct arm_smmu_device		*smmu;
	struct arm_smmu_strtab_ent	ste;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
	ARM_SMMU_DOMAIN_NESTED,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	spinlock_t			pgtbl_lock;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_s1_cfg	s1_cfg;
		struct arm_smmu_s2_cfg	s2_cfg;
	};

	struct iommu_domain		domain;
};

struct arm_smmu_option_prop {
	u32 opt;
	const char *prop;
};

static struct arm_smmu_option_prop arm_smmu_options[] = {
	{ ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
	{ 0, NULL},
};

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

static void parse_driver_options(struct arm_smmu_device *smmu)
{
	int i = 0;

	do {
		if (of_property_read_bool(smmu->dev->of_node,
						arm_smmu_options[i].prop)) {
			smmu->options |= arm_smmu_options[i].opt;
			dev_notice(smmu->dev, "option %s\n",
				arm_smmu_options[i].prop);
		}
	} while (arm_smmu_options[++i].opt);
}

/* Low-level queue manipulation functions */
static bool queue_full(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) != Q_WRP(q, q->cons);
}

static bool queue_empty(struct arm_smmu_queue *q)
{
	return Q_IDX(q, q->prod) == Q_IDX(q, q->cons) &&
	       Q_WRP(q, q->prod) == Q_WRP(q, q->cons);
}

static void queue_sync_cons(struct arm_smmu_queue *q)
{
	q->cons = readl_relaxed(q->cons_reg);
}

static void queue_inc_cons(struct arm_smmu_queue *q)
{
	u32 cons = (Q_WRP(q, q->cons) | Q_IDX(q, q->cons)) + 1;

	q->cons = Q_OVF(q, q->cons) | Q_WRP(q, cons) | Q_IDX(q, cons);
	writel(q->cons, q->cons_reg);
}

static int queue_sync_prod(struct arm_smmu_queue *q)
{
	int ret = 0;
	u32 prod = readl_relaxed(q->prod_reg);

	if (Q_OVF(q, prod) != Q_OVF(q, q->prod))
		ret = -EOVERFLOW;

	q->prod = prod;
	return ret;
}

static void queue_inc_prod(struct arm_smmu_queue *q)
{
	u32 prod = (Q_WRP(q, q->prod) | Q_IDX(q, q->prod)) + 1;

	q->prod = Q_OVF(q, q->prod) | Q_WRP(q, prod) | Q_IDX(q, prod);
	writel(q->prod, q->prod_reg);
}

/*
 * Wait for the SMMU to consume items. If drain is true, wait until the queue
 * is empty. Otherwise, wait until there is at least one free slot.
 */
static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe)
{
	ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US);

	while (queue_sync_cons(q), (drain ? !queue_empty(q) : queue_full(q))) {
		if (ktime_compare(ktime_get(), timeout) > 0)
			return -ETIMEDOUT;

		if (wfe) {
			wfe();
		} else {
			cpu_relax();
			udelay(1);
		}
	}

	return 0;
}

static void queue_write(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = cpu_to_le64(*src++);
}

static int queue_insert_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_full(q))
		return -ENOSPC;

	queue_write(Q_ENT(q, q->prod), ent, q->ent_dwords);
	queue_inc_prod(q);
	return 0;
}

static void queue_read(__le64 *dst, u64 *src, size_t n_dwords)
{
	int i;

	for (i = 0; i < n_dwords; ++i)
		*dst++ = le64_to_cpu(*src++);
}

static int queue_remove_raw(struct arm_smmu_queue *q, u64 *ent)
{
	if (queue_empty(q))
		return -EAGAIN;

	queue_read(ent, Q_ENT(q, q->cons), q->ent_dwords);
	queue_inc_cons(q);
	return 0;
}

/* High-level queue accessors */
static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
{
	memset(cmd, 0, CMDQ_ENT_DWORDS << 3);
	cmd[0] |= (ent->opcode & CMDQ_0_OP_MASK) << CMDQ_0_OP_SHIFT;

	switch (ent->opcode) {
	case CMDQ_OP_TLBI_EL2_ALL:
	case CMDQ_OP_TLBI_NSNH_ALL:
		break;
	case CMDQ_OP_PREFETCH_CFG:
		cmd[0] |= (u64)ent->prefetch.sid << CMDQ_PREFETCH_0_SID_SHIFT;
		cmd[1] |= ent->prefetch.size << CMDQ_PREFETCH_1_SIZE_SHIFT;
		cmd[1] |= ent->prefetch.addr & CMDQ_PREFETCH_1_ADDR_MASK;
		break;
	case CMDQ_OP_CFGI_STE:
		cmd[0] |= (u64)ent->cfgi.sid << CMDQ_CFGI_0_SID_SHIFT;
		cmd[1] |= ent->cfgi.leaf ? CMDQ_CFGI_1_LEAF : 0;
		break;
	case CMDQ_OP_CFGI_ALL:
		/* Cover the entire SID range */
		cmd[1] |= CMDQ_CFGI_1_RANGE_MASK << CMDQ_CFGI_1_RANGE_SHIFT;
		break;
	case CMDQ_OP_TLBI_NH_VA:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_VA_MASK;
		break;
	case CMDQ_OP_TLBI_S2_IPA:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		cmd[1] |= ent->tlbi.leaf ? CMDQ_TLBI_1_LEAF : 0;
		cmd[1] |= ent->tlbi.addr & CMDQ_TLBI_1_IPA_MASK;
		break;
	case CMDQ_OP_TLBI_NH_ASID:
		cmd[0] |= (u64)ent->tlbi.asid << CMDQ_TLBI_0_ASID_SHIFT;
		/* Fallthrough */
	case CMDQ_OP_TLBI_S12_VMALL:
		cmd[0] |= (u64)ent->tlbi.vmid << CMDQ_TLBI_0_VMID_SHIFT;
		break;
	case CMDQ_OP_PRI_RESP:
		cmd[0] |= ent->substream_valid ? CMDQ_0_SSV : 0;
		cmd[0] |= ent->pri.ssid << CMDQ_PRI_0_SSID_SHIFT;
		cmd[0] |= (u64)ent->pri.sid << CMDQ_PRI_0_SID_SHIFT;
		cmd[1] |= ent->pri.grpid << CMDQ_PRI_1_GRPID_SHIFT;
		switch (ent->pri.resp) {
		case PRI_RESP_DENY:
			cmd[1] |= CMDQ_PRI_1_RESP_DENY;
			break;
		case PRI_RESP_FAIL:
			cmd[1] |= CMDQ_PRI_1_RESP_FAIL;
			break;
		case PRI_RESP_SUCC:
			cmd[1] |= CMDQ_PRI_1_RESP_SUCC;
			break;
		default:
			return -EINVAL;
		}
		break;
	case CMDQ_OP_CMD_SYNC:
		cmd[0] |= CMDQ_SYNC_0_CS_SEV;
		break;
	default:
		return -ENOENT;
	}

	return 0;
}

static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu)
{
	static const char *cerror_str[] = {
		[CMDQ_ERR_CERROR_NONE_IDX]	= "No error",
		[CMDQ_ERR_CERROR_ILL_IDX]	= "Illegal command",
		[CMDQ_ERR_CERROR_ABT_IDX]	= "Abort on command fetch",
	};

	int i;
	u64 cmd[CMDQ_ENT_DWORDS];
	struct arm_smmu_queue *q = &smmu->cmdq.q;
	u32 cons = readl_relaxed(q->cons_reg);
	u32 idx = cons >> CMDQ_ERR_SHIFT & CMDQ_ERR_MASK;
	struct arm_smmu_cmdq_ent cmd_sync = {
		.opcode = CMDQ_OP_CMD_SYNC,
	};

	dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
		idx < ARRAY_SIZE(cerror_str) ? cerror_str[idx] : "Unknown");

	switch (idx) {
	case CMDQ_ERR_CERROR_ABT_IDX:
		dev_err(smmu->dev, "retrying command fetch\n");
	case CMDQ_ERR_CERROR_NONE_IDX:
		return;
	case CMDQ_ERR_CERROR_ILL_IDX:
		/* Fallthrough */
	default:
		break;
	}

	/*
	 * We may have concurrent producers, so we need to be careful
	 * not to touch any of the shadow cmdq state.
	 */
	queue_read(cmd, Q_ENT(q, cons), q->ent_dwords);
	dev_err(smmu->dev, "skipping command in error state:\n");
	for (i = 0; i < ARRAY_SIZE(cmd); ++i)
		dev_err(smmu->dev, "\t0x%016llx\n", (unsigned long long)cmd[i]);

	/* Convert the erroneous command into a CMD_SYNC */
	if (arm_smmu_cmdq_build_cmd(cmd, &cmd_sync)) {
		dev_err(smmu->dev, "failed to convert to CMD_SYNC\n");
		return;
	}

	queue_write(Q_ENT(q, cons), cmd, q->ent_dwords);
}

static void arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
				    struct arm_smmu_cmdq_ent *ent)
{
	u64 cmd[CMDQ_ENT_DWORDS];
	unsigned long flags;
	bool wfe = !!(smmu->features & ARM_SMMU_FEAT_SEV);
	struct arm_smmu_queue *q = &smmu->cmdq.q;

	if (arm_smmu_cmdq_build_cmd(cmd, ent)) {
		dev_warn(smmu->dev, "ignoring unknown CMDQ opcode 0x%x\n",
			 ent->opcode);
		return;
	}

	spin_lock_irqsave(&smmu->cmdq.lock, flags);
	while (queue_insert_raw(q, cmd) == -ENOSPC) {
		if (queue_poll_cons(q, false, wfe))
			dev_err_ratelimited(smmu->dev, "CMDQ timeout\n");
	}

	if (ent->opcode == CMDQ_OP_CMD_SYNC && queue_poll_cons(q, true, wfe))
		dev_err_ratelimited(smmu->dev, "CMD_SYNC timeout\n");
	spin_unlock_irqrestore(&smmu->cmdq.lock, flags);
}

/* Context descriptor manipulation functions */
static u64 arm_smmu_cpu_tcr_to_cd(u64 tcr)
{
	u64 val = 0;

	/* Repack the TCR. Just care about TTBR0 for now */
	val |= ARM_SMMU_TCR2CD(tcr, T0SZ);
	val |= ARM_SMMU_TCR2CD(tcr, TG0);
	val |= ARM_SMMU_TCR2CD(tcr, IRGN0);
	val |= ARM_SMMU_TCR2CD(tcr, ORGN0);
	val |= ARM_SMMU_TCR2CD(tcr, SH0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD0);
	val |= ARM_SMMU_TCR2CD(tcr, EPD1);
	val |= ARM_SMMU_TCR2CD(tcr, IPS);
	val |= ARM_SMMU_TCR2CD(tcr, TBI0);

	return val;
}

static void arm_smmu_write_ctx_desc(struct arm_smmu_device *smmu,
				    struct arm_smmu_s1_cfg *cfg)
{
	u64 val;

	/*
	 * We don't need to issue any invalidation here, as we'll invalidate
	 * the STE when installing the new entry anyway.
	 */
	val = arm_smmu_cpu_tcr_to_cd(cfg->cd.tcr) |
#ifdef __BIG_ENDIAN
	      CTXDESC_CD_0_ENDI |
#endif
	      CTXDESC_CD_0_R | CTXDESC_CD_0_A | CTXDESC_CD_0_ASET_PRIVATE |
	      CTXDESC_CD_0_AA64 | (u64)cfg->cd.asid << CTXDESC_CD_0_ASID_SHIFT |
	      CTXDESC_CD_0_V;
	cfg->cdptr[0] = cpu_to_le64(val);

	val = cfg->cd.ttbr & CTXDESC_CD_1_TTB0_MASK << CTXDESC_CD_1_TTB0_SHIFT;
	cfg->cdptr[1] = cpu_to_le64(val);

	cfg->cdptr[3] = cpu_to_le64(cfg->cd.mair << CTXDESC_CD_3_MAIR_SHIFT);
}

/* Stream table manipulation functions */
static void
arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
{
	u64 val = 0;

	val |= (desc->span & STRTAB_L1_DESC_SPAN_MASK)
		<< STRTAB_L1_DESC_SPAN_SHIFT;
	val |= desc->l2ptr_dma &
	       STRTAB_L1_DESC_L2PTR_MASK << STRTAB_L1_DESC_L2PTR_SHIFT;

	*dst = cpu_to_le64(val);
}

static void arm_smmu_sync_ste_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_CFGI_STE,
		.cfgi	= {
			.sid	= sid,
			.leaf	= true,
		},
	};

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
				      __le64 *dst, struct arm_smmu_strtab_ent *ste)
{
	/*
	 * This is hideously complicated, but we only really care about
	 * three cases at the moment:
	 *
	 * 1. Invalid (all zero) -> bypass  (init)
	 * 2. Bypass -> translation (attach)
	 * 3. Translation -> bypass (detach)
	 *
	 * Given that we can't update the STE atomically and the SMMU
	 * doesn't read the thing in a defined order, that leaves us
	 * with the following maintenance requirements:
	 *
	 * 1. Update Config, return (init time STEs aren't live)
	 * 2. Write everything apart from dword 0, sync, write dword 0, sync
	 * 3. Update Config, sync
	 */
	u64 val = le64_to_cpu(dst[0]);
	bool ste_live = false;
	struct arm_smmu_cmdq_ent prefetch_cmd = {
		.opcode		= CMDQ_OP_PREFETCH_CFG,
		.prefetch	= {
			.sid	= sid,
		},
	};

	if (val & STRTAB_STE_0_V) {
		u64 cfg;

		cfg = val & STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT;
		switch (cfg) {
		case STRTAB_STE_0_CFG_BYPASS:
			break;
		case STRTAB_STE_0_CFG_S1_TRANS:
		case STRTAB_STE_0_CFG_S2_TRANS:
			ste_live = true;
			break;
		case STRTAB_STE_0_CFG_ABORT:
			if (disable_bypass)
				break;
		default:
			BUG(); /* STE corruption */
		}
	}

	/* Nuke the existing Config, as we're going to rewrite it */
	val &= ~(STRTAB_STE_0_CFG_MASK << STRTAB_STE_0_CFG_SHIFT);

	if (ste->valid)
		val |= STRTAB_STE_0_V;
	else
		val &= ~STRTAB_STE_0_V;

	if (ste->bypass) {
		val |= disable_bypass ? STRTAB_STE_0_CFG_ABORT
				      : STRTAB_STE_0_CFG_BYPASS;
		dst[0] = cpu_to_le64(val);
		dst[1] = cpu_to_le64(STRTAB_STE_1_SHCFG_INCOMING
			 << STRTAB_STE_1_SHCFG_SHIFT);
		dst[2] = 0; /* Nuke the VMID */
		if (ste_live)
			arm_smmu_sync_ste_for_sid(smmu, sid);
		return;
	}

	if (ste->s1_cfg) {
		BUG_ON(ste_live);
		dst[1] = cpu_to_le64(
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1CIR_SHIFT |
			 STRTAB_STE_1_S1C_CACHE_WBRA
			 << STRTAB_STE_1_S1COR_SHIFT |
			 STRTAB_STE_1_S1C_SH_ISH << STRTAB_STE_1_S1CSH_SHIFT |
#ifdef CONFIG_PCI_ATS
			 STRTAB_STE_1_EATS_TRANS << STRTAB_STE_1_EATS_SHIFT |
#endif
			 STRTAB_STE_1_STRW_NSEL1 << STRTAB_STE_1_STRW_SHIFT |
			 STRTAB_STE_1_PRIVCFG_UNPRIV <<
			 STRTAB_STE_1_PRIVCFG_SHIFT);

		if (smmu->features & ARM_SMMU_FEAT_STALLS)
			dst[1] |= cpu_to_le64(STRTAB_STE_1_S1STALLD);

		val |= (ste->s1_cfg->cdptr_dma & STRTAB_STE_0_S1CTXPTR_MASK
			<< STRTAB_STE_0_S1CTXPTR_SHIFT) |
			STRTAB_STE_0_CFG_S1_TRANS;

	}

	if (ste->s2_cfg) {
		BUG_ON(ste_live);
		dst[2] = cpu_to_le64(
			 ste->s2_cfg->vmid << STRTAB_STE_2_S2VMID_SHIFT |
			 (ste->s2_cfg->vtcr & STRTAB_STE_2_VTCR_MASK)
			  << STRTAB_STE_2_VTCR_SHIFT |
#ifdef __BIG_ENDIAN
			 STRTAB_STE_2_S2ENDI |
#endif
			 STRTAB_STE_2_S2PTW | STRTAB_STE_2_S2AA64 |
			 STRTAB_STE_2_S2R);

		dst[3] = cpu_to_le64(ste->s2_cfg->vttbr &
			 STRTAB_STE_3_S2TTB_MASK << STRTAB_STE_3_S2TTB_SHIFT);

		val |= STRTAB_STE_0_CFG_S2_TRANS;
	}

	arm_smmu_sync_ste_for_sid(smmu, sid);
	dst[0] = cpu_to_le64(val);
	arm_smmu_sync_ste_for_sid(smmu, sid);

	/* It's likely that we'll want to use the new STE soon */
	if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
		arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
}

static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
{
	unsigned int i;
	struct arm_smmu_strtab_ent ste = {
		.valid	= true,
		.bypass	= true,
	};

	for (i = 0; i < nent; ++i) {
		arm_smmu_write_strtab_ent(NULL, -1, strtab, &ste);
		strtab += STRTAB_STE_DWORDS;
	}
}

static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
{
	size_t size;
	void *strtab;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];

	if (desc->l2ptr)
		return 0;

	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];

	desc->span = STRTAB_SPLIT + 1;
	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
					  GFP_KERNEL | __GFP_ZERO);
	if (!desc->l2ptr) {
		dev_err(smmu->dev,
			"failed to allocate l2 stream table for SID %u\n",
			sid);
		return -ENOMEM;
	}

	arm_smmu_init_bypass_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
	arm_smmu_write_strtab_l1_desc(strtab, desc);
	return 0;
}

/* IRQ and event handlers */
static irqreturn_t arm_smmu_evtq_thread(int irq, void *dev)
{
	int i;
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->evtq.q;
	u64 evt[EVTQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt)) {
			u8 id = evt[0] >> EVTQ_0_ID_SHIFT & EVTQ_0_ID_MASK;

			dev_info(smmu->dev, "event 0x%02x received:\n", id);
			for (i = 0; i < ARRAY_SIZE(evt); ++i)
				dev_info(smmu->dev, "\t0x%016llx\n",
					 (unsigned long long)evt[i]);

		}

		/*
		 * Not much we can do on overflow, so scream and pretend we're
		 * trying harder.
		 */
		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "EVTQ overflow detected -- events lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static void arm_smmu_handle_ppr(struct arm_smmu_device *smmu, u64 *evt)
{
	u32 sid, ssid;
	u16 grpid;
	bool ssv, last;

	sid = evt[0] >> PRIQ_0_SID_SHIFT & PRIQ_0_SID_MASK;
	ssv = evt[0] & PRIQ_0_SSID_V;
	ssid = ssv ? evt[0] >> PRIQ_0_SSID_SHIFT & PRIQ_0_SSID_MASK : 0;
	last = evt[0] & PRIQ_0_PRG_LAST;
	grpid = evt[1] >> PRIQ_1_PRG_IDX_SHIFT & PRIQ_1_PRG_IDX_MASK;

	dev_info(smmu->dev, "unexpected PRI request received:\n");
	dev_info(smmu->dev,
		 "\tsid 0x%08x.0x%05x: [%u%s] %sprivileged %s%s%s access at iova 0x%016llx\n",
		 sid, ssid, grpid, last ? "L" : "",
		 evt[0] & PRIQ_0_PERM_PRIV ? "" : "un",
		 evt[0] & PRIQ_0_PERM_READ ? "R" : "",
		 evt[0] & PRIQ_0_PERM_WRITE ? "W" : "",
		 evt[0] & PRIQ_0_PERM_EXEC ? "X" : "",
		 evt[1] & PRIQ_1_ADDR_MASK << PRIQ_1_ADDR_SHIFT);

	if (last) {
		struct arm_smmu_cmdq_ent cmd = {
			.opcode			= CMDQ_OP_PRI_RESP,
			.substream_valid	= ssv,
			.pri			= {
				.sid	= sid,
				.ssid	= ssid,
				.grpid	= grpid,
				.resp	= PRI_RESP_DENY,
			},
		};

		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	}
}

static irqreturn_t arm_smmu_priq_thread(int irq, void *dev)
{
	struct arm_smmu_device *smmu = dev;
	struct arm_smmu_queue *q = &smmu->priq.q;
	u64 evt[PRIQ_ENT_DWORDS];

	do {
		while (!queue_remove_raw(q, evt))
			arm_smmu_handle_ppr(smmu, evt);

		if (queue_sync_prod(q) == -EOVERFLOW)
			dev_err(smmu->dev, "PRIQ overflow detected -- requests lost\n");
	} while (!queue_empty(q));

	/* Sync our overflow flag, as we believe we're up to speed */
	q->cons = Q_OVF(q, q->prod) | Q_WRP(q, q->cons) | Q_IDX(q, q->cons);
	return IRQ_HANDLED;
}

static irqreturn_t arm_smmu_cmdq_sync_handler(int irq, void *dev)
{
	/* We don't actually use CMD_SYNC interrupts for anything */
	return IRQ_HANDLED;
}

static int arm_smmu_device_disable(struct arm_smmu_device *smmu);

static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev)
{
	u32 gerror, gerrorn, active;
	struct arm_smmu_device *smmu = dev;

	gerror = readl_relaxed(smmu->base + ARM_SMMU_GERROR);
	gerrorn = readl_relaxed(smmu->base + ARM_SMMU_GERRORN);

	active = gerror ^ gerrorn;
	if (!(active & GERROR_ERR_MASK))
		return IRQ_NONE; /* No errors pending */

	dev_warn(smmu->dev,
		 "unexpected global error reported (0x%08x), this could be serious\n",
		 active);

	if (active & GERROR_SFM_ERR) {
		dev_err(smmu->dev, "device has entered Service Failure Mode!\n");
		arm_smmu_device_disable(smmu);
	}

	if (active & GERROR_MSI_GERROR_ABT_ERR)
		dev_warn(smmu->dev, "GERROR MSI write aborted\n");

	if (active & GERROR_MSI_PRIQ_ABT_ERR)
		dev_warn(smmu->dev, "PRIQ MSI write aborted\n");

	if (active & GERROR_MSI_EVTQ_ABT_ERR)
		dev_warn(smmu->dev, "EVTQ MSI write aborted\n");

	if (active & GERROR_MSI_CMDQ_ABT_ERR) {
		dev_warn(smmu->dev, "CMDQ MSI write aborted\n");
		arm_smmu_cmdq_sync_handler(irq, smmu->dev);
	}

	if (active & GERROR_PRIQ_ABT_ERR)
		dev_err(smmu->dev, "PRIQ write aborted -- events may have been lost\n");

	if (active & GERROR_EVTQ_ABT_ERR)
		dev_err(smmu->dev, "EVTQ write aborted -- events may have been lost\n");

	if (active & GERROR_CMDQ_ERR)
		arm_smmu_cmdq_skip_err(smmu);

	writel(gerror, smmu->base + ARM_SMMU_GERRORN);
	return IRQ_HANDLED;
}

/* IO_PGTABLE API */
static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
{
	struct arm_smmu_cmdq_ent cmd;

	cmd.opcode = CMDQ_OP_CMD_SYNC;
	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
}

static void arm_smmu_tlb_sync(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	__arm_smmu_tlb_sync(smmu_domain->smmu);
}

static void arm_smmu_tlb_inv_context(void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_ASID;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
		cmd.tlbi.vmid	= 0;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S12_VMALL;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
	__arm_smmu_tlb_sync(smmu);
}

static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
					  size_t granule, bool leaf, void *cookie)
{
	struct arm_smmu_domain *smmu_domain = cookie;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_cmdq_ent cmd = {
		.tlbi = {
			.leaf	= leaf,
			.addr	= iova,
		},
	};

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
	} else {
		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
	}

	do {
		arm_smmu_cmdq_issue_cmd(smmu, &cmd);
		cmd.tlbi.addr += granule;
	} while (size -= granule);
}

static const struct iommu_gather_ops arm_smmu_gather_ops = {
	.tlb_flush_all	= arm_smmu_tlb_inv_context,
	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
	.tlb_sync	= arm_smmu_tlb_sync,
};

/* IOMMU API */
static bool arm_smmu_capable(enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	case IOMMU_CAP_INTR_REMAP:
		return true; /* MSIs are just memory writes */
	case IOMMU_CAP_NOEXEC:
		return true;
	default:
		return false;
	}
}

static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
{
	struct arm_smmu_domain *smmu_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
		return NULL;

	/*
	 * Allocate the domain and initialise some of its data structures.
	 * We can't really do anything meaningful until we've added a
	 * master.
	 */
	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
	if (!smmu_domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA &&
	    iommu_get_dma_cookie(&smmu_domain->domain)) {
		kfree(smmu_domain);
		return NULL;
	}

	mutex_init(&smmu_domain->init_mutex);
	spin_lock_init(&smmu_domain->pgtbl_lock);
	return &smmu_domain->domain;
}

static int arm_smmu_bitmap_alloc(unsigned long *map, int span)
{
	int idx, size = 1 << span;

	do {
		idx = find_first_zero_bit(map, size);
		if (idx == size)
			return -ENOSPC;
	} while (test_and_set_bit(idx, map));

	return idx;
}

static void arm_smmu_bitmap_free(unsigned long *map, int idx)
{
	clear_bit(idx, map);
}

static void arm_smmu_domain_free(struct iommu_domain *domain)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	iommu_put_dma_cookie(domain);
	free_io_pgtable_ops(smmu_domain->pgtbl_ops);

	/* Free the CD and ASID, if we allocated them */
	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

		if (cfg->cdptr) {
			dmam_free_coherent(smmu_domain->smmu->dev,
					   CTXDESC_CD_DWORDS << 3,
					   cfg->cdptr,
					   cfg->cdptr_dma);

			arm_smmu_bitmap_free(smmu->asid_map, cfg->cd.asid);
		}
	} else {
		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
		if (cfg->vmid)
			arm_smmu_bitmap_free(smmu->vmid_map, cfg->vmid);
	}

	kfree(smmu_domain);
}

static int arm_smmu_domain_finalise_s1(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int ret;
	int asid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s1_cfg *cfg = &smmu_domain->s1_cfg;

	asid = arm_smmu_bitmap_alloc(smmu->asid_map, smmu->asid_bits);
	if (asid < 0)
		return asid;

	cfg->cdptr = dmam_alloc_coherent(smmu->dev, CTXDESC_CD_DWORDS << 3,
					 &cfg->cdptr_dma,
					 GFP_KERNEL | __GFP_ZERO);
	if (!cfg->cdptr) {
		dev_warn(smmu->dev, "failed to allocate context descriptor\n");
		ret = -ENOMEM;
		goto out_free_asid;
	}

	cfg->cd.asid	= (u16)asid;
	cfg->cd.ttbr	= pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
	cfg->cd.tcr	= pgtbl_cfg->arm_lpae_s1_cfg.tcr;
	cfg->cd.mair	= pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
	return 0;

out_free_asid:
	arm_smmu_bitmap_free(smmu->asid_map, asid);
	return ret;
}

static int arm_smmu_domain_finalise_s2(struct arm_smmu_domain *smmu_domain,
				       struct io_pgtable_cfg *pgtbl_cfg)
{
	int vmid;
	struct arm_smmu_device *smmu = smmu_domain->smmu;
	struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;

	vmid = arm_smmu_bitmap_alloc(smmu->vmid_map, smmu->vmid_bits);
	if (vmid < 0)
		return vmid;

	cfg->vmid	= (u16)vmid;
	cfg->vttbr	= pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
	cfg->vtcr	= pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
	return 0;
}

static int arm_smmu_domain_finalise(struct iommu_domain *domain)
{
	int ret;
	unsigned long ias, oas;
	enum io_pgtable_fmt fmt;
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable_ops *pgtbl_ops;
	int (*finalise_stage_fn)(struct arm_smmu_domain *,
				 struct io_pgtable_cfg *);
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_device *smmu = smmu_domain->smmu;

	/* Restrict the stage to what we can actually support */
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		ias = VA_BITS;
		oas = smmu->ias;
		fmt = ARM_64_LPAE_S1;
		finalise_stage_fn = arm_smmu_domain_finalise_s1;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		ias = smmu->ias;
		oas = smmu->oas;
		fmt = ARM_64_LPAE_S2;
		finalise_stage_fn = arm_smmu_domain_finalise_s2;
		break;
	default:
		return -EINVAL;
	}

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= smmu->pgsize_bitmap,
		.ias		= ias,
		.oas		= oas,
		.tlb		= &arm_smmu_gather_ops,
		.iommu_dev	= smmu->dev,
	};

	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
	if (!pgtbl_ops)
		return -ENOMEM;

	domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
	domain->geometry.aperture_end = (1UL << ias) - 1;
	domain->geometry.force_aperture = true;
	smmu_domain->pgtbl_ops = pgtbl_ops;

	ret = finalise_stage_fn(smmu_domain, &pgtbl_cfg);
	if (ret < 0)
		free_io_pgtable_ops(pgtbl_ops);

	return ret;
}

static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{
	__le64 *step;
	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
		struct arm_smmu_strtab_l1_desc *l1_desc;
		int idx;

		/* Two-level walk */
		idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;
		l1_desc = &cfg->l1_desc[idx];
		idx = (sid & ((1 << STRTAB_SPLIT) - 1)) * STRTAB_STE_DWORDS;
		step = &l1_desc->l2ptr[idx];
	} else {
		/* Simple linear lookup */
		step = &cfg->strtab[sid * STRTAB_STE_DWORDS];
	}

	return step;
}

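/*
 * For example, with STRTAB_SPLIT == 8 the walk above resolves sid 0x0104 to
 * L1 descriptor 1 and then to the STE at index 4 of that descriptor's L2
 * table, mirroring the split used by arm_smmu_init_l2_strtab() when the L2
 * table was allocated.
 */
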
static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{
	int i;
	struct arm_smmu_master_data *master = fwspec->iommu_priv;
	struct arm_smmu_device *smmu = master->smmu;

	for (i = 0; i < fwspec->num_ids; ++i) {
		u32 sid = fwspec->ids[i];
		__le64 *step = arm_smmu_get_step_for_sid(smmu, sid);

		arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
	}

	return 0;
}

static void arm_smmu_detach_dev(struct device *dev)
{
	struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;

	master->ste.bypass = true;
	if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
		dev_warn(dev, "failed to install bypass STE\n");
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int ret = 0;
	struct arm_smmu_device *smmu;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct arm_smmu_master_data *master;
	struct arm_smmu_strtab_ent *ste;

	if (!dev->iommu_fwspec)
		return -ENOENT;

	master = dev->iommu_fwspec->iommu_priv;
	smmu = master->smmu;
	ste = &master->ste;

	/* Already attached to a different domain? */
	if (!ste->bypass)
		arm_smmu_detach_dev(dev);

	mutex_lock(&smmu_domain->init_mutex);

	if (!smmu_domain->smmu) {
		smmu_domain->smmu = smmu;
		ret = arm_smmu_domain_finalise(domain);
		if (ret) {
			smmu_domain->smmu = NULL;
			goto out_unlock;
		}
	} else if (smmu_domain->smmu != smmu) {
		dev_err(dev,
			"cannot attach to SMMU %s (upstream of %s)\n",
			dev_name(smmu_domain->smmu->dev),
			dev_name(smmu->dev));
		ret = -ENXIO;
		goto out_unlock;
	}

	ste->bypass = false;
	ste->valid = true;

	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
		ste->s1_cfg = &smmu_domain->s1_cfg;
		ste->s2_cfg = NULL;
		arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
	} else {
		ste->s1_cfg = NULL;
		ste->s2_cfg = &smmu_domain->s2_cfg;
	}

	ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
	if (ret < 0)
		ste->valid = false;

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
			phys_addr_t paddr, size_t size, int prot)
{
	int ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return -ENODEV;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->map(ops, iova, paddr, size, prot);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static size_t
arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
	size_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->unmap(ops, iova, size);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
	return ret;
}

static phys_addr_t
arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
{
	phys_addr_t ret;
	unsigned long flags;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
	struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;

	if (!ops)
		return 0;

	spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
	ret = ops->iova_to_phys(ops, iova);
	spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);

	return ret;
}

static struct platform_driver arm_smmu_driver;

static int arm_smmu_match_node(struct device *dev, void *data)
{
	return dev->fwnode == data;
}

static
struct arm_smmu_device *arm_smmu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
						fwnode, arm_smmu_match_node);
	put_device(dev);
	return dev ? dev_get_drvdata(dev) : NULL;
}

static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
{
	unsigned long limit = smmu->strtab_cfg.num_l1_ents;

	if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
		limit *= 1UL << STRTAB_SPLIT;

	return sid < limit;
}

static struct iommu_ops arm_smmu_ops;

static int arm_smmu_add_device(struct device *dev)
{
	int i, ret;
	struct arm_smmu_device *smmu;
	struct arm_smmu_master_data *master;
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct iommu_group *group;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return -ENODEV;
	/*
	 * We _can_ actually withstand dodgy bus code re-calling add_device()
	 * without an intervening remove_device()/of_xlate() sequence, but
	 * we're not going to do so quietly...
	 */
	if (WARN_ON_ONCE(fwspec->iommu_priv)) {
		master = fwspec->iommu_priv;
		smmu = master->smmu;
	} else {
		smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode);
		if (!smmu)
			return -ENODEV;
		master = kzalloc(sizeof(*master), GFP_KERNEL);
		if (!master)
			return -ENOMEM;

		master->smmu = smmu;
		fwspec->iommu_priv = master;
	}

	/* Check the SIDs are in range of the SMMU and our stream table */
	for (i = 0; i < fwspec->num_ids; i++) {
		u32 sid = fwspec->ids[i];

		if (!arm_smmu_sid_in_range(smmu, sid))
			return -ERANGE;

		/* Ensure l2 strtab is initialised */
		if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
			ret = arm_smmu_init_l2_strtab(smmu, sid);
			if (ret)
				return ret;
		}
	}

	group = iommu_group_get_for_dev(dev);
	if (!IS_ERR(group))
		iommu_group_put(group);

	return PTR_ERR_OR_ZERO(group);
}

static void arm_smmu_remove_device(struct device *dev)
{
	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
	struct arm_smmu_master_data *master;

	if (!fwspec || fwspec->ops != &arm_smmu_ops)
		return;

	master = fwspec->iommu_priv;
	if (master && master->ste.valid)
		arm_smmu_detach_dev(dev);
	iommu_group_remove_device(dev);
	kfree(master);
	iommu_fwspec_free(dev);
}

static struct iommu_group *arm_smmu_device_group(struct device *dev)
{
	struct iommu_group *group;

	/*
	 * We don't support devices sharing stream IDs other than PCI RID
	 * aliases, since the necessary ID-to-device lookup becomes rather
	 * impractical given a potential sparse 32-bit stream ID space.
	 */
	if (dev_is_pci(dev))
		group = pci_device_group(dev);
	else
		group = generic_device_group(dev);

	return group;
}

static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
		return 0;
	default:
		return -ENODEV;
	}
}

static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
				    enum iommu_attr attr, void *data)
{
	int ret = 0;
	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);

	mutex_lock(&smmu_domain->init_mutex);

	switch (attr) {
	case DOMAIN_ATTR_NESTING:
		if (smmu_domain->smmu) {
			ret = -EPERM;
			goto out_unlock;
		}

		if (*(int *)data)
			smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
		else
			smmu_domain->stage = ARM_SMMU_DOMAIN_S1;

		break;
	default:
		ret = -ENODEV;
	}

out_unlock:
	mutex_unlock(&smmu_domain->init_mutex);
	return ret;
}

static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

48ec83bc
WD
1886static struct iommu_ops arm_smmu_ops = {
1887 .capable = arm_smmu_capable,
1888 .domain_alloc = arm_smmu_domain_alloc,
1889 .domain_free = arm_smmu_domain_free,
1890 .attach_dev = arm_smmu_attach_dev,
48ec83bc
WD
1891 .map = arm_smmu_map,
1892 .unmap = arm_smmu_unmap,
9aeb26cf 1893 .map_sg = default_iommu_map_sg,
48ec83bc
WD
1894 .iova_to_phys = arm_smmu_iova_to_phys,
1895 .add_device = arm_smmu_add_device,
1896 .remove_device = arm_smmu_remove_device,
08d4ca2a 1897 .device_group = arm_smmu_device_group,
48ec83bc
WD
1898 .domain_get_attr = arm_smmu_domain_get_attr,
1899 .domain_set_attr = arm_smmu_domain_set_attr,
8f785154 1900 .of_xlate = arm_smmu_of_xlate,
48ec83bc
WD
1901 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1902};
1903
1904/* Probing and initialisation functions */
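/*
 * Allocate one DMA-coherent queue of (1 << max_n_shift) entries, each of
 * 'dwords' 64-bit words, record the prod/cons register offsets, and
 * pre-compute the *_BASE register value (address + log2 size) in q->q_base.
 */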
1905static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
1906 struct arm_smmu_queue *q,
1907 unsigned long prod_off,
1908 unsigned long cons_off,
1909 size_t dwords)
1910{
1911 size_t qsz = ((1 << q->max_n_shift) * dwords) << 3;
1912
04fa26c7 1913 q->base = dmam_alloc_coherent(smmu->dev, qsz, &q->base_dma, GFP_KERNEL);
48ec83bc
WD
1914 if (!q->base) {
1915 dev_err(smmu->dev, "failed to allocate queue (0x%zx bytes)\n",
1916 qsz);
1917 return -ENOMEM;
1918 }
1919
1920 q->prod_reg = smmu->base + prod_off;
1921 q->cons_reg = smmu->base + cons_off;
1922 q->ent_dwords = dwords;
1923
1924 q->q_base = Q_BASE_RWA;
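	/* '<<' binds tighter than '&': the address mask below is applied to base_dma in place */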
1925 q->q_base |= q->base_dma & Q_BASE_ADDR_MASK << Q_BASE_ADDR_SHIFT;
1926 q->q_base |= (q->max_n_shift & Q_BASE_LOG2SIZE_MASK)
1927 << Q_BASE_LOG2SIZE_SHIFT;
1928
1929 q->prod = q->cons = 0;
1930 return 0;
1931}
1932
48ec83bc
WD
1933static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
1934{
1935 int ret;
1936
1937 /* cmdq */
1938 spin_lock_init(&smmu->cmdq.lock);
1939 ret = arm_smmu_init_one_queue(smmu, &smmu->cmdq.q, ARM_SMMU_CMDQ_PROD,
1940 ARM_SMMU_CMDQ_CONS, CMDQ_ENT_DWORDS);
1941 if (ret)
04fa26c7 1942 return ret;
48ec83bc
WD
1943
1944 /* evtq */
1945 ret = arm_smmu_init_one_queue(smmu, &smmu->evtq.q, ARM_SMMU_EVTQ_PROD,
1946 ARM_SMMU_EVTQ_CONS, EVTQ_ENT_DWORDS);
1947 if (ret)
04fa26c7 1948 return ret;
48ec83bc
WD
1949
1950 /* priq */
1951 if (!(smmu->features & ARM_SMMU_FEAT_PRI))
1952 return 0;
1953
04fa26c7
WD
1954 return arm_smmu_init_one_queue(smmu, &smmu->priq.q, ARM_SMMU_PRIQ_PROD,
1955 ARM_SMMU_PRIQ_CONS, PRIQ_ENT_DWORDS);
48ec83bc
WD
1956}
1957
1958static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
1959{
1960 unsigned int i;
1961 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1962 size_t size = sizeof(*cfg->l1_desc) * cfg->num_l1_ents;
1963 void *strtab = smmu->strtab_cfg.strtab;
1964
1965 cfg->l1_desc = devm_kzalloc(smmu->dev, size, GFP_KERNEL);
1966 if (!cfg->l1_desc) {
1967 dev_err(smmu->dev, "failed to allocate l1 stream table desc\n");
1968 return -ENOMEM;
1969 }
1970
1971 for (i = 0; i < cfg->num_l1_ents; ++i) {
1972 arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
1973 strtab += STRTAB_L1_DESC_DWORDS << 3;
1974 }
1975
1976 return 0;
1977}
1978
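/*
 * 2-level stream table: each L1 descriptor points at an L2 table of
 * (1 << STRTAB_SPLIT) STEs. The L1 is sized to cover sid_bits, capped so
 * that the L1 table itself fits in 1 << STRTAB_L1_SZ_SHIFT bytes; the L2
 * leaves are allocated on demand by arm_smmu_init_l2_strtab().
 */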
1979static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
1980{
1981 void *strtab;
1982 u64 reg;
d2e88e7c 1983 u32 size, l1size;
48ec83bc
WD
1984 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
1985
28c8b404
WD
1986 /*
1987 * If we can resolve everything with a single L2 table, then we
1988 * just need a single L1 descriptor. Otherwise, calculate the L1
1989 * size, capped to the SIDSIZE.
1990 */
1991 if (smmu->sid_bits < STRTAB_SPLIT) {
1992 size = 0;
1993 } else {
1994 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
1995 size = min(size, smmu->sid_bits - STRTAB_SPLIT);
1996 }
d2e88e7c
WD
1997 cfg->num_l1_ents = 1 << size;
1998
1999 size += STRTAB_SPLIT;
2000 if (size < smmu->sid_bits)
48ec83bc
WD
2001 dev_warn(smmu->dev,
2002 "2-level strtab only covers %u/%u bits of SID\n",
d2e88e7c 2003 size, smmu->sid_bits);
48ec83bc 2004
d2e88e7c 2005 l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
04fa26c7
WD
2006 strtab = dmam_alloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
2007 GFP_KERNEL | __GFP_ZERO);
48ec83bc
WD
2008 if (!strtab) {
2009 dev_err(smmu->dev,
2010 "failed to allocate l1 stream table (%u bytes)\n",
 2011 l1size);
2012 return -ENOMEM;
2013 }
2014 cfg->strtab = strtab;
2015
2016 /* Configure strtab_base_cfg for 2 levels */
2017 reg = STRTAB_BASE_CFG_FMT_2LVL;
2018 reg |= (size & STRTAB_BASE_CFG_LOG2SIZE_MASK)
2019 << STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
2020 reg |= (STRTAB_SPLIT & STRTAB_BASE_CFG_SPLIT_MASK)
2021 << STRTAB_BASE_CFG_SPLIT_SHIFT;
2022 cfg->strtab_base_cfg = reg;
2023
04fa26c7 2024 return arm_smmu_init_l1_strtab(smmu);
48ec83bc
WD
2025}
2026
2027static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
2028{
2029 void *strtab;
2030 u64 reg;
2031 u32 size;
2032 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2033
2034 size = (1 << smmu->sid_bits) * (STRTAB_STE_DWORDS << 3);
04fa26c7
WD
2035 strtab = dmam_alloc_coherent(smmu->dev, size, &cfg->strtab_dma,
2036 GFP_KERNEL | __GFP_ZERO);
48ec83bc
WD
2037 if (!strtab) {
2038 dev_err(smmu->dev,
2039 "failed to allocate linear stream table (%u bytes)\n",
2040 size);
2041 return -ENOMEM;
2042 }
2043 cfg->strtab = strtab;
2044 cfg->num_l1_ents = 1 << smmu->sid_bits;
2045
2046 /* Configure strtab_base_cfg for a linear table covering all SIDs */
2047 reg = STRTAB_BASE_CFG_FMT_LINEAR;
2048 reg |= (smmu->sid_bits & STRTAB_BASE_CFG_LOG2SIZE_MASK)
2049 << STRTAB_BASE_CFG_LOG2SIZE_SHIFT;
2050 cfg->strtab_base_cfg = reg;
2051
2052 arm_smmu_init_bypass_stes(strtab, cfg->num_l1_ents);
2053 return 0;
2054}
2055
2056static int arm_smmu_init_strtab(struct arm_smmu_device *smmu)
2057{
2058 u64 reg;
2059 int ret;
2060
2061 if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB)
2062 ret = arm_smmu_init_strtab_2lvl(smmu);
2063 else
2064 ret = arm_smmu_init_strtab_linear(smmu);
2065
2066 if (ret)
2067 return ret;
2068
2069 /* Set the strtab base address */
2070 reg = smmu->strtab_cfg.strtab_dma &
2071 STRTAB_BASE_ADDR_MASK << STRTAB_BASE_ADDR_SHIFT;
2072 reg |= STRTAB_BASE_RA;
2073 smmu->strtab_cfg.strtab_base = reg;
2074
2075 /* Allocate the first VMID for stage-2 bypass STEs */
2076 set_bit(0, smmu->vmid_map);
2077 return 0;
2078}
2079
48ec83bc
WD
2080static int arm_smmu_init_structures(struct arm_smmu_device *smmu)
2081{
2082 int ret;
2083
2084 ret = arm_smmu_init_queues(smmu);
2085 if (ret)
2086 return ret;
2087
04fa26c7 2088 return arm_smmu_init_strtab(smmu);
48ec83bc
WD
2089}
2090
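/*
 * Update-and-ack pattern used for CR0/IRQ_CTRL: write the register, then
 * poll its companion ACK register until the new value is reflected back.
 */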
2091static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
2092 unsigned int reg_off, unsigned int ack_off)
2093{
2094 u32 reg;
2095
2096 writel_relaxed(val, smmu->base + reg_off);
2097 return readl_relaxed_poll_timeout(smmu->base + ack_off, reg, reg == val,
2098 1, ARM_SMMU_POLL_TIMEOUT_US);
2099}
2100
dc87a98d
RM
 2101/* GBPA is "special": updated via its own UPDATE handshake bit rather than an ACK register */
2102static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
2103{
2104 int ret;
2105 u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;
2106
2107 ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
2108 1, ARM_SMMU_POLL_TIMEOUT_US);
2109 if (ret)
2110 return ret;
2111
2112 reg &= ~clr;
2113 reg |= set;
2114 writel_relaxed(reg | GBPA_UPDATE, gbpa);
2115 return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
2116 1, ARM_SMMU_POLL_TIMEOUT_US);
2117}
2118
166bdbd2
MZ
2119static void arm_smmu_free_msis(void *data)
2120{
2121 struct device *dev = data;
2122 platform_msi_domain_free_irqs(dev);
2123}
2124
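/*
 * MSI support: the SMMU raises its interrupts by writing to doorbell
 * addresses programmed into the *_IRQ_CFG{0,1,2} register triplets. This
 * callback translates the composed msi_msg into address/data/memattr
 * writes for the corresponding queue or GERROR doorbell.
 */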
2125static void arm_smmu_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
2126{
2127 phys_addr_t doorbell;
2128 struct device *dev = msi_desc_to_dev(desc);
2129 struct arm_smmu_device *smmu = dev_get_drvdata(dev);
2130 phys_addr_t *cfg = arm_smmu_msi_cfg[desc->platform.msi_index];
2131
2132 doorbell = (((u64)msg->address_hi) << 32) | msg->address_lo;
2133 doorbell &= MSI_CFG0_ADDR_MASK << MSI_CFG0_ADDR_SHIFT;
2134
2135 writeq_relaxed(doorbell, smmu->base + cfg[0]);
2136 writel_relaxed(msg->data, smmu->base + cfg[1]);
2137 writel_relaxed(MSI_CFG2_MEMATTR_DEVICE_nGnRE, smmu->base + cfg[2]);
2138}
2139
2140static void arm_smmu_setup_msis(struct arm_smmu_device *smmu)
2141{
2142 struct msi_desc *desc;
2143 int ret, nvec = ARM_SMMU_MAX_MSIS;
2144 struct device *dev = smmu->dev;
2145
2146 /* Clear the MSI address regs */
2147 writeq_relaxed(0, smmu->base + ARM_SMMU_GERROR_IRQ_CFG0);
2148 writeq_relaxed(0, smmu->base + ARM_SMMU_EVTQ_IRQ_CFG0);
2149
2150 if (smmu->features & ARM_SMMU_FEAT_PRI)
2151 writeq_relaxed(0, smmu->base + ARM_SMMU_PRIQ_IRQ_CFG0);
2152 else
2153 nvec--;
2154
2155 if (!(smmu->features & ARM_SMMU_FEAT_MSI))
2156 return;
2157
2158 /* Allocate MSIs for evtq, gerror and priq. Ignore cmdq */
2159 ret = platform_msi_domain_alloc_irqs(dev, nvec, arm_smmu_write_msi_msg);
2160 if (ret) {
2161 dev_warn(dev, "failed to allocate MSIs\n");
2162 return;
2163 }
2164
2165 for_each_msi_entry(desc, dev) {
2166 switch (desc->platform.msi_index) {
2167 case EVTQ_MSI_INDEX:
2168 smmu->evtq.q.irq = desc->irq;
2169 break;
2170 case GERROR_MSI_INDEX:
2171 smmu->gerr_irq = desc->irq;
2172 break;
2173 case PRIQ_MSI_INDEX:
2174 smmu->priq.q.irq = desc->irq;
2175 break;
2176 default: /* Unknown */
2177 continue;
2178 }
2179 }
2180
2181 /* Add callback to free MSIs on teardown */
2182 devm_add_action(dev, arm_smmu_free_msis, dev);
2183}
2184
48ec83bc
WD
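/*
 * Interrupt bring-up: mask everything, set up MSIs where available,
 * request the wired evtq/cmdq-sync/gerror/priq lines, then enable
 * event/gerror (and, if granted, PRI) interrupt generation.
 */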
2185static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
2186{
2187 int ret, irq;
ccd6385d 2188 u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
48ec83bc
WD
2189
2190 /* Disable IRQs first */
2191 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
2192 ARM_SMMU_IRQ_CTRLACK);
2193 if (ret) {
2194 dev_err(smmu->dev, "failed to disable irqs\n");
2195 return ret;
2196 }
2197
166bdbd2 2198 arm_smmu_setup_msis(smmu);
48ec83bc 2199
166bdbd2 2200 /* Request interrupt lines */
48ec83bc
WD
2201 irq = smmu->evtq.q.irq;
2202 if (irq) {
b4163fb3 2203 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
48ec83bc 2204 arm_smmu_evtq_thread,
b4163fb3
JPB
2205 IRQF_ONESHOT,
2206 "arm-smmu-v3-evtq", smmu);
287980e4 2207 if (ret < 0)
48ec83bc
WD
2208 dev_warn(smmu->dev, "failed to enable evtq irq\n");
2209 }
2210
2211 irq = smmu->cmdq.q.irq;
2212 if (irq) {
2213 ret = devm_request_irq(smmu->dev, irq,
2214 arm_smmu_cmdq_sync_handler, 0,
2215 "arm-smmu-v3-cmdq-sync", smmu);
287980e4 2216 if (ret < 0)
48ec83bc
WD
2217 dev_warn(smmu->dev, "failed to enable cmdq-sync irq\n");
2218 }
2219
2220 irq = smmu->gerr_irq;
2221 if (irq) {
2222 ret = devm_request_irq(smmu->dev, irq, arm_smmu_gerror_handler,
2223 0, "arm-smmu-v3-gerror", smmu);
287980e4 2224 if (ret < 0)
48ec83bc
WD
2225 dev_warn(smmu->dev, "failed to enable gerror irq\n");
2226 }
2227
2228 if (smmu->features & ARM_SMMU_FEAT_PRI) {
48ec83bc
WD
2229 irq = smmu->priq.q.irq;
2230 if (irq) {
b4163fb3 2231 ret = devm_request_threaded_irq(smmu->dev, irq, NULL,
48ec83bc 2232 arm_smmu_priq_thread,
b4163fb3
JPB
2233 IRQF_ONESHOT,
2234 "arm-smmu-v3-priq",
48ec83bc 2235 smmu);
287980e4 2236 if (ret < 0)
48ec83bc
WD
2237 dev_warn(smmu->dev,
2238 "failed to enable priq irq\n");
ccd6385d
MZ
2239 else
2240 irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
48ec83bc
WD
2241 }
2242 }
2243
2244 /* Enable interrupt generation on the SMMU */
ccd6385d 2245 ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
48ec83bc
WD
2246 ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
2247 if (ret)
2248 dev_warn(smmu->dev, "failed to enable irqs\n");
2249
2250 return 0;
2251}
2252
2253static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
2254{
2255 int ret;
2256
2257 ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_CR0, ARM_SMMU_CR0ACK);
2258 if (ret)
2259 dev_err(smmu->dev, "failed to clear cr0\n");
2260
2261 return ret;
2262}
2263
dc87a98d 2264static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass)
48ec83bc
WD
2265{
2266 int ret;
2267 u32 reg, enables;
2268 struct arm_smmu_cmdq_ent cmd;
2269
2270 /* Clear CR0 and sync (disables SMMU and queue processing) */
2271 reg = readl_relaxed(smmu->base + ARM_SMMU_CR0);
2272 if (reg & CR0_SMMUEN)
2273 dev_warn(smmu->dev, "SMMU currently enabled! Resetting...\n");
2274
2275 ret = arm_smmu_device_disable(smmu);
2276 if (ret)
2277 return ret;
2278
2279 /* CR1 (table and queue memory attributes) */
2280 reg = (CR1_SH_ISH << CR1_TABLE_SH_SHIFT) |
2281 (CR1_CACHE_WB << CR1_TABLE_OC_SHIFT) |
2282 (CR1_CACHE_WB << CR1_TABLE_IC_SHIFT) |
2283 (CR1_SH_ISH << CR1_QUEUE_SH_SHIFT) |
2284 (CR1_CACHE_WB << CR1_QUEUE_OC_SHIFT) |
2285 (CR1_CACHE_WB << CR1_QUEUE_IC_SHIFT);
2286 writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
2287
 2288 /* CR2 (private TLB maintenance, record invalid SIDs, E2H) */
2289 reg = CR2_PTM | CR2_RECINVSID | CR2_E2H;
2290 writel_relaxed(reg, smmu->base + ARM_SMMU_CR2);
2291
2292 /* Stream table */
2293 writeq_relaxed(smmu->strtab_cfg.strtab_base,
2294 smmu->base + ARM_SMMU_STRTAB_BASE);
2295 writel_relaxed(smmu->strtab_cfg.strtab_base_cfg,
2296 smmu->base + ARM_SMMU_STRTAB_BASE_CFG);
2297
2298 /* Command queue */
2299 writeq_relaxed(smmu->cmdq.q.q_base, smmu->base + ARM_SMMU_CMDQ_BASE);
2300 writel_relaxed(smmu->cmdq.q.prod, smmu->base + ARM_SMMU_CMDQ_PROD);
2301 writel_relaxed(smmu->cmdq.q.cons, smmu->base + ARM_SMMU_CMDQ_CONS);
2302
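	/*
	 * Enable the queues in dependency order: cmdq first, since it is
	 * needed to issue the invalidation commands below, then evtq and
	 * (if supported) priq, and only then the SMMU itself.
	 */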
2303 enables = CR0_CMDQEN;
2304 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2305 ARM_SMMU_CR0ACK);
2306 if (ret) {
2307 dev_err(smmu->dev, "failed to enable command queue\n");
2308 return ret;
2309 }
2310
2311 /* Invalidate any cached configuration */
2312 cmd.opcode = CMDQ_OP_CFGI_ALL;
2313 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2314 cmd.opcode = CMDQ_OP_CMD_SYNC;
2315 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2316
2317 /* Invalidate any stale TLB entries */
2318 if (smmu->features & ARM_SMMU_FEAT_HYP) {
2319 cmd.opcode = CMDQ_OP_TLBI_EL2_ALL;
2320 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2321 }
2322
2323 cmd.opcode = CMDQ_OP_TLBI_NSNH_ALL;
2324 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2325 cmd.opcode = CMDQ_OP_CMD_SYNC;
2326 arm_smmu_cmdq_issue_cmd(smmu, &cmd);
2327
2328 /* Event queue */
2329 writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE);
2330 writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD);
2331 writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS);
2332
2333 enables |= CR0_EVTQEN;
2334 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2335 ARM_SMMU_CR0ACK);
2336 if (ret) {
2337 dev_err(smmu->dev, "failed to enable event queue\n");
2338 return ret;
2339 }
2340
2341 /* PRI queue */
2342 if (smmu->features & ARM_SMMU_FEAT_PRI) {
2343 writeq_relaxed(smmu->priq.q.q_base,
2344 smmu->base + ARM_SMMU_PRIQ_BASE);
2345 writel_relaxed(smmu->priq.q.prod,
2346 smmu->base + ARM_SMMU_PRIQ_PROD);
2347 writel_relaxed(smmu->priq.q.cons,
2348 smmu->base + ARM_SMMU_PRIQ_CONS);
2349
2350 enables |= CR0_PRIQEN;
2351 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2352 ARM_SMMU_CR0ACK);
2353 if (ret) {
2354 dev_err(smmu->dev, "failed to enable PRI queue\n");
2355 return ret;
2356 }
2357 }
2358
2359 ret = arm_smmu_setup_irqs(smmu);
2360 if (ret) {
2361 dev_err(smmu->dev, "failed to setup irqs\n");
2362 return ret;
2363 }
2364
dc87a98d
RM
2365
2366 /* Enable the SMMU interface, or ensure bypass */
2367 if (!bypass || disable_bypass) {
2368 enables |= CR0_SMMUEN;
2369 } else {
2370 ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT);
2371 if (ret) {
2372 dev_err(smmu->dev, "GBPA not responding to update\n");
2373 return ret;
2374 }
2375 }
48ec83bc
WD
2376 ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
2377 ARM_SMMU_CR0ACK);
2378 if (ret) {
2379 dev_err(smmu->dev, "failed to enable SMMU interface\n");
2380 return ret;
2381 }
2382
2383 return 0;
2384}
2385
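/*
 * Read the ID registers: IDR0 for feature flags and translation support,
 * IDR1 for queue sizes and SID/SSID widths, IDR5 for page sizes and the
 * output address size used to set the table walker's DMA mask.
 */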
2985b521 2386static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
48ec83bc
WD
2387{
2388 u32 reg;
2985b521 2389 bool coherent = smmu->features & ARM_SMMU_FEAT_COHERENCY;
48ec83bc
WD
2390
2391 /* IDR0 */
2392 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
2393
2394 /* 2-level structures */
2395 if ((reg & IDR0_ST_LVL_MASK << IDR0_ST_LVL_SHIFT) == IDR0_ST_LVL_2LVL)
2396 smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;
2397
2398 if (reg & IDR0_CD2L)
2399 smmu->features |= ARM_SMMU_FEAT_2_LVL_CDTAB;
2400
2401 /*
2402 * Translation table endianness.
2403 * We currently require the same endianness as the CPU, but this
2404 * could be changed later by adding a new IO_PGTABLE_QUIRK.
2405 */
2406 switch (reg & IDR0_TTENDIAN_MASK << IDR0_TTENDIAN_SHIFT) {
2407 case IDR0_TTENDIAN_MIXED:
2408 smmu->features |= ARM_SMMU_FEAT_TT_LE | ARM_SMMU_FEAT_TT_BE;
2409 break;
2410#ifdef __BIG_ENDIAN
2411 case IDR0_TTENDIAN_BE:
2412 smmu->features |= ARM_SMMU_FEAT_TT_BE;
2413 break;
2414#else
2415 case IDR0_TTENDIAN_LE:
2416 smmu->features |= ARM_SMMU_FEAT_TT_LE;
2417 break;
2418#endif
2419 default:
2420 dev_err(smmu->dev, "unknown/unsupported TT endianness!\n");
2421 return -ENXIO;
2422 }
2423
2424 /* Boolean feature flags */
2425 if (IS_ENABLED(CONFIG_PCI_PRI) && reg & IDR0_PRI)
2426 smmu->features |= ARM_SMMU_FEAT_PRI;
2427
2428 if (IS_ENABLED(CONFIG_PCI_ATS) && reg & IDR0_ATS)
2429 smmu->features |= ARM_SMMU_FEAT_ATS;
2430
2431 if (reg & IDR0_SEV)
2432 smmu->features |= ARM_SMMU_FEAT_SEV;
2433
2434 if (reg & IDR0_MSI)
2435 smmu->features |= ARM_SMMU_FEAT_MSI;
2436
2437 if (reg & IDR0_HYP)
2438 smmu->features |= ARM_SMMU_FEAT_HYP;
2439
2440 /*
2985b521 2441 * The coherency feature as set by FW is used in preference to the ID
48ec83bc
WD
2442 * register, but warn on mismatch.
2443 */
48ec83bc
WD
2444 if (!!(reg & IDR0_COHACC) != coherent)
 2445 dev_warn(smmu->dev, "IDR0.COHACC overridden by FW configuration (%s)\n",
2446 coherent ? "true" : "false");
2447
6380be05
PM
2448 switch (reg & IDR0_STALL_MODEL_MASK << IDR0_STALL_MODEL_SHIFT) {
2449 case IDR0_STALL_MODEL_STALL:
2450 /* Fallthrough */
2451 case IDR0_STALL_MODEL_FORCE:
48ec83bc 2452 smmu->features |= ARM_SMMU_FEAT_STALLS;
6380be05 2453 }
48ec83bc
WD
2454
2455 if (reg & IDR0_S1P)
2456 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
2457
2458 if (reg & IDR0_S2P)
2459 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
2460
2461 if (!(reg & (IDR0_S1P | IDR0_S2P))) {
2462 dev_err(smmu->dev, "no translation support!\n");
2463 return -ENXIO;
2464 }
2465
2466 /* We only support the AArch64 table format at present */
f0c453db
WD
2467 switch (reg & IDR0_TTF_MASK << IDR0_TTF_SHIFT) {
2468 case IDR0_TTF_AARCH32_64:
2469 smmu->ias = 40;
2470 /* Fallthrough */
2471 case IDR0_TTF_AARCH64:
2472 break;
2473 default:
48ec83bc
WD
2474 dev_err(smmu->dev, "AArch64 table format not supported!\n");
2475 return -ENXIO;
2476 }
2477
2478 /* ASID/VMID sizes */
2479 smmu->asid_bits = reg & IDR0_ASID16 ? 16 : 8;
2480 smmu->vmid_bits = reg & IDR0_VMID16 ? 16 : 8;
2481
2482 /* IDR1 */
2483 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR1);
2484 if (reg & (IDR1_TABLES_PRESET | IDR1_QUEUES_PRESET | IDR1_REL)) {
2485 dev_err(smmu->dev, "embedded implementation not supported\n");
2486 return -ENXIO;
2487 }
2488
2489 /* Queue sizes, capped at 4k */
2490 smmu->cmdq.q.max_n_shift = min((u32)CMDQ_MAX_SZ_SHIFT,
2491 reg >> IDR1_CMDQ_SHIFT & IDR1_CMDQ_MASK);
2492 if (!smmu->cmdq.q.max_n_shift) {
2493 /* Odd alignment restrictions on the base, so ignore for now */
2494 dev_err(smmu->dev, "unit-length command queue not supported\n");
2495 return -ENXIO;
2496 }
2497
2498 smmu->evtq.q.max_n_shift = min((u32)EVTQ_MAX_SZ_SHIFT,
2499 reg >> IDR1_EVTQ_SHIFT & IDR1_EVTQ_MASK);
2500 smmu->priq.q.max_n_shift = min((u32)PRIQ_MAX_SZ_SHIFT,
2501 reg >> IDR1_PRIQ_SHIFT & IDR1_PRIQ_MASK);
2502
2503 /* SID/SSID sizes */
2504 smmu->ssid_bits = reg >> IDR1_SSID_SHIFT & IDR1_SSID_MASK;
2505 smmu->sid_bits = reg >> IDR1_SID_SHIFT & IDR1_SID_MASK;
2506
2507 /* IDR5 */
2508 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR5);
2509
2510 /* Maximum number of outstanding stalls */
2511 smmu->evtq.max_stalls = reg >> IDR5_STALL_MAX_SHIFT
2512 & IDR5_STALL_MAX_MASK;
2513
2514 /* Page sizes */
2515 if (reg & IDR5_GRAN64K)
d5466357 2516 smmu->pgsize_bitmap |= SZ_64K | SZ_512M;
48ec83bc 2517 if (reg & IDR5_GRAN16K)
d5466357 2518 smmu->pgsize_bitmap |= SZ_16K | SZ_32M;
48ec83bc 2519 if (reg & IDR5_GRAN4K)
d5466357 2520 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G;
48ec83bc 2521
d5466357
RM
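	/* Accumulate the page sizes supported by every SMMU probed so far */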
2522 if (arm_smmu_ops.pgsize_bitmap == -1UL)
2523 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap;
2524 else
2525 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap;
48ec83bc
WD
2526
2527 /* Output address size */
2528 switch (reg & IDR5_OAS_MASK << IDR5_OAS_SHIFT) {
2529 case IDR5_OAS_32_BIT:
2530 smmu->oas = 32;
2531 break;
2532 case IDR5_OAS_36_BIT:
2533 smmu->oas = 36;
2534 break;
2535 case IDR5_OAS_40_BIT:
2536 smmu->oas = 40;
2537 break;
2538 case IDR5_OAS_42_BIT:
2539 smmu->oas = 42;
2540 break;
2541 case IDR5_OAS_44_BIT:
2542 smmu->oas = 44;
2543 break;
85430968
WD
2544 default:
2545 dev_info(smmu->dev,
2546 "unknown output address size. Truncating to 48-bit\n");
2547 /* Fallthrough */
48ec83bc
WD
2548 case IDR5_OAS_48_BIT:
2549 smmu->oas = 48;
48ec83bc
WD
2550 }
2551
2552 /* Set the DMA mask for our table walker */
2553 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(smmu->oas)))
2554 dev_warn(smmu->dev,
2555 "failed to set DMA mask for table walker\n");
2556
f0c453db 2557 smmu->ias = max(smmu->ias, smmu->oas);
48ec83bc
WD
2558
2559 dev_info(smmu->dev, "ias %lu-bit, oas %lu-bit (features 0x%08x)\n",
2560 smmu->ias, smmu->oas, smmu->features);
2561 return 0;
2562}
2563
e4dadfa8
LP
2564#ifdef CONFIG_ACPI
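/*
 * ACPI probing pulls SMMUv3-specific data from the IORT node; currently
 * only the coherent-access override flag is consumed.
 */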
2565static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2566 struct arm_smmu_device *smmu)
2567{
2568 struct acpi_iort_smmu_v3 *iort_smmu;
2569 struct device *dev = smmu->dev;
2570 struct acpi_iort_node *node;
2571
2572 node = *(struct acpi_iort_node **)dev_get_platdata(dev);
2573
2574 /* Retrieve SMMUv3 specific data */
2575 iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
2576
2577 if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
2578 smmu->features |= ARM_SMMU_FEAT_COHERENCY;
2579
2580 return 0;
2581}
2582#else
2583static inline int arm_smmu_device_acpi_probe(struct platform_device *pdev,
2584 struct arm_smmu_device *smmu)
2585{
2586 return -ENODEV;
2587}
2588#endif
2589
2985b521
LP
2590static int arm_smmu_device_dt_probe(struct platform_device *pdev,
2591 struct arm_smmu_device *smmu)
48ec83bc 2592{
48ec83bc 2593 struct device *dev = &pdev->dev;
dc87a98d 2594 u32 cells;
2985b521 2595 int ret = -EINVAL;
dc87a98d
RM
2596
2597 if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells))
2598 dev_err(dev, "missing #iommu-cells property\n");
2599 else if (cells != 1)
2600 dev_err(dev, "invalid #iommu-cells value (%d)\n", cells);
2601 else
2985b521
LP
2602 ret = 0;
2603
2604 parse_driver_options(smmu);
2605
2606 if (of_dma_is_coherent(dev->of_node))
2607 smmu->features |= ARM_SMMU_FEAT_COHERENCY;
2608
2609 return ret;
2610}
2611
2612static int arm_smmu_device_probe(struct platform_device *pdev)
2613{
2614 int irq, ret;
2615 struct resource *res;
2616 struct arm_smmu_device *smmu;
2617 struct device *dev = &pdev->dev;
2618 bool bypass;
48ec83bc
WD
2619
2620 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
2621 if (!smmu) {
2622 dev_err(dev, "failed to allocate arm_smmu_device\n");
2623 return -ENOMEM;
2624 }
2625 smmu->dev = dev;
2626
2627 /* Base address */
2628 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2629 if (resource_size(res) + 1 < SZ_128K) {
2630 dev_err(dev, "MMIO region too small (%pr)\n", res);
2631 return -EINVAL;
2632 }
2633
2634 smmu->base = devm_ioremap_resource(dev, res);
2635 if (IS_ERR(smmu->base))
2636 return PTR_ERR(smmu->base);
2637
2638 /* Interrupt lines */
2639 irq = platform_get_irq_byname(pdev, "eventq");
2640 if (irq > 0)
2641 smmu->evtq.q.irq = irq;
2642
2643 irq = platform_get_irq_byname(pdev, "priq");
2644 if (irq > 0)
2645 smmu->priq.q.irq = irq;
2646
2647 irq = platform_get_irq_byname(pdev, "cmdq-sync");
2648 if (irq > 0)
2649 smmu->cmdq.q.irq = irq;
2650
2651 irq = platform_get_irq_byname(pdev, "gerror");
2652 if (irq > 0)
2653 smmu->gerr_irq = irq;
2654
e4dadfa8
LP
2655 if (dev->of_node) {
2656 ret = arm_smmu_device_dt_probe(pdev, smmu);
2657 } else {
2658 ret = arm_smmu_device_acpi_probe(pdev, smmu);
2659 if (ret == -ENODEV)
2660 return ret;
2661 }
2662
2985b521 2663 /* Set bypass mode according to firmware probing result */
e4dadfa8 2664 bypass = !!ret;
5e92946c 2665
48ec83bc 2666 /* Probe the h/w */
2985b521 2667 ret = arm_smmu_device_hw_probe(smmu);
48ec83bc
WD
2668 if (ret)
2669 return ret;
2670
2671 /* Initialise in-memory data structures */
2672 ret = arm_smmu_init_structures(smmu);
2673 if (ret)
2674 return ret;
2675
166bdbd2
MZ
2676 /* Record our private device structure */
2677 platform_set_drvdata(pdev, smmu);
2678
48ec83bc 2679 /* Reset the device */
8f785154
RM
2680 ret = arm_smmu_device_reset(smmu, bypass);
2681 if (ret)
2682 return ret;
2683
2684 /* And we're up. Go go go! */
778de074
LP
2685 iommu_register_instance(dev->fwnode, &arm_smmu_ops);
2686
08d4ca2a 2687#ifdef CONFIG_PCI
ec615f43
RM
2688 if (pci_bus_type.iommu_ops != &arm_smmu_ops) {
2689 pci_request_acs();
2690 ret = bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2691 if (ret)
2692 return ret;
2693 }
08d4ca2a
RM
2694#endif
2695#ifdef CONFIG_ARM_AMBA
ec615f43
RM
2696 if (amba_bustype.iommu_ops != &arm_smmu_ops) {
2697 ret = bus_set_iommu(&amba_bustype, &arm_smmu_ops);
2698 if (ret)
2699 return ret;
2700 }
08d4ca2a 2701#endif
ec615f43
RM
2702 if (platform_bus_type.iommu_ops != &arm_smmu_ops) {
2703 ret = bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2704 if (ret)
2705 return ret;
2706 }
2707 return 0;
48ec83bc
WD
2708}
2709
2710static int arm_smmu_device_remove(struct platform_device *pdev)
2711{
941a802d 2712 struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
48ec83bc
WD
2713
2714 arm_smmu_device_disable(smmu);
48ec83bc
WD
2715 return 0;
2716}
2717
 2718static const struct of_device_id arm_smmu_of_match[] = {
2719 { .compatible = "arm,smmu-v3", },
2720 { },
2721};
2722MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
2723
2724static struct platform_driver arm_smmu_driver = {
2725 .driver = {
2726 .name = "arm-smmu-v3",
2727 .of_match_table = of_match_ptr(arm_smmu_of_match),
2728 },
2985b521 2729 .probe = arm_smmu_device_probe,
48ec83bc
WD
2730 .remove = arm_smmu_device_remove,
2731};
2732
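/*
 * arm_smmu_init() can be reached twice: once early via IOMMU_OF_DECLARE /
 * IORT_ACPI_DECLARE and again from the subsys_initcall, hence the
 * 'registered' guard around platform_driver_register().
 */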
2733static int __init arm_smmu_init(void)
2734{
8f785154
RM
2735 static bool registered;
2736 int ret = 0;
112c898b 2737
8f785154
RM
2738 if (!registered) {
2739 ret = platform_driver_register(&arm_smmu_driver);
2740 registered = !ret;
2741 }
2742 return ret;
48ec83bc
WD
2743}
2744
2745static void __exit arm_smmu_exit(void)
2746{
 2747 platform_driver_unregister(&arm_smmu_driver);
2748}
2749
2750subsys_initcall(arm_smmu_init);
2751module_exit(arm_smmu_exit);
2752
8f785154
RM
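/*
 * Early OF hook: register the driver and (presumably so the SMMU probes
 * ahead of its master devices) create its platform device immediately
 * rather than waiting for of_platform_populate().
 */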
2753static int __init arm_smmu_of_init(struct device_node *np)
2754{
2755 int ret = arm_smmu_init();
2756
2757 if (ret)
2758 return ret;
2759
2760 if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
2761 return -ENODEV;
2762
2763 return 0;
2764}
2765IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);
2766
e4dadfa8
LP
2767#ifdef CONFIG_ACPI
2768static int __init acpi_smmu_v3_init(struct acpi_table_header *table)
2769{
2770 if (iort_node_match(ACPI_IORT_NODE_SMMU_V3))
2771 return arm_smmu_init();
2772
2773 return 0;
2774}
2775IORT_ACPI_DECLARE(arm_smmu_v3, ACPI_SIG_IORT, acpi_smmu_v3_init);
2776#endif
2777
48ec83bc
WD
2778MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
2779MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2780MODULE_LICENSE("GPL v2");