]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/iommu/arm-smmu.c
iommu/arm-smmu: Decouple context format from kernel config
[mirror_ubuntu-zesty-kernel.git] / drivers / iommu / arm-smmu.c
CommitLineData
45ae7cff
WD
1/*
2 * IOMMU API for ARM architected SMMU implementations.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
16 *
17 * Copyright (C) 2013 ARM Limited
18 *
19 * Author: Will Deacon <will.deacon@arm.com>
20 *
21 * This driver currently supports:
22 * - SMMUv1 and v2 implementations
23 * - Stream-matching and stream-indexing
24 * - v7/v8 long-descriptor format
25 * - Non-secure access to the SMMU
45ae7cff
WD
26 * - Context fault reporting
27 */
28
29#define pr_fmt(fmt) "arm-smmu: " fmt
30
31#include <linux/delay.h>
9adb9594 32#include <linux/dma-iommu.h>
45ae7cff
WD
33#include <linux/dma-mapping.h>
34#include <linux/err.h>
35#include <linux/interrupt.h>
36#include <linux/io.h>
f9a05f05 37#include <linux/io-64-nonatomic-hi-lo.h>
45ae7cff 38#include <linux/iommu.h>
859a732e 39#include <linux/iopoll.h>
45ae7cff
WD
40#include <linux/module.h>
41#include <linux/of.h>
bae2c2d4 42#include <linux/of_address.h>
a9a1b0b5 43#include <linux/pci.h>
45ae7cff
WD
44#include <linux/platform_device.h>
45#include <linux/slab.h>
46#include <linux/spinlock.h>
47
48#include <linux/amba/bus.h>
49
518f7136 50#include "io-pgtable.h"
45ae7cff
WD
51
52/* Maximum number of stream IDs assigned to a single device */
636e97b0 53#define MAX_MASTER_STREAMIDS MAX_PHANDLE_ARGS
45ae7cff
WD
54
55/* Maximum number of context banks per SMMU */
56#define ARM_SMMU_MAX_CBS 128
57
58/* Maximum number of mapping groups per SMMU */
59#define ARM_SMMU_MAX_SMRS 128
60
45ae7cff
WD
61/* SMMU global address space */
62#define ARM_SMMU_GR0(smmu) ((smmu)->base)
c757e852 63#define ARM_SMMU_GR1(smmu) ((smmu)->base + (1 << (smmu)->pgshift))
45ae7cff 64
3a5df8ff
AH
65/*
66 * SMMU global address space with conditional offset to access secure
67 * aliases of non-secure registers (e.g. nsCR0: 0x400, nsGFSR: 0x448,
68 * nsGFSYNR0: 0x450)
69 */
70#define ARM_SMMU_GR0_NS(smmu) \
71 ((smmu)->base + \
72 ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \
73 ? 0x400 : 0))
74
f9a05f05
RM
75/*
76 * Some 64-bit registers only make sense to write atomically, but in such
77 * cases all the data relevant to AArch32 formats lies within the lower word,
78 * therefore this actually makes more sense than it might first appear.
79 */
668b4ada 80#ifdef CONFIG_64BIT
f9a05f05 81#define smmu_write_atomic_lq writeq_relaxed
668b4ada 82#else
f9a05f05 83#define smmu_write_atomic_lq writel_relaxed
668b4ada
TC
84#endif
85
45ae7cff
WD
86/* Configuration registers */
87#define ARM_SMMU_GR0_sCR0 0x0
88#define sCR0_CLIENTPD (1 << 0)
89#define sCR0_GFRE (1 << 1)
90#define sCR0_GFIE (1 << 2)
91#define sCR0_GCFGFRE (1 << 4)
92#define sCR0_GCFGFIE (1 << 5)
93#define sCR0_USFCFG (1 << 10)
94#define sCR0_VMIDPNE (1 << 11)
95#define sCR0_PTM (1 << 12)
96#define sCR0_FB (1 << 13)
4e3e9b69 97#define sCR0_VMID16EN (1 << 31)
45ae7cff
WD
98#define sCR0_BSU_SHIFT 14
99#define sCR0_BSU_MASK 0x3
100
101/* Identification registers */
102#define ARM_SMMU_GR0_ID0 0x20
103#define ARM_SMMU_GR0_ID1 0x24
104#define ARM_SMMU_GR0_ID2 0x28
105#define ARM_SMMU_GR0_ID3 0x2c
106#define ARM_SMMU_GR0_ID4 0x30
107#define ARM_SMMU_GR0_ID5 0x34
108#define ARM_SMMU_GR0_ID6 0x38
109#define ARM_SMMU_GR0_ID7 0x3c
110#define ARM_SMMU_GR0_sGFSR 0x48
111#define ARM_SMMU_GR0_sGFSYNR0 0x50
112#define ARM_SMMU_GR0_sGFSYNR1 0x54
113#define ARM_SMMU_GR0_sGFSYNR2 0x58
45ae7cff
WD
114
115#define ID0_S1TS (1 << 30)
116#define ID0_S2TS (1 << 29)
117#define ID0_NTS (1 << 28)
118#define ID0_SMS (1 << 27)
859a732e 119#define ID0_ATOSNS (1 << 26)
7602b871
RM
120#define ID0_PTFS_NO_AARCH32 (1 << 25)
121#define ID0_PTFS_NO_AARCH32S (1 << 24)
45ae7cff
WD
122#define ID0_CTTW (1 << 14)
123#define ID0_NUMIRPT_SHIFT 16
124#define ID0_NUMIRPT_MASK 0xff
3c8766d0
OH
125#define ID0_NUMSIDB_SHIFT 9
126#define ID0_NUMSIDB_MASK 0xf
45ae7cff
WD
127#define ID0_NUMSMRG_SHIFT 0
128#define ID0_NUMSMRG_MASK 0xff
129
130#define ID1_PAGESIZE (1 << 31)
131#define ID1_NUMPAGENDXB_SHIFT 28
132#define ID1_NUMPAGENDXB_MASK 7
133#define ID1_NUMS2CB_SHIFT 16
134#define ID1_NUMS2CB_MASK 0xff
135#define ID1_NUMCB_SHIFT 0
136#define ID1_NUMCB_MASK 0xff
137
138#define ID2_OAS_SHIFT 4
139#define ID2_OAS_MASK 0xf
140#define ID2_IAS_SHIFT 0
141#define ID2_IAS_MASK 0xf
142#define ID2_UBS_SHIFT 8
143#define ID2_UBS_MASK 0xf
144#define ID2_PTFS_4K (1 << 12)
145#define ID2_PTFS_16K (1 << 13)
146#define ID2_PTFS_64K (1 << 14)
4e3e9b69 147#define ID2_VMID16 (1 << 15)
45ae7cff 148
45ae7cff 149/* Global TLB invalidation */
45ae7cff
WD
150#define ARM_SMMU_GR0_TLBIVMID 0x64
151#define ARM_SMMU_GR0_TLBIALLNSNH 0x68
152#define ARM_SMMU_GR0_TLBIALLH 0x6c
153#define ARM_SMMU_GR0_sTLBGSYNC 0x70
154#define ARM_SMMU_GR0_sTLBGSTATUS 0x74
155#define sTLBGSTATUS_GSACTIVE (1 << 0)
156#define TLB_LOOP_TIMEOUT 1000000 /* 1s! */
157
158/* Stream mapping registers */
159#define ARM_SMMU_GR0_SMR(n) (0x800 + ((n) << 2))
160#define SMR_VALID (1 << 31)
161#define SMR_MASK_SHIFT 16
162#define SMR_MASK_MASK 0x7fff
163#define SMR_ID_SHIFT 0
164#define SMR_ID_MASK 0x7fff
165
166#define ARM_SMMU_GR0_S2CR(n) (0xc00 + ((n) << 2))
167#define S2CR_CBNDX_SHIFT 0
168#define S2CR_CBNDX_MASK 0xff
169#define S2CR_TYPE_SHIFT 16
170#define S2CR_TYPE_MASK 0x3
171#define S2CR_TYPE_TRANS (0 << S2CR_TYPE_SHIFT)
172#define S2CR_TYPE_BYPASS (1 << S2CR_TYPE_SHIFT)
173#define S2CR_TYPE_FAULT (2 << S2CR_TYPE_SHIFT)
174
d346180e
RM
175#define S2CR_PRIVCFG_SHIFT 24
176#define S2CR_PRIVCFG_UNPRIV (2 << S2CR_PRIVCFG_SHIFT)
177
45ae7cff
WD
178/* Context bank attribute registers */
179#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
180#define CBAR_VMID_SHIFT 0
181#define CBAR_VMID_MASK 0xff
57ca90f6
WD
182#define CBAR_S1_BPSHCFG_SHIFT 8
183#define CBAR_S1_BPSHCFG_MASK 3
184#define CBAR_S1_BPSHCFG_NSH 3
45ae7cff
WD
185#define CBAR_S1_MEMATTR_SHIFT 12
186#define CBAR_S1_MEMATTR_MASK 0xf
187#define CBAR_S1_MEMATTR_WB 0xf
188#define CBAR_TYPE_SHIFT 16
189#define CBAR_TYPE_MASK 0x3
190#define CBAR_TYPE_S2_TRANS (0 << CBAR_TYPE_SHIFT)
191#define CBAR_TYPE_S1_TRANS_S2_BYPASS (1 << CBAR_TYPE_SHIFT)
192#define CBAR_TYPE_S1_TRANS_S2_FAULT (2 << CBAR_TYPE_SHIFT)
193#define CBAR_TYPE_S1_TRANS_S2_TRANS (3 << CBAR_TYPE_SHIFT)
194#define CBAR_IRPTNDX_SHIFT 24
195#define CBAR_IRPTNDX_MASK 0xff
196
197#define ARM_SMMU_GR1_CBA2R(n) (0x800 + ((n) << 2))
198#define CBA2R_RW64_32BIT (0 << 0)
199#define CBA2R_RW64_64BIT (1 << 0)
4e3e9b69
TC
200#define CBA2R_VMID_SHIFT 16
201#define CBA2R_VMID_MASK 0xffff
45ae7cff
WD
202
203/* Translation context bank */
204#define ARM_SMMU_CB_BASE(smmu) ((smmu)->base + ((smmu)->size >> 1))
c757e852 205#define ARM_SMMU_CB(smmu, n) ((n) * (1 << (smmu)->pgshift))
45ae7cff
WD
206
207#define ARM_SMMU_CB_SCTLR 0x0
f0cfffc4 208#define ARM_SMMU_CB_ACTLR 0x4
45ae7cff
WD
209#define ARM_SMMU_CB_RESUME 0x8
210#define ARM_SMMU_CB_TTBCR2 0x10
668b4ada
TC
211#define ARM_SMMU_CB_TTBR0 0x20
212#define ARM_SMMU_CB_TTBR1 0x28
45ae7cff
WD
213#define ARM_SMMU_CB_TTBCR 0x30
214#define ARM_SMMU_CB_S1_MAIR0 0x38
518f7136 215#define ARM_SMMU_CB_S1_MAIR1 0x3c
f9a05f05 216#define ARM_SMMU_CB_PAR 0x50
45ae7cff 217#define ARM_SMMU_CB_FSR 0x58
f9a05f05 218#define ARM_SMMU_CB_FAR 0x60
45ae7cff 219#define ARM_SMMU_CB_FSYNR0 0x68
518f7136 220#define ARM_SMMU_CB_S1_TLBIVA 0x600
1463fe44 221#define ARM_SMMU_CB_S1_TLBIASID 0x610
518f7136
WD
222#define ARM_SMMU_CB_S1_TLBIVAL 0x620
223#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630
224#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638
661d962f 225#define ARM_SMMU_CB_ATS1PR 0x800
859a732e 226#define ARM_SMMU_CB_ATSR 0x8f0
45ae7cff
WD
227
228#define SCTLR_S1_ASIDPNE (1 << 12)
229#define SCTLR_CFCFG (1 << 7)
230#define SCTLR_CFIE (1 << 6)
231#define SCTLR_CFRE (1 << 5)
232#define SCTLR_E (1 << 4)
233#define SCTLR_AFE (1 << 2)
234#define SCTLR_TRE (1 << 1)
235#define SCTLR_M (1 << 0)
236#define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE)
237
f0cfffc4
RM
238#define ARM_MMU500_ACTLR_CPRE (1 << 1)
239
859a732e
MH
240#define CB_PAR_F (1 << 0)
241
242#define ATSR_ACTIVE (1 << 0)
243
45ae7cff
WD
244#define RESUME_RETRY (0 << 0)
245#define RESUME_TERMINATE (1 << 0)
246
45ae7cff 247#define TTBCR2_SEP_SHIFT 15
5dc5616e 248#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
45ae7cff 249
668b4ada 250#define TTBRn_ASID_SHIFT 48
45ae7cff
WD
251
252#define FSR_MULTI (1 << 31)
253#define FSR_SS (1 << 30)
254#define FSR_UUT (1 << 8)
255#define FSR_ASF (1 << 7)
256#define FSR_TLBLKF (1 << 6)
257#define FSR_TLBMCF (1 << 5)
258#define FSR_EF (1 << 4)
259#define FSR_PF (1 << 3)
260#define FSR_AFF (1 << 2)
261#define FSR_TF (1 << 1)
262
2907320d
MH
263#define FSR_IGN (FSR_AFF | FSR_ASF | \
264 FSR_TLBMCF | FSR_TLBLKF)
265#define FSR_FAULT (FSR_MULTI | FSR_SS | FSR_UUT | \
adaba320 266 FSR_EF | FSR_PF | FSR_TF | FSR_IGN)
45ae7cff
WD
267
268#define FSYNR0_WNR (1 << 4)
269
4cf740b0 270static int force_stage;
25a1c96c 271module_param(force_stage, int, S_IRUGO);
4cf740b0
WD
272MODULE_PARM_DESC(force_stage,
273 "Force SMMU mappings to be installed at a particular stage of translation. A value of '1' or '2' forces the corresponding stage. All other values are ignored (i.e. no stage is forced). Note that selecting a specific stage will disable support for nested translation.");
25a1c96c
RM
274static bool disable_bypass;
275module_param(disable_bypass, bool, S_IRUGO);
276MODULE_PARM_DESC(disable_bypass,
277 "Disable bypass streams such that incoming transactions from devices that are not attached to an iommu domain will report an abort back to the device and will not be allowed to pass through the SMMU.");
4cf740b0 278
09360403
RM
279enum arm_smmu_arch_version {
280 ARM_SMMU_V1 = 1,
281 ARM_SMMU_V2,
282};
283
67b65a3f
RM
284enum arm_smmu_implementation {
285 GENERIC_SMMU,
f0cfffc4 286 ARM_MMU500,
e086d912 287 CAVIUM_SMMUV2,
67b65a3f
RM
288};
289
45ae7cff
WD
290struct arm_smmu_smr {
291 u8 idx;
292 u16 mask;
293 u16 id;
294};
295
a9a1b0b5 296struct arm_smmu_master_cfg {
45ae7cff
WD
297 int num_streamids;
298 u16 streamids[MAX_MASTER_STREAMIDS];
45ae7cff
WD
299 struct arm_smmu_smr *smrs;
300};
301
a9a1b0b5
WD
302struct arm_smmu_master {
303 struct device_node *of_node;
a9a1b0b5
WD
304 struct rb_node node;
305 struct arm_smmu_master_cfg cfg;
306};
307
45ae7cff
WD
308struct arm_smmu_device {
309 struct device *dev;
45ae7cff
WD
310
311 void __iomem *base;
312 unsigned long size;
c757e852 313 unsigned long pgshift;
45ae7cff
WD
314
315#define ARM_SMMU_FEAT_COHERENT_WALK (1 << 0)
316#define ARM_SMMU_FEAT_STREAM_MATCH (1 << 1)
317#define ARM_SMMU_FEAT_TRANS_S1 (1 << 2)
318#define ARM_SMMU_FEAT_TRANS_S2 (1 << 3)
319#define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4)
859a732e 320#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5)
4e3e9b69 321#define ARM_SMMU_FEAT_VMID16 (1 << 6)
7602b871
RM
322#define ARM_SMMU_FEAT_FMT_AARCH64_4K (1 << 7)
323#define ARM_SMMU_FEAT_FMT_AARCH64_16K (1 << 8)
324#define ARM_SMMU_FEAT_FMT_AARCH64_64K (1 << 9)
325#define ARM_SMMU_FEAT_FMT_AARCH32_L (1 << 10)
326#define ARM_SMMU_FEAT_FMT_AARCH32_S (1 << 11)
45ae7cff 327 u32 features;
3a5df8ff
AH
328
329#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0)
330 u32 options;
09360403 331 enum arm_smmu_arch_version version;
67b65a3f 332 enum arm_smmu_implementation model;
45ae7cff
WD
333
334 u32 num_context_banks;
335 u32 num_s2_context_banks;
336 DECLARE_BITMAP(context_map, ARM_SMMU_MAX_CBS);
337 atomic_t irptndx;
338
339 u32 num_mapping_groups;
340 DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS);
341
518f7136
WD
342 unsigned long va_size;
343 unsigned long ipa_size;
344 unsigned long pa_size;
45ae7cff
WD
345
346 u32 num_global_irqs;
347 u32 num_context_irqs;
348 unsigned int *irqs;
349
45ae7cff
WD
350 struct list_head list;
351 struct rb_root masters;
1bd37a68
TC
352
353 u32 cavium_id_base; /* Specific to Cavium */
45ae7cff
WD
354};
355
7602b871
RM
356enum arm_smmu_context_fmt {
357 ARM_SMMU_CTX_FMT_NONE,
358 ARM_SMMU_CTX_FMT_AARCH64,
359 ARM_SMMU_CTX_FMT_AARCH32_L,
360 ARM_SMMU_CTX_FMT_AARCH32_S,
361};
362
45ae7cff 363struct arm_smmu_cfg {
45ae7cff
WD
364 u8 cbndx;
365 u8 irptndx;
366 u32 cbar;
7602b871 367 enum arm_smmu_context_fmt fmt;
45ae7cff 368};
faea13b7 369#define INVALID_IRPTNDX 0xff
45ae7cff 370
1bd37a68
TC
371#define ARM_SMMU_CB_ASID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx)
372#define ARM_SMMU_CB_VMID(smmu, cfg) ((u16)(smmu)->cavium_id_base + (cfg)->cbndx + 1)
ecfadb6e 373
c752ce45
WD
374enum arm_smmu_domain_stage {
375 ARM_SMMU_DOMAIN_S1 = 0,
376 ARM_SMMU_DOMAIN_S2,
377 ARM_SMMU_DOMAIN_NESTED,
378};
379
45ae7cff 380struct arm_smmu_domain {
44680eed 381 struct arm_smmu_device *smmu;
518f7136
WD
382 struct io_pgtable_ops *pgtbl_ops;
383 spinlock_t pgtbl_lock;
44680eed 384 struct arm_smmu_cfg cfg;
c752ce45 385 enum arm_smmu_domain_stage stage;
518f7136 386 struct mutex init_mutex; /* Protects smmu pointer */
1d672638 387 struct iommu_domain domain;
45ae7cff
WD
388};
389
518f7136
WD
390static struct iommu_ops arm_smmu_ops;
391
45ae7cff
WD
392static DEFINE_SPINLOCK(arm_smmu_devices_lock);
393static LIST_HEAD(arm_smmu_devices);
394
3a5df8ff
AH
395struct arm_smmu_option_prop {
396 u32 opt;
397 const char *prop;
398};
399
1bd37a68
TC
400static atomic_t cavium_smmu_context_count = ATOMIC_INIT(0);
401
2907320d 402static struct arm_smmu_option_prop arm_smmu_options[] = {
3a5df8ff
AH
403 { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" },
404 { 0, NULL},
405};
406
1d672638
JR
407static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
408{
409 return container_of(dom, struct arm_smmu_domain, domain);
410}
411
3a5df8ff
AH
412static void parse_driver_options(struct arm_smmu_device *smmu)
413{
414 int i = 0;
2907320d 415
3a5df8ff
AH
416 do {
417 if (of_property_read_bool(smmu->dev->of_node,
418 arm_smmu_options[i].prop)) {
419 smmu->options |= arm_smmu_options[i].opt;
420 dev_notice(smmu->dev, "option %s\n",
421 arm_smmu_options[i].prop);
422 }
423 } while (arm_smmu_options[++i].opt);
424}
425
8f68f8e2 426static struct device_node *dev_get_dev_node(struct device *dev)
a9a1b0b5
WD
427{
428 if (dev_is_pci(dev)) {
429 struct pci_bus *bus = to_pci_dev(dev)->bus;
2907320d 430
a9a1b0b5
WD
431 while (!pci_is_root_bus(bus))
432 bus = bus->parent;
8f68f8e2 433 return bus->bridge->parent->of_node;
a9a1b0b5
WD
434 }
435
8f68f8e2 436 return dev->of_node;
a9a1b0b5
WD
437}
438
45ae7cff
WD
439static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
440 struct device_node *dev_node)
441{
442 struct rb_node *node = smmu->masters.rb_node;
443
444 while (node) {
445 struct arm_smmu_master *master;
2907320d 446
45ae7cff
WD
447 master = container_of(node, struct arm_smmu_master, node);
448
449 if (dev_node < master->of_node)
450 node = node->rb_left;
451 else if (dev_node > master->of_node)
452 node = node->rb_right;
453 else
454 return master;
455 }
456
457 return NULL;
458}
459
a9a1b0b5 460static struct arm_smmu_master_cfg *
8f68f8e2 461find_smmu_master_cfg(struct device *dev)
a9a1b0b5 462{
8f68f8e2
WD
463 struct arm_smmu_master_cfg *cfg = NULL;
464 struct iommu_group *group = iommu_group_get(dev);
a9a1b0b5 465
8f68f8e2
WD
466 if (group) {
467 cfg = iommu_group_get_iommudata(group);
468 iommu_group_put(group);
469 }
a9a1b0b5 470
8f68f8e2 471 return cfg;
a9a1b0b5
WD
472}
473
45ae7cff
WD
474static int insert_smmu_master(struct arm_smmu_device *smmu,
475 struct arm_smmu_master *master)
476{
477 struct rb_node **new, *parent;
478
479 new = &smmu->masters.rb_node;
480 parent = NULL;
481 while (*new) {
2907320d
MH
482 struct arm_smmu_master *this
483 = container_of(*new, struct arm_smmu_master, node);
45ae7cff
WD
484
485 parent = *new;
486 if (master->of_node < this->of_node)
487 new = &((*new)->rb_left);
488 else if (master->of_node > this->of_node)
489 new = &((*new)->rb_right);
490 else
491 return -EEXIST;
492 }
493
494 rb_link_node(&master->node, parent, new);
495 rb_insert_color(&master->node, &smmu->masters);
496 return 0;
497}
498
499static int register_smmu_master(struct arm_smmu_device *smmu,
500 struct device *dev,
501 struct of_phandle_args *masterspec)
502{
503 int i;
504 struct arm_smmu_master *master;
505
506 master = find_smmu_master(smmu, masterspec->np);
507 if (master) {
508 dev_err(dev,
509 "rejecting multiple registrations for master device %s\n",
510 masterspec->np->name);
511 return -EBUSY;
512 }
513
514 if (masterspec->args_count > MAX_MASTER_STREAMIDS) {
515 dev_err(dev,
516 "reached maximum number (%d) of stream IDs for master device %s\n",
517 MAX_MASTER_STREAMIDS, masterspec->np->name);
518 return -ENOSPC;
519 }
520
521 master = devm_kzalloc(dev, sizeof(*master), GFP_KERNEL);
522 if (!master)
523 return -ENOMEM;
524
a9a1b0b5
WD
525 master->of_node = masterspec->np;
526 master->cfg.num_streamids = masterspec->args_count;
45ae7cff 527
3c8766d0
OH
528 for (i = 0; i < master->cfg.num_streamids; ++i) {
529 u16 streamid = masterspec->args[i];
45ae7cff 530
3c8766d0
OH
531 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) &&
532 (streamid >= smmu->num_mapping_groups)) {
533 dev_err(dev,
534 "stream ID for master device %s greater than maximum allowed (%d)\n",
535 masterspec->np->name, smmu->num_mapping_groups);
536 return -ERANGE;
537 }
538 master->cfg.streamids[i] = streamid;
539 }
45ae7cff
WD
540 return insert_smmu_master(smmu, master);
541}
542
44680eed 543static struct arm_smmu_device *find_smmu_for_device(struct device *dev)
45ae7cff 544{
44680eed 545 struct arm_smmu_device *smmu;
a9a1b0b5 546 struct arm_smmu_master *master = NULL;
8f68f8e2 547 struct device_node *dev_node = dev_get_dev_node(dev);
45ae7cff
WD
548
549 spin_lock(&arm_smmu_devices_lock);
44680eed 550 list_for_each_entry(smmu, &arm_smmu_devices, list) {
a9a1b0b5
WD
551 master = find_smmu_master(smmu, dev_node);
552 if (master)
553 break;
554 }
45ae7cff 555 spin_unlock(&arm_smmu_devices_lock);
44680eed 556
a9a1b0b5 557 return master ? smmu : NULL;
45ae7cff
WD
558}
559
560static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
561{
562 int idx;
563
564 do {
565 idx = find_next_zero_bit(map, end, start);
566 if (idx == end)
567 return -ENOSPC;
568 } while (test_and_set_bit(idx, map));
569
570 return idx;
571}
572
573static void __arm_smmu_free_bitmap(unsigned long *map, int idx)
574{
575 clear_bit(idx, map);
576}
577
578/* Wait for any pending TLB invalidations to complete */
518f7136 579static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu)
45ae7cff
WD
580{
581 int count = 0;
582 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
583
584 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_sTLBGSYNC);
585 while (readl_relaxed(gr0_base + ARM_SMMU_GR0_sTLBGSTATUS)
586 & sTLBGSTATUS_GSACTIVE) {
587 cpu_relax();
588 if (++count == TLB_LOOP_TIMEOUT) {
589 dev_err_ratelimited(smmu->dev,
590 "TLB sync timed out -- SMMU may be deadlocked\n");
591 return;
592 }
593 udelay(1);
594 }
595}
596
518f7136
WD
597static void arm_smmu_tlb_sync(void *cookie)
598{
599 struct arm_smmu_domain *smmu_domain = cookie;
600 __arm_smmu_tlb_sync(smmu_domain->smmu);
601}
602
603static void arm_smmu_tlb_inv_context(void *cookie)
1463fe44 604{
518f7136 605 struct arm_smmu_domain *smmu_domain = cookie;
44680eed
WD
606 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
607 struct arm_smmu_device *smmu = smmu_domain->smmu;
1463fe44 608 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
518f7136 609 void __iomem *base;
1463fe44
WD
610
611 if (stage1) {
612 base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1bd37a68 613 writel_relaxed(ARM_SMMU_CB_ASID(smmu, cfg),
ecfadb6e 614 base + ARM_SMMU_CB_S1_TLBIASID);
1463fe44
WD
615 } else {
616 base = ARM_SMMU_GR0(smmu);
1bd37a68 617 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg),
ecfadb6e 618 base + ARM_SMMU_GR0_TLBIVMID);
1463fe44
WD
619 }
620
518f7136
WD
621 __arm_smmu_tlb_sync(smmu);
622}
623
624static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
06c610e8 625 size_t granule, bool leaf, void *cookie)
518f7136
WD
626{
627 struct arm_smmu_domain *smmu_domain = cookie;
628 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
629 struct arm_smmu_device *smmu = smmu_domain->smmu;
630 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
631 void __iomem *reg;
632
633 if (stage1) {
634 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
635 reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
636
7602b871 637 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
518f7136 638 iova &= ~12UL;
1bd37a68 639 iova |= ARM_SMMU_CB_ASID(smmu, cfg);
75df1386
RM
640 do {
641 writel_relaxed(iova, reg);
642 iova += granule;
643 } while (size -= granule);
518f7136
WD
644 } else {
645 iova >>= 12;
1bd37a68 646 iova |= (u64)ARM_SMMU_CB_ASID(smmu, cfg) << 48;
75df1386
RM
647 do {
648 writeq_relaxed(iova, reg);
649 iova += granule >> 12;
650 } while (size -= granule);
518f7136 651 }
518f7136
WD
652 } else if (smmu->version == ARM_SMMU_V2) {
653 reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
654 reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
655 ARM_SMMU_CB_S2_TLBIIPAS2;
75df1386
RM
656 iova >>= 12;
657 do {
f9a05f05 658 smmu_write_atomic_lq(iova, reg);
75df1386
RM
659 iova += granule >> 12;
660 } while (size -= granule);
518f7136
WD
661 } else {
662 reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;
1bd37a68 663 writel_relaxed(ARM_SMMU_CB_VMID(smmu, cfg), reg);
518f7136
WD
664 }
665}
666
518f7136
WD
667static struct iommu_gather_ops arm_smmu_gather_ops = {
668 .tlb_flush_all = arm_smmu_tlb_inv_context,
669 .tlb_add_flush = arm_smmu_tlb_inv_range_nosync,
670 .tlb_sync = arm_smmu_tlb_sync,
518f7136
WD
671};
672
45ae7cff
WD
673static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
674{
675 int flags, ret;
f9a05f05 676 u32 fsr, fsynr, resume;
45ae7cff
WD
677 unsigned long iova;
678 struct iommu_domain *domain = dev;
1d672638 679 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
44680eed
WD
680 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
681 struct arm_smmu_device *smmu = smmu_domain->smmu;
45ae7cff
WD
682 void __iomem *cb_base;
683
44680eed 684 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
45ae7cff
WD
685 fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
686
687 if (!(fsr & FSR_FAULT))
688 return IRQ_NONE;
689
690 if (fsr & FSR_IGN)
691 dev_err_ratelimited(smmu->dev,
70c9a7db 692 "Unexpected context fault (fsr 0x%x)\n",
45ae7cff
WD
693 fsr);
694
695 fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
696 flags = fsynr & FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
697
f9a05f05 698 iova = readq_relaxed(cb_base + ARM_SMMU_CB_FAR);
45ae7cff
WD
699 if (!report_iommu_fault(domain, smmu->dev, iova, flags)) {
700 ret = IRQ_HANDLED;
701 resume = RESUME_RETRY;
702 } else {
2ef0f031
AH
703 dev_err_ratelimited(smmu->dev,
704 "Unhandled context fault: iova=0x%08lx, fsynr=0x%x, cb=%d\n",
44680eed 705 iova, fsynr, cfg->cbndx);
45ae7cff
WD
706 ret = IRQ_NONE;
707 resume = RESUME_TERMINATE;
708 }
709
710 /* Clear the faulting FSR */
711 writel(fsr, cb_base + ARM_SMMU_CB_FSR);
712
713 /* Retry or terminate any stalled transactions */
714 if (fsr & FSR_SS)
715 writel_relaxed(resume, cb_base + ARM_SMMU_CB_RESUME);
716
717 return ret;
718}
719
720static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
721{
722 u32 gfsr, gfsynr0, gfsynr1, gfsynr2;
723 struct arm_smmu_device *smmu = dev;
3a5df8ff 724 void __iomem *gr0_base = ARM_SMMU_GR0_NS(smmu);
45ae7cff
WD
725
726 gfsr = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSR);
727 gfsynr0 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR0);
728 gfsynr1 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR1);
729 gfsynr2 = readl_relaxed(gr0_base + ARM_SMMU_GR0_sGFSYNR2);
730
3a5df8ff
AH
731 if (!gfsr)
732 return IRQ_NONE;
733
45ae7cff
WD
734 dev_err_ratelimited(smmu->dev,
735 "Unexpected global fault, this could be serious\n");
736 dev_err_ratelimited(smmu->dev,
737 "\tGFSR 0x%08x, GFSYNR0 0x%08x, GFSYNR1 0x%08x, GFSYNR2 0x%08x\n",
738 gfsr, gfsynr0, gfsynr1, gfsynr2);
739
740 writel(gfsr, gr0_base + ARM_SMMU_GR0_sGFSR);
adaba320 741 return IRQ_HANDLED;
45ae7cff
WD
742}
743
518f7136
WD
744static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
745 struct io_pgtable_cfg *pgtbl_cfg)
45ae7cff
WD
746{
747 u32 reg;
668b4ada 748 u64 reg64;
45ae7cff 749 bool stage1;
44680eed
WD
750 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
751 struct arm_smmu_device *smmu = smmu_domain->smmu;
c88ae5de 752 void __iomem *cb_base, *gr1_base;
45ae7cff 753
45ae7cff 754 gr1_base = ARM_SMMU_GR1(smmu);
44680eed
WD
755 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
756 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
45ae7cff 757
4a1c93cb 758 if (smmu->version > ARM_SMMU_V1) {
7602b871
RM
759 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64)
760 reg = CBA2R_RW64_64BIT;
761 else
762 reg = CBA2R_RW64_32BIT;
4e3e9b69
TC
763 /* 16-bit VMIDs live in CBA2R */
764 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1bd37a68 765 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBA2R_VMID_SHIFT;
4e3e9b69 766
4a1c93cb
WD
767 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx));
768 }
769
45ae7cff 770 /* CBAR */
44680eed 771 reg = cfg->cbar;
09360403 772 if (smmu->version == ARM_SMMU_V1)
2907320d 773 reg |= cfg->irptndx << CBAR_IRPTNDX_SHIFT;
45ae7cff 774
57ca90f6
WD
775 /*
776 * Use the weakest shareability/memory types, so they are
777 * overridden by the ttbcr/pte.
778 */
779 if (stage1) {
780 reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
781 (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
4e3e9b69
TC
782 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) {
783 /* 8-bit VMIDs live in CBAR */
1bd37a68 784 reg |= ARM_SMMU_CB_VMID(smmu, cfg) << CBAR_VMID_SHIFT;
57ca90f6 785 }
44680eed 786 writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx));
45ae7cff 787
518f7136
WD
788 /* TTBRs */
789 if (stage1) {
668b4ada
TC
790 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0];
791
1bd37a68 792 reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
f9a05f05 793 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
668b4ada
TC
794
795 reg64 = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1];
1bd37a68 796 reg64 |= ((u64)ARM_SMMU_CB_ASID(smmu, cfg)) << TTBRn_ASID_SHIFT;
f9a05f05 797 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR1);
518f7136 798 } else {
668b4ada 799 reg64 = pgtbl_cfg->arm_lpae_s2_cfg.vttbr;
f9a05f05 800 writeq_relaxed(reg64, cb_base + ARM_SMMU_CB_TTBR0);
518f7136 801 }
a65217a4 802
518f7136
WD
803 /* TTBCR */
804 if (stage1) {
805 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr;
806 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
807 if (smmu->version > ARM_SMMU_V1) {
808 reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
5dc5616e 809 reg |= TTBCR2_SEP_UPSTREAM;
518f7136 810 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
45ae7cff
WD
811 }
812 } else {
518f7136
WD
813 reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr;
814 writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
45ae7cff
WD
815 }
816
518f7136 817 /* MAIRs (stage-1 only) */
45ae7cff 818 if (stage1) {
518f7136 819 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0];
45ae7cff 820 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0);
518f7136
WD
821 reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1];
822 writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1);
45ae7cff
WD
823 }
824
45ae7cff
WD
825 /* SCTLR */
826 reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP;
827 if (stage1)
828 reg |= SCTLR_S1_ASIDPNE;
829#ifdef __BIG_ENDIAN
830 reg |= SCTLR_E;
831#endif
25724841 832 writel_relaxed(reg, cb_base + ARM_SMMU_CB_SCTLR);
45ae7cff
WD
833}
834
835static int arm_smmu_init_domain_context(struct iommu_domain *domain,
44680eed 836 struct arm_smmu_device *smmu)
45ae7cff 837{
a18037b2 838 int irq, start, ret = 0;
518f7136
WD
839 unsigned long ias, oas;
840 struct io_pgtable_ops *pgtbl_ops;
841 struct io_pgtable_cfg pgtbl_cfg;
842 enum io_pgtable_fmt fmt;
1d672638 843 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
44680eed 844 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
45ae7cff 845
518f7136 846 mutex_lock(&smmu_domain->init_mutex);
a18037b2
MH
847 if (smmu_domain->smmu)
848 goto out_unlock;
849
c752ce45
WD
850 /*
851 * Mapping the requested stage onto what we support is surprisingly
852 * complicated, mainly because the spec allows S1+S2 SMMUs without
853 * support for nested translation. That means we end up with the
854 * following table:
855 *
856 * Requested Supported Actual
857 * S1 N S1
858 * S1 S1+S2 S1
859 * S1 S2 S2
860 * S1 S1 S1
861 * N N N
862 * N S1+S2 S2
863 * N S2 S2
864 * N S1 S1
865 *
866 * Note that you can't actually request stage-2 mappings.
867 */
868 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
869 smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
870 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
871 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
872
7602b871
RM
873 /*
874 * Choosing a suitable context format is even more fiddly. Until we
875 * grow some way for the caller to express a preference, and/or move
876 * the decision into the io-pgtable code where it arguably belongs,
877 * just aim for the closest thing to the rest of the system, and hope
878 * that the hardware isn't esoteric enough that we can't assume AArch64
879 * support to be a superset of AArch32 support...
880 */
881 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
882 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
883 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
884 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
885 ARM_SMMU_FEAT_FMT_AARCH64_16K |
886 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
887 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
888
889 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
890 ret = -EINVAL;
891 goto out_unlock;
892 }
893
c752ce45
WD
894 switch (smmu_domain->stage) {
895 case ARM_SMMU_DOMAIN_S1:
896 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
897 start = smmu->num_s2_context_banks;
518f7136
WD
898 ias = smmu->va_size;
899 oas = smmu->ipa_size;
7602b871 900 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
518f7136 901 fmt = ARM_64_LPAE_S1;
7602b871 902 } else {
518f7136 903 fmt = ARM_32_LPAE_S1;
7602b871
RM
904 ias = min(ias, 32UL);
905 oas = min(oas, 40UL);
906 }
c752ce45
WD
907 break;
908 case ARM_SMMU_DOMAIN_NESTED:
45ae7cff
WD
909 /*
910 * We will likely want to change this if/when KVM gets
911 * involved.
912 */
c752ce45 913 case ARM_SMMU_DOMAIN_S2:
9c5c92e3
WD
914 cfg->cbar = CBAR_TYPE_S2_TRANS;
915 start = 0;
518f7136
WD
916 ias = smmu->ipa_size;
917 oas = smmu->pa_size;
7602b871 918 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
518f7136 919 fmt = ARM_64_LPAE_S2;
7602b871 920 } else {
518f7136 921 fmt = ARM_32_LPAE_S2;
7602b871
RM
922 ias = min(ias, 40UL);
923 oas = min(oas, 40UL);
924 }
c752ce45
WD
925 break;
926 default:
927 ret = -EINVAL;
928 goto out_unlock;
45ae7cff
WD
929 }
930
931 ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
932 smmu->num_context_banks);
933 if (IS_ERR_VALUE(ret))
a18037b2 934 goto out_unlock;
45ae7cff 935
44680eed 936 cfg->cbndx = ret;
09360403 937 if (smmu->version == ARM_SMMU_V1) {
44680eed
WD
938 cfg->irptndx = atomic_inc_return(&smmu->irptndx);
939 cfg->irptndx %= smmu->num_context_irqs;
45ae7cff 940 } else {
44680eed 941 cfg->irptndx = cfg->cbndx;
45ae7cff
WD
942 }
943
518f7136
WD
944 pgtbl_cfg = (struct io_pgtable_cfg) {
945 .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
946 .ias = ias,
947 .oas = oas,
948 .tlb = &arm_smmu_gather_ops,
2df7a25c 949 .iommu_dev = smmu->dev,
518f7136
WD
950 };
951
952 smmu_domain->smmu = smmu;
953 pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
954 if (!pgtbl_ops) {
955 ret = -ENOMEM;
956 goto out_clear_smmu;
957 }
958
959 /* Update our support page sizes to reflect the page table format */
960 arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
a18037b2 961
518f7136
WD
962 /* Initialise the context bank with our page table cfg */
963 arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
964
965 /*
966 * Request context fault interrupt. Do this last to avoid the
967 * handler seeing a half-initialised domain state.
968 */
44680eed 969 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
45ae7cff
WD
970 ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED,
971 "arm-smmu-context-fault", domain);
972 if (IS_ERR_VALUE(ret)) {
973 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
44680eed
WD
974 cfg->irptndx, irq);
975 cfg->irptndx = INVALID_IRPTNDX;
45ae7cff
WD
976 }
977
518f7136
WD
978 mutex_unlock(&smmu_domain->init_mutex);
979
980 /* Publish page table ops for map/unmap */
981 smmu_domain->pgtbl_ops = pgtbl_ops;
a9a1b0b5 982 return 0;
45ae7cff 983
518f7136
WD
984out_clear_smmu:
985 smmu_domain->smmu = NULL;
a18037b2 986out_unlock:
518f7136 987 mutex_unlock(&smmu_domain->init_mutex);
45ae7cff
WD
988 return ret;
989}
990
991static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
992{
1d672638 993 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
44680eed
WD
994 struct arm_smmu_device *smmu = smmu_domain->smmu;
995 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1463fe44 996 void __iomem *cb_base;
45ae7cff
WD
997 int irq;
998
999 if (!smmu)
1000 return;
1001
518f7136
WD
1002 /*
1003 * Disable the context bank and free the page tables before freeing
1004 * it.
1005 */
44680eed 1006 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1463fe44 1007 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1463fe44 1008
44680eed
WD
1009 if (cfg->irptndx != INVALID_IRPTNDX) {
1010 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
45ae7cff
WD
1011 free_irq(irq, domain);
1012 }
1013
44830b0c 1014 free_io_pgtable_ops(smmu_domain->pgtbl_ops);
44680eed 1015 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
45ae7cff
WD
1016}
1017
1d672638 1018static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
45ae7cff
WD
1019{
1020 struct arm_smmu_domain *smmu_domain;
45ae7cff 1021
9adb9594 1022 if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
1d672638 1023 return NULL;
45ae7cff
WD
1024 /*
1025 * Allocate the domain and initialise some of its data structures.
1026 * We can't really do anything meaningful until we've added a
1027 * master.
1028 */
1029 smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
1030 if (!smmu_domain)
1d672638 1031 return NULL;
45ae7cff 1032
9adb9594
RM
1033 if (type == IOMMU_DOMAIN_DMA &&
1034 iommu_get_dma_cookie(&smmu_domain->domain)) {
1035 kfree(smmu_domain);
1036 return NULL;
1037 }
1038
518f7136
WD
1039 mutex_init(&smmu_domain->init_mutex);
1040 spin_lock_init(&smmu_domain->pgtbl_lock);
1d672638
JR
1041
1042 return &smmu_domain->domain;
45ae7cff
WD
1043}
1044
1d672638 1045static void arm_smmu_domain_free(struct iommu_domain *domain)
45ae7cff 1046{
1d672638 1047 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1463fe44
WD
1048
1049 /*
1050 * Free the domain resources. We assume that all devices have
1051 * already been detached.
1052 */
9adb9594 1053 iommu_put_dma_cookie(domain);
45ae7cff 1054 arm_smmu_destroy_domain_context(domain);
45ae7cff
WD
1055 kfree(smmu_domain);
1056}
1057
1058static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu,
a9a1b0b5 1059 struct arm_smmu_master_cfg *cfg)
45ae7cff
WD
1060{
1061 int i;
1062 struct arm_smmu_smr *smrs;
1063 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1064
1065 if (!(smmu->features & ARM_SMMU_FEAT_STREAM_MATCH))
1066 return 0;
1067
a9a1b0b5 1068 if (cfg->smrs)
45ae7cff
WD
1069 return -EEXIST;
1070
2907320d 1071 smrs = kmalloc_array(cfg->num_streamids, sizeof(*smrs), GFP_KERNEL);
45ae7cff 1072 if (!smrs) {
a9a1b0b5
WD
1073 dev_err(smmu->dev, "failed to allocate %d SMRs\n",
1074 cfg->num_streamids);
45ae7cff
WD
1075 return -ENOMEM;
1076 }
1077
44680eed 1078 /* Allocate the SMRs on the SMMU */
a9a1b0b5 1079 for (i = 0; i < cfg->num_streamids; ++i) {
45ae7cff
WD
1080 int idx = __arm_smmu_alloc_bitmap(smmu->smr_map, 0,
1081 smmu->num_mapping_groups);
1082 if (IS_ERR_VALUE(idx)) {
1083 dev_err(smmu->dev, "failed to allocate free SMR\n");
1084 goto err_free_smrs;
1085 }
1086
1087 smrs[i] = (struct arm_smmu_smr) {
1088 .idx = idx,
1089 .mask = 0, /* We don't currently share SMRs */
a9a1b0b5 1090 .id = cfg->streamids[i],
45ae7cff
WD
1091 };
1092 }
1093
1094 /* It worked! Now, poke the actual hardware */
a9a1b0b5 1095 for (i = 0; i < cfg->num_streamids; ++i) {
45ae7cff
WD
1096 u32 reg = SMR_VALID | smrs[i].id << SMR_ID_SHIFT |
1097 smrs[i].mask << SMR_MASK_SHIFT;
1098 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_SMR(smrs[i].idx));
1099 }
1100
a9a1b0b5 1101 cfg->smrs = smrs;
45ae7cff
WD
1102 return 0;
1103
1104err_free_smrs:
1105 while (--i >= 0)
1106 __arm_smmu_free_bitmap(smmu->smr_map, smrs[i].idx);
1107 kfree(smrs);
1108 return -ENOSPC;
1109}
1110
1111static void arm_smmu_master_free_smrs(struct arm_smmu_device *smmu,
a9a1b0b5 1112 struct arm_smmu_master_cfg *cfg)
45ae7cff
WD
1113{
1114 int i;
1115 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
a9a1b0b5 1116 struct arm_smmu_smr *smrs = cfg->smrs;
45ae7cff 1117
43b412be
WD
1118 if (!smrs)
1119 return;
1120
45ae7cff 1121 /* Invalidate the SMRs before freeing back to the allocator */
a9a1b0b5 1122 for (i = 0; i < cfg->num_streamids; ++i) {
45ae7cff 1123 u8 idx = smrs[i].idx;
2907320d 1124
45ae7cff
WD
1125 writel_relaxed(~SMR_VALID, gr0_base + ARM_SMMU_GR0_SMR(idx));
1126 __arm_smmu_free_bitmap(smmu->smr_map, idx);
1127 }
1128
a9a1b0b5 1129 cfg->smrs = NULL;
45ae7cff
WD
1130 kfree(smrs);
1131}
1132
45ae7cff 1133static int arm_smmu_domain_add_master(struct arm_smmu_domain *smmu_domain,
a9a1b0b5 1134 struct arm_smmu_master_cfg *cfg)
45ae7cff
WD
1135{
1136 int i, ret;
44680eed 1137 struct arm_smmu_device *smmu = smmu_domain->smmu;
45ae7cff
WD
1138 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1139
8f68f8e2 1140 /* Devices in an IOMMU group may already be configured */
a9a1b0b5 1141 ret = arm_smmu_master_configure_smrs(smmu, cfg);
45ae7cff 1142 if (ret)
8f68f8e2 1143 return ret == -EEXIST ? 0 : ret;
45ae7cff 1144
cbf8277e
WD
1145 /*
1146 * FIXME: This won't be needed once we have IOMMU-backed DMA ops
1147 * for all devices behind the SMMU.
1148 */
1149 if (smmu_domain->domain.type == IOMMU_DOMAIN_DMA)
1150 return 0;
1151
a9a1b0b5 1152 for (i = 0; i < cfg->num_streamids; ++i) {
45ae7cff 1153 u32 idx, s2cr;
2907320d 1154
a9a1b0b5 1155 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
d346180e 1156 s2cr = S2CR_TYPE_TRANS | S2CR_PRIVCFG_UNPRIV |
44680eed 1157 (smmu_domain->cfg.cbndx << S2CR_CBNDX_SHIFT);
45ae7cff
WD
1158 writel_relaxed(s2cr, gr0_base + ARM_SMMU_GR0_S2CR(idx));
1159 }
1160
1161 return 0;
1162}
1163
1164static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain,
a9a1b0b5 1165 struct arm_smmu_master_cfg *cfg)
45ae7cff 1166{
43b412be 1167 int i;
44680eed 1168 struct arm_smmu_device *smmu = smmu_domain->smmu;
43b412be 1169 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
45ae7cff 1170
8f68f8e2
WD
1171 /* An IOMMU group is torn down by the first device to be removed */
1172 if ((smmu->features & ARM_SMMU_FEAT_STREAM_MATCH) && !cfg->smrs)
1173 return;
45ae7cff
WD
1174
1175 /*
1176 * We *must* clear the S2CR first, because freeing the SMR means
1177 * that it can be re-allocated immediately.
1178 */
43b412be
WD
1179 for (i = 0; i < cfg->num_streamids; ++i) {
1180 u32 idx = cfg->smrs ? cfg->smrs[i].idx : cfg->streamids[i];
25a1c96c 1181 u32 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
43b412be 1182
25a1c96c 1183 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(idx));
43b412be
WD
1184 }
1185
a9a1b0b5 1186 arm_smmu_master_free_smrs(smmu, cfg);
45ae7cff
WD
1187}
1188
bc7f2ce0
WD
1189static void arm_smmu_detach_dev(struct device *dev,
1190 struct arm_smmu_master_cfg *cfg)
1191{
1192 struct iommu_domain *domain = dev->archdata.iommu;
1193 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
1194
1195 dev->archdata.iommu = NULL;
1196 arm_smmu_domain_remove_master(smmu_domain, cfg);
1197}
1198
45ae7cff
WD
1199static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
1200{
a18037b2 1201 int ret;
1d672638 1202 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
518f7136 1203 struct arm_smmu_device *smmu;
a9a1b0b5 1204 struct arm_smmu_master_cfg *cfg;
45ae7cff 1205
8f68f8e2 1206 smmu = find_smmu_for_device(dev);
44680eed 1207 if (!smmu) {
45ae7cff
WD
1208 dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
1209 return -ENXIO;
1210 }
1211
518f7136
WD
1212 /* Ensure that the domain is finalised */
1213 ret = arm_smmu_init_domain_context(domain, smmu);
1214 if (IS_ERR_VALUE(ret))
1215 return ret;
1216
45ae7cff 1217 /*
44680eed
WD
1218 * Sanity check the domain. We don't support domains across
1219 * different SMMUs.
45ae7cff 1220 */
518f7136 1221 if (smmu_domain->smmu != smmu) {
45ae7cff
WD
1222 dev_err(dev,
1223 "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n",
a18037b2
MH
1224 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev));
1225 return -EINVAL;
45ae7cff 1226 }
45ae7cff
WD
1227
1228 /* Looks ok, so add the device to the domain */
8f68f8e2 1229 cfg = find_smmu_master_cfg(dev);
a9a1b0b5 1230 if (!cfg)
45ae7cff
WD
1231 return -ENODEV;
1232
bc7f2ce0
WD
1233 /* Detach the dev from its current domain */
1234 if (dev->archdata.iommu)
1235 arm_smmu_detach_dev(dev, cfg);
1236
844e35bd
WD
1237 ret = arm_smmu_domain_add_master(smmu_domain, cfg);
1238 if (!ret)
1239 dev->archdata.iommu = domain;
45ae7cff
WD
1240 return ret;
1241}
1242
45ae7cff 1243static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
b410aed9 1244 phys_addr_t paddr, size_t size, int prot)
45ae7cff 1245{
518f7136
WD
1246 int ret;
1247 unsigned long flags;
1d672638 1248 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
518f7136 1249 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
45ae7cff 1250
518f7136 1251 if (!ops)
45ae7cff
WD
1252 return -ENODEV;
1253
518f7136
WD
1254 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1255 ret = ops->map(ops, iova, paddr, size, prot);
1256 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1257 return ret;
45ae7cff
WD
1258}
1259
1260static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
1261 size_t size)
1262{
518f7136
WD
1263 size_t ret;
1264 unsigned long flags;
1d672638 1265 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
518f7136 1266 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
45ae7cff 1267
518f7136
WD
1268 if (!ops)
1269 return 0;
1270
1271 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
1272 ret = ops->unmap(ops, iova, size);
1273 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
1274 return ret;
45ae7cff
WD
1275}
1276
859a732e
MH
1277static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
1278 dma_addr_t iova)
1279{
1d672638 1280 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
859a732e
MH
1281 struct arm_smmu_device *smmu = smmu_domain->smmu;
1282 struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
1283 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
1284 struct device *dev = smmu->dev;
1285 void __iomem *cb_base;
1286 u32 tmp;
1287 u64 phys;
661d962f 1288 unsigned long va;
859a732e
MH
1289
1290 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
1291
661d962f
RM
1292 /* ATS1 registers can only be written atomically */
1293 va = iova & ~0xfffUL;
661d962f 1294 if (smmu->version == ARM_SMMU_V2)
f9a05f05
RM
1295 smmu_write_atomic_lq(va, cb_base + ARM_SMMU_CB_ATS1PR);
1296 else /* Register is only 32-bit in v1 */
661d962f 1297 writel_relaxed(va, cb_base + ARM_SMMU_CB_ATS1PR);
859a732e
MH
1298
1299 if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp,
1300 !(tmp & ATSR_ACTIVE), 5, 50)) {
1301 dev_err(dev,
077124c9 1302 "iova to phys timed out on %pad. Falling back to software table walk.\n",
859a732e
MH
1303 &iova);
1304 return ops->iova_to_phys(ops, iova);
1305 }
1306
f9a05f05 1307 phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR);
859a732e
MH
1308 if (phys & CB_PAR_F) {
1309 dev_err(dev, "translation fault!\n");
1310 dev_err(dev, "PAR = 0x%llx\n", phys);
1311 return 0;
1312 }
1313
1314 return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff);
1315}
1316
45ae7cff 1317static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
859a732e 1318 dma_addr_t iova)
45ae7cff 1319{
518f7136
WD
1320 phys_addr_t ret;
1321 unsigned long flags;
1d672638 1322 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
518f7136 1323 struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops;
45ae7cff 1324
518f7136 1325 if (!ops)
a44a9791 1326 return 0;
45ae7cff 1327
518f7136 1328 spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags);
83a60ed8
BR
1329 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS &&
1330 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
859a732e 1331 ret = arm_smmu_iova_to_phys_hard(domain, iova);
83a60ed8 1332 } else {
859a732e 1333 ret = ops->iova_to_phys(ops, iova);
83a60ed8
BR
1334 }
1335
518f7136 1336 spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags);
859a732e 1337
518f7136 1338 return ret;
45ae7cff
WD
1339}
1340
1fd0c775 1341static bool arm_smmu_capable(enum iommu_cap cap)
45ae7cff 1342{
d0948945
WD
1343 switch (cap) {
1344 case IOMMU_CAP_CACHE_COHERENCY:
1fd0c775
JR
1345 /*
1346 * Return true here as the SMMU can always send out coherent
1347 * requests.
1348 */
1349 return true;
d0948945 1350 case IOMMU_CAP_INTR_REMAP:
1fd0c775 1351 return true; /* MSIs are just memory writes */
0029a8dd
AM
1352 case IOMMU_CAP_NOEXEC:
1353 return true;
d0948945 1354 default:
1fd0c775 1355 return false;
d0948945 1356 }
45ae7cff 1357}
45ae7cff 1358
a9a1b0b5
WD
1359static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *data)
1360{
1361 *((u16 *)data) = alias;
1362 return 0; /* Continue walking */
45ae7cff
WD
1363}
1364
8f68f8e2
WD
1365static void __arm_smmu_release_pci_iommudata(void *data)
1366{
1367 kfree(data);
1368}
1369
af659932
JR
1370static int arm_smmu_init_pci_device(struct pci_dev *pdev,
1371 struct iommu_group *group)
45ae7cff 1372{
03edb226 1373 struct arm_smmu_master_cfg *cfg;
af659932
JR
1374 u16 sid;
1375 int i;
a9a1b0b5 1376
03edb226
WD
1377 cfg = iommu_group_get_iommudata(group);
1378 if (!cfg) {
a9a1b0b5 1379 cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
af659932
JR
1380 if (!cfg)
1381 return -ENOMEM;
a9a1b0b5 1382
03edb226
WD
1383 iommu_group_set_iommudata(group, cfg,
1384 __arm_smmu_release_pci_iommudata);
1385 }
8f68f8e2 1386
af659932
JR
1387 if (cfg->num_streamids >= MAX_MASTER_STREAMIDS)
1388 return -ENOSPC;
a9a1b0b5 1389
03edb226
WD
1390 /*
1391 * Assume Stream ID == Requester ID for now.
1392 * We need a way to describe the ID mappings in FDT.
1393 */
1394 pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid);
1395 for (i = 0; i < cfg->num_streamids; ++i)
1396 if (cfg->streamids[i] == sid)
1397 break;
1398
1399 /* Avoid duplicate SIDs, as this can lead to SMR conflicts */
1400 if (i == cfg->num_streamids)
1401 cfg->streamids[cfg->num_streamids++] = sid;
5fc63a7c 1402
03edb226 1403 return 0;
45ae7cff
WD
1404}
1405
af659932
JR
1406static int arm_smmu_init_platform_device(struct device *dev,
1407 struct iommu_group *group)
03edb226 1408{
03edb226 1409 struct arm_smmu_device *smmu = find_smmu_for_device(dev);
af659932 1410 struct arm_smmu_master *master;
03edb226
WD
1411
1412 if (!smmu)
1413 return -ENODEV;
1414
1415 master = find_smmu_master(smmu, dev->of_node);
1416 if (!master)
1417 return -ENODEV;
1418
03edb226 1419 iommu_group_set_iommudata(group, &master->cfg, NULL);
af659932
JR
1420
1421 return 0;
03edb226
WD
1422}
1423
1424static int arm_smmu_add_device(struct device *dev)
1425{
af659932 1426 struct iommu_group *group;
03edb226 1427
af659932
JR
1428 group = iommu_group_get_for_dev(dev);
1429 if (IS_ERR(group))
1430 return PTR_ERR(group);
03edb226 1431
9a4a9d8c 1432 iommu_group_put(group);
af659932 1433 return 0;
03edb226
WD
1434}
1435
45ae7cff
WD
1436static void arm_smmu_remove_device(struct device *dev)
1437{
5fc63a7c 1438 iommu_group_remove_device(dev);
45ae7cff
WD
1439}
1440
af659932
JR
1441static struct iommu_group *arm_smmu_device_group(struct device *dev)
1442{
1443 struct iommu_group *group;
1444 int ret;
1445
1446 if (dev_is_pci(dev))
1447 group = pci_device_group(dev);
1448 else
1449 group = generic_device_group(dev);
1450
1451 if (IS_ERR(group))
1452 return group;
1453
1454 if (dev_is_pci(dev))
1455 ret = arm_smmu_init_pci_device(to_pci_dev(dev), group);
1456 else
1457 ret = arm_smmu_init_platform_device(dev, group);
1458
1459 if (ret) {
1460 iommu_group_put(group);
1461 group = ERR_PTR(ret);
1462 }
1463
1464 return group;
1465}
1466
c752ce45
WD
1467static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1468 enum iommu_attr attr, void *data)
1469{
1d672638 1470 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
c752ce45
WD
1471
1472 switch (attr) {
1473 case DOMAIN_ATTR_NESTING:
1474 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1475 return 0;
1476 default:
1477 return -ENODEV;
1478 }
1479}
1480
1481static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1482 enum iommu_attr attr, void *data)
1483{
518f7136 1484 int ret = 0;
1d672638 1485 struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
c752ce45 1486
518f7136
WD
1487 mutex_lock(&smmu_domain->init_mutex);
1488
c752ce45
WD
1489 switch (attr) {
1490 case DOMAIN_ATTR_NESTING:
518f7136
WD
1491 if (smmu_domain->smmu) {
1492 ret = -EPERM;
1493 goto out_unlock;
1494 }
1495
c752ce45
WD
1496 if (*(int *)data)
1497 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1498 else
1499 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1500
518f7136 1501 break;
c752ce45 1502 default:
518f7136 1503 ret = -ENODEV;
c752ce45 1504 }
518f7136
WD
1505
1506out_unlock:
1507 mutex_unlock(&smmu_domain->init_mutex);
1508 return ret;
c752ce45
WD
1509}
1510
518f7136 1511static struct iommu_ops arm_smmu_ops = {
c752ce45 1512 .capable = arm_smmu_capable,
1d672638
JR
1513 .domain_alloc = arm_smmu_domain_alloc,
1514 .domain_free = arm_smmu_domain_free,
c752ce45 1515 .attach_dev = arm_smmu_attach_dev,
c752ce45
WD
1516 .map = arm_smmu_map,
1517 .unmap = arm_smmu_unmap,
76771c93 1518 .map_sg = default_iommu_map_sg,
c752ce45
WD
1519 .iova_to_phys = arm_smmu_iova_to_phys,
1520 .add_device = arm_smmu_add_device,
1521 .remove_device = arm_smmu_remove_device,
af659932 1522 .device_group = arm_smmu_device_group,
c752ce45
WD
1523 .domain_get_attr = arm_smmu_domain_get_attr,
1524 .domain_set_attr = arm_smmu_domain_set_attr,
518f7136 1525 .pgsize_bitmap = -1UL, /* Restricted during device attach */
45ae7cff
WD
1526};
1527
1528static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
1529{
1530 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
659db6f6 1531 void __iomem *cb_base;
45ae7cff 1532 int i = 0;
659db6f6
AH
1533 u32 reg;
1534
3a5df8ff
AH
1535 /* clear global FSR */
1536 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
1537 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sGFSR);
45ae7cff 1538
25a1c96c
RM
1539 /* Mark all SMRn as invalid and all S2CRn as bypass unless overridden */
1540 reg = disable_bypass ? S2CR_TYPE_FAULT : S2CR_TYPE_BYPASS;
45ae7cff 1541 for (i = 0; i < smmu->num_mapping_groups; ++i) {
3c8766d0 1542 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_SMR(i));
25a1c96c 1543 writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_S2CR(i));
45ae7cff
WD
1544 }
1545
659db6f6
AH
1546 /* Make sure all context banks are disabled and clear CB_FSR */
1547 for (i = 0; i < smmu->num_context_banks; ++i) {
1548 cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, i);
1549 writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
1550 writel_relaxed(FSR_FAULT, cb_base + ARM_SMMU_CB_FSR);
f0cfffc4
RM
1551 /*
1552 * Disable MMU-500's not-particularly-beneficial next-page
1553 * prefetcher for the sake of errata #841119 and #826419.
1554 */
1555 if (smmu->model == ARM_MMU500) {
1556 reg = readl_relaxed(cb_base + ARM_SMMU_CB_ACTLR);
1557 reg &= ~ARM_MMU500_ACTLR_CPRE;
1558 writel_relaxed(reg, cb_base + ARM_SMMU_CB_ACTLR);
1559 }
659db6f6 1560 }
1463fe44 1561
45ae7cff 1562 /* Invalidate the TLB, just in case */
45ae7cff
WD
1563 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
1564 writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
1565
3a5df8ff 1566 reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
659db6f6 1567
45ae7cff 1568 /* Enable fault reporting */
659db6f6 1569 reg |= (sCR0_GFRE | sCR0_GFIE | sCR0_GCFGFRE | sCR0_GCFGFIE);
45ae7cff
WD
1570
1571 /* Disable TLB broadcasting. */
659db6f6 1572 reg |= (sCR0_VMIDPNE | sCR0_PTM);
45ae7cff 1573
25a1c96c
RM
1574 /* Enable client access, handling unmatched streams as appropriate */
1575 reg &= ~sCR0_CLIENTPD;
1576 if (disable_bypass)
1577 reg |= sCR0_USFCFG;
1578 else
1579 reg &= ~sCR0_USFCFG;
45ae7cff
WD
1580
1581 /* Disable forced broadcasting */
659db6f6 1582 reg &= ~sCR0_FB;
45ae7cff
WD
1583
1584 /* Don't upgrade barriers */
659db6f6 1585 reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT);
45ae7cff 1586
4e3e9b69
TC
1587 if (smmu->features & ARM_SMMU_FEAT_VMID16)
1588 reg |= sCR0_VMID16EN;
1589
45ae7cff 1590 /* Push the button */
518f7136 1591 __arm_smmu_tlb_sync(smmu);
3a5df8ff 1592 writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
45ae7cff
WD
1593}
1594
1595static int arm_smmu_id_size_to_bits(int size)
1596{
1597 switch (size) {
1598 case 0:
1599 return 32;
1600 case 1:
1601 return 36;
1602 case 2:
1603 return 40;
1604 case 3:
1605 return 42;
1606 case 4:
1607 return 44;
1608 case 5:
1609 default:
1610 return 48;
1611 }
1612}
1613
1614static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
1615{
1616 unsigned long size;
1617 void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
1618 u32 id;
bae2c2d4 1619 bool cttw_dt, cttw_reg;
45ae7cff
WD
1620
1621 dev_notice(smmu->dev, "probing hardware configuration...\n");
45ae7cff
WD
1622 dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
1623
1624 /* ID0 */
1625 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0);
4cf740b0
WD
1626
1627 /* Restrict available stages based on module parameter */
1628 if (force_stage == 1)
1629 id &= ~(ID0_S2TS | ID0_NTS);
1630 else if (force_stage == 2)
1631 id &= ~(ID0_S1TS | ID0_NTS);
1632
45ae7cff
WD
1633 if (id & ID0_S1TS) {
1634 smmu->features |= ARM_SMMU_FEAT_TRANS_S1;
1635 dev_notice(smmu->dev, "\tstage 1 translation\n");
1636 }
1637
1638 if (id & ID0_S2TS) {
1639 smmu->features |= ARM_SMMU_FEAT_TRANS_S2;
1640 dev_notice(smmu->dev, "\tstage 2 translation\n");
1641 }
1642
1643 if (id & ID0_NTS) {
1644 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED;
1645 dev_notice(smmu->dev, "\tnested translation\n");
1646 }
1647
1648 if (!(smmu->features &
4cf740b0 1649 (ARM_SMMU_FEAT_TRANS_S1 | ARM_SMMU_FEAT_TRANS_S2))) {
45ae7cff
WD
1650 dev_err(smmu->dev, "\tno translation support!\n");
1651 return -ENODEV;
1652 }
1653
d38f0ff9 1654 if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & ID0_ATOSNS))) {
859a732e
MH
1655 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS;
1656 dev_notice(smmu->dev, "\taddress translation ops\n");
1657 }
1658
bae2c2d4
RM
1659 /*
1660 * In order for DMA API calls to work properly, we must defer to what
1661 * the DT says about coherency, regardless of what the hardware claims.
1662 * Fortunately, this also opens up a workaround for systems where the
1663 * ID register value has ended up configured incorrectly.
1664 */
1665 cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
1666 cttw_reg = !!(id & ID0_CTTW);
1667 if (cttw_dt)
45ae7cff 1668 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
bae2c2d4
RM
1669 if (cttw_dt || cttw_reg)
1670 dev_notice(smmu->dev, "\t%scoherent table walk\n",
1671 cttw_dt ? "" : "non-");
1672 if (cttw_dt != cttw_reg)
1673 dev_notice(smmu->dev,
1674 "\t(IDR0.CTTW overridden by dma-coherent property)\n");
45ae7cff
WD
1675
1676 if (id & ID0_SMS) {
1677 u32 smr, sid, mask;
1678
1679 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH;
1680 smmu->num_mapping_groups = (id >> ID0_NUMSMRG_SHIFT) &
1681 ID0_NUMSMRG_MASK;
1682 if (smmu->num_mapping_groups == 0) {
1683 dev_err(smmu->dev,
1684 "stream-matching supported, but no SMRs present!\n");
1685 return -ENODEV;
1686 }
1687
1688 smr = SMR_MASK_MASK << SMR_MASK_SHIFT;
1689 smr |= (SMR_ID_MASK << SMR_ID_SHIFT);
1690 writel_relaxed(smr, gr0_base + ARM_SMMU_GR0_SMR(0));
1691 smr = readl_relaxed(gr0_base + ARM_SMMU_GR0_SMR(0));
1692
1693 mask = (smr >> SMR_MASK_SHIFT) & SMR_MASK_MASK;
1694 sid = (smr >> SMR_ID_SHIFT) & SMR_ID_MASK;
1695 if ((mask & sid) != sid) {
1696 dev_err(smmu->dev,
1697 "SMR mask bits (0x%x) insufficient for ID field (0x%x)\n",
1698 mask, sid);
1699 return -ENODEV;
1700 }
1701
1702 dev_notice(smmu->dev,
1703 "\tstream matching with %u register groups, mask 0x%x",
1704 smmu->num_mapping_groups, mask);
3c8766d0
OH
1705 } else {
1706 smmu->num_mapping_groups = (id >> ID0_NUMSIDB_SHIFT) &
1707 ID0_NUMSIDB_MASK;
45ae7cff
WD
1708 }
1709
7602b871
RM
1710 if (smmu->version < ARM_SMMU_V2 || !(id & ID0_PTFS_NO_AARCH32)) {
1711 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L;
1712 if (!(id & ID0_PTFS_NO_AARCH32S))
1713 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S;
1714 }
1715
45ae7cff
WD
1716 /* ID1 */
1717 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID1);
c757e852 1718 smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12;
45ae7cff 1719
c55af7f7 1720	/* Check for a mismatch between the SMMU address space size and the mapped region */
518f7136 1721 size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1);
c757e852 1722 size *= 2 << smmu->pgshift;
c55af7f7 1723 if (smmu->size != size)
2907320d
MH
1724 dev_warn(smmu->dev,
1725 "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n",
1726 size, smmu->size);
45ae7cff 1727
518f7136 1728 smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK;
45ae7cff
WD
1729 smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK;
1730 if (smmu->num_s2_context_banks > smmu->num_context_banks) {
1731 dev_err(smmu->dev, "impossible number of S2 context banks!\n");
1732 return -ENODEV;
1733 }
1734 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n",
1735 smmu->num_context_banks, smmu->num_s2_context_banks);
e086d912
RM
1736 /*
1737 * Cavium CN88xx erratum #27704.
1738 * Ensure ASID and VMID allocation is unique across all SMMUs in
1739 * the system.
1740 */
1741 if (smmu->model == CAVIUM_SMMUV2) {
1742 smmu->cavium_id_base =
1743 atomic_add_return(smmu->num_context_banks,
1744 &cavium_smmu_context_count);
1745 smmu->cavium_id_base -= smmu->num_context_banks;
1746 }
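	/*
	 * The per-SMMU cavium_id_base carved out here is added to the context
	 * bank index when ASIDs and VMIDs are formed (see the ASID/VMID
	 * helpers earlier in the file), which keeps the values unique across
	 * all SMMUs in the system.
	 */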
45ae7cff
WD
1747
1748 /* ID2 */
1749 id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2);
1750 size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK);
518f7136 1751 smmu->ipa_size = size;
45ae7cff 1752
518f7136 1753 /* The output mask is also applied for bypass */
45ae7cff 1754 size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK);
518f7136 1755 smmu->pa_size = size;
45ae7cff 1756
4e3e9b69
TC
1757 if (id & ID2_VMID16)
1758 smmu->features |= ARM_SMMU_FEAT_VMID16;
1759
f1d84548
RM
1760 /*
1761 * What the page table walker can address actually depends on which
1762 * descriptor format is in use, but since a) we don't know that yet,
1763 * and b) it can vary per context bank, this will have to do...
1764 */
1765 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size)))
1766 dev_warn(smmu->dev,
1767 "failed to set DMA mask for table walker\n");
1768
09360403 1769 if (smmu->version == ARM_SMMU_V1) {
518f7136 1770 smmu->va_size = smmu->ipa_size;
45ae7cff 1771 } else {
45ae7cff 1772 size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK;
518f7136 1773 smmu->va_size = arm_smmu_id_size_to_bits(size);
518f7136 1774 if (id & ID2_PTFS_4K)
7602b871 1775 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K;
518f7136 1776 if (id & ID2_PTFS_16K)
7602b871 1777 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K;
518f7136 1778 if (id & ID2_PTFS_64K)
7602b871 1779 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K;
45ae7cff
WD
1780 }
1781
7602b871
RM
1782 /* Now we've corralled the various formats, work out which page sizes they support */
1783 size = 0;
1784 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S)
1785 size |= SZ_4K | SZ_64K | SZ_1M | SZ_16M;
1786 if (smmu->features &
1787 (ARM_SMMU_FEAT_FMT_AARCH32_L | ARM_SMMU_FEAT_FMT_AARCH64_4K))
1788 size |= SZ_4K | SZ_2M | SZ_1G;
1789 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K)
1790 size |= SZ_16K | SZ_32M;
1791 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K)
1792 size |= SZ_64K | SZ_512M;
1793
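	/*
	 * For example, an implementation reporting only the AArch64 4K
	 * granule ends up advertising 4K, 2M and 1G mappings at this point.
	 */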
518f7136
WD
1794 arm_smmu_ops.pgsize_bitmap &= size;
1795 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size);
1796
28d6007b
WD
1797 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1)
1798 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n",
518f7136 1799 smmu->va_size, smmu->ipa_size);
28d6007b
WD
1800
1801 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2)
1802 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n",
518f7136 1803 smmu->ipa_size, smmu->pa_size);
28d6007b 1804
45ae7cff
WD
1805 return 0;
1806}
1807
67b65a3f
RM
1808struct arm_smmu_match_data {
1809 enum arm_smmu_arch_version version;
1810 enum arm_smmu_implementation model;
1811};
1812
1813#define ARM_SMMU_MATCH_DATA(name, ver, imp) \
1814static struct arm_smmu_match_data name = { .version = ver, .model = imp }
1815
1816ARM_SMMU_MATCH_DATA(smmu_generic_v1, ARM_SMMU_V1, GENERIC_SMMU);
1817ARM_SMMU_MATCH_DATA(smmu_generic_v2, ARM_SMMU_V2, GENERIC_SMMU);
f0cfffc4 1818ARM_SMMU_MATCH_DATA(arm_mmu500, ARM_SMMU_V2, ARM_MMU500);
e086d912 1819ARM_SMMU_MATCH_DATA(cavium_smmuv2, ARM_SMMU_V2, CAVIUM_SMMUV2);
67b65a3f 1820
09b5269a 1821static const struct of_device_id arm_smmu_of_match[] = {
67b65a3f
RM
1822 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1823 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1824 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1825 { .compatible = "arm,mmu-401", .data = &smmu_generic_v1 },
f0cfffc4 1826 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
e086d912 1827 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
09360403
RM
1828 { },
1829};
1830MODULE_DEVICE_TABLE(of, arm_smmu_of_match);
1831
45ae7cff
WD
1832static int arm_smmu_device_dt_probe(struct platform_device *pdev)
1833{
09360403 1834 const struct of_device_id *of_id;
67b65a3f 1835 const struct arm_smmu_match_data *data;
45ae7cff
WD
1836 struct resource *res;
1837 struct arm_smmu_device *smmu;
45ae7cff
WD
1838 struct device *dev = &pdev->dev;
1839 struct rb_node *node;
1840 struct of_phandle_args masterspec;
1841 int num_irqs, i, err;
1842
1843 smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
1844 if (!smmu) {
1845 dev_err(dev, "failed to allocate arm_smmu_device\n");
1846 return -ENOMEM;
1847 }
1848 smmu->dev = dev;
1849
09360403 1850 of_id = of_match_node(arm_smmu_of_match, dev->of_node);
67b65a3f
RM
1851 data = of_id->data;
1852 smmu->version = data->version;
1853 smmu->model = data->model;
09360403 1854
45ae7cff 1855 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
8a7f4312
JL
1856 smmu->base = devm_ioremap_resource(dev, res);
1857 if (IS_ERR(smmu->base))
1858 return PTR_ERR(smmu->base);
45ae7cff 1859 smmu->size = resource_size(res);
45ae7cff
WD
1860
1861 if (of_property_read_u32(dev->of_node, "#global-interrupts",
1862 &smmu->num_global_irqs)) {
1863 dev_err(dev, "missing #global-interrupts property\n");
1864 return -ENODEV;
1865 }
1866
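	/*
	 * Count the interrupt resources: the first #global-interrupts
	 * entries service global faults, and every remaining entry is
	 * treated as a per-context-bank interrupt.
	 */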
1867 num_irqs = 0;
1868 while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num_irqs))) {
1869 num_irqs++;
1870 if (num_irqs > smmu->num_global_irqs)
1871 smmu->num_context_irqs++;
1872 }
1873
44a08de2
AH
1874 if (!smmu->num_context_irqs) {
1875 dev_err(dev, "found %d interrupts but expected at least %d\n",
1876 num_irqs, smmu->num_global_irqs + 1);
1877 return -ENODEV;
45ae7cff 1878 }
45ae7cff
WD
1879
1880 smmu->irqs = devm_kzalloc(dev, sizeof(*smmu->irqs) * num_irqs,
1881 GFP_KERNEL);
1882 if (!smmu->irqs) {
1883 dev_err(dev, "failed to allocate %d irqs\n", num_irqs);
1884 return -ENOMEM;
1885 }
1886
1887 for (i = 0; i < num_irqs; ++i) {
1888 int irq = platform_get_irq(pdev, i);
2907320d 1889
45ae7cff
WD
1890 if (irq < 0) {
1891 dev_err(dev, "failed to get irq index %d\n", i);
1892 return -ENODEV;
1893 }
1894 smmu->irqs[i] = irq;
1895 }
1896
3c8766d0
OH
1897 err = arm_smmu_device_cfg_probe(smmu);
1898 if (err)
1899 return err;
1900
45ae7cff
WD
1901 i = 0;
1902 smmu->masters = RB_ROOT;
1903 while (!of_parse_phandle_with_args(dev->of_node, "mmu-masters",
1904 "#stream-id-cells", i,
1905 &masterspec)) {
1906 err = register_smmu_master(smmu, dev, &masterspec);
1907 if (err) {
1908 dev_err(dev, "failed to add master %s\n",
1909 masterspec.np->name);
1910 goto out_put_masters;
1911 }
1912
1913 i++;
1914 }
1915 dev_notice(dev, "registered %d master devices\n", i);
1916
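	/*
	 * Illustrative (values made up) legacy binding consumed by the loop
	 * above: each entry is a master's phandle followed by its stream IDs,
	 * with #stream-id-cells in the master node giving the count, e.g.
	 *
	 *	mmu-masters = <&dma0 0xd01d 0xd01e>,
	 *	              <&dma1 0xd11c>;
	 */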
3a5df8ff
AH
1917 parse_driver_options(smmu);
1918
09360403 1919 if (smmu->version > ARM_SMMU_V1 &&
45ae7cff
WD
1920 smmu->num_context_banks != smmu->num_context_irqs) {
1921 dev_err(dev,
1922 "found only %d context interrupt(s) but %d required\n",
1923 smmu->num_context_irqs, smmu->num_context_banks);
89a23cde 1924 err = -ENODEV;
44680eed 1925 goto out_put_masters;
45ae7cff
WD
1926 }
1927
45ae7cff
WD
1928 for (i = 0; i < smmu->num_global_irqs; ++i) {
1929 err = request_irq(smmu->irqs[i],
1930 arm_smmu_global_fault,
1931 IRQF_SHARED,
1932 "arm-smmu global fault",
1933 smmu);
1934 if (err) {
1935 dev_err(dev, "failed to request global IRQ %d (%u)\n",
1936 i, smmu->irqs[i]);
1937 goto out_free_irqs;
1938 }
1939 }
1940
1941 INIT_LIST_HEAD(&smmu->list);
1942 spin_lock(&arm_smmu_devices_lock);
1943 list_add(&smmu->list, &arm_smmu_devices);
1944 spin_unlock(&arm_smmu_devices_lock);
fd90cecb
WD
1945
1946 arm_smmu_device_reset(smmu);
45ae7cff
WD
1947 return 0;
1948
1949out_free_irqs:
1950 while (i--)
1951 free_irq(smmu->irqs[i], smmu);
1952
45ae7cff
WD
1953out_put_masters:
1954 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
2907320d
MH
1955 struct arm_smmu_master *master
1956 = container_of(node, struct arm_smmu_master, node);
45ae7cff
WD
1957 of_node_put(master->of_node);
1958 }
1959
1960 return err;
1961}
1962
1963static int arm_smmu_device_remove(struct platform_device *pdev)
1964{
1965 int i;
1966 struct device *dev = &pdev->dev;
1967 struct arm_smmu_device *curr, *smmu = NULL;
1968 struct rb_node *node;
1969
1970 spin_lock(&arm_smmu_devices_lock);
1971 list_for_each_entry(curr, &arm_smmu_devices, list) {
1972 if (curr->dev == dev) {
1973 smmu = curr;
1974 list_del(&smmu->list);
1975 break;
1976 }
1977 }
1978 spin_unlock(&arm_smmu_devices_lock);
1979
1980 if (!smmu)
1981 return -ENODEV;
1982
45ae7cff 1983 for (node = rb_first(&smmu->masters); node; node = rb_next(node)) {
2907320d
MH
1984 struct arm_smmu_master *master
1985 = container_of(node, struct arm_smmu_master, node);
45ae7cff
WD
1986 of_node_put(master->of_node);
1987 }
1988
ecfadb6e 1989 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS))
45ae7cff
WD
1990 dev_err(dev, "removing device with active domains!\n");
1991
1992 for (i = 0; i < smmu->num_global_irqs; ++i)
1993 free_irq(smmu->irqs[i], smmu);
1994
1995 /* Turn the thing off */
2907320d 1996 writel(sCR0_CLIENTPD, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
45ae7cff
WD
1997 return 0;
1998}
1999
45ae7cff
WD
2000static struct platform_driver arm_smmu_driver = {
2001 .driver = {
45ae7cff
WD
2002 .name = "arm-smmu",
2003 .of_match_table = of_match_ptr(arm_smmu_of_match),
2004 },
2005 .probe = arm_smmu_device_dt_probe,
2006 .remove = arm_smmu_device_remove,
2007};
2008
2009static int __init arm_smmu_init(void)
2010{
0e7d37ad 2011 struct device_node *np;
45ae7cff
WD
2012 int ret;
2013
0e7d37ad
TR
2014 /*
2015 * Play nice with systems that don't have an ARM SMMU by checking that
2016 * an ARM SMMU exists in the system before proceeding with the driver
2017 * and IOMMU bus operation registration.
2018 */
2019 np = of_find_matching_node(NULL, arm_smmu_of_match);
2020 if (!np)
2021 return 0;
2022
2023 of_node_put(np);
2024
45ae7cff
WD
2025 ret = platform_driver_register(&arm_smmu_driver);
2026 if (ret)
2027 return ret;
2028
2029 /* Oh, for a proper bus abstraction */
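	/*
	 * Install the SMMU ops on each bus type we may find masters on, but
	 * only where no other IOMMU driver has registered first.
	 */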
6614ee77 2030 if (!iommu_present(&platform_bus_type))
45ae7cff
WD
2031 bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
2032
d123cf82 2033#ifdef CONFIG_ARM_AMBA
6614ee77 2034 if (!iommu_present(&amba_bustype))
45ae7cff 2035 bus_set_iommu(&amba_bustype, &arm_smmu_ops);
d123cf82 2036#endif
45ae7cff 2037
a9a1b0b5
WD
2038#ifdef CONFIG_PCI
2039 if (!iommu_present(&pci_bus_type))
2040 bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
2041#endif
2042
45ae7cff
WD
2043 return 0;
2044}
2045
2046static void __exit arm_smmu_exit(void)
2047{
2048 return platform_driver_unregister(&arm_smmu_driver);
2049}
2050
b1950b27 2051subsys_initcall(arm_smmu_init);
45ae7cff
WD
2052module_exit(arm_smmu_exit);
2053
2054MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
2055MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
2056MODULE_LICENSE("GPL v2");