// SPDX-License-Identifier: GPL-2.0-only
/*
 * Apple DART (Device Address Resolution Table) IOMMU driver
 *
 * Copyright (C) 2021 The Asahi Linux Contributors
 *
 * Based on arm/arm-smmu/arm-smmu.c and arm/arm-smmu-v3/arm-smmu-v3.c
 *  Copyright (C) 2013 ARM Limited
 *  Copyright (C) 2015 ARM Limited
 * and on exynos-iommu.c
 *  Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 */
14 #include <linux/atomic.h>
15 #include <linux/bitfield.h>
16 #include <linux/clk.h>
17 #include <linux/dev_printk.h>
18 #include <linux/dma-iommu.h>
19 #include <linux/dma-mapping.h>
20 #include <linux/err.h>
21 #include <linux/interrupt.h>
22 #include <linux/io-pgtable.h>
23 #include <linux/iommu.h>
24 #include <linux/iopoll.h>
25 #include <linux/module.h>
27 #include <linux/of_address.h>
28 #include <linux/of_iommu.h>
29 #include <linux/of_platform.h>
30 #include <linux/pci.h>
31 #include <linux/platform_device.h>
32 #include <linux/slab.h>
33 #include <linux/swab.h>
34 #include <linux/types.h>
#define DART_MAX_STREAMS 16
#define DART_MAX_TTBR 4
#define MAX_DARTS_PER_DEVICE 2

/* Bitmask selecting every stream of a DART at once. */
#define DART_STREAM_ALL 0xffff

#define DART_PARAMS1 0x00
#define DART_PARAMS_PAGE_SHIFT GENMASK(27, 24)

#define DART_PARAMS2 0x04
#define DART_PARAMS_BYPASS_SUPPORT BIT(0)

#define DART_STREAM_COMMAND 0x20
#define DART_STREAM_COMMAND_BUSY BIT(2)
#define DART_STREAM_COMMAND_INVALIDATE BIT(20)

#define DART_STREAM_SELECT 0x34

#define DART_ERROR 0x40
#define DART_ERROR_STREAM GENMASK(27, 24)
#define DART_ERROR_CODE GENMASK(11, 0)
#define DART_ERROR_FLAG BIT(31)

#define DART_ERROR_READ_FAULT BIT(4)
#define DART_ERROR_WRITE_FAULT BIT(3)
#define DART_ERROR_NO_PTE BIT(2)
#define DART_ERROR_NO_PMD BIT(1)
#define DART_ERROR_NO_TTBR BIT(0)

#define DART_CONFIG 0x60
#define DART_CONFIG_LOCK BIT(15)

/* Poll timeout (us) while waiting for a stream command to complete. */
#define DART_STREAM_COMMAND_BUSY_TIMEOUT 100

#define DART_ERROR_ADDR_HI 0x54
#define DART_ERROR_ADDR_LO 0x50

#define DART_TCR(sid) (0x100 + 4 * (sid))
#define DART_TCR_TRANSLATE_ENABLE BIT(7)
#define DART_TCR_BYPASS0_ENABLE BIT(8)
#define DART_TCR_BYPASS1_ENABLE BIT(12)

#define DART_TTBR(sid, idx) (0x200 + 16 * (sid) + 4 * (idx))
#define DART_TTBR_VALID BIT(31)
#define DART_TTBR_SHIFT 12
83 * Private structure associated with each DART device.
86 * @regs: mapped MMIO region
87 * @irq: interrupt number, can be shared with other DARTs
88 * @clks: clocks associated with this DART
89 * @num_clks: number of @clks
90 * @lock: lock for hardware operations involving this dart
91 * @pgsize: pagesize supported by this DART
92 * @supports_bypass: indicates if this DART supports bypass mode
93 * @force_bypass: force bypass mode due to pagesize mismatch?
94 * @sid2group: maps stream ids to iommu_groups
95 * @iommu: iommu core device
103 struct clk_bulk_data
*clks
;
109 u32 supports_bypass
: 1;
110 u32 force_bypass
: 1;
112 struct iommu_group
*sid2group
[DART_MAX_STREAMS
];
113 struct iommu_device iommu
;
117 * Convenience struct to identify streams.
119 * The normal variant is used inside apple_dart_master_cfg which isn't written
121 * The atomic variant is used inside apple_dart_domain where we have to guard
122 * against races from potential parallel calls to attach/detach_device.
123 * Note that even inside the atomic variant the apple_dart pointer is not
124 * protected: This pointer is initialized once under the domain init mutex
125 * and never changed again afterwards. Devices with different dart pointers
126 * cannot be attached to the same domain.
129 * @sid stream id bitmap
131 struct apple_dart_stream_map
{
132 struct apple_dart
*dart
;
133 unsigned long sidmap
;
135 struct apple_dart_atomic_stream_map
{
136 struct apple_dart
*dart
;
141 * This structure is attached to each iommu domain handled by a DART.
143 * @pgtbl_ops: pagetable ops allocated by io-pgtable
144 * @finalized: true if the domain has been completely initialized
145 * @init_lock: protects domain initialization
146 * @stream_maps: streams attached to this domain (valid for DMA/UNMANAGED only)
147 * @domain: core iommu domain pointer
149 struct apple_dart_domain
{
150 struct io_pgtable_ops
*pgtbl_ops
;
153 struct mutex init_lock
;
154 struct apple_dart_atomic_stream_map stream_maps
[MAX_DARTS_PER_DEVICE
];
156 struct iommu_domain domain
;
160 * This structure is attached to devices with dev_iommu_priv_set() on of_xlate
161 * and contains a list of streams bound to this device.
162 * So far the worst case seen is a single device with two streams
163 * from different darts, such that this simple static array is enough.
165 * @streams: streams for this device
167 struct apple_dart_master_cfg
{
168 struct apple_dart_stream_map stream_maps
[MAX_DARTS_PER_DEVICE
];
/*
 * Helper macro to iterate over apple_dart_master_cfg.stream_maps and
 * apple_dart_domain.stream_maps
 *
 * @i int used as loop variable
 * @base pointer to base struct (apple_dart_master_cfg or apple_dart_domain)
 * @stream pointer to the apple_dart_streams struct for each loop iteration
 */
#define for_each_stream_map(i, base, stream_map)                               \
	for (i = 0, stream_map = &(base)->stream_maps[0];                      \
	     i < MAX_DARTS_PER_DEVICE && stream_map->dart;                     \
	     stream_map = &(base)->stream_maps[++i])
184 static struct platform_driver apple_dart_driver
;
185 static const struct iommu_ops apple_dart_iommu_ops
;
186 static const struct iommu_flush_ops apple_dart_tlb_ops
;
188 static struct apple_dart_domain
*to_dart_domain(struct iommu_domain
*dom
)
190 return container_of(dom
, struct apple_dart_domain
, domain
);
194 apple_dart_hw_enable_translation(struct apple_dart_stream_map
*stream_map
)
198 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
)
199 writel(DART_TCR_TRANSLATE_ENABLE
,
200 stream_map
->dart
->regs
+ DART_TCR(sid
));
203 static void apple_dart_hw_disable_dma(struct apple_dart_stream_map
*stream_map
)
207 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
)
208 writel(0, stream_map
->dart
->regs
+ DART_TCR(sid
));
212 apple_dart_hw_enable_bypass(struct apple_dart_stream_map
*stream_map
)
216 WARN_ON(!stream_map
->dart
->supports_bypass
);
217 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
)
218 writel(DART_TCR_BYPASS0_ENABLE
| DART_TCR_BYPASS1_ENABLE
,
219 stream_map
->dart
->regs
+ DART_TCR(sid
));
222 static void apple_dart_hw_set_ttbr(struct apple_dart_stream_map
*stream_map
,
223 u8 idx
, phys_addr_t paddr
)
227 WARN_ON(paddr
& ((1 << DART_TTBR_SHIFT
) - 1));
228 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
)
229 writel(DART_TTBR_VALID
| (paddr
>> DART_TTBR_SHIFT
),
230 stream_map
->dart
->regs
+ DART_TTBR(sid
, idx
));
233 static void apple_dart_hw_clear_ttbr(struct apple_dart_stream_map
*stream_map
,
238 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
)
239 writel(0, stream_map
->dart
->regs
+ DART_TTBR(sid
, idx
));
243 apple_dart_hw_clear_all_ttbrs(struct apple_dart_stream_map
*stream_map
)
247 for (i
= 0; i
< DART_MAX_TTBR
; ++i
)
248 apple_dart_hw_clear_ttbr(stream_map
, i
);
252 apple_dart_hw_stream_command(struct apple_dart_stream_map
*stream_map
,
259 spin_lock_irqsave(&stream_map
->dart
->lock
, flags
);
261 writel(stream_map
->sidmap
, stream_map
->dart
->regs
+ DART_STREAM_SELECT
);
262 writel(command
, stream_map
->dart
->regs
+ DART_STREAM_COMMAND
);
264 ret
= readl_poll_timeout_atomic(
265 stream_map
->dart
->regs
+ DART_STREAM_COMMAND
, command_reg
,
266 !(command_reg
& DART_STREAM_COMMAND_BUSY
), 1,
267 DART_STREAM_COMMAND_BUSY_TIMEOUT
);
269 spin_unlock_irqrestore(&stream_map
->dart
->lock
, flags
);
272 dev_err(stream_map
->dart
->dev
,
273 "busy bit did not clear after command %x for streams %lx\n",
274 command
, stream_map
->sidmap
);
282 apple_dart_hw_invalidate_tlb(struct apple_dart_stream_map
*stream_map
)
284 return apple_dart_hw_stream_command(stream_map
,
285 DART_STREAM_COMMAND_INVALIDATE
);
288 static int apple_dart_hw_reset(struct apple_dart
*dart
)
291 struct apple_dart_stream_map stream_map
;
293 config
= readl(dart
->regs
+ DART_CONFIG
);
294 if (config
& DART_CONFIG_LOCK
) {
295 dev_err(dart
->dev
, "DART is locked down until reboot: %08x\n",
300 stream_map
.dart
= dart
;
301 stream_map
.sidmap
= DART_STREAM_ALL
;
302 apple_dart_hw_disable_dma(&stream_map
);
303 apple_dart_hw_clear_all_ttbrs(&stream_map
);
305 /* clear any pending errors before the interrupt is unmasked */
306 writel(readl(dart
->regs
+ DART_ERROR
), dart
->regs
+ DART_ERROR
);
308 return apple_dart_hw_invalidate_tlb(&stream_map
);
311 static void apple_dart_domain_flush_tlb(struct apple_dart_domain
*domain
)
314 struct apple_dart_atomic_stream_map
*domain_stream_map
;
315 struct apple_dart_stream_map stream_map
;
317 for_each_stream_map(i
, domain
, domain_stream_map
) {
318 stream_map
.dart
= domain_stream_map
->dart
;
319 stream_map
.sidmap
= atomic64_read(&domain_stream_map
->sidmap
);
320 apple_dart_hw_invalidate_tlb(&stream_map
);
/* iommu_ops callback: flush the whole IOTLB for @domain. */
static void apple_dart_flush_iotlb_all(struct iommu_domain *domain)
{
	apple_dart_domain_flush_tlb(to_dart_domain(domain));
}
/* iommu_ops callback: the hardware has no ranged flush, so flush everything. */
static void apple_dart_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	apple_dart_domain_flush_tlb(to_dart_domain(domain));
}
335 static void apple_dart_iotlb_sync_map(struct iommu_domain
*domain
,
336 unsigned long iova
, size_t size
)
338 apple_dart_domain_flush_tlb(to_dart_domain(domain
));
/* io-pgtable flush callback; @cookie is the apple_dart_domain. */
static void apple_dart_tlb_flush_all(void *cookie)
{
	apple_dart_domain_flush_tlb(cookie);
}
346 static void apple_dart_tlb_flush_walk(unsigned long iova
, size_t size
,
347 size_t granule
, void *cookie
)
349 apple_dart_domain_flush_tlb(cookie
);
352 static const struct iommu_flush_ops apple_dart_tlb_ops
= {
353 .tlb_flush_all
= apple_dart_tlb_flush_all
,
354 .tlb_flush_walk
= apple_dart_tlb_flush_walk
,
357 static phys_addr_t
apple_dart_iova_to_phys(struct iommu_domain
*domain
,
360 struct apple_dart_domain
*dart_domain
= to_dart_domain(domain
);
361 struct io_pgtable_ops
*ops
= dart_domain
->pgtbl_ops
;
366 return ops
->iova_to_phys(ops
, iova
);
369 static int apple_dart_map_pages(struct iommu_domain
*domain
, unsigned long iova
,
370 phys_addr_t paddr
, size_t pgsize
,
371 size_t pgcount
, int prot
, gfp_t gfp
,
374 struct apple_dart_domain
*dart_domain
= to_dart_domain(domain
);
375 struct io_pgtable_ops
*ops
= dart_domain
->pgtbl_ops
;
380 return ops
->map_pages(ops
, iova
, paddr
, pgsize
, pgcount
, prot
, gfp
,
384 static size_t apple_dart_unmap_pages(struct iommu_domain
*domain
,
385 unsigned long iova
, size_t pgsize
,
387 struct iommu_iotlb_gather
*gather
)
389 struct apple_dart_domain
*dart_domain
= to_dart_domain(domain
);
390 struct io_pgtable_ops
*ops
= dart_domain
->pgtbl_ops
;
392 return ops
->unmap_pages(ops
, iova
, pgsize
, pgcount
, gather
);
396 apple_dart_setup_translation(struct apple_dart_domain
*domain
,
397 struct apple_dart_stream_map
*stream_map
)
400 struct io_pgtable_cfg
*pgtbl_cfg
=
401 &io_pgtable_ops_to_pgtable(domain
->pgtbl_ops
)->cfg
;
403 for (i
= 0; i
< pgtbl_cfg
->apple_dart_cfg
.n_ttbrs
; ++i
)
404 apple_dart_hw_set_ttbr(stream_map
, i
,
405 pgtbl_cfg
->apple_dart_cfg
.ttbr
[i
]);
406 for (; i
< DART_MAX_TTBR
; ++i
)
407 apple_dart_hw_clear_ttbr(stream_map
, i
);
409 apple_dart_hw_enable_translation(stream_map
);
410 apple_dart_hw_invalidate_tlb(stream_map
);
413 static int apple_dart_finalize_domain(struct iommu_domain
*domain
,
414 struct apple_dart_master_cfg
*cfg
)
416 struct apple_dart_domain
*dart_domain
= to_dart_domain(domain
);
417 struct apple_dart
*dart
= cfg
->stream_maps
[0].dart
;
418 struct io_pgtable_cfg pgtbl_cfg
;
422 mutex_lock(&dart_domain
->init_lock
);
424 if (dart_domain
->finalized
)
427 for (i
= 0; i
< MAX_DARTS_PER_DEVICE
; ++i
) {
428 dart_domain
->stream_maps
[i
].dart
= cfg
->stream_maps
[i
].dart
;
429 atomic64_set(&dart_domain
->stream_maps
[i
].sidmap
,
430 cfg
->stream_maps
[i
].sidmap
);
433 pgtbl_cfg
= (struct io_pgtable_cfg
){
434 .pgsize_bitmap
= dart
->pgsize
,
438 .tlb
= &apple_dart_tlb_ops
,
439 .iommu_dev
= dart
->dev
,
442 dart_domain
->pgtbl_ops
=
443 alloc_io_pgtable_ops(APPLE_DART
, &pgtbl_cfg
, domain
);
444 if (!dart_domain
->pgtbl_ops
) {
449 domain
->pgsize_bitmap
= pgtbl_cfg
.pgsize_bitmap
;
450 domain
->geometry
.aperture_start
= 0;
451 domain
->geometry
.aperture_end
= DMA_BIT_MASK(32);
452 domain
->geometry
.force_aperture
= true;
454 dart_domain
->finalized
= true;
457 mutex_unlock(&dart_domain
->init_lock
);
462 apple_dart_mod_streams(struct apple_dart_atomic_stream_map
*domain_maps
,
463 struct apple_dart_stream_map
*master_maps
,
468 for (i
= 0; i
< MAX_DARTS_PER_DEVICE
; ++i
) {
469 if (domain_maps
[i
].dart
!= master_maps
[i
].dart
)
473 for (i
= 0; i
< MAX_DARTS_PER_DEVICE
; ++i
) {
474 if (!domain_maps
[i
].dart
)
477 atomic64_or(master_maps
[i
].sidmap
,
478 &domain_maps
[i
].sidmap
);
480 atomic64_and(~master_maps
[i
].sidmap
,
481 &domain_maps
[i
].sidmap
);
487 static int apple_dart_domain_add_streams(struct apple_dart_domain
*domain
,
488 struct apple_dart_master_cfg
*cfg
)
490 return apple_dart_mod_streams(domain
->stream_maps
, cfg
->stream_maps
,
494 static int apple_dart_domain_remove_streams(struct apple_dart_domain
*domain
,
495 struct apple_dart_master_cfg
*cfg
)
497 return apple_dart_mod_streams(domain
->stream_maps
, cfg
->stream_maps
,
501 static int apple_dart_attach_dev(struct iommu_domain
*domain
,
505 struct apple_dart_stream_map
*stream_map
;
506 struct apple_dart_master_cfg
*cfg
= dev_iommu_priv_get(dev
);
507 struct apple_dart_domain
*dart_domain
= to_dart_domain(domain
);
509 if (cfg
->stream_maps
[0].dart
->force_bypass
&&
510 domain
->type
!= IOMMU_DOMAIN_IDENTITY
)
512 if (!cfg
->stream_maps
[0].dart
->supports_bypass
&&
513 domain
->type
== IOMMU_DOMAIN_IDENTITY
)
516 ret
= apple_dart_finalize_domain(domain
, cfg
);
520 switch (domain
->type
) {
521 case IOMMU_DOMAIN_DMA
:
522 case IOMMU_DOMAIN_UNMANAGED
:
523 ret
= apple_dart_domain_add_streams(dart_domain
, cfg
);
527 for_each_stream_map(i
, cfg
, stream_map
)
528 apple_dart_setup_translation(dart_domain
, stream_map
);
530 case IOMMU_DOMAIN_BLOCKED
:
531 for_each_stream_map(i
, cfg
, stream_map
)
532 apple_dart_hw_disable_dma(stream_map
);
534 case IOMMU_DOMAIN_IDENTITY
:
535 for_each_stream_map(i
, cfg
, stream_map
)
536 apple_dart_hw_enable_bypass(stream_map
);
543 static void apple_dart_detach_dev(struct iommu_domain
*domain
,
547 struct apple_dart_stream_map
*stream_map
;
548 struct apple_dart_master_cfg
*cfg
= dev_iommu_priv_get(dev
);
549 struct apple_dart_domain
*dart_domain
= to_dart_domain(domain
);
551 for_each_stream_map(i
, cfg
, stream_map
)
552 apple_dart_hw_disable_dma(stream_map
);
554 if (domain
->type
== IOMMU_DOMAIN_DMA
||
555 domain
->type
== IOMMU_DOMAIN_UNMANAGED
)
556 apple_dart_domain_remove_streams(dart_domain
, cfg
);
559 static struct iommu_device
*apple_dart_probe_device(struct device
*dev
)
561 struct apple_dart_master_cfg
*cfg
= dev_iommu_priv_get(dev
);
562 struct apple_dart_stream_map
*stream_map
;
566 return ERR_PTR(-ENODEV
);
568 for_each_stream_map(i
, cfg
, stream_map
)
570 dev
, stream_map
->dart
->dev
,
571 DL_FLAG_PM_RUNTIME
| DL_FLAG_AUTOREMOVE_SUPPLIER
);
573 return &cfg
->stream_maps
[0].dart
->iommu
;
576 static void apple_dart_release_device(struct device
*dev
)
578 struct apple_dart_master_cfg
*cfg
= dev_iommu_priv_get(dev
);
583 dev_iommu_priv_set(dev
, NULL
);
587 static struct iommu_domain
*apple_dart_domain_alloc(unsigned int type
)
589 struct apple_dart_domain
*dart_domain
;
591 if (type
!= IOMMU_DOMAIN_DMA
&& type
!= IOMMU_DOMAIN_UNMANAGED
&&
592 type
!= IOMMU_DOMAIN_IDENTITY
&& type
!= IOMMU_DOMAIN_BLOCKED
)
595 dart_domain
= kzalloc(sizeof(*dart_domain
), GFP_KERNEL
);
599 iommu_get_dma_cookie(&dart_domain
->domain
);
600 mutex_init(&dart_domain
->init_lock
);
602 /* no need to allocate pgtbl_ops or do any other finalization steps */
603 if (type
== IOMMU_DOMAIN_IDENTITY
|| type
== IOMMU_DOMAIN_BLOCKED
)
604 dart_domain
->finalized
= true;
606 return &dart_domain
->domain
;
609 static void apple_dart_domain_free(struct iommu_domain
*domain
)
611 struct apple_dart_domain
*dart_domain
= to_dart_domain(domain
);
613 if (dart_domain
->pgtbl_ops
)
614 free_io_pgtable_ops(dart_domain
->pgtbl_ops
);
619 static int apple_dart_of_xlate(struct device
*dev
, struct of_phandle_args
*args
)
621 struct apple_dart_master_cfg
*cfg
= dev_iommu_priv_get(dev
);
622 struct platform_device
*iommu_pdev
= of_find_device_by_node(args
->np
);
623 struct apple_dart
*dart
= platform_get_drvdata(iommu_pdev
);
624 struct apple_dart
*cfg_dart
;
627 if (args
->args_count
!= 1)
632 cfg
= kzalloc(sizeof(*cfg
), GFP_KERNEL
);
635 dev_iommu_priv_set(dev
, cfg
);
637 cfg_dart
= cfg
->stream_maps
[0].dart
;
639 if (cfg_dart
->supports_bypass
!= dart
->supports_bypass
)
641 if (cfg_dart
->force_bypass
!= dart
->force_bypass
)
643 if (cfg_dart
->pgsize
!= dart
->pgsize
)
647 for (i
= 0; i
< MAX_DARTS_PER_DEVICE
; ++i
) {
648 if (cfg
->stream_maps
[i
].dart
== dart
) {
649 cfg
->stream_maps
[i
].sidmap
|= 1 << sid
;
653 for (i
= 0; i
< MAX_DARTS_PER_DEVICE
; ++i
) {
654 if (!cfg
->stream_maps
[i
].dart
) {
655 cfg
->stream_maps
[i
].dart
= dart
;
656 cfg
->stream_maps
[i
].sidmap
= 1 << sid
;
664 static struct iommu_group
*apple_dart_device_group(struct device
*dev
)
666 static DEFINE_MUTEX(lock
);
668 struct apple_dart_master_cfg
*cfg
= dev_iommu_priv_get(dev
);
669 struct apple_dart_stream_map
*stream_map
;
670 struct iommu_group
*group
= NULL
;
671 struct iommu_group
*res
= ERR_PTR(-EINVAL
);
675 for_each_stream_map(i
, cfg
, stream_map
) {
676 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
) {
677 struct iommu_group
*stream_group
=
678 stream_map
->dart
->sid2group
[sid
];
680 if (group
&& group
!= stream_group
) {
681 res
= ERR_PTR(-EINVAL
);
685 group
= stream_group
;
690 res
= iommu_group_ref_get(group
);
696 group
= pci_device_group(dev
);
699 group
= generic_device_group(dev
);
701 for_each_stream_map(i
, cfg
, stream_map
)
702 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
)
703 stream_map
->dart
->sid2group
[sid
] = group
;
712 static int apple_dart_def_domain_type(struct device
*dev
)
714 struct apple_dart_master_cfg
*cfg
= dev_iommu_priv_get(dev
);
716 if (cfg
->stream_maps
[0].dart
->force_bypass
)
717 return IOMMU_DOMAIN_IDENTITY
;
718 if (!cfg
->stream_maps
[0].dart
->supports_bypass
)
719 return IOMMU_DOMAIN_DMA
;
724 static const struct iommu_ops apple_dart_iommu_ops
= {
725 .domain_alloc
= apple_dart_domain_alloc
,
726 .domain_free
= apple_dart_domain_free
,
727 .attach_dev
= apple_dart_attach_dev
,
728 .detach_dev
= apple_dart_detach_dev
,
729 .map_pages
= apple_dart_map_pages
,
730 .unmap_pages
= apple_dart_unmap_pages
,
731 .flush_iotlb_all
= apple_dart_flush_iotlb_all
,
732 .iotlb_sync
= apple_dart_iotlb_sync
,
733 .iotlb_sync_map
= apple_dart_iotlb_sync_map
,
734 .iova_to_phys
= apple_dart_iova_to_phys
,
735 .probe_device
= apple_dart_probe_device
,
736 .release_device
= apple_dart_release_device
,
737 .device_group
= apple_dart_device_group
,
738 .of_xlate
= apple_dart_of_xlate
,
739 .def_domain_type
= apple_dart_def_domain_type
,
740 .pgsize_bitmap
= -1UL, /* Restricted during dart probe */
743 static irqreturn_t
apple_dart_irq(int irq
, void *dev
)
745 struct apple_dart
*dart
= dev
;
746 const char *fault_name
= NULL
;
747 u32 error
= readl(dart
->regs
+ DART_ERROR
);
748 u32 error_code
= FIELD_GET(DART_ERROR_CODE
, error
);
749 u32 addr_lo
= readl(dart
->regs
+ DART_ERROR_ADDR_LO
);
750 u32 addr_hi
= readl(dart
->regs
+ DART_ERROR_ADDR_HI
);
751 u64 addr
= addr_lo
| (((u64
)addr_hi
) << 32);
752 u8 stream_idx
= FIELD_GET(DART_ERROR_STREAM
, error
);
754 if (!(error
& DART_ERROR_FLAG
))
757 /* there should only be a single bit set but let's use == to be sure */
758 if (error_code
== DART_ERROR_READ_FAULT
)
759 fault_name
= "READ FAULT";
760 else if (error_code
== DART_ERROR_WRITE_FAULT
)
761 fault_name
= "WRITE FAULT";
762 else if (error_code
== DART_ERROR_NO_PTE
)
763 fault_name
= "NO PTE FOR IOVA";
764 else if (error_code
== DART_ERROR_NO_PMD
)
765 fault_name
= "NO PMD FOR IOVA";
766 else if (error_code
== DART_ERROR_NO_TTBR
)
767 fault_name
= "NO TTBR FOR IOVA";
769 fault_name
= "unknown";
773 "translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx",
774 error
, stream_idx
, error_code
, fault_name
, addr
);
776 writel(error
, dart
->regs
+ DART_ERROR
);
780 static int apple_dart_set_bus_ops(const struct iommu_ops
*ops
)
784 if (!iommu_present(&platform_bus_type
)) {
785 ret
= bus_set_iommu(&platform_bus_type
, ops
);
790 if (!iommu_present(&pci_bus_type
)) {
791 ret
= bus_set_iommu(&pci_bus_type
, ops
);
793 bus_set_iommu(&platform_bus_type
, NULL
);
801 static int apple_dart_probe(struct platform_device
*pdev
)
805 struct resource
*res
;
806 struct apple_dart
*dart
;
807 struct device
*dev
= &pdev
->dev
;
809 dart
= devm_kzalloc(dev
, sizeof(*dart
), GFP_KERNEL
);
814 spin_lock_init(&dart
->lock
);
816 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
817 if (resource_size(res
) < 0x4000) {
818 dev_err(dev
, "MMIO region too small (%pr)\n", res
);
822 dart
->regs
= devm_ioremap_resource(dev
, res
);
823 if (IS_ERR(dart
->regs
))
824 return PTR_ERR(dart
->regs
);
826 dart
->irq
= platform_get_irq(pdev
, 0);
830 ret
= devm_clk_bulk_get_all(dev
, &dart
->clks
);
833 dart
->num_clks
= ret
;
835 ret
= clk_bulk_prepare_enable(dart
->num_clks
, dart
->clks
);
839 ret
= apple_dart_hw_reset(dart
);
841 goto err_clk_disable
;
843 dart_params
[0] = readl(dart
->regs
+ DART_PARAMS1
);
844 dart_params
[1] = readl(dart
->regs
+ DART_PARAMS2
);
845 dart
->pgsize
= 1 << FIELD_GET(DART_PARAMS_PAGE_SHIFT
, dart_params
[0]);
846 dart
->supports_bypass
= dart_params
[1] & DART_PARAMS_BYPASS_SUPPORT
;
847 dart
->force_bypass
= dart
->pgsize
> PAGE_SIZE
;
849 ret
= request_irq(dart
->irq
, apple_dart_irq
, IRQF_SHARED
,
850 "apple-dart fault handler", dart
);
852 goto err_clk_disable
;
854 platform_set_drvdata(pdev
, dart
);
856 ret
= apple_dart_set_bus_ops(&apple_dart_iommu_ops
);
860 ret
= iommu_device_sysfs_add(&dart
->iommu
, dev
, NULL
, "apple-dart.%s",
861 dev_name(&pdev
->dev
));
863 goto err_remove_bus_ops
;
865 ret
= iommu_device_register(&dart
->iommu
, &apple_dart_iommu_ops
, dev
);
867 goto err_sysfs_remove
;
871 "DART [pagesize %x, bypass support: %d, bypass forced: %d] initialized\n",
872 dart
->pgsize
, dart
->supports_bypass
, dart
->force_bypass
);
876 iommu_device_sysfs_remove(&dart
->iommu
);
878 apple_dart_set_bus_ops(NULL
);
880 free_irq(dart
->irq
, dart
);
882 clk_bulk_disable_unprepare(dart
->num_clks
, dart
->clks
);
887 static int apple_dart_remove(struct platform_device
*pdev
)
889 struct apple_dart
*dart
= platform_get_drvdata(pdev
);
891 apple_dart_hw_reset(dart
);
892 free_irq(dart
->irq
, dart
);
893 apple_dart_set_bus_ops(NULL
);
895 iommu_device_unregister(&dart
->iommu
);
896 iommu_device_sysfs_remove(&dart
->iommu
);
898 clk_bulk_disable_unprepare(dart
->num_clks
, dart
->clks
);
903 static const struct of_device_id apple_dart_of_match
[] = {
904 { .compatible
= "apple,t8103-dart", .data
= NULL
},
907 MODULE_DEVICE_TABLE(of
, apple_dart_of_match
);
909 static struct platform_driver apple_dart_driver
= {
911 .name
= "apple-dart",
912 .of_match_table
= apple_dart_of_match
,
913 .suppress_bind_attrs
= true,
915 .probe
= apple_dart_probe
,
916 .remove
= apple_dart_remove
,
919 module_platform_driver(apple_dart_driver
);
921 MODULE_DESCRIPTION("IOMMU API for Apple's DART");
922 MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
923 MODULE_LICENSE("GPL v2");