// SPDX-License-Identifier: GPL-2.0-only
/*
 * Apple DART (Device Address Resolution Table) IOMMU driver
 *
 * Copyright (C) 2021 The Asahi Linux Contributors
 *
 * Based on arm/arm-smmu/arm-smmu.c and arm/arm-smmu-v3/arm-smmu-v3.c
 * Copyright (C) 2013 ARM Limited
 * Copyright (C) 2015 ARM Limited
 * and on exynos-iommu.c
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 */
#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/dev_printk.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include <linux/types.h>
#define DART_MAX_STREAMS 16
#define DART_MAX_TTBR 4
#define MAX_DARTS_PER_DEVICE 2

/* sidmap value selecting every stream of a DART */
#define DART_STREAM_ALL 0xffff

#define DART_PARAMS1 0x00
#define DART_PARAMS_PAGE_SHIFT GENMASK(27, 24)

#define DART_PARAMS2 0x04
#define DART_PARAMS_BYPASS_SUPPORT BIT(0)

#define DART_STREAM_COMMAND 0x20
#define DART_STREAM_COMMAND_BUSY BIT(2)
#define DART_STREAM_COMMAND_INVALIDATE BIT(20)

#define DART_STREAM_SELECT 0x34

#define DART_ERROR 0x40
#define DART_ERROR_STREAM GENMASK(27, 24)
#define DART_ERROR_CODE GENMASK(11, 0)
#define DART_ERROR_FLAG BIT(31)

#define DART_ERROR_READ_FAULT BIT(4)
#define DART_ERROR_WRITE_FAULT BIT(3)
#define DART_ERROR_NO_PTE BIT(2)
#define DART_ERROR_NO_PMD BIT(1)
#define DART_ERROR_NO_TTBR BIT(0)

#define DART_CONFIG 0x60
#define DART_CONFIG_LOCK BIT(15)

/* poll timeout (us) for the stream-command busy bit */
#define DART_STREAM_COMMAND_BUSY_TIMEOUT 100

#define DART_ERROR_ADDR_HI 0x54
#define DART_ERROR_ADDR_LO 0x50

/* per-stream translation configuration register */
#define DART_TCR(sid) (0x100 + 4 * (sid))
#define DART_TCR_TRANSLATE_ENABLE BIT(7)
#define DART_TCR_BYPASS0_ENABLE BIT(8)
#define DART_TCR_BYPASS1_ENABLE BIT(12)

/* per-stream translation table base registers */
#define DART_TTBR(sid, idx) (0x200 + 16 * (sid) + 4 * (idx))
#define DART_TTBR_VALID BIT(31)
#define DART_TTBR_SHIFT 12
83 * Private structure associated with each DART device.
86 * @regs: mapped MMIO region
87 * @irq: interrupt number, can be shared with other DARTs
88 * @clks: clocks associated with this DART
89 * @num_clks: number of @clks
90 * @lock: lock for hardware operations involving this dart
91 * @pgsize: pagesize supported by this DART
92 * @supports_bypass: indicates if this DART supports bypass mode
93 * @force_bypass: force bypass mode due to pagesize mismatch?
94 * @sid2group: maps stream ids to iommu_groups
95 * @iommu: iommu core device
103 struct clk_bulk_data
*clks
;
109 u32 supports_bypass
: 1;
110 u32 force_bypass
: 1;
112 struct iommu_group
*sid2group
[DART_MAX_STREAMS
];
113 struct iommu_device iommu
;
117 * Convenience struct to identify streams.
119 * The normal variant is used inside apple_dart_master_cfg which isn't written
121 * The atomic variant is used inside apple_dart_domain where we have to guard
122 * against races from potential parallel calls to attach/detach_device.
123 * Note that even inside the atomic variant the apple_dart pointer is not
124 * protected: This pointer is initialized once under the domain init mutex
125 * and never changed again afterwards. Devices with different dart pointers
126 * cannot be attached to the same domain.
129 * @sid stream id bitmap
131 struct apple_dart_stream_map
{
132 struct apple_dart
*dart
;
133 unsigned long sidmap
;
135 struct apple_dart_atomic_stream_map
{
136 struct apple_dart
*dart
;
141 * This structure is attached to each iommu domain handled by a DART.
143 * @pgtbl_ops: pagetable ops allocated by io-pgtable
144 * @finalized: true if the domain has been completely initialized
145 * @init_lock: protects domain initialization
146 * @stream_maps: streams attached to this domain (valid for DMA/UNMANAGED only)
147 * @domain: core iommu domain pointer
149 struct apple_dart_domain
{
150 struct io_pgtable_ops
*pgtbl_ops
;
153 struct mutex init_lock
;
154 struct apple_dart_atomic_stream_map stream_maps
[MAX_DARTS_PER_DEVICE
];
156 struct iommu_domain domain
;
160 * This structure is attached to devices with dev_iommu_priv_set() on of_xlate
161 * and contains a list of streams bound to this device.
162 * So far the worst case seen is a single device with two streams
163 * from different darts, such that this simple static array is enough.
165 * @streams: streams for this device
167 struct apple_dart_master_cfg
{
168 struct apple_dart_stream_map stream_maps
[MAX_DARTS_PER_DEVICE
];
/*
 * Helper macro to iterate over apple_dart_master_cfg.stream_maps and
 * apple_dart_domain.stream_maps
 *
 * @i int used as loop variable
 * @base pointer to base struct (apple_dart_master_cfg or apple_dart_domain)
 * @stream pointer to the apple_dart_streams struct for each loop iteration
 */
#define for_each_stream_map(i, base, stream_map)                               \
	for (i = 0, stream_map = &(base)->stream_maps[0];                      \
	     i < MAX_DARTS_PER_DEVICE && stream_map->dart;                     \
	     stream_map = &(base)->stream_maps[++i])
184 static struct platform_driver apple_dart_driver
;
185 static const struct iommu_ops apple_dart_iommu_ops
;
187 static struct apple_dart_domain
*to_dart_domain(struct iommu_domain
*dom
)
189 return container_of(dom
, struct apple_dart_domain
, domain
);
193 apple_dart_hw_enable_translation(struct apple_dart_stream_map
*stream_map
)
197 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
)
198 writel(DART_TCR_TRANSLATE_ENABLE
,
199 stream_map
->dart
->regs
+ DART_TCR(sid
));
202 static void apple_dart_hw_disable_dma(struct apple_dart_stream_map
*stream_map
)
206 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
)
207 writel(0, stream_map
->dart
->regs
+ DART_TCR(sid
));
211 apple_dart_hw_enable_bypass(struct apple_dart_stream_map
*stream_map
)
215 WARN_ON(!stream_map
->dart
->supports_bypass
);
216 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
)
217 writel(DART_TCR_BYPASS0_ENABLE
| DART_TCR_BYPASS1_ENABLE
,
218 stream_map
->dart
->regs
+ DART_TCR(sid
));
221 static void apple_dart_hw_set_ttbr(struct apple_dart_stream_map
*stream_map
,
222 u8 idx
, phys_addr_t paddr
)
226 WARN_ON(paddr
& ((1 << DART_TTBR_SHIFT
) - 1));
227 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
)
228 writel(DART_TTBR_VALID
| (paddr
>> DART_TTBR_SHIFT
),
229 stream_map
->dart
->regs
+ DART_TTBR(sid
, idx
));
232 static void apple_dart_hw_clear_ttbr(struct apple_dart_stream_map
*stream_map
,
237 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
)
238 writel(0, stream_map
->dart
->regs
+ DART_TTBR(sid
, idx
));
242 apple_dart_hw_clear_all_ttbrs(struct apple_dart_stream_map
*stream_map
)
246 for (i
= 0; i
< DART_MAX_TTBR
; ++i
)
247 apple_dart_hw_clear_ttbr(stream_map
, i
);
251 apple_dart_hw_stream_command(struct apple_dart_stream_map
*stream_map
,
258 spin_lock_irqsave(&stream_map
->dart
->lock
, flags
);
260 writel(stream_map
->sidmap
, stream_map
->dart
->regs
+ DART_STREAM_SELECT
);
261 writel(command
, stream_map
->dart
->regs
+ DART_STREAM_COMMAND
);
263 ret
= readl_poll_timeout_atomic(
264 stream_map
->dart
->regs
+ DART_STREAM_COMMAND
, command_reg
,
265 !(command_reg
& DART_STREAM_COMMAND_BUSY
), 1,
266 DART_STREAM_COMMAND_BUSY_TIMEOUT
);
268 spin_unlock_irqrestore(&stream_map
->dart
->lock
, flags
);
271 dev_err(stream_map
->dart
->dev
,
272 "busy bit did not clear after command %x for streams %lx\n",
273 command
, stream_map
->sidmap
);
281 apple_dart_hw_invalidate_tlb(struct apple_dart_stream_map
*stream_map
)
283 return apple_dart_hw_stream_command(stream_map
,
284 DART_STREAM_COMMAND_INVALIDATE
);
287 static int apple_dart_hw_reset(struct apple_dart
*dart
)
290 struct apple_dart_stream_map stream_map
;
292 config
= readl(dart
->regs
+ DART_CONFIG
);
293 if (config
& DART_CONFIG_LOCK
) {
294 dev_err(dart
->dev
, "DART is locked down until reboot: %08x\n",
299 stream_map
.dart
= dart
;
300 stream_map
.sidmap
= DART_STREAM_ALL
;
301 apple_dart_hw_disable_dma(&stream_map
);
302 apple_dart_hw_clear_all_ttbrs(&stream_map
);
304 /* clear any pending errors before the interrupt is unmasked */
305 writel(readl(dart
->regs
+ DART_ERROR
), dart
->regs
+ DART_ERROR
);
307 return apple_dart_hw_invalidate_tlb(&stream_map
);
310 static void apple_dart_domain_flush_tlb(struct apple_dart_domain
*domain
)
313 struct apple_dart_atomic_stream_map
*domain_stream_map
;
314 struct apple_dart_stream_map stream_map
;
316 for_each_stream_map(i
, domain
, domain_stream_map
) {
317 stream_map
.dart
= domain_stream_map
->dart
;
318 stream_map
.sidmap
= atomic64_read(&domain_stream_map
->sidmap
);
319 apple_dart_hw_invalidate_tlb(&stream_map
);
static void apple_dart_flush_iotlb_all(struct iommu_domain *domain)
{
	apple_dart_domain_flush_tlb(to_dart_domain(domain));
}
static void apple_dart_iotlb_sync(struct iommu_domain *domain,
				  struct iommu_iotlb_gather *gather)
{
	apple_dart_domain_flush_tlb(to_dart_domain(domain));
}
334 static void apple_dart_iotlb_sync_map(struct iommu_domain
*domain
,
335 unsigned long iova
, size_t size
)
337 apple_dart_domain_flush_tlb(to_dart_domain(domain
));
340 static phys_addr_t
apple_dart_iova_to_phys(struct iommu_domain
*domain
,
343 struct apple_dart_domain
*dart_domain
= to_dart_domain(domain
);
344 struct io_pgtable_ops
*ops
= dart_domain
->pgtbl_ops
;
349 return ops
->iova_to_phys(ops
, iova
);
352 static int apple_dart_map_pages(struct iommu_domain
*domain
, unsigned long iova
,
353 phys_addr_t paddr
, size_t pgsize
,
354 size_t pgcount
, int prot
, gfp_t gfp
,
357 struct apple_dart_domain
*dart_domain
= to_dart_domain(domain
);
358 struct io_pgtable_ops
*ops
= dart_domain
->pgtbl_ops
;
363 return ops
->map_pages(ops
, iova
, paddr
, pgsize
, pgcount
, prot
, gfp
,
367 static size_t apple_dart_unmap_pages(struct iommu_domain
*domain
,
368 unsigned long iova
, size_t pgsize
,
370 struct iommu_iotlb_gather
*gather
)
372 struct apple_dart_domain
*dart_domain
= to_dart_domain(domain
);
373 struct io_pgtable_ops
*ops
= dart_domain
->pgtbl_ops
;
375 return ops
->unmap_pages(ops
, iova
, pgsize
, pgcount
, gather
);
379 apple_dart_setup_translation(struct apple_dart_domain
*domain
,
380 struct apple_dart_stream_map
*stream_map
)
383 struct io_pgtable_cfg
*pgtbl_cfg
=
384 &io_pgtable_ops_to_pgtable(domain
->pgtbl_ops
)->cfg
;
386 for (i
= 0; i
< pgtbl_cfg
->apple_dart_cfg
.n_ttbrs
; ++i
)
387 apple_dart_hw_set_ttbr(stream_map
, i
,
388 pgtbl_cfg
->apple_dart_cfg
.ttbr
[i
]);
389 for (; i
< DART_MAX_TTBR
; ++i
)
390 apple_dart_hw_clear_ttbr(stream_map
, i
);
392 apple_dart_hw_enable_translation(stream_map
);
393 apple_dart_hw_invalidate_tlb(stream_map
);
396 static int apple_dart_finalize_domain(struct iommu_domain
*domain
,
397 struct apple_dart_master_cfg
*cfg
)
399 struct apple_dart_domain
*dart_domain
= to_dart_domain(domain
);
400 struct apple_dart
*dart
= cfg
->stream_maps
[0].dart
;
401 struct io_pgtable_cfg pgtbl_cfg
;
405 mutex_lock(&dart_domain
->init_lock
);
407 if (dart_domain
->finalized
)
410 for (i
= 0; i
< MAX_DARTS_PER_DEVICE
; ++i
) {
411 dart_domain
->stream_maps
[i
].dart
= cfg
->stream_maps
[i
].dart
;
412 atomic64_set(&dart_domain
->stream_maps
[i
].sidmap
,
413 cfg
->stream_maps
[i
].sidmap
);
416 pgtbl_cfg
= (struct io_pgtable_cfg
){
417 .pgsize_bitmap
= dart
->pgsize
,
421 .iommu_dev
= dart
->dev
,
424 dart_domain
->pgtbl_ops
=
425 alloc_io_pgtable_ops(APPLE_DART
, &pgtbl_cfg
, domain
);
426 if (!dart_domain
->pgtbl_ops
) {
431 domain
->pgsize_bitmap
= pgtbl_cfg
.pgsize_bitmap
;
432 domain
->geometry
.aperture_start
= 0;
433 domain
->geometry
.aperture_end
= DMA_BIT_MASK(32);
434 domain
->geometry
.force_aperture
= true;
436 dart_domain
->finalized
= true;
439 mutex_unlock(&dart_domain
->init_lock
);
444 apple_dart_mod_streams(struct apple_dart_atomic_stream_map
*domain_maps
,
445 struct apple_dart_stream_map
*master_maps
,
450 for (i
= 0; i
< MAX_DARTS_PER_DEVICE
; ++i
) {
451 if (domain_maps
[i
].dart
!= master_maps
[i
].dart
)
455 for (i
= 0; i
< MAX_DARTS_PER_DEVICE
; ++i
) {
456 if (!domain_maps
[i
].dart
)
459 atomic64_or(master_maps
[i
].sidmap
,
460 &domain_maps
[i
].sidmap
);
462 atomic64_and(~master_maps
[i
].sidmap
,
463 &domain_maps
[i
].sidmap
);
469 static int apple_dart_domain_add_streams(struct apple_dart_domain
*domain
,
470 struct apple_dart_master_cfg
*cfg
)
472 return apple_dart_mod_streams(domain
->stream_maps
, cfg
->stream_maps
,
476 static int apple_dart_domain_remove_streams(struct apple_dart_domain
*domain
,
477 struct apple_dart_master_cfg
*cfg
)
479 return apple_dart_mod_streams(domain
->stream_maps
, cfg
->stream_maps
,
483 static int apple_dart_attach_dev(struct iommu_domain
*domain
,
487 struct apple_dart_stream_map
*stream_map
;
488 struct apple_dart_master_cfg
*cfg
= dev_iommu_priv_get(dev
);
489 struct apple_dart_domain
*dart_domain
= to_dart_domain(domain
);
491 if (cfg
->stream_maps
[0].dart
->force_bypass
&&
492 domain
->type
!= IOMMU_DOMAIN_IDENTITY
)
494 if (!cfg
->stream_maps
[0].dart
->supports_bypass
&&
495 domain
->type
== IOMMU_DOMAIN_IDENTITY
)
498 ret
= apple_dart_finalize_domain(domain
, cfg
);
502 switch (domain
->type
) {
503 case IOMMU_DOMAIN_DMA
:
504 case IOMMU_DOMAIN_UNMANAGED
:
505 ret
= apple_dart_domain_add_streams(dart_domain
, cfg
);
509 for_each_stream_map(i
, cfg
, stream_map
)
510 apple_dart_setup_translation(dart_domain
, stream_map
);
512 case IOMMU_DOMAIN_BLOCKED
:
513 for_each_stream_map(i
, cfg
, stream_map
)
514 apple_dart_hw_disable_dma(stream_map
);
516 case IOMMU_DOMAIN_IDENTITY
:
517 for_each_stream_map(i
, cfg
, stream_map
)
518 apple_dart_hw_enable_bypass(stream_map
);
525 static void apple_dart_detach_dev(struct iommu_domain
*domain
,
529 struct apple_dart_stream_map
*stream_map
;
530 struct apple_dart_master_cfg
*cfg
= dev_iommu_priv_get(dev
);
531 struct apple_dart_domain
*dart_domain
= to_dart_domain(domain
);
533 for_each_stream_map(i
, cfg
, stream_map
)
534 apple_dart_hw_disable_dma(stream_map
);
536 if (domain
->type
== IOMMU_DOMAIN_DMA
||
537 domain
->type
== IOMMU_DOMAIN_UNMANAGED
)
538 apple_dart_domain_remove_streams(dart_domain
, cfg
);
541 static struct iommu_device
*apple_dart_probe_device(struct device
*dev
)
543 struct apple_dart_master_cfg
*cfg
= dev_iommu_priv_get(dev
);
544 struct apple_dart_stream_map
*stream_map
;
548 return ERR_PTR(-ENODEV
);
550 for_each_stream_map(i
, cfg
, stream_map
)
552 dev
, stream_map
->dart
->dev
,
553 DL_FLAG_PM_RUNTIME
| DL_FLAG_AUTOREMOVE_SUPPLIER
);
555 return &cfg
->stream_maps
[0].dart
->iommu
;
558 static void apple_dart_release_device(struct device
*dev
)
560 struct apple_dart_master_cfg
*cfg
= dev_iommu_priv_get(dev
);
565 dev_iommu_priv_set(dev
, NULL
);
569 static struct iommu_domain
*apple_dart_domain_alloc(unsigned int type
)
571 struct apple_dart_domain
*dart_domain
;
573 if (type
!= IOMMU_DOMAIN_DMA
&& type
!= IOMMU_DOMAIN_UNMANAGED
&&
574 type
!= IOMMU_DOMAIN_IDENTITY
&& type
!= IOMMU_DOMAIN_BLOCKED
)
577 dart_domain
= kzalloc(sizeof(*dart_domain
), GFP_KERNEL
);
581 iommu_get_dma_cookie(&dart_domain
->domain
);
582 mutex_init(&dart_domain
->init_lock
);
584 /* no need to allocate pgtbl_ops or do any other finalization steps */
585 if (type
== IOMMU_DOMAIN_IDENTITY
|| type
== IOMMU_DOMAIN_BLOCKED
)
586 dart_domain
->finalized
= true;
588 return &dart_domain
->domain
;
591 static void apple_dart_domain_free(struct iommu_domain
*domain
)
593 struct apple_dart_domain
*dart_domain
= to_dart_domain(domain
);
595 if (dart_domain
->pgtbl_ops
)
596 free_io_pgtable_ops(dart_domain
->pgtbl_ops
);
601 static int apple_dart_of_xlate(struct device
*dev
, struct of_phandle_args
*args
)
603 struct apple_dart_master_cfg
*cfg
= dev_iommu_priv_get(dev
);
604 struct platform_device
*iommu_pdev
= of_find_device_by_node(args
->np
);
605 struct apple_dart
*dart
= platform_get_drvdata(iommu_pdev
);
606 struct apple_dart
*cfg_dart
;
609 if (args
->args_count
!= 1)
614 cfg
= kzalloc(sizeof(*cfg
), GFP_KERNEL
);
617 dev_iommu_priv_set(dev
, cfg
);
619 cfg_dart
= cfg
->stream_maps
[0].dart
;
621 if (cfg_dart
->supports_bypass
!= dart
->supports_bypass
)
623 if (cfg_dart
->force_bypass
!= dart
->force_bypass
)
625 if (cfg_dart
->pgsize
!= dart
->pgsize
)
629 for (i
= 0; i
< MAX_DARTS_PER_DEVICE
; ++i
) {
630 if (cfg
->stream_maps
[i
].dart
== dart
) {
631 cfg
->stream_maps
[i
].sidmap
|= 1 << sid
;
635 for (i
= 0; i
< MAX_DARTS_PER_DEVICE
; ++i
) {
636 if (!cfg
->stream_maps
[i
].dart
) {
637 cfg
->stream_maps
[i
].dart
= dart
;
638 cfg
->stream_maps
[i
].sidmap
= 1 << sid
;
646 static DEFINE_MUTEX(apple_dart_groups_lock
);
648 static void apple_dart_release_group(void *iommu_data
)
651 struct apple_dart_stream_map
*stream_map
;
652 struct apple_dart_master_cfg
*group_master_cfg
= iommu_data
;
654 mutex_lock(&apple_dart_groups_lock
);
656 for_each_stream_map(i
, group_master_cfg
, stream_map
)
657 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
)
658 stream_map
->dart
->sid2group
[sid
] = NULL
;
661 mutex_unlock(&apple_dart_groups_lock
);
664 static struct iommu_group
*apple_dart_device_group(struct device
*dev
)
667 struct apple_dart_master_cfg
*cfg
= dev_iommu_priv_get(dev
);
668 struct apple_dart_stream_map
*stream_map
;
669 struct apple_dart_master_cfg
*group_master_cfg
;
670 struct iommu_group
*group
= NULL
;
671 struct iommu_group
*res
= ERR_PTR(-EINVAL
);
673 mutex_lock(&apple_dart_groups_lock
);
675 for_each_stream_map(i
, cfg
, stream_map
) {
676 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
) {
677 struct iommu_group
*stream_group
=
678 stream_map
->dart
->sid2group
[sid
];
680 if (group
&& group
!= stream_group
) {
681 res
= ERR_PTR(-EINVAL
);
685 group
= stream_group
;
690 res
= iommu_group_ref_get(group
);
696 group
= pci_device_group(dev
);
699 group
= generic_device_group(dev
);
701 res
= ERR_PTR(-ENOMEM
);
705 group_master_cfg
= kzalloc(sizeof(*group_master_cfg
), GFP_KERNEL
);
706 if (!group_master_cfg
) {
707 iommu_group_put(group
);
711 memcpy(group_master_cfg
, cfg
, sizeof(*group_master_cfg
));
712 iommu_group_set_iommudata(group
, group_master_cfg
,
713 apple_dart_release_group
);
715 for_each_stream_map(i
, cfg
, stream_map
)
716 for_each_set_bit(sid
, &stream_map
->sidmap
, DART_MAX_STREAMS
)
717 stream_map
->dart
->sid2group
[sid
] = group
;
722 mutex_unlock(&apple_dart_groups_lock
);
726 static int apple_dart_def_domain_type(struct device
*dev
)
728 struct apple_dart_master_cfg
*cfg
= dev_iommu_priv_get(dev
);
730 if (cfg
->stream_maps
[0].dart
->force_bypass
)
731 return IOMMU_DOMAIN_IDENTITY
;
732 if (!cfg
->stream_maps
[0].dart
->supports_bypass
)
733 return IOMMU_DOMAIN_DMA
;
738 static const struct iommu_ops apple_dart_iommu_ops
= {
739 .domain_alloc
= apple_dart_domain_alloc
,
740 .domain_free
= apple_dart_domain_free
,
741 .attach_dev
= apple_dart_attach_dev
,
742 .detach_dev
= apple_dart_detach_dev
,
743 .map_pages
= apple_dart_map_pages
,
744 .unmap_pages
= apple_dart_unmap_pages
,
745 .flush_iotlb_all
= apple_dart_flush_iotlb_all
,
746 .iotlb_sync
= apple_dart_iotlb_sync
,
747 .iotlb_sync_map
= apple_dart_iotlb_sync_map
,
748 .iova_to_phys
= apple_dart_iova_to_phys
,
749 .probe_device
= apple_dart_probe_device
,
750 .release_device
= apple_dart_release_device
,
751 .device_group
= apple_dart_device_group
,
752 .of_xlate
= apple_dart_of_xlate
,
753 .def_domain_type
= apple_dart_def_domain_type
,
754 .pgsize_bitmap
= -1UL, /* Restricted during dart probe */
757 static irqreturn_t
apple_dart_irq(int irq
, void *dev
)
759 struct apple_dart
*dart
= dev
;
760 const char *fault_name
= NULL
;
761 u32 error
= readl(dart
->regs
+ DART_ERROR
);
762 u32 error_code
= FIELD_GET(DART_ERROR_CODE
, error
);
763 u32 addr_lo
= readl(dart
->regs
+ DART_ERROR_ADDR_LO
);
764 u32 addr_hi
= readl(dart
->regs
+ DART_ERROR_ADDR_HI
);
765 u64 addr
= addr_lo
| (((u64
)addr_hi
) << 32);
766 u8 stream_idx
= FIELD_GET(DART_ERROR_STREAM
, error
);
768 if (!(error
& DART_ERROR_FLAG
))
771 /* there should only be a single bit set but let's use == to be sure */
772 if (error_code
== DART_ERROR_READ_FAULT
)
773 fault_name
= "READ FAULT";
774 else if (error_code
== DART_ERROR_WRITE_FAULT
)
775 fault_name
= "WRITE FAULT";
776 else if (error_code
== DART_ERROR_NO_PTE
)
777 fault_name
= "NO PTE FOR IOVA";
778 else if (error_code
== DART_ERROR_NO_PMD
)
779 fault_name
= "NO PMD FOR IOVA";
780 else if (error_code
== DART_ERROR_NO_TTBR
)
781 fault_name
= "NO TTBR FOR IOVA";
783 fault_name
= "unknown";
787 "translation fault: status:0x%x stream:%d code:0x%x (%s) at 0x%llx",
788 error
, stream_idx
, error_code
, fault_name
, addr
);
790 writel(error
, dart
->regs
+ DART_ERROR
);
794 static int apple_dart_set_bus_ops(const struct iommu_ops
*ops
)
798 if (!iommu_present(&platform_bus_type
)) {
799 ret
= bus_set_iommu(&platform_bus_type
, ops
);
804 if (!iommu_present(&pci_bus_type
)) {
805 ret
= bus_set_iommu(&pci_bus_type
, ops
);
807 bus_set_iommu(&platform_bus_type
, NULL
);
815 static int apple_dart_probe(struct platform_device
*pdev
)
819 struct resource
*res
;
820 struct apple_dart
*dart
;
821 struct device
*dev
= &pdev
->dev
;
823 dart
= devm_kzalloc(dev
, sizeof(*dart
), GFP_KERNEL
);
828 spin_lock_init(&dart
->lock
);
830 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
831 if (resource_size(res
) < 0x4000) {
832 dev_err(dev
, "MMIO region too small (%pr)\n", res
);
836 dart
->regs
= devm_ioremap_resource(dev
, res
);
837 if (IS_ERR(dart
->regs
))
838 return PTR_ERR(dart
->regs
);
840 dart
->irq
= platform_get_irq(pdev
, 0);
844 ret
= devm_clk_bulk_get_all(dev
, &dart
->clks
);
847 dart
->num_clks
= ret
;
849 ret
= clk_bulk_prepare_enable(dart
->num_clks
, dart
->clks
);
853 ret
= apple_dart_hw_reset(dart
);
855 goto err_clk_disable
;
857 dart_params
[0] = readl(dart
->regs
+ DART_PARAMS1
);
858 dart_params
[1] = readl(dart
->regs
+ DART_PARAMS2
);
859 dart
->pgsize
= 1 << FIELD_GET(DART_PARAMS_PAGE_SHIFT
, dart_params
[0]);
860 dart
->supports_bypass
= dart_params
[1] & DART_PARAMS_BYPASS_SUPPORT
;
861 dart
->force_bypass
= dart
->pgsize
> PAGE_SIZE
;
863 ret
= request_irq(dart
->irq
, apple_dart_irq
, IRQF_SHARED
,
864 "apple-dart fault handler", dart
);
866 goto err_clk_disable
;
868 platform_set_drvdata(pdev
, dart
);
870 ret
= apple_dart_set_bus_ops(&apple_dart_iommu_ops
);
874 ret
= iommu_device_sysfs_add(&dart
->iommu
, dev
, NULL
, "apple-dart.%s",
875 dev_name(&pdev
->dev
));
877 goto err_remove_bus_ops
;
879 ret
= iommu_device_register(&dart
->iommu
, &apple_dart_iommu_ops
, dev
);
881 goto err_sysfs_remove
;
885 "DART [pagesize %x, bypass support: %d, bypass forced: %d] initialized\n",
886 dart
->pgsize
, dart
->supports_bypass
, dart
->force_bypass
);
890 iommu_device_sysfs_remove(&dart
->iommu
);
892 apple_dart_set_bus_ops(NULL
);
894 free_irq(dart
->irq
, dart
);
896 clk_bulk_disable_unprepare(dart
->num_clks
, dart
->clks
);
901 static int apple_dart_remove(struct platform_device
*pdev
)
903 struct apple_dart
*dart
= platform_get_drvdata(pdev
);
905 apple_dart_hw_reset(dart
);
906 free_irq(dart
->irq
, dart
);
907 apple_dart_set_bus_ops(NULL
);
909 iommu_device_unregister(&dart
->iommu
);
910 iommu_device_sysfs_remove(&dart
->iommu
);
912 clk_bulk_disable_unprepare(dart
->num_clks
, dart
->clks
);
917 static const struct of_device_id apple_dart_of_match
[] = {
918 { .compatible
= "apple,t8103-dart", .data
= NULL
},
921 MODULE_DEVICE_TABLE(of
, apple_dart_of_match
);
923 static struct platform_driver apple_dart_driver
= {
925 .name
= "apple-dart",
926 .of_match_table
= apple_dart_of_match
,
927 .suppress_bind_attrs
= true,
929 .probe
= apple_dart_probe
,
930 .remove
= apple_dart_remove
,
933 module_platform_driver(apple_dart_driver
);
935 MODULE_DESCRIPTION("IOMMU API for Apple's DART");
936 MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
937 MODULE_LICENSE("GPL v2");