// SPDX-License-Identifier: GPL-2.0-only
/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/acpi_iort.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/swiotlb.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/dma-direct.h>
struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};
enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};
struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;

	/* Domain for flush queue callback; NULL if flush queue not in use */
	struct iommu_domain		*fq_domain;
};
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
bool iommu_dma_forcedac __read_mostly;
static int __init iommu_dma_forcedac_setup(char *str)
{
	int ret = kstrtobool(str, &iommu_dma_forcedac);

	if (!ret && iommu_dma_forcedac)
		pr_info("Forcing DAC for PCI devices\n");
	return ret;
}
early_param("iommu.forcedac", iommu_dma_forcedac_setup);
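/*
 * Illustrative only: "iommu.forcedac" is parsed with kstrtobool(), so the
 * usual boolean spellings work on the kernel command line, e.g.
 *
 *	iommu.forcedac=1
 *	iommu.forcedac=on
 *
 * With it set, iommu_dma_alloc_iova() below skips the 32-bit SAC attempt
 * for PCI devices and allocates straight from the full DMA mask.
 */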
static void iommu_dma_entry_dtor(unsigned long data)
{
	struct page *freelist = (struct page *)data;

	while (freelist != NULL) {
		unsigned long p = (unsigned long)page_address(freelist);

		freelist = freelist->freelist;
		free_page(p);
	}
}
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}
static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}
/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
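/*
 * Minimal sketch (not part of this file) of how an IOMMU driver would pair
 * the cookie with its domain_alloc() callback; "my_domain" and its embedded
 * iommu_domain are hypothetical driver types:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned int type)
 *	{
 *		struct my_domain *dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *
 *		if (!dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 */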
/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
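/*
 * Hedged sketch of the intended caller: a VFIO-like user that manages its
 * own IOVA space reserves a window from its allocator and hands the base to
 * this helper. MSI_IOVA_BASE and MSI_IOVA_LENGTH are the caller's own
 * choices, not defined in this file:
 *
 *	domain = iommu_domain_alloc(bus);	// IOMMU_DOMAIN_UNMANAGED
 *	// ...carve [MSI_IOVA_BASE, MSI_IOVA_BASE + MSI_IOVA_LENGTH) out of
 *	//    the caller's own IOVA allocator...
 *	ret = iommu_get_msi_cookie(domain, MSI_IOVA_BASE);
 */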
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
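/*
 * Matching teardown, sketched for a hypothetical driver's domain_free()
 * callback ("my_domain"/"to_my_domain" are illustrative names only):
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		struct my_domain *dom = to_my_domain(domain);
 *
 *		iommu_put_dma_cookie(domain);
 *		kfree(dom);
 *	}
 */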
/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers GICv3
 * ITS region reservation on ACPI based ARM platforms that may require HW MSI
 * reservation.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
	if (!is_of_node(dev_iommu_fwspec_get(dev)->iommu_fwnode))
		iort_iommu_msi_get_resv_regions(dev, list);
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
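/*
 * Sketch of a driver .get_resv_regions callback layering its own software
 * MSI window on top of this helper; the 0x08000000 doorbell base and SZ_1M
 * length are made up for illustration:
 *
 *	static void my_get_resv_regions(struct device *dev,
 *					struct list_head *head)
 *	{
 *		struct iommu_resv_region *region;
 *
 *		region = iommu_alloc_resv_region(0x08000000, SZ_1M,
 *				IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO,
 *				IOMMU_RESV_SW_MSI);
 *		if (region)
 *			list_add_tail(&region->list, head);
 *		iommu_dma_get_resv_regions(dev, head);
 *	}
 */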
static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
		phys_addr_t start, phys_addr_t end)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_dma_msi_page *msi_page;
	int i, num_pages;

	start -= iova_offset(iovad, start);
	num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

	for (i = 0; i < num_pages; i++) {
		msi_page = kmalloc(sizeof(*msi_page), GFP_KERNEL);
		if (!msi_page)
			return -ENOMEM;

		msi_page->phys = start;
		msi_page->iova = start;
		INIT_LIST_HEAD(&msi_page->list);
		list_add(&msi_page->list, &cookie->msi_page_list);
		start += iovad->granule;
	}

	return 0;
}
static int iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;
	phys_addr_t start = 0, end;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}

	/* Get reserved DMA windows from host bridge */
	resource_list_for_each_entry(window, &bridge->dma_ranges) {
		end = window->res->start - window->offset;
resv_iova:
		if (end > start) {
			lo = iova_pfn(iovad, start);
			hi = iova_pfn(iovad, end);
			reserve_iova(iovad, lo, hi);
		} else if (end < start) {
			/* dma_ranges list should be sorted */
			dev_err(&dev->dev,
				"Failed to reserve IOVA [%pa-%pa]\n",
				&start, &end);
			return -EINVAL;
		}

		start = window->res->end - window->offset + 1;
		/* If window is last entry */
		if (window->node.next == &bridge->dma_ranges &&
		    end != ~(phys_addr_t)0) {
			end = ~(phys_addr_t)0;
			goto resv_iova;
		}
	}

	return 0;
}
static int iova_reserve_iommu_regions(struct device *dev,
		struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct iommu_resv_region *region;
	LIST_HEAD(resv_regions);
	int ret = 0;

	if (dev_is_pci(dev)) {
		ret = iova_reserve_pci_windows(to_pci_dev(dev), iovad);
		if (ret)
			return ret;
	}

	iommu_get_resv_regions(dev, &resv_regions);
	list_for_each_entry(region, &resv_regions, list) {
		unsigned long lo, hi;

		/* We ARE the software that manages these! */
		if (region->type == IOMMU_RESV_SW_MSI)
			continue;

		lo = iova_pfn(iovad, region->start);
		hi = iova_pfn(iovad, region->start + region->length - 1);
		reserve_iova(iovad, lo, hi);

		if (region->type == IOMMU_RESV_MSI)
			ret = cookie_init_hw_msi_region(cookie, region->start,
					region->start + region->length);
		if (ret)
			break;
	}
	iommu_put_resv_regions(dev, &resv_regions);

	return ret;
}
static void iommu_dma_flush_iotlb_all(struct iova_domain *iovad)
{
	struct iommu_dma_cookie *cookie;
	struct iommu_domain *domain;

	cookie = container_of(iovad, struct iommu_dma_cookie, iovad);
	domain = cookie->fq_domain;

	domain->ops->flush_iotlb_all(domain);
}
static bool dev_is_untrusted(struct device *dev)
{
	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
}
static bool dev_use_swiotlb(struct device *dev)
{
	return IS_ENABLED(CONFIG_SWIOTLB) && dev_is_untrusted(dev);
}
/* sysfs updates are serialised by the mutex of the group owning @domain */
int iommu_dma_init_fq(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	int ret;

	if (cookie->fq_domain)
		return 0;

	ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all,
				    iommu_dma_entry_dtor);
	if (ret) {
		pr_warn("iova flush queue initialization failed\n");
		return ret;
	}
	/*
	 * Prevent incomplete iovad->fq being observable. Pairs with path from
	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
	 */
	smp_wmb();
	WRITE_ONCE(cookie->fq_domain, domain);
	return 0;
}
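/*
 * Illustrative only: with the group mutex held as noted above, this is the
 * path behind switching an active group to lazy invalidation from userspace,
 * e.g.
 *
 *	echo DMA-FQ > /sys/kernel/iommu_groups/<N>/type
 *
 * which ends up attaching a flush queue to an already-initialised DMA domain.
 */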
/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @limit: Last address of the IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @limit + 1 should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		dma_addr_t limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	unsigned long order, base_pfn;
	struct iova_domain *iovad;

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	iovad = &cookie->iovad;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    limit < domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
	}

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}

		return 0;
	}

	init_iova_domain(iovad, 1UL << order, base_pfn);

	/* If the FQ fails we can simply fall back to strict mode */
	if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
		domain->type = IOMMU_DOMAIN_DMA;

	return iova_reserve_iommu_regions(dev, domain);
}
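/*
 * Worked example of the alignment expectation above, assuming a 4KB IOMMU
 * granule (the numbers are illustrative):
 *
 *	dma_addr_t base  = 0x1000;	// multiple of the granule
 *	dma_addr_t limit = 0xffffffff;	// limit + 1 = 4GB, also a multiple
 *
 *	ret = iommu_dma_init_domain(domain, base, limit, dev);
 *
 * Because base_pfn is clamped to at least 1, IOVA 0 is never handed out and
 * remains usable as an invalid address.
 */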
/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
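/*
 * Example of the translation above: a non-coherent device doing a
 * DMA_TO_DEVICE streaming mapping with no special attributes gets
 *
 *	prot = dma_info_to_prot(DMA_TO_DEVICE, false, 0);	// == IOMMU_READ
 *
 * i.e. the endpoint may read the buffer but not write it, and no IOMMU_CACHE
 * hint is set.
 */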
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, u64 dma_limit, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift, iova_len, iova = 0;

	if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
		cookie->msi_iova += size;
		return cookie->msi_iova - size;
	}

	shift = iova_shift(iovad);
	iova_len = size >> shift;
	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	dma_limit = min_not_zero(dma_limit, dev->bus_dma_limit);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);

	/* Try to get PCI devices a SAC address */
	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
				       true);

	return (dma_addr_t)iova << shift;
}
static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
{
	struct iova_domain *iovad = &cookie->iovad;

	/* The MSI case is only ever cleaning up its most recent allocation */
	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
		cookie->msi_iova -= size;
	else if (gather && gather->queued)
		queue_iova(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad),
				(unsigned long)gather->freelist);
	else
		free_iova_fast(iovad, iova_pfn(iovad, iova),
				size >> iova_shift(iovad));
}
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, dma_addr);
	struct iommu_iotlb_gather iotlb_gather;
	size_t unmapped;

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);
	iommu_iotlb_gather_init(&iotlb_gather);
	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);

	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
	WARN_ON(unmapped != size);

	if (!iotlb_gather.queued)
		iommu_iotlb_sync(domain, &iotlb_gather);
	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
}
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot, u64 dma_mask)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return DMA_MAPPING_ERROR;

	size = iova_align(iovad, size + iova_off);

	iova = iommu_dma_alloc_iova(domain, size, dma_mask, dev);
	if (!iova)
		return DMA_MAPPING_ERROR;

	if (iommu_map_atomic(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size, NULL);
		return DMA_MAPPING_ERROR;
	}
	return iova + iova_off;
}
static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}
static struct page **__iommu_dma_alloc_pages(struct device *dev,
		unsigned int count, unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, nid = dev_to_node(dev);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	pages = kvzalloc(count * sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so himem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	/* It makes no sense to muck about with huge pages */
	gfp &= ~__GFP_COMP;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);
			gfp_t alloc_flags = gfp;

			order_size = 1U << order;
			if (order_mask > order_size)
				alloc_flags |= __GFP_NORETRY;
			page = alloc_pages_node(nid, alloc_flags, order);
			if (!page)
				continue;
			if (order)
				split_page(page, order);
			break;
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}
/*
 * If size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 */
static struct page **__iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, struct sg_table *sgt, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;
	struct page **pages;
	dma_addr_t iova;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled) &&
	    iommu_deferred_attach(dev, domain))
		return NULL;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(dev, count, alloc_sizes >> PAGE_SHIFT,
					gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(ioprot & IOMMU_CACHE)) {
		struct scatterlist *sg;
		int i;

		for_each_sg(sgt->sgl, sg, sgt->orig_nents, i)
			arch_dma_prep_coherent(sg_page(sg), sg->length);
	}

	if (iommu_map_sg_atomic(domain, iova, sgt->sgl, sgt->orig_nents, ioprot)
			< size)
		goto out_free_sg;

	sgt->sgl->dma_address = iova;
	sgt->sgl->dma_length = size;
	return pages;

out_free_sg:
	sg_free_table(sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
static void *iommu_dma_alloc_remap(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, pgprot_t prot,
		unsigned long attrs)
{
	struct page **pages;
	struct sg_table sgt;
	void *vaddr;

	pages = __iommu_dma_alloc_noncontiguous(dev, size, &sgt, gfp, prot,
						attrs);
	if (!pages)
		return NULL;
	*dma_handle = sgt.sgl->dma_address;
	sg_free_table(&sgt);
	vaddr = dma_common_pages_remap(pages, size, prot,
			__builtin_return_address(0));
	if (!vaddr)
		goto out_unmap;
	return vaddr;

out_unmap:
	__iommu_dma_unmap(dev, *dma_handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	return NULL;
}
#ifdef CONFIG_DMA_REMAP
static struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev,
		size_t size, enum dma_data_direction dir, gfp_t gfp,
		unsigned long attrs)
{
	struct dma_sgt_handle *sh;

	sh = kmalloc(sizeof(*sh), gfp);
	if (!sh)
		return NULL;

	sh->pages = __iommu_dma_alloc_noncontiguous(dev, size, &sh->sgt, gfp,
						    PAGE_KERNEL, attrs);
	if (!sh->pages) {
		kfree(sh);
		return NULL;
	}
	return &sh->sgt;
}

static void iommu_dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	struct dma_sgt_handle *sh = sgt_handle(sgt);

	__iommu_dma_unmap(dev, sgt->sgl->dma_address, size);
	__iommu_dma_free_pages(sh->pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	sg_free_table(&sh->sgt);
	kfree(sh);
}
#endif /* CONFIG_DMA_REMAP */
static void iommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_cpu(dev, phys, size, dir);
}
static void iommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (dev_is_dma_coherent(dev) && !dev_use_swiotlb(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
	if (is_swiotlb_buffer(dev, phys))
		swiotlb_sync_single_for_device(dev, phys, size, dir);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(phys, size, dir);
}
static void iommu_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_use_swiotlb(dev))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_cpu(dev, sg_dma_address(sg),
						      sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
}
static void iommu_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nelems,
		enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (dev_use_swiotlb(dev))
		for_each_sg(sgl, sg, nelems, i)
			iommu_dma_sync_single_for_device(dev,
							 sg_dma_address(sg),
							 sg->length, dir);
	else if (!dev_is_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			arch_sync_dma_for_device(sg_phys(sg), sg->length, dir);
}
static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	bool coherent = dev_is_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	dma_addr_t iova, dma_mask = dma_get_mask(dev);

	/*
	 * If both the physical buffer start address and size are
	 * page aligned, we don't need to use a bounce page.
	 */
	if (dev_use_swiotlb(dev) && iova_offset(iovad, phys | size)) {
		void *padding_start;
		size_t padding_size, aligned_size;

		aligned_size = iova_align(iovad, size);
		phys = swiotlb_tbl_map_single(dev, phys, size, aligned_size,
					      iova_mask(iovad), dir, attrs);

		if (phys == DMA_MAPPING_ERROR)
			return DMA_MAPPING_ERROR;

		/* Cleanup the padding area. */
		padding_start = phys_to_virt(phys);
		padding_size = aligned_size;

		if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
		    (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)) {
			padding_start += size;
			padding_size -= size;
		}

		memset(padding_start, 0, padding_size);
	}

	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);

	iova = __iommu_dma_map(dev, phys, size, prot, dma_mask);
	if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
	return iova;
}
static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	phys_addr_t phys;

	phys = iommu_iova_to_phys(domain, dma_handle);
	if (WARN_ON(!phys))
		return;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) && !dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(phys, size, dir);

	__iommu_dma_unmap(dev, dma_handle, size);

	if (unlikely(is_swiotlb_buffer(dev, phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
}
/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (max_len - cur_len >= s_length)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_MAPPING_ERROR)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_MAPPING_ERROR;
		sg_dma_len(s) = 0;
	}
}
static void iommu_dma_unmap_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		iommu_dma_unmap_page(dev, sg_dma_address(s),
				sg_dma_len(s), dir, attrs);
}
static int iommu_dma_map_sg_swiotlb(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		sg_dma_address(s) = iommu_dma_map_page(dev, sg_page(s),
				s->offset, s->length, dir, attrs);
		if (sg_dma_address(s) == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(s) = s->length;
	}

	return nents;

out_unmap:
	iommu_dma_unmap_sg_swiotlb(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return -EIO;
}
/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
static int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	int prot = dma_info_to_prot(dir, dev_is_dma_coherent(dev), attrs);
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	ssize_t ret;
	int i;

	if (static_branch_unlikely(&iommu_deferred_attach_enabled)) {
		ret = iommu_deferred_attach(dev, domain);
		if (ret)
			goto out;
	}

	if (dev_use_swiotlb(dev))
		return iommu_dma_map_sg_swiotlb(dev, sg, nents, dir, attrs);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_device(dev, sg, nents, dir);

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova) {
		ret = -ENOMEM;
		goto out_restore_sg;
	}

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	ret = iommu_map_sg_atomic(domain, iova, sg, nents, prot);
	if (ret < 0 || ret < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len, NULL);
out_restore_sg:
	__invalidate_sg(sg, nents);
out:
	if (ret != -ENOMEM)
		return -EINVAL;
	return ret;
}
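/*
 * From a driver's point of view this all sits behind the normal streaming
 * DMA API; a hedged usage sketch ("dev" and "sgt" assumed already set up,
 * error handling abbreviated):
 *
 *	int nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);
 *
 *	if (!nents)
 *		return -EIO;
 *	// ...program the hardware using sg_dma_address()/sg_dma_len()...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);
 */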
static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;

	if (dev_use_swiotlb(dev)) {
		iommu_dma_unmap_sg_swiotlb(dev, sg, nents, dir, attrs);
		return;
	}

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		iommu_dma_sync_sg_for_cpu(dev, sg, nents, dir);

	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(dev, start, end - start);
}
static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
			dma_get_mask(dev));
}
static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
}
static void __iommu_dma_free(struct device *dev, size_t size, void *cpu_addr)
{
	size_t alloc_size = PAGE_ALIGN(size);
	int count = alloc_size >> PAGE_SHIFT;
	struct page *page = NULL, **pages = NULL;

	/* Non-coherent atomic allocation? Easy */
	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    dma_free_from_pool(dev, cpu_addr, alloc_size))
		return;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		/*
		 * If the address is remapped, then it's either non-coherent
		 * or highmem CMA, or an iommu_dma_alloc_remap() construction.
		 */
		pages = dma_common_find_pages(cpu_addr);
		if (!pages)
			page = vmalloc_to_page(cpu_addr);
		dma_common_free_remap(cpu_addr, alloc_size);
	} else {
		/* Lowmem means a coherent atomic or CMA allocation */
		page = virt_to_page(cpu_addr);
	}

	if (pages)
		__iommu_dma_free_pages(pages, count);
	if (page)
		dma_free_contiguous(dev, page, alloc_size);
}
static void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t handle, unsigned long attrs)
{
	__iommu_dma_unmap(dev, handle, size);
	__iommu_dma_free(dev, size, cpu_addr);
}
static void *iommu_dma_alloc_pages(struct device *dev, size_t size,
		struct page **pagep, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	size_t alloc_size = PAGE_ALIGN(size);
	int node = dev_to_node(dev);
	struct page *page = NULL;
	void *cpu_addr;

	page = dma_alloc_contiguous(dev, alloc_size, gfp);
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(alloc_size));
	if (!page)
		return NULL;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && (!coherent || PageHighMem(page))) {
		pgprot_t prot = dma_pgprot(dev, PAGE_KERNEL, attrs);

		cpu_addr = dma_common_contiguous_remap(page, alloc_size,
				prot, __builtin_return_address(0));
		if (!cpu_addr)
			goto out_free_pages;

		if (!coherent)
			arch_dma_prep_coherent(page, size);
	} else {
		cpu_addr = page_address(page);
	}

	*pagep = page;
	memset(cpu_addr, 0, alloc_size);
	return cpu_addr;
out_free_pages:
	dma_free_contiguous(dev, page, alloc_size);
	return NULL;
}
static void *iommu_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	bool coherent = dev_is_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	struct page *page = NULL;
	void *cpu_addr;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && gfpflags_allow_blocking(gfp) &&
	    !(attrs & DMA_ATTR_FORCE_CONTIGUOUS)) {
		return iommu_dma_alloc_remap(dev, size, handle, gfp,
				dma_pgprot(dev, PAGE_KERNEL, attrs), attrs);
	}

	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
	    !gfpflags_allow_blocking(gfp) && !coherent)
		page = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &cpu_addr,
					   gfp, NULL);
	else
		cpu_addr = iommu_dma_alloc_pages(dev, size, &page, gfp, attrs);
	if (!cpu_addr)
		return NULL;

	*handle = __iommu_dma_map(dev, page_to_phys(page), size, ioprot,
			dev->coherent_dma_mask);
	if (*handle == DMA_MAPPING_ERROR) {
		__iommu_dma_free(dev, size, cpu_addr);
		return NULL;
	}

	return cpu_addr;
}
static int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn, off = vma->vm_pgoff;
	int ret;

	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);

	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off >= nr_pages || vma_pages(vma) > nr_pages - off)
		return -ENXIO;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages)
			return vm_map_pages(vma, pages, nr_pages);
		pfn = vmalloc_to_pfn(cpu_addr);
	} else {
		pfn = page_to_pfn(virt_to_page(cpu_addr));
	}

	return remap_pfn_range(vma, vma->vm_start, pfn + off,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

static int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	struct page *page;
	int ret;

	if (IS_ENABLED(CONFIG_DMA_REMAP) && is_vmalloc_addr(cpu_addr)) {
		struct page **pages = dma_common_find_pages(cpu_addr);

		if (pages) {
			return sg_alloc_table_from_pages(sgt, pages,
					PAGE_ALIGN(size) >> PAGE_SHIFT,
					0, size, GFP_KERNEL);
		}

		page = vmalloc_to_page(cpu_addr);
	} else {
		page = virt_to_page(cpu_addr);
	}

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return ret;
}

static unsigned long iommu_dma_get_merge_boundary(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	return (1UL << __ffs(domain->pgsize_bitmap)) - 1;
}
static const struct dma_map_ops iommu_dma_ops = {
	.alloc			= iommu_dma_alloc,
	.free			= iommu_dma_free,
	.alloc_pages		= dma_common_alloc_pages,
	.free_pages		= dma_common_free_pages,
#ifdef CONFIG_DMA_REMAP
	.alloc_noncontiguous	= iommu_dma_alloc_noncontiguous,
	.free_noncontiguous	= iommu_dma_free_noncontiguous,
#endif
	.mmap			= iommu_dma_mmap,
	.get_sgtable		= iommu_dma_get_sgtable,
	.map_page		= iommu_dma_map_page,
	.unmap_page		= iommu_dma_unmap_page,
	.map_sg			= iommu_dma_map_sg,
	.unmap_sg		= iommu_dma_unmap_sg,
	.sync_single_for_cpu	= iommu_dma_sync_single_for_cpu,
	.sync_single_for_device	= iommu_dma_sync_single_for_device,
	.sync_sg_for_cpu	= iommu_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= iommu_dma_sync_sg_for_device,
	.map_resource		= iommu_dma_map_resource,
	.unmap_resource		= iommu_dma_unmap_resource,
	.get_merge_boundary	= iommu_dma_get_merge_boundary,
};
/*
 * The IOMMU core code allocates the default DMA domain, which the underlying
 * IOMMU driver needs to support via the dma-iommu layer.
 */
void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 dma_limit)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (!domain)
		goto out_err;

	/*
	 * The IOMMU core code allocates the default DMA domain, which the
	 * underlying IOMMU driver needs to support via the dma-iommu layer.
	 */
	if (iommu_is_dma_domain(domain)) {
		if (iommu_dma_init_domain(domain, dma_base, dma_limit, dev))
			goto out_err;
		dev->dma_ops = &iommu_dma_ops;
	}

	return;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
}
EXPORT_SYMBOL_GPL(iommu_setup_dma_ops);
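/*
 * Sketch of the arch-side caller that installs these ops, loosely modelled
 * on arm64's arch_setup_dma_ops() (simplified, not verbatim):
 *
 *	void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 *				const struct iommu_ops *iommu, bool coherent)
 *	{
 *		dev->dma_coherent = coherent;
 *		if (iommu)
 *			iommu_setup_dma_ops(dev, dma_base, dma_base + size - 1);
 *	}
 */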
static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		goto out_free_page;

	if (iommu_map(domain, iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, size, NULL);
out_free_page:
	kfree(msi_page);
	return NULL;
}
int iommu_dma_prepare_msi(struct msi_desc *desc, phys_addr_t msi_addr)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_msi_page *msi_page;
	static DEFINE_MUTEX(msi_prepare_lock); /* see below */

	if (!domain || !domain->iova_cookie) {
		desc->iommu_cookie = NULL;
		return 0;
	}

	/*
	 * In fact the whole prepare operation should already be serialised by
	 * irq_domain_mutex further up the callchain, but that's pretty subtle
	 * on its own, so consider this locking as failsafe documentation...
	 */
	mutex_lock(&msi_prepare_lock);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	mutex_unlock(&msi_prepare_lock);

	msi_desc_set_iommu_cookie(desc, msi_page);

	if (!msi_page)
		return -ENOMEM;
	return 0;
}
void iommu_dma_compose_msi_msg(struct msi_desc *desc,
			       struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	const struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	const struct iommu_dma_msi_page *msi_page;

	msi_page = msi_desc_get_iommu_cookie(desc);

	if (!domain || !domain->iova_cookie || WARN_ON(!msi_page))
		return;

	msg->address_hi = upper_32_bits(msi_page->iova);
	msg->address_lo &= cookie_msi_granule(domain->iova_cookie) - 1;
	msg->address_lo += lower_32_bits(msi_page->iova);
}
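/*
 * Hedged sketch of how the MSI layer uses the two helpers above (names are
 * illustrative; the real callers live in the IRQ/MSI infrastructure):
 *
 *	// at IRQ allocation time, with the doorbell's physical address:
 *	err = iommu_dma_prepare_msi(desc, doorbell_phys);
 *
 *	// later, when composing the message, rewrite it through the IOVA:
 *	compose_msg(desc, &msg);		// fills msg with doorbell_phys
 *	iommu_dma_compose_msi_msg(desc, &msg);
 */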
static int iommu_dma_init(void)
{
	if (is_kdump_kernel())
		static_branch_enable(&iommu_deferred_attach_enabled);

	return iova_cache_get();
}
arch_initcall(iommu_dma_init);