]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
62fdd767 FY |
2 | /* |
3 | * Dynamic DMA mapping support. | |
4 | */ | |
5 | ||
6 | #include <linux/types.h> | |
7 | #include <linux/mm.h> | |
8 | #include <linux/string.h> | |
9 | #include <linux/pci.h> | |
10 | #include <linux/module.h> | |
11 | #include <linux/dmar.h> | |
12 | #include <asm/iommu.h> | |
13 | #include <asm/machvec.h> | |
14 | #include <linux/dma-mapping.h> | |
15 | ||
62fdd767 | 16 | |
d3f13810 | 17 | #ifdef CONFIG_INTEL_IOMMU |
62fdd767 FY |
18 | |
19 | #include <linux/kernel.h> | |
62fdd767 FY |
20 | |
21 | #include <asm/page.h> | |
62fdd767 FY |
22 | |
23 | dma_addr_t bad_dma_address __read_mostly; | |
24 | EXPORT_SYMBOL(bad_dma_address); | |
25 | ||
26 | static int iommu_sac_force __read_mostly; | |
27 | ||
28 | int no_iommu __read_mostly; | |
29 | #ifdef CONFIG_IOMMU_DEBUG | |
30 | int force_iommu __read_mostly = 1; | |
31 | #else | |
32 | int force_iommu __read_mostly; | |
33 | #endif | |
34 | ||
aed5d5f4 FY |
35 | int iommu_pass_through; |
36 | ||
160c1d8e | 37 | extern struct dma_map_ops intel_dma_ops; |
62fdd767 FY |
38 | |
39 | static int __init pci_iommu_init(void) | |
40 | { | |
41 | if (iommu_detected) | |
42 | intel_iommu_init(); | |
43 | ||
44 | return 0; | |
45 | } | |
46 | ||
47 | /* Must execute after PCI subsystem */ | |
48 | fs_initcall(pci_iommu_init); | |
49 | ||
/* Nothing to tear down on this platform; stub kept for the generic hook. */
void pci_iommu_shutdown(void)
{
}
54 | ||
55 | void __init | |
56 | iommu_dma_init(void) | |
57 | { | |
58 | return; | |
59 | } | |
60 | ||
62fdd767 FY |
61 | int iommu_dma_supported(struct device *dev, u64 mask) |
62 | { | |
62fdd767 FY |
63 | /* Copied from i386. Doesn't make much sense, because it will |
64 | only work for pci_alloc_coherent. | |
65 | The caller just has to use GFP_DMA in this case. */ | |
2f4f27d4 | 66 | if (mask < DMA_BIT_MASK(24)) |
62fdd767 FY |
67 | return 0; |
68 | ||
69 | /* Tell the device to use SAC when IOMMU force is on. This | |
70 | allows the driver to use cheaper accesses in some cases. | |
71 | ||
72 | Problem with this is that if we overflow the IOMMU area and | |
73 | return DAC as fallback address the device may not handle it | |
74 | correctly. | |
75 | ||
76 | As a special case some controllers have a 39bit address | |
77 | mode that is as efficient as 32bit (aic79xx). Don't force | |
78 | SAC for these. Assume all masks <= 40 bits are of this | |
79 | type. Normally this doesn't make any difference, but gives | |
80 | more gentle handling of IOMMU overflow. */ | |
50cf156a | 81 | if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) { |
e088a4ad | 82 | dev_info(dev, "Force SAC with mask %llx\n", mask); |
62fdd767 FY |
83 | return 0; |
84 | } | |
85 | ||
86 | return 1; | |
87 | } | |
88 | EXPORT_SYMBOL(iommu_dma_supported); | |
89 | ||
160c1d8e FT |
90 | void __init pci_iommu_alloc(void) |
91 | { | |
92 | dma_ops = &intel_dma_ops; | |
93 | ||
5299709d BVA |
94 | intel_dma_ops.sync_single_for_cpu = machvec_dma_sync_single; |
95 | intel_dma_ops.sync_sg_for_cpu = machvec_dma_sync_sg; | |
96 | intel_dma_ops.sync_single_for_device = machvec_dma_sync_single; | |
97 | intel_dma_ops.sync_sg_for_device = machvec_dma_sync_sg; | |
98 | intel_dma_ops.dma_supported = iommu_dma_supported; | |
160c1d8e FT |
99 | |
100 | /* | |
101 | * The order of these functions is important for | |
102 | * fall-back/fail-over reasons | |
103 | */ | |
104 | detect_intel_iommu(); | |
105 | ||
106 | #ifdef CONFIG_SWIOTLB | |
107 | pci_swiotlb_init(); | |
108 | #endif | |
109 | } | |
110 | ||
62fdd767 | 111 | #endif |