#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

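/*
 * Note that the global dma_ops below is only the system-wide default: the
 * DMA API resolves the operations actually used for a device via
 * get_dma_ops(), which prefers a per-device dev->dma_ops when one has
 * been installed.
 */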
const struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;
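/*
 * Illustrative sketch (not code from this file): an IOMMU driver would
 * typically consult iommu_pass_through while setting up per-device
 * domains and skip translation when it is set, roughly:
 *
 *	if (iommu_pass_through)
 *		type = IOMMU_DOMAIN_IDENTITY;	// 1:1 mapping, no translation
 *	else
 *		type = IOMMU_DOMAIN_DMA;	// translated DMA
 *
 * The exact mechanism is driver-specific; see e.g. the Intel VT-d driver.
 */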

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       65536
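/*
 * The preallocation is consumed by dma_debug_init() in pci_iommu_init()
 * below; the count can be overridden at boot with the dma_debug_entries=
 * kernel parameter (see Documentation/DMA-API.txt).
 */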

void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}
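
/*
 * For reference (defined elsewhere, not in this file): the __iommu_table
 * walked above is populated at link time via the IOMMU_INIT* macros from
 * <asm/iommu_table.h>. For example, arch/x86/kernel/pci-swiotlb.c
 * registers itself roughly as:
 *
 *	IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
 *			  pci_xen_swiotlb_detect,
 *			  pci_swiotlb_init,
 *			  pci_swiotlb_late_init);
 */
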
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 unsigned long attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag &= ~__GFP_ZERO;
again:
	page = NULL;
	/* CMA can be used only in the context which permits sleeping */
	if (gfpflags_allow_blocking(flag)) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 flag);
		if (page) {
			addr = phys_to_dma(dev, page_to_phys(page));
			if (addr + size > dma_mask) {
				dma_release_from_contiguous(dev, page, count);
				page = NULL;
			}
		}
	}
	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = phys_to_dma(dev, page_to_phys(page));
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}
	memset(page_address(page), 0, size);
	*dma_addr = addr;
	return page_address(page);
}

void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}

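/*
 * Illustrative usage (not code from this file): drivers do not call the
 * generic helpers above directly; they go through the DMA API, which
 * first calls arch_dma_alloc_attrs() below and then dispatches through
 * dma_ops. The names buf, bus_addr and MY_BUF_SIZE are hypothetical:
 *
 *	dma_addr_t bus_addr;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(&pdev->dev, MY_BUF_SIZE, &bus_addr,
 *				 GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... device DMAs to/from bus_addr ...
 *	dma_free_coherent(&pdev->dev, MY_BUF_SIZE, buf, bus_addr);
 */
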
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
	if (!*dev)
		*dev = &x86_dma_fallback_dev;

	*gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
	*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);

	if (!is_device_dma_capable(*dev))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_dma_alloc_attrs);

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

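/*
 * Illustrative command lines accepted by the parser above (options can be
 * combined with commas, as the strcspn() loop shows):
 *
 *	iommu=off		disable IOMMU support entirely
 *	iommu=force		always use the IOMMU
 *	iommu=pt		set iommu_pass_through
 *	iommu=soft		use software bounce buffering (SWIOTLB)
 *	iommu=soft,nopanic	SWIOTLB, and don't panic on overflow
 */
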
int x86_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	/*
	 * Copied from i386. Doesn't make much sense, because it will
	 * only work for pci_alloc_coherent.
	 * The caller just has to use GFP_DMA in this case.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/*
	 * Tell the device to use SAC when IOMMU force is on. This
	 * allows the driver to use cheaper accesses in some cases.
	 *
	 * The problem with this is that if we overflow the IOMMU area
	 * and return DAC as a fallback address the device may not
	 * handle it correctly.
	 *
	 * As a special case some controllers have a 39-bit address
	 * mode that is as efficient as 32-bit (aic79xx). Don't force
	 * SAC for these. Assume all masks <= 40 bits are of this
	 * type. Normally this doesn't make any difference, but gives
	 * more gentle handling of IOMMU overflow.
	 */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}

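/*
 * Illustrative sketch (not code from this file): this helper backs the
 * dma_supported() path, so it is what ultimately answers a driver's mask
 * negotiation, e.g. trying 64-bit DAC first and falling back to 32-bit
 * SAC:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
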
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
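/*
 * Note: the class_shift of 8 below makes the quirk match on the 16-bit
 * base/sub-class value PCI_CLASS_BRIDGE_PCI while ignoring the
 * programming-interface byte, so every VIA PCI-to-PCI bridge is caught.
 */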
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif