/* arch/x86/kernel/pci-dma.c */
#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/calgary.h>

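/*
 * forbid_dac: 0 means DAC (64-bit double-address-cycle addressing) is
 * allowed but may still be disabled by a quirk; a positive value
 * forbids it; -1 records an explicit user choice ("iommu=usedac")
 * that the VIA quirk at the bottom of this file will not override.
 */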
int forbid_dac __read_mostly;
EXPORT_SYMBOL(forbid_dac);

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

int iommu_sac_force __read_mostly = 0;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This tells the BIO block layer to assume merging. Default to off
 * because we cannot guarantee merging later.
 */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);
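
/*
 * Illustrative caller sketch (not part of the original file): a PCI
 * driver typically negotiates its DMA mask in probe(), for example:
 *
 *      if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *              return -EIO;
 *
 * where failure means no addressing mode usable by both the device
 * and the kernel could be agreed on.
 */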

#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
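
/*
 * Example: booting with "dma32_size=256M" grows the reservation made
 * below.  The default 128 MB is reserved early below 4 GB so that, on
 * machines with memory above MAX_DMA32_PFN, a usable sub-4G range is
 * still free by the time pci_iommu_alloc() releases it for the IOMMU.
 */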

void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;

        if (end_pfn <= MAX_DMA32_PFN)
                return;

        align = 64ULL<<20;
        size = round_up(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                __pa(MAX_DMA_ADDRESS));
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}

static void __init dma32_free_bootmem(void)
{
        int node;

        if (end_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        for_each_online_node(node)
                free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
                                  dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}
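
/*
 * Runs during early boot, long before pci_iommu_init() below: it
 * returns the dma32 bootmem reservation to the allocator and then
 * probes the possible IOMMUs in fall-back order.
 */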
void __init pci_iommu_alloc(void)
{
        /* free the range so the IOMMU can get some range below 4G */
        dma32_free_bootmem();
        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
#ifdef CONFIG_GART_IOMMU
        gart_iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
        detect_calgary();
#endif

        detect_intel_iommu();

#ifdef CONFIG_SWIOTLB
        pci_swiotlb_init();
#endif
}
#endif /* CONFIG_X86_64 */

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_bio_merge = 4096;
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                /* dma_supported() tests forbid_dac > 0, so forbid with 1 */
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif

#ifdef CONFIG_GART_IOMMU
                gart_parse_options(p);
#endif

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);
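
/*
 * Options recognized by the parser above (comma separated, e.g.
 * "iommu=force,merge"): off, force, noforce, biomerge, panic, nopanic,
 * merge, nomerge, forcesac, allowdac, nodac, usedac, soft (SWIOTLB),
 * calgary, plus whatever gart_parse_options() accepts.
 */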
#ifdef CONFIG_X86_32
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        kfree(dev->dma_mem);
 out:
        if (mem_base)
                iounmap(mem_base);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
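
/*
 * Illustrative usage (hypothetical addresses and size): a driver with
 * 64 KB of device-local memory at bus address 0xfe000000 could do
 *
 *      if (!dma_declare_coherent_memory(&pdev->dev, 0xfe000000,
 *                                       0xfe000000, 0x10000,
 *                                       DMA_MEMORY_MAP))
 *              return -ENOMEM;
 *
 * after which coherent allocations for that device are carved out of
 * the declared region.
 */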

void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pos, err;
        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);

        pages >>= PAGE_SHIFT;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        /* get_order() expects a size in bytes, not a page count */
        err = bitmap_allocate_region(mem->bitmap, pos,
                                     get_order(pages << PAGE_SHIFT));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
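
/*
 * Sketch (hypothetical values): pin one page of the declared region
 * that the hardware already owns, so the allocator never hands it out:
 *
 *      void *p = dma_mark_declared_memory_occupied(&pdev->dev,
 *                                                  0xfe001000, PAGE_SIZE);
 *      if (IS_ERR(p))
 *              return PTR_ERR(p);
 */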
#endif /* CONFIG_X86_32 */

int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
                       dev->bus_id);
                return 0;
        }
#endif

        if (dma_ops->dma_supported)
                return dma_ops->dma_supported(dev, mask);

        /*
         * Copied from i386. Doesn't make much sense, because it will
         * only work for pci_alloc_coherent.
         * The caller just has to use GFP_DMA in this case.
         */
        if (mask < DMA_24BIT_MASK)
                return 0;

        /*
         * Tell the device to use SAC when IOMMU force is on.  This
         * allows the driver to use cheaper accesses in some cases.
         *
         * Problem with this is that if we overflow the IOMMU area and
         * return DAC as fallback address the device may not handle it
         * correctly.
         *
         * As a special case some controllers have a 39-bit address
         * mode that is as efficient as 32-bit (aic79xx).  Don't force
         * SAC for these.  Assume all masks <= 40 bits are of this
         * type.  Normally this doesn't make any difference, but gives
         * more gentle handling of IOMMU overflow.
         */
        if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
                printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
                       dev->bus_id, mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);

static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
        calgary_iommu_init();
#endif

        intel_iommu_init();

#ifdef CONFIG_GART_IOMMU
        gart_iommu_init();
#endif

        no_iommu_init();
        return 0;
}

void pci_iommu_shutdown(void)
{
        gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
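
/*
 * fs_initcall() is initcall level 5, which runs after the
 * subsys_initcall level where the PCI core is set up; that ordering
 * is what the comment above relies on.
 */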

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                printk(KERN_INFO "PCI: VIA PCI bridge detected. "
                                 "Disabling DAC.\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif