/*
 * arch/x86/kernel/pci-dma.c
 */
459121c9 1#include <linux/dma-mapping.h>
2118d0c5 2#include <linux/dma-debug.h>
cb5867a5 3#include <linux/dmar.h>
116890d5 4#include <linux/bootmem.h>
bca5c096 5#include <linux/pci.h>
acde31dc 6#include <linux/kmemleak.h>
cb5867a5 7
116890d5
GC
8#include <asm/proto.h>
9#include <asm/dma.h>
46a7fa27 10#include <asm/iommu.h>
1d9b16d1 11#include <asm/gart.h>
cb5867a5 12#include <asm/calgary.h>
a69ca340 13#include <asm/amd_iommu.h>
b4941a9a 14#include <asm/x86_init.h>
459121c9 15
3b15e581
FY
/*
 * DAC (64-bit double-address-cycle) policy:
 *   > 0 - DAC disallowed (set by "iommu=nodac" or the VIA bridge quirk below)
 *   < 0 - DAC force-allowed ("iommu=usedac")
 *     0 - default, allowed
 */
static int forbid_dac __read_mostly;

/* Active DMA implementation; starts as no-IOMMU until one is selected. */
struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

/* "iommu=forcesac": steer devices toward 32-bit SAC addressing. */
static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

/* "iommu=merge"/"nomerge": allow merging of scatter-gather entries. */
int iommu_merge __read_mostly = 0;

/* "iommu=off": disable all IOMMU use. */
int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access to whole physical memory. This is
 * useful if a user want to use an IOMMU only for KVM device assignment to
 * guests and not for driver dma translation.
 */
int iommu_pass_through __read_mostly;

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES       32768
56
459121c9
GC
57int dma_set_mask(struct device *dev, u64 mask)
58{
59 if (!dev->dma_mask || !dma_supported(dev, mask))
60 return -EIO;
61
62 *dev->dma_mask = mask;
63
64 return 0;
65}
66EXPORT_SYMBOL(dma_set_mask);
67
116890d5
GC
68#ifdef CONFIG_X86_64
69static __initdata void *dma32_bootmem_ptr;
70static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
71
72static int __init parse_dma32_size_opt(char *p)
73{
74 if (!p)
75 return -EINVAL;
76 dma32_bootmem_size = memparse(p, &p);
77 return 0;
78}
79early_param("dma32_size", parse_dma32_size_opt);
80
/*
 * Reserve a block of memory below 4GB at boot (default 128MB, tunable
 * via "dma32_size=") so a later-initialized IOMMU still has
 * DMA32-addressable memory available.  Skipped when the machine has no
 * memory above 4GB.  The block is released again by dma32_free_bootmem().
 */
void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;

	/* All memory already below 4GB: nothing to protect. */
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * check aperture_64.c allocate_aperture() for reason about
	 * using 512M as goal
	 */
	align = 64ULL<<20;
	size = roundup(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	/*
	 * Kmemleak should not scan this block as it may not be mapped via the
	 * kernel direct mapping.
	 */
	kmemleak_ignore(dma32_bootmem_ptr);
	/* _nopanic allocation may fail; record what was actually reserved. */
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
105static void __init dma32_free_bootmem(void)
106{
116890d5 107
c987d12f 108 if (max_pfn <= MAX_DMA32_PFN)
116890d5
GC
109 return;
110
111 if (!dma32_bootmem_ptr)
112 return;
113
330fce23 114 free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);
116890d5
GC
115
116 dma32_bootmem_ptr = NULL;
117 dma32_bootmem_size = 0;
118}
cfb80c9e 119#endif
116890d5
GC
120
/*
 * Early boot: decide which DMA backend to use.  Tries swiotlb first;
 * when swiotlb is selected, hardware IOMMU probing is skipped entirely.
 */
void __init pci_iommu_alloc(void)
{
	int use_swiotlb;

	use_swiotlb = pci_swiotlb_init();
#ifdef CONFIG_X86_64
	/* free the range so iommu could get some range less than 4G */
	dma32_free_bootmem();
#endif
	if (use_swiotlb)
		return;

	/* Probe the possible hardware IOMMUs.  Order matters below. */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	/* needs to be called after gart_iommu_hole_init */
	amd_iommu_detect();
}
8978b742 142
9f6ac577
FT
143void *dma_generic_alloc_coherent(struct device *dev, size_t size,
144 dma_addr_t *dma_addr, gfp_t flag)
145{
146 unsigned long dma_mask;
147 struct page *page;
148 dma_addr_t addr;
149
150 dma_mask = dma_alloc_coherent_mask(dev, flag);
151
152 flag |= __GFP_ZERO;
153again:
154 page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
155 if (!page)
156 return NULL;
157
158 addr = page_to_phys(page);
a4c2baa6 159 if (addr + size > dma_mask) {
9f6ac577
FT
160 __free_pages(page, get_order(size));
161
284901a9 162 if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
9f6ac577
FT
163 flag = (flag & ~GFP_DMA32) | GFP_DMA;
164 goto again;
165 }
166
167 return NULL;
168 }
169
170 *dma_addr = addr;
171 return page_address(page);
172}
173
fae9a0d8
GC
174/*
175 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
176 * documentation.
177 */
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 *
 * Parses a comma-separated option list.  NOTE: the tests below are
 * deliberately independent ifs, not else-if: a token may match several
 * prefixes (e.g. "forcesac" also matches "force", "noforce" also
 * matches "nomerge"-style handling via its own branch), and every
 * token is additionally passed to gart_parse_options().
 */
static __init int iommu_setup(char *p)
{
	/* Merging defaults to on when "iommu=" is given at all. */
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			/* -1 = force-allow DAC; stops option parsing here. */
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		/* Advance to the next comma-separated token. */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
241
8e0c3797
GC
/*
 * Can @dev DMA to any address covered by @mask?  Returns boolean
 * (1 = supported, 0 = not).  Delegates to the active dma_map_ops
 * implementation when it provides its own check.
 */
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	/* forbid_dac > 0: 64-bit DAC addressing disabled (quirk/boot option) */
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on. This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these. Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
282
cb5867a5
GC
283static int __init pci_iommu_init(void)
284{
2118d0c5
JR
285 dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
286
86f31952
JR
287#ifdef CONFIG_PCI
288 dma_debug_add_bus(&pci_bus_type);
289#endif
d07c1be0
FT
290 x86_init.iommu.iommu_init();
291
75f1cdf1
FT
292 if (swiotlb) {
293 printk(KERN_INFO "PCI-DMA: "
294 "Using software bounce buffering for IO (SWIOTLB)\n");
295 swiotlb_print_info();
296 } else
297 swiotlb_free();
298
cb5867a5
GC
299 return 0;
300}
cb5867a5 301/* Must execute after PCI subsystem */
9a821b23 302rootfs_initcall(pci_iommu_init);
3b15e581
FY
303
304#ifdef CONFIG_PCI
305/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
306
307static __devinit void via_no_dac(struct pci_dev *dev)
308{
309 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
13bf7576 310 dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
3b15e581
FY
311 forbid_dac = 1;
312 }
313}
314DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
315#endif