/* arch/x86/kernel/pci-dma.c */
#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

/* Set to > 0 to forbid DAC (64-bit dual address cycle) addressing;
   see dma_supported() and the VIA quirk at the bottom of this file. */
static int forbid_dac __read_mostly;

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). A smaller DMA
   mask would probably be better, but this is bug-for-bug compatible
   with older i386. */
struct device x86_dma_fallback_dev = {
        .bus_id = "fallback device",
        .coherent_dma_mask = DMA_32BIT_MASK,
        .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

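/*
 * Usage sketch (hypothetical driver code, not part of this file):
 * a driver negotiates addressing before mapping buffers, retrying
 * with a narrower mask if the wide one is rejected:
 *
 *      if (dma_set_mask(&pdev->dev, DMA_64BIT_MASK) &&
 *          dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *              return -ENODEV;
 */
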
#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
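
/*
 * Example: booting with "dma32_size=64M" shrinks the reservation made
 * below to 64MB; memparse() accepts the usual K/M/G suffixes.
 */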

void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;

        if (max_pfn <= MAX_DMA32_PFN)
                return;

        /*
         * check aperture_64.c allocate_aperture() for the reason why
         * 512M is used as the allocation goal
         */
        align = 64ULL<<20;
        size = roundup(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                                    512ULL<<20);
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}
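
/*
 * Worked example for dma32_reserve_bootmem() above, with the defaults:
 * roundup(128MB, 64MB) leaves the size at 128MB, and the block is
 * allocated at or above the 512MB goal; see allocate_aperture() in
 * aperture_64.c for the rationale behind that goal.
 */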
static void __init dma32_free_bootmem(void)
{
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}

void __init pci_iommu_alloc(void)
{
        /* free the range so the IOMMU code can get some range below 4G */
        dma32_free_bootmem();
        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
        gart_iommu_hole_init();

        detect_calgary();

        detect_intel_iommu();

        amd_iommu_detect();

        pci_swiotlb_init();
}
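
/*
 * The detect_* calls above set iommu_detected when they find usable
 * hardware; pci_swiotlb_init() runs last so the software swiotlb is
 * only enabled as a final fallback.
 */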

unsigned long iommu_nr_pages(unsigned long addr, unsigned long len)
{
        unsigned long size = roundup((addr & ~PAGE_MASK) + len, PAGE_SIZE);

        return size >> PAGE_SHIFT;
}
EXPORT_SYMBOL(iommu_nr_pages);
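
/*
 * Worked example: with 4K pages, addr = 0x1fff and len = 2 gives an
 * in-page offset of 0xfff, and 0xfff + 2 = 0x1001 rounds up to 0x2000,
 * i.e. the buffer spans two pages.
 */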
#endif

void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                 dma_addr_t *dma_addr, gfp_t flag)
{
        unsigned long dma_mask;
        struct page *page;
        dma_addr_t addr;

        dma_mask = dma_alloc_coherent_mask(dev, flag);

        flag |= __GFP_ZERO;
again:
        page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
        if (!page)
                return NULL;

        addr = page_to_phys(page);
        if (!is_buffer_dma_capable(dma_mask, addr, size)) {
                __free_pages(page, get_order(size));

                if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
                        flag = (flag & ~GFP_DMA32) | GFP_DMA;
                        goto again;
                }

                return NULL;
        }

        *dma_addr = addr;
        return page_address(page);
}

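/*
 * The retry in dma_generic_alloc_coherent() covers one specific case:
 * if the device mask is narrower than 32 bits and the first allocation
 * landed too high, it retries from ZONE_DMA (below 16MB on x86), the
 * only narrower zone available.
 */
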
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_bio_merge = 4096;
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                /* forbid_dac must go positive here; -1 would allow DAC */
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = 1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);

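/*
 * Example: booting with "iommu=force,biomerge,nopanic" forces IOMMU
 * use, enables BIO merging, and suppresses panics on IOMMU overflow;
 * as the strcspn() walk above shows, options are comma-separated.
 */
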
int dma_supported(struct device *dev, u64 mask)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
                return 0;
        }
#endif

        if (ops->dma_supported)
                return ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_24BIT_MASK)
                return 0;

        /* Tell the device to use SAC when IOMMU force is on.  This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these.  Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
                dev_info(dev, "Force SAC with mask %Lx\n", mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);

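/*
 * Drivers normally reach dma_supported() indirectly, through
 * dma_set_mask() above or the pci_set_dma_mask() wrapper, rather than
 * calling it directly.
 */
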
static int __init pci_iommu_init(void)
{
        calgary_iommu_init();

        intel_iommu_init();

        amd_iommu_init();

        gart_iommu_init();

        no_iommu_init();
        return 0;
}

void pci_iommu_shutdown(void)
{
        gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                printk(KERN_INFO "PCI: VIA PCI bridge detected. "
                                 "Disabling DAC.\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif