]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/x86/kernel/pci-dma.c
PCI: fix incorrect mask of PM No_Soft_Reset bit
[mirror_ubuntu-artful-kernel.git] / arch / x86 / kernel / pci-dma.c
CommitLineData
459121c9 1#include <linux/dma-mapping.h>
cb5867a5 2#include <linux/dmar.h>
116890d5 3#include <linux/bootmem.h>
bca5c096 4#include <linux/pci.h>
cb5867a5 5
116890d5
GC
6#include <asm/proto.h>
7#include <asm/dma.h>
46a7fa27 8#include <asm/iommu.h>
1d9b16d1 9#include <asm/gart.h>
cb5867a5 10#include <asm/calgary.h>
a69ca340 11#include <asm/amd_iommu.h>
459121c9 12
3b15e581
FY
/*
 * forbid_dac is a tristate: > 0 forbids DAC (64-bit double-address-cycle)
 * addressing, 0 is the default, < 0 means DAC was explicitly requested
 * ("iommu=usedac") and must not be overridden by quirks (see via_no_dac()).
 */
static int forbid_dac __read_mostly;

/* Active DMA mapping implementation (nommu/gart/calgary/swiotlb/...). */
struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

/* When set, dma_supported() caps >40-bit masks to force single address cycle. */
static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* Sentinel returned by mapping failures; 0 here is never a valid mapping. */
dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
fae9a0d8 36
098cb7f2
GC
/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to older i386. */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	/* 32-bit coherent mask, matching historic i386 behaviour. */
	.coherent_dma_mask = DMA_32BIT_MASK,
	/* dma_mask aliases the coherent mask so both views stay in sync. */
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
098cb7f2 46
459121c9
GC
47int dma_set_mask(struct device *dev, u64 mask)
48{
49 if (!dev->dma_mask || !dma_supported(dev, mask))
50 return -EIO;
51
52 *dev->dma_mask = mask;
53
54 return 0;
55}
56EXPORT_SYMBOL(dma_set_mask);
57
116890d5
GC
58#ifdef CONFIG_X86_64
59static __initdata void *dma32_bootmem_ptr;
60static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);
61
62static int __init parse_dma32_size_opt(char *p)
63{
64 if (!p)
65 return -EINVAL;
66 dma32_bootmem_size = memparse(p, &p);
67 return 0;
68}
69early_param("dma32_size", parse_dma32_size_opt);
70
71void __init dma32_reserve_bootmem(void)
72{
73 unsigned long size, align;
c987d12f 74 if (max_pfn <= MAX_DMA32_PFN)
116890d5
GC
75 return;
76
7677b2ef
YL
77 /*
78 * check aperture_64.c allocate_aperture() for reason about
79 * using 512M as goal
80 */
116890d5 81 align = 64ULL<<20;
1ddb5518 82 size = roundup(dma32_bootmem_size, align);
116890d5 83 dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
7677b2ef 84 512ULL<<20);
116890d5
GC
85 if (dma32_bootmem_ptr)
86 dma32_bootmem_size = size;
87 else
88 dma32_bootmem_size = 0;
89}
90static void __init dma32_free_bootmem(void)
91{
116890d5 92
c987d12f 93 if (max_pfn <= MAX_DMA32_PFN)
116890d5
GC
94 return;
95
96 if (!dma32_bootmem_ptr)
97 return;
98
330fce23 99 free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);
116890d5
GC
100
101 dma32_bootmem_ptr = NULL;
102 dma32_bootmem_size = 0;
103}
cfb80c9e 104#endif
116890d5
GC
105
/*
 * Detect and prepare the available IOMMU implementations.
 * NOTE(review): the call order below appears deliberate (comment says
 * fall-back/fail-over); do not reorder without checking each detector.
 */
void __init pci_iommu_alloc(void)
{
#ifdef CONFIG_X86_64
	/* free the range so iommu could get some range less than 4G */
	dma32_free_bootmem();
#endif

	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons
	 */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	amd_iommu_detect();

	/* swiotlb acts as the software fallback when no HW IOMMU is used. */
	pci_swiotlb_init();
}
8978b742 127
9f6ac577
FT
/*
 * Generic coherent-memory allocator used when no IOMMU-specific
 * alloc_coherent hook applies.
 *
 * Allocates zeroed pages on the device's node and checks the physical
 * address against the device's coherent mask. If the first attempt lands
 * above a sub-32-bit mask, it retries once from ZONE_DMA. Returns the
 * kernel virtual address and stores the bus address in *dma_addr, or
 * returns NULL on failure.
 */
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag)
{
	unsigned long dma_mask;
	struct page *page;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag |= __GFP_ZERO;
again:
	page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (!is_buffer_dma_capable(dma_mask, addr, size)) {
		__free_pages(page, get_order(size));

		/*
		 * Page landed above the mask: retry once from ZONE_DMA for
		 * sub-32-bit masks. The GFP_DMA check guarantees termination.
		 */
		if (dma_mask < DMA_32BIT_MASK && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}

	*dma_addr = addr;
	return page_address(page);
}
158
fae9a0d8
GC
159/*
160 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
161 * documentation.
162 */
163static __init int iommu_setup(char *p)
164{
165 iommu_merge = 1;
166
167 if (!p)
168 return -EINVAL;
169
170 while (*p) {
171 if (!strncmp(p, "off", 3))
172 no_iommu = 1;
173 /* gart_parse_options has more force support */
174 if (!strncmp(p, "force", 5))
175 force_iommu = 1;
176 if (!strncmp(p, "noforce", 7)) {
177 iommu_merge = 0;
178 force_iommu = 0;
179 }
180
181 if (!strncmp(p, "biomerge", 8)) {
fae9a0d8
GC
182 iommu_merge = 1;
183 force_iommu = 1;
184 }
185 if (!strncmp(p, "panic", 5))
186 panic_on_overflow = 1;
187 if (!strncmp(p, "nopanic", 7))
188 panic_on_overflow = 0;
189 if (!strncmp(p, "merge", 5)) {
190 iommu_merge = 1;
191 force_iommu = 1;
192 }
193 if (!strncmp(p, "nomerge", 7))
194 iommu_merge = 0;
195 if (!strncmp(p, "forcesac", 8))
196 iommu_sac_force = 1;
197 if (!strncmp(p, "allowdac", 8))
198 forbid_dac = 0;
199 if (!strncmp(p, "nodac", 5))
200 forbid_dac = -1;
201 if (!strncmp(p, "usedac", 6)) {
202 forbid_dac = -1;
203 return 1;
204 }
205#ifdef CONFIG_SWIOTLB
206 if (!strncmp(p, "soft", 4))
207 swiotlb = 1;
208#endif
209
fae9a0d8 210 gart_parse_options(p);
fae9a0d8
GC
211
212#ifdef CONFIG_CALGARY_IOMMU
213 if (!strncmp(p, "calgary", 7))
214 use_calgary = 1;
215#endif /* CONFIG_CALGARY_IOMMU */
216
217 p += strcspn(p, ",");
218 if (*p == ',')
219 ++p;
220 }
221 return 0;
222}
223early_param("iommu", iommu_setup);
224
8e0c3797
GC
/*
 * Report whether the device can use the given DMA addressing mask.
 * Returns 1 if supported, 0 otherwise. Delegates to the active mapping
 * implementation when it provides its own dma_supported hook.
 */
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	/* DAC (>32-bit) addressing globally forbidden via "iommu=nodac"
	   or a bridge quirk (forbid_dac > 0). */
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on. This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these. Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
265
cb5867a5
GC
/*
 * Late IOMMU initialization, run as an fs_initcall so it executes after
 * the PCI subsystem is up.
 * NOTE(review): presumably each *_init() is a no-op unless the matching
 * detector fired in pci_iommu_alloc() — confirm before reordering.
 */
static int __init pci_iommu_init(void)
{
	calgary_iommu_init();

	intel_iommu_init();

	amd_iommu_init();

	gart_iommu_init();

	/* Fallback when no hardware IOMMU claimed the system. */
	no_iommu_init();
	return 0;
}
279
/* Quiesce the IOMMU on shutdown/kexec; only the GART needs explicit work. */
void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
3b15e581
FY
286
287#ifdef CONFIG_PCI
288/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
289
290static __devinit void via_no_dac(struct pci_dev *dev)
291{
292 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
a0286c94
MT
293 printk(KERN_INFO
294 "PCI: VIA PCI bridge detected. Disabling DAC.\n");
3b15e581
FY
295 forbid_dac = 1;
296 }
297}
298DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
299#endif