/* arch/x86/kernel/pci-dma.c */
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

static int forbid_dac __read_mostly;

struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES	65536

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

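/*
 * Illustrative sketch, not part of the original file: how a typical PCI
 * driver negotiates an addressing width through dma_set_mask() above.
 * "pdev" and the function name are hypothetical.
 */
#if 0
static int example_setup_dma_mask(struct pci_dev *pdev)
{
	/* Prefer 64-bit (DAC) addressing when dma_supported() allows it. */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		return 0;

	/* Otherwise fall back to 32-bit (SAC) addressing. */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return 0;

	return -EIO;	/* no usable DMA mask for this device */
}
#endif
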
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}

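/*
 * Illustrative sketch, not part of the original file: roughly how an x86
 * IOMMU driver gets an entry into the __iommu_table[] that
 * pci_iommu_alloc() scans above.  The example_* routines are hypothetical;
 * see <asm/iommu_table.h> for the IOMMU_INIT_* helpers that the real GART,
 * Calgary and SWIOTLB code uses.
 */
#if 0
static int __init example_iommu_detect(void)
{
	/* Return > 0 when the hardware is present, so early_init() runs. */
	return example_hw_present() ? 1 : 0;
}

/* FINISH variant: stop scanning further entries once this one is detected. */
IOMMU_INIT_FINISH(example_iommu_detect, NULL,
		  example_iommu_early_init, example_iommu_late_init);
#endif
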
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 struct dma_attrs *attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag &= ~__GFP_ZERO;
again:
	page = NULL;
	/* CMA can be used only in a context which permits sleeping */
	if (flag & __GFP_WAIT) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
		if (page && page_to_phys(page) + size > dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}
	memset(page_address(page), 0, size);
	*dma_addr = addr;
	return page_address(page);
}

void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		      gfp_t gfp, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev)
		dev = &x86_dma_fallback_dev;

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc)
		return NULL;

	memory = ops->alloc(dev, size, dma_handle,
			    dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size,
		    void *vaddr, dma_addr_t bus,
		    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, bus);
	if (ops->free)
		ops->free(dev, size, vaddr, bus, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

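/*
 * Illustrative sketch, not part of the original file: a driver allocating
 * and freeing a coherent buffer.  dma_alloc_coherent()/dma_free_coherent()
 * are wrappers that reach dma_alloc_attrs()/dma_free_attrs() above with a
 * NULL attrs argument.  "pdev" is hypothetical.
 */
#if 0
	void *cpu_addr;
	dma_addr_t dma_handle;

	cpu_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
				      &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the device, use cpu_addr from the CPU ... */

	dma_free_coherent(&pdev->dev, PAGE_SIZE, cpu_addr, dma_handle);
#endif
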
/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

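/*
 * Illustrative examples, not part of the original file, of boot parameters
 * the handler above accepts (options may be combined with commas):
 *
 *	iommu=off		disable IOMMU usage entirely
 *	iommu=pt		pass-through, no DMA translation for devices
 *	iommu=soft		use software bounce buffering (SWIOTLB)
 *	iommu=nomerge,nopanic	no merging; don't panic on IOMMU overflow
 */
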
int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

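/*
 * Illustrative sketch, not part of the original file: the net effect of
 * the checks above on a hypothetical device.  With "iommu=nodac" (or after
 * the VIA quirk below fires), forbid_dac > 0 and a DAC-sized mask is
 * refused, while a 32-bit mask can still succeed.
 */
#if 0
	int err;

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));	/* -EIO */
	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));	/* 0 (success) */
#endif
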
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here. */

static void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
			      PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif