/*
 * arch/x86/kernel/pci-swiotlb.c
 * (recovered from a gitweb blame page; per-line commit-hash/author
 * annotations from the scrape have been stripped)
 */
1/* Glue code to lib/swiotlb.c */
2
3#include <linux/pci.h>
4#include <linux/cache.h>
186f4360 5#include <linux/init.h>
8ce79960
JF
6#include <linux/swiotlb.h>
7#include <linux/bootmem.h>
d6bd3a39
REB
8#include <linux/dma-mapping.h>
9
46a7fa27 10#include <asm/iommu.h>
17a941d8
MBY
11#include <asm/swiotlb.h>
12#include <asm/dma.h>
c116c545
KRW
13#include <asm/xen/swiotlb-xen.h>
14#include <asm/iommu_table.h>
/* Non-zero when SWIOTLB bounce buffering is (or will be) in use. */
int swiotlb __read_mostly;
9c5a3621 17void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
baa676fc 18 dma_addr_t *dma_handle, gfp_t flags,
00085f1e 19 unsigned long attrs)
03967c52
FT
20{
21 void *vaddr;
22
186dfc9d
JR
23 /*
24 * Don't print a warning when the first allocation attempt fails.
25 * swiotlb_alloc_coherent() will print a warning when the DMA
26 * memory allocation ultimately failed.
27 */
28 flags |= __GFP_NOWARN;
29
baa676fc
AP
30 vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags,
31 attrs);
03967c52
FT
32 if (vaddr)
33 return vaddr;
34
35 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
36}
37
9c5a3621 38void x86_swiotlb_free_coherent(struct device *dev, size_t size,
baa676fc 39 void *vaddr, dma_addr_t dma_addr,
00085f1e 40 unsigned long attrs)
baa676fc 41{
9c5a3621
AM
42 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
43 swiotlb_free_coherent(dev, size, vaddr, dma_addr);
44 else
45 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
baa676fc
AP
46}
47
/*
 * DMA operations installed when SWIOTLB bounce buffering is active.
 * Coherent alloc/free go through the x86 wrappers so the generic
 * allocator is tried first; the remaining ops map directly onto the
 * lib/swiotlb.c implementations.
 */
static const struct dma_map_ops swiotlb_dma_ops = {
	.mapping_error = swiotlb_dma_mapping_error,
	.alloc = x86_swiotlb_alloc_coherent,
	.free = x86_swiotlb_free_coherent,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	/* NOTE(review): NULL presumably selects the core's default
	 * dma_supported handling — confirm against dma-mapping core. */
	.dma_supported = NULL,
};
62
b18485e7 63/*
efa631c2 64 * pci_swiotlb_detect_override - set swiotlb to 1 if necessary
b18485e7
FT
65 *
66 * This returns non-zero if we are forced to use swiotlb (by the boot
67 * option).
68 */
efa631c2 69int __init pci_swiotlb_detect_override(void)
17a941d8 70{
ae7871be 71 if (swiotlb_force == SWIOTLB_FORCE)
efa631c2
KRW
72 swiotlb = 1;
73
6c206e4d 74 return swiotlb;
efa631c2 75}
/*
 * Register the boot-option override detector in the IOMMU init table.
 * NOTE(review): IOMMU_INIT_FINISH presumably marks this as a terminal
 * entry whose detection depends on pci_xen_swiotlb_detect — confirm
 * against asm/iommu_table.h.
 */
IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
		  pci_xen_swiotlb_detect,
		  pci_swiotlb_init,
		  pci_swiotlb_late_init);
efa631c2
KRW
80
81/*
82 * if 4GB or more detected (and iommu=off not set) return 1
83 * and set swiotlb to 1.
84 */
85int __init pci_swiotlb_detect_4gb(void)
86{
17a941d8 87 /* don't initialize swiotlb if iommu=off (no_iommu=1) */
cfb80c9e 88#ifdef CONFIG_X86_64
ec941c5f 89 if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
19943b0e 90 swiotlb = 1;
cfb80c9e 91#endif
efa631c2 92 return swiotlb;
186a2502 93}
c116c545
KRW
94IOMMU_INIT(pci_swiotlb_detect_4gb,
95 pci_swiotlb_detect_override,
96 pci_swiotlb_init,
97 pci_swiotlb_late_init);
186a2502
FT
98
99void __init pci_swiotlb_init(void)
100{
17a941d8 101 if (swiotlb) {
ad32e8cb 102 swiotlb_init(0);
17a941d8 103 dma_ops = &swiotlb_dma_ops;
a3b28ee1 104 }
17a941d8 105}
efa631c2
KRW
106
107void __init pci_swiotlb_late_init(void)
108{
109 /* An IOMMU turned us off. */
110 if (!swiotlb)
111 swiotlb_free();
112 else {
113 printk(KERN_INFO "PCI-DMA: "
114 "Using software bounce buffering for IO (SWIOTLB)\n");
115 swiotlb_print_info();
116 }
117}