]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - arch/x86/kernel/pci-swiotlb.c
Merge tag 'for-linus-4.15-rc3-tag' of git://git.kernel.org/pub/scm/linux/kernel/git...
[mirror_ubuntu-bionic-kernel.git] / arch / x86 / kernel / pci-swiotlb.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Glue code to lib/swiotlb.c */
3
4 #include <linux/pci.h>
5 #include <linux/cache.h>
6 #include <linux/init.h>
7 #include <linux/swiotlb.h>
8 #include <linux/bootmem.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/mem_encrypt.h>
11
12 #include <asm/iommu.h>
13 #include <asm/swiotlb.h>
14 #include <asm/dma.h>
15 #include <asm/xen/swiotlb-xen.h>
16 #include <asm/iommu_table.h>
17
/* Non-zero once swiotlb bounce buffering has been enabled (set by the
 * detect routines below, or forced via swiotlb_force). */
int swiotlb __read_mostly;
19
20 void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
21 dma_addr_t *dma_handle, gfp_t flags,
22 unsigned long attrs)
23 {
24 void *vaddr;
25
26 /*
27 * Don't print a warning when the first allocation attempt fails.
28 * swiotlb_alloc_coherent() will print a warning when the DMA
29 * memory allocation ultimately failed.
30 */
31 flags |= __GFP_NOWARN;
32
33 vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags,
34 attrs);
35 if (vaddr)
36 return vaddr;
37
38 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
39 }
40
41 void x86_swiotlb_free_coherent(struct device *dev, size_t size,
42 void *vaddr, dma_addr_t dma_addr,
43 unsigned long attrs)
44 {
45 if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
46 swiotlb_free_coherent(dev, size, vaddr, dma_addr);
47 else
48 dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
49 }
50
/*
 * DMA operations backed by the swiotlb bounce buffer; coherent
 * allocations go through the x86 wrappers above, which try the
 * generic direct allocator first.
 */
static const struct dma_map_ops swiotlb_dma_ops = {
	.mapping_error = swiotlb_dma_mapping_error,
	.alloc = x86_swiotlb_alloc_coherent,
	.free = x86_swiotlb_free_coherent,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.dma_supported = NULL,	/* NULL: use the default support check */
};
65
66 /*
67 * pci_swiotlb_detect_override - set swiotlb to 1 if necessary
68 *
69 * This returns non-zero if we are forced to use swiotlb (by the boot
70 * option).
71 */
72 int __init pci_swiotlb_detect_override(void)
73 {
74 if (swiotlb_force == SWIOTLB_FORCE)
75 swiotlb = 1;
76
77 return swiotlb;
78 }
79 IOMMU_INIT_FINISH(pci_swiotlb_detect_override,
80 pci_xen_swiotlb_detect,
81 pci_swiotlb_init,
82 pci_swiotlb_late_init);
83
/*
 * If 4GB or more detected (and iommu=off not set) or if SME is active
 * then set swiotlb to 1 and return 1.
 */
int __init pci_swiotlb_detect_4gb(void)
{
	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
#ifdef CONFIG_X86_64
	/* Memory above the 32-bit DMA limit needs bounce buffering. */
	if (!no_iommu && max_possible_pfn > MAX_DMA32_PFN)
		swiotlb = 1;
#endif

	/*
	 * If SME is active then swiotlb will be set to 1 so that bounce
	 * buffers are allocated and used for devices that do not support
	 * the addressing range required for the encryption mask.
	 */
	if (sme_active())
		swiotlb = 1;

	return swiotlb;
}
IOMMU_INIT(pci_swiotlb_detect_4gb,
	   pci_swiotlb_detect_override,
	   pci_swiotlb_init,
	   pci_swiotlb_late_init);
110
111 void __init pci_swiotlb_init(void)
112 {
113 if (swiotlb) {
114 swiotlb_init(0);
115 dma_ops = &swiotlb_dma_ops;
116 }
117 }
118
119 void __init pci_swiotlb_late_init(void)
120 {
121 /* An IOMMU turned us off. */
122 if (!swiotlb)
123 swiotlb_free();
124 else {
125 printk(KERN_INFO "PCI-DMA: "
126 "Using software bounce buffering for IO (SWIOTLB)\n");
127 swiotlb_print_info();
128 }
129 }