#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/dmar.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/kmemleak.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/x86_init.h>
#include <asm/iommu_table.h>

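/*
 * forbid_dac is effectively tristate: 1 forbids 64-bit DAC addressing,
 * 0 (the default) allows it, and -1 ("iommu=usedac") additionally keeps
 * the VIA bridge quirk at the bottom of this file from re-enabling the
 * ban.
 */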
static int forbid_dac __read_mostly;

const struct dma_map_ops *dma_ops = &nommu_dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole physical memory. This
 * is useful if a user wants to use an IOMMU only for KVM device assignment
 * to guests and not for driver DMA translation.
 */
int iommu_pass_through __read_mostly;

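/*
 * Illustrative: booting with "intel_iommu=on iommu=pt" keeps the IOMMU
 * usable for KVM device assignment while host drivers get
 * identity-mapped (pass-through) DMA instead of per-device translation.
 */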
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

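/*
 * The table bounds declared above come from the .iommu_table linker
 * section; detection code registers entries with the IOMMU_INIT*()
 * macros from <asm/iommu_table.h> (e.g. the swiotlb and GART code),
 * filling in the detect/early_init/late_init hooks used below.
 */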
/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES	65536

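/*
 * Walk the linker-generated IOMMU table (sorted so that dependencies
 * run first) and invoke each registered ->detect() hook; a detected
 * IOMMU gets its ->early_init() called, and IOMMU_FINISH_IF_DETECTED
 * stops the scan at the first match.
 */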
void __init pci_iommu_alloc(void)
{
	struct iommu_table_entry *p;

	sort_iommu_table(__iommu_table, __iommu_table_end);
	check_iommu_entries(__iommu_table, __iommu_table_end);

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && p->detect && p->detect() > 0) {
			p->flags |= IOMMU_DETECTED;
			if (p->early_init)
				p->early_init();
			if (p->flags & IOMMU_FINISH_IF_DETECTED)
				break;
		}
	}
}

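/*
 * Generic coherent allocation used when no IOMMU translates for the
 * device: prefer CMA when the context may sleep, fall back to the page
 * allocator, and retry once with GFP_DMA when a sub-32-bit mask is
 * missed.
 */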
void *dma_generic_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t flag,
				 unsigned long attrs)
{
	unsigned long dma_mask;
	struct page *page;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t addr;

	dma_mask = dma_alloc_coherent_mask(dev, flag);

	flag &= ~__GFP_ZERO;
again:
	page = NULL;
	/* CMA can be used only in a context that permits sleeping */
	if (gfpflags_allow_blocking(flag)) {
		page = dma_alloc_from_contiguous(dev, count, get_order(size),
						 flag);
		if (page && page_to_phys(page) + size > dma_mask) {
			dma_release_from_contiguous(dev, page, count);
			page = NULL;
		}
	}
	/* fallback */
	if (!page)
		page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
	if (!page)
		return NULL;

	addr = page_to_phys(page);
	if (addr + size > dma_mask) {
		__free_pages(page, get_order(size));

		if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) {
			flag = (flag & ~GFP_DMA32) | GFP_DMA;
			goto again;
		}

		return NULL;
	}
	memset(page_address(page), 0, size);
	*dma_addr = addr;
	return page_address(page);
}

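/*
 * Illustrative call chain (assuming no IOMMU driver has replaced
 * dma_ops): a driver's dma_alloc_coherent(dev, size, &handle,
 * GFP_KERNEL) resolves through nommu_dma_ops to
 * dma_generic_alloc_coherent() above.
 */
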
void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_addr, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = virt_to_page(vaddr);

	if (!dma_release_from_contiguous(dev, page, count))
		free_pages((unsigned long)vaddr, get_order(size));
}

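/*
 * arch_dma_alloc_attrs() is the x86 hook called from the generic
 * dma_alloc_attrs() path: it substitutes the fallback device for a
 * NULL dev and rewrites the GFP zone flags before ops->alloc() runs.
 */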
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
{
	if (!*dev)
		*dev = &x86_dma_fallback_dev;

	*gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
	*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);

	if (!is_device_dma_capable(*dev))
		return false;
	return true;
}
EXPORT_SYMBOL(arch_dma_alloc_attrs);

/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);

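/*
 * Example (illustrative): booting with "iommu=force,nomerge,panic"
 * leaves force_iommu = 1, iommu_merge = 0 and panic_on_overflow = 1
 * once iommu_setup() has walked the comma-separated option list.
 */
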
int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

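/*
 * Illustrative (hypothetical driver snippet): dma_set_mask() validates
 * the mask through dma_supported() above, so drivers typically probe
 * from the widest mask downwards:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;	(no usable DMA addressing)
 */
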
static int __init pci_iommu_init(void)
{
	struct iommu_table_entry *p;

	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
	x86_init.iommu.iommu_init();

	for (p = __iommu_table; p < __iommu_table_end; p++) {
		if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
			p->late_init();
	}

	return 0;
}
/* Must execute after PCI subsystem */
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static void via_no_dac(struct pci_dev *dev)
{
	if (forbid_dac == 0) {
		dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
				PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
#endif