/*
 * Dynamic DMA mapping support.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/proto.h>

int iommu_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_merge);

dma_addr_t bad_dma_address __read_mostly;
EXPORT_SYMBOL(bad_dma_address);

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

int iommu_sac_force __read_mostly = 0;
EXPORT_SYMBOL(iommu_sac_force);

int no_iommu __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif
/* Dummy device used for NULL arguments (normally ISA). A smaller DMA
   mask would probably be better, but this is bug-to-bug compatible
   with i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = 0xffffffff,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};

/* Allocate DMA memory on node near device */
static noinline void *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	struct page *page;
	int node;
#ifdef CONFIG_PCI
	if (dev->bus == &pci_bus_type)
		node = pcibus_to_node(to_pci_dev(dev)->bus);
	else
#endif
		node = numa_node_id();

	if (node < first_node(node_online_map))
		node = first_node(node_online_map);

	page = alloc_pages_node(node, gfp, order);
	return page ? page_address(page) : NULL;
}
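
/*
 * Example (illustrative sketch, not a call site in this file): an 8KB
 * dma_alloc_coherent() request from a device with a 32-bit mask
 * reaches this helper as
 *
 *	dma_alloc_pages(dev, gfp | GFP_DMA32, get_order(8192));
 *
 * which hands back two contiguous pages from the node the device's
 * PCI bus is attached to, or from the current CPU's node for non-PCI
 * devices.
 */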

/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory;
	unsigned long dma_mask = 0;
	u64 bus;

	if (!dev)
		dev = &fallback_dev;
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = 0xffffffff;

	/* Don't invoke the OOM killer */
	gfp |= __GFP_NORETRY;

	/* Kludge to make it bug-to-bug compatible with i386. i386
	   uses the normal dma_mask for alloc_coherent. */
	dma_mask &= *dev->dma_mask;

	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB, and in that case we have a chance of
	   finding suitable memory in the next higher zone first. If
	   not, retry with true GFP_DMA. -AK */
	if (dma_mask <= 0xffffffff)
		gfp |= GFP_DMA32;

again:
	memory = dma_alloc_pages(dev, gfp, get_order(size));
	if (memory == NULL)
		return NULL;

	{
		int high, mmu;
		bus = virt_to_bus(memory);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < 0xffffffff && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let the low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							       dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = virt_to_bus(memory);
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, memory,
						  size,
						  PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t bus)
{
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
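
/*
 * Example usage (an illustrative driver sketch; "RING_BYTES" and the
 * device pointer are placeholders): a typical caller pairs the two
 * exports above to manage a descriptor ring shared with hardware:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device, access ring from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */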

int dma_supported(struct device *dev, u64 mask)
{
	if (dma_ops->dma_supported)
		return dma_ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < 0x00ffffff)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on. This
	   allows the driver to use cheaper accesses in some cases.

	   The problem with this is that if we overflow the IOMMU area
	   and return DAC as a fallback address, the device may not
	   handle it correctly.

	   As a special case some controllers have a 39-bit address
	   mode that is as efficient as 32-bit (aic79xx). Don't force
	   SAC for these. Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more graceful handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= 0xffffffffffULL)) {
		printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
		       dev->bus_id, mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
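
/*
 * Example usage (an illustrative driver sketch): a probe routine
 * negotiates the widest DMA mask the platform accepts, falling back
 * from 64-bit to 32-bit addressing:
 *
 *	if (dma_set_mask(dev, 0xffffffffffffffffULL) &&
 *	    dma_set_mask(dev, 0xffffffffULL))
 *		return -EIO;
 *
 * dma_set_mask() consults dma_supported() above, so with
 * iommu=forcesac a 64-bit request may be refused and the driver ends
 * up with the 32-bit (SAC) mask instead.
 */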

/* iommu=[size][,noagp][,off][,force][,noforce][,leak][,memaper[=order]][,merge]
   [,forcesac][,fullflush][,nomerge][,biomerge]
   size		set size of IOMMU (in bytes)
   noagp	don't initialize the AGP driver and use full aperture.
   off		don't use the IOMMU
   leak		turn on simple IOMMU leak tracing (only when
		CONFIG_IOMMU_LEAK is on)
   memaper[=order] allocate a private aperture over RAM with size 32MB^order.
   noforce	don't force IOMMU usage. Default.
   force	Force IOMMU.
   merge	Do lazy merging. This may improve performance on some
		block devices. Implies force (experimental).
   biomerge	Do merging at the BIO layer. This is more efficient
		than merge, but should only be done with very big
		IOMMUs. Implies merge,force.
   nomerge	Don't do SG merging.
   forcesac	Force SAC mode for masks <40 bits (experimental).
   fullflush	Flush IOMMU on each allocation (default).
   nofullflush	Don't use IOMMU fullflush.
   allowed	Override iommu-off workarounds for specific chipsets.
   soft		Use software bounce buffering (default for Intel machines).
   noaperture	Don't touch the aperture for AGP.
*/
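
/*
 * Example (illustrative): booting with
 *
 *	iommu=force,memaper=2,nomerge
 *
 * forces IOMMU usage, lets gart_parse_options() set up a private
 * order-2 aperture, and disables SG merging. The loop below matches
 * each comma-separated token with strncmp() and then advances to the
 * next token with strcspn().
 */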
int __init iommu_setup(char *p)
{
	iommu_merge = 1;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;

#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

#ifdef CONFIG_GART_IOMMU
		gart_parse_options(p);
#endif

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 1;
}