/*
 *  Copyright 2010
 *  by Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code provides an IOMMU for Xen PV guests with PCI passthrough.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License v2.0 as published by
 * the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * PV guests under Xen run in a non-contiguous memory architecture.
 *
 * When PCI pass-through is utilized, this necessitates an IOMMU for
 * translating bus (DMA) addresses to virtual ones and vice-versa, and also
 * providing a mechanism to have contiguous pages for device driver
 * operations (say DMA operations).
 *
 * Specifically, under Xen the Linux idea of pages is an illusion. It
 * assumes that pages start at zero and go up to the available memory. To
 * help with that, the Linux Xen MMU provides a lookup mechanism to
 * translate the page frame numbers (PFN) to machine frame numbers (MFN)
 * and vice-versa. The MFNs are the "real" frame numbers. Furthermore,
 * memory is not contiguous: the Xen hypervisor stitches memory for guests
 * from different pools, which means there is no guarantee that PFN==MFN
 * and PFN+1==MFN+1. Lastly, with Xen 4.0, pages (in debug mode) are
 * allocated in descending order (high to low), meaning the guest might
 * never get any MFNs under the 4GB mark.
 */
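
/*
 * Illustrative example (values made up): the guest may see PFN 0x100 and
 * PFN 0x101 as adjacent, while the backing machine frames are MFN 0x80321
 * and MFN 0x04567. DMA programmed across that page boundary would land in
 * unrelated machine memory, which is what the bounce buffering below
 * guards against.
 */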

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <xen/swiotlb-xen.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
#include <xen/hvc-console.h>

#include <trace/events/swiotlb.h>

/*
 * Used to do a quick range check in swiotlb_tbl_unmap_single and
 * swiotlb_tbl_sync_single_*, to see if the memory was in fact allocated by this
 * API.
 */

static char *xen_io_tlb_start, *xen_io_tlb_end;
static unsigned long xen_io_tlb_nslabs;

/*
 * Quick lookup value of the bus address of the IOTLB.
 */
static u64 start_dma_addr;

static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	return phys_to_machine(XPADDR(paddr)).maddr;
}

static phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
{
	return machine_to_phys(XMADDR(baddr)).paddr;
}

static dma_addr_t xen_virt_to_bus(void *address)
{
	return xen_phys_to_bus(virt_to_phys(address));
}
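
/*
 * Check that the machine frames backing the pages covered by
 * (pfn, offset, length) are consecutive, i.e. that the buffer really is
 * contiguous in machine memory and not merely in the guest's
 * pseudo-physical address space.
 */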
static int check_pages_physically_contiguous(unsigned long pfn,
					     unsigned int offset,
					     size_t length)
{
	unsigned long next_mfn;
	int i;
	int nr_pages;

	next_mfn = pfn_to_mfn(pfn);
	nr_pages = (offset + length + PAGE_SIZE - 1) >> PAGE_SHIFT;

	for (i = 1; i < nr_pages; i++) {
		if (pfn_to_mfn(++pfn) != ++next_mfn)
			return 0;
	}
	return 1;
}

static int range_straddles_page_boundary(phys_addr_t p, size_t size)
{
	unsigned long pfn = PFN_DOWN(p);
	unsigned int offset = p & ~PAGE_MASK;

	if (offset + size <= PAGE_SIZE)
		return 0;
	if (check_pages_physically_contiguous(pfn, offset, size))
		return 0;
	return 1;
}
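
/*
 * Check whether dma_addr points into the bounce buffer set up by this
 * file, so that the unmap/sync paths know the data must be handed back
 * through the swiotlb core rather than touched in place.
 */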
static int is_xen_swiotlb_buffer(dma_addr_t dma_addr)
{
	unsigned long mfn = PFN_DOWN(dma_addr);
	unsigned long pfn = mfn_to_local_pfn(mfn);
	phys_addr_t paddr;

	/* If the address is outside our domain, it CAN
	 * have the same virtual address as another address
	 * in our domain. Therefore _only_ check address within our domain.
	 */
	if (pfn_valid(pfn)) {
		paddr = PFN_PHYS(pfn);
		return paddr >= virt_to_phys(xen_io_tlb_start) &&
		       paddr < virt_to_phys(xen_io_tlb_end);
	}
	return 0;
}
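
/*
 * Swap the guest's pseudo-physical pages backing the IO TLB for machine
 * pages that are contiguous and addressable within dma_bits, one
 * IO_TLB_SEGSIZE-sized chunk at a time. If the hypervisor cannot satisfy
 * a request, the exchange is retried with a progressively wider address
 * mask, up to max_dma_bits.
 */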
static int max_dma_bits = 32;

static int
xen_swiotlb_fixup(void *buf, size_t size, unsigned long nslabs)
{
	int i, rc;
	int dma_bits;

	dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;

	i = 0;
	do {
		int slabs = min(nslabs - i, (unsigned long)IO_TLB_SEGSIZE);

		do {
			rc = xen_create_contiguous_region(
				(unsigned long)buf + (i << IO_TLB_SHIFT),
				get_order(slabs << IO_TLB_SHIFT),
				dma_bits);
		} while (rc && dma_bits++ < max_dma_bits);
		if (rc)
			return rc;

		i += slabs;
	} while (i < nslabs);
	return 0;
}
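
/*
 * Pick the IO TLB size: honour a slab count already requested (nr_tbl
 * != 0), otherwise default to 64MB worth of slabs, aligned to
 * IO_TLB_SEGSIZE. Returns the resulting table size in bytes.
 */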
static unsigned long xen_set_nslabs(unsigned long nr_tbl)
{
	if (!nr_tbl) {
		xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
		xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
	} else
		xen_io_tlb_nslabs = nr_tbl;

	return xen_io_tlb_nslabs << IO_TLB_SHIFT;
}

enum xen_swiotlb_err {
	XEN_SWIOTLB_UNKNOWN = 0,
	XEN_SWIOTLB_ENOMEM,
	XEN_SWIOTLB_EFIXUP
};

static const char *xen_swiotlb_error(enum xen_swiotlb_err err)
{
	switch (err) {
	case XEN_SWIOTLB_ENOMEM:
		return "Cannot allocate Xen-SWIOTLB buffer\n";
	case XEN_SWIOTLB_EFIXUP:
		return "Failed to get contiguous memory for DMA from Xen!\n"
		       "You either: don't have the permissions, do not have"
		       " enough free memory under 4GB, or the hypervisor memory"
		       " is too fragmented!";
	default:
		break;
	}
	return "";
}
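
/*
 * Allocate the IO TLB either from bootmem (early == true, before the page
 * allocator is up) or via the page allocator, then exchange it for
 * machine-contiguous memory below 4GB. On failure the whole sequence is
 * retried up to three times at half the previous size, but never below
 * 2MB, before giving up (and panicking, if this happened at early boot).
 */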
int __ref xen_swiotlb_init(int verbose, bool early)
{
	unsigned long bytes, order;
	int rc = -ENOMEM;
	enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
	unsigned int repeat = 3;

	xen_io_tlb_nslabs = swiotlb_nr_tbl();
retry:
	bytes = xen_set_nslabs(xen_io_tlb_nslabs);
	order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
	/*
	 * Get IO TLB memory from any location.
	 */
	if (early)
		xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
	else {
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
		while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
			xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
			if (xen_io_tlb_start)
				break;
			order--;
		}
		if (order != get_order(bytes)) {
			pr_warn("Warning: only able to allocate %ld MB for software IO TLB\n",
				(PAGE_SIZE << order) >> 20);
			xen_io_tlb_nslabs = SLABS_PER_PAGE << order;
			bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
		}
	}
	if (!xen_io_tlb_start) {
		m_ret = XEN_SWIOTLB_ENOMEM;
		goto error;
	}
	xen_io_tlb_end = xen_io_tlb_start + bytes;
	/*
	 * And replace that memory with pages under 4GB.
	 */
	rc = xen_swiotlb_fixup(xen_io_tlb_start,
			       bytes,
			       xen_io_tlb_nslabs);
	if (rc) {
		if (early)
			free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
		else {
			free_pages((unsigned long)xen_io_tlb_start, order);
			xen_io_tlb_start = NULL;
		}
		m_ret = XEN_SWIOTLB_EFIXUP;
		goto error;
	}
	start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
	if (early) {
		if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
					  verbose))
			panic("Cannot allocate SWIOTLB buffer");
		rc = 0;
	} else
		rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
	return rc;
error:
	if (repeat--) {
		xen_io_tlb_nslabs = max(1024UL, /* Min is 2MB */
					(xen_io_tlb_nslabs >> 1));
		pr_info("Lowering to %luMB\n",
			(xen_io_tlb_nslabs << IO_TLB_SHIFT) >> 20);
		goto retry;
	}
	pr_err("%s (rc:%d)\n", xen_swiotlb_error(m_ret), rc);
	if (early)
		panic("%s (rc:%d)", xen_swiotlb_error(m_ret), rc);
	else
		free_pages((unsigned long)xen_io_tlb_start, order);
	return rc;
}
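
/*
 * Drivers do not call these routines directly; they are wired into the
 * device's dma_map_ops, so a hypothetical driver sketch such as
 *
 *	void *cpu_addr;
 *	dma_addr_t handle;
 *
 *	cpu_addr = dma_alloc_coherent(&pdev->dev, size, &handle, GFP_KERNEL);
 *
 * lands here on Xen. The returned *dma_handle is a machine (bus) address
 * that is safe to program into the device.
 */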
void *
xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   struct dma_attrs *attrs)
{
	void *ret;
	int order = get_order(size);
	u64 dma_mask = DMA_BIT_MASK(32);
	unsigned long vstart;
	phys_addr_t phys;
	dma_addr_t dev_addr;

	/*
	 * Ignore region specifiers - the kernel's idea of
	 * pseudo-phys memory layout has nothing to do with the
	 * machine physical layout.  We can't allocate highmem
	 * because we can't return a pointer to it.
	 */
	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
		return ret;

	vstart = __get_free_pages(flags, order);
	ret = (void *)vstart;

	if (!ret)
		return ret;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = dma_alloc_coherent_mask(hwdev, flags);

	phys = virt_to_phys(ret);
	dev_addr = xen_phys_to_bus(phys);
	if ((dev_addr + size - 1 <= dma_mask) &&
	    !range_straddles_page_boundary(phys, size))
		*dma_handle = dev_addr;
	else {
		if (xen_create_contiguous_region(vstart, order,
						 fls64(dma_mask)) != 0) {
			free_pages(vstart, order);
			return NULL;
		}
		*dma_handle = virt_to_machine(ret).maddr;
	}
	memset(ret, 0, size);
	return ret;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);

void
xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
			  dma_addr_t dev_addr, struct dma_attrs *attrs)
{
	int order = get_order(size);
	phys_addr_t phys;
	u64 dma_mask = DMA_BIT_MASK(32);

	if (dma_release_from_coherent(hwdev, order, vaddr))
		return;

	if (hwdev && hwdev->coherent_dma_mask)
		dma_mask = hwdev->coherent_dma_mask;

	phys = virt_to_phys(vaddr);

	if ((dev_addr + size - 1 > dma_mask) ||
	    range_straddles_page_boundary(phys, size))
		xen_destroy_contiguous_region((unsigned long)vaddr, order);

	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);

/*
 * Map a single buffer of the indicated size for DMA in streaming mode.  The
 * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either xen_swiotlb_unmap_page or xen_swiotlb_dma_sync_single is performed.
 */
dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction dir,
				struct dma_attrs *attrs)
{
	phys_addr_t map, phys = page_to_phys(page) + offset;
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	BUG_ON(dir == DMA_NONE);
	/*
	 * If the address happens to be in the device's DMA window,
	 * we can safely return the device addr and not worry about bounce
	 * buffering it.
	 */
	if (dma_capable(dev, dev_addr, size) &&
	    !range_straddles_page_boundary(phys, size) && !swiotlb_force)
		return dev_addr;

	/*
	 * Oh well, have to allocate and map a bounce buffer.
	 */
	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);

	map = swiotlb_tbl_map_single(dev, start_dma_addr, phys, size, dir);
	if (map == SWIOTLB_MAP_ERROR)
		return DMA_ERROR_CODE;

	dev_addr = xen_phys_to_bus(map);

	/*
	 * Ensure that the address returned is DMA'ble
	 */
	if (!dma_capable(dev, dev_addr, size)) {
		swiotlb_tbl_unmap_single(dev, map, size, dir);
		dev_addr = 0;
	}
	return dev_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_page);

/*
 * Unmap a single streaming mode DMA translation.  The dma_addr and size must
 * match what was provided for in a previous xen_swiotlb_map_page call.  All
 * other usages are undefined.
 *
 * After this call, reads by the cpu to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
			     size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_unmap_single(hwdev, paddr, size, dir);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	/*
	 * phys_to_virt doesn't work with highmem pages, but we could
	 * call dma_mark_clean() with a highmem page here. However, we
	 * are fine since dma_mark_clean() is a no-op on POWERPC. We can
	 * make dma_mark_clean() take a physical address if necessary.
	 */
	dma_mark_clean(phys_to_virt(paddr), size);
}

void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
			    size_t size, enum dma_data_direction dir,
			    struct dma_attrs *attrs)
{
	xen_unmap_single(hwdev, dev_addr, size, dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_page);

/*
 * Make physical memory consistent for a single streaming mode DMA translation
 * after a transfer.
 *
 * If you perform a xen_swiotlb_map_page() but wish to interrogate the buffer
 * using the cpu, yet do not wish to tear down the dma mapping, you must
 * call this function before doing so.  At the next point you give the dma
 * address back to the card, you must first perform a
 * xen_swiotlb_dma_sync_for_device, and then the device again owns the buffer.
 */
static void
xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
			size_t size, enum dma_data_direction dir,
			enum dma_sync_target target)
{
	phys_addr_t paddr = xen_bus_to_phys(dev_addr);

	BUG_ON(dir == DMA_NONE);

	/* NOTE: We use dev_addr here, not paddr! */
	if (is_xen_swiotlb_buffer(dev_addr)) {
		swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
		return;
	}

	if (dir != DMA_FROM_DEVICE)
		return;

	dma_mark_clean(phys_to_virt(paddr), size);
}

void
xen_swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_cpu);

void
xen_swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
				   size_t size, enum dma_data_direction dir)
{
	xen_swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_single_for_device);
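
/*
 * Typical streaming usage (a hypothetical driver sketch; callers go
 * through the generic DMA API rather than these routines directly):
 *
 *	dma_addr_t dma = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
 *	... device writes into the buffer ...
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... CPU reads, then hands the buffer back ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	...
 *	dma_unmap_page(dev, dma, len, DMA_FROM_DEVICE);
 */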

/*
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the above xen_swiotlb_map_page
 * interface.  Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for xen_swiotlb_map_page are the
 * same here.
 */
int
xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			 int nelems, enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i) {
		phys_addr_t paddr = sg_phys(sg);
		dma_addr_t dev_addr = xen_phys_to_bus(paddr);

		if (swiotlb_force ||
		    !dma_capable(hwdev, dev_addr, sg->length) ||
		    range_straddles_page_boundary(paddr, sg->length)) {
			phys_addr_t map = swiotlb_tbl_map_single(hwdev,
								 start_dma_addr,
								 sg_phys(sg),
								 sg->length,
								 dir);
			if (map == SWIOTLB_MAP_ERROR) {
				/* Don't panic here, we expect map_sg users
				   to do proper error handling. */
				xen_swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir,
							   attrs);
				sg_dma_len(sgl) = 0;
				return DMA_ERROR_CODE;
			}
			sg->dma_address = xen_phys_to_bus(map);
		} else
			sg->dma_address = dev_addr;
		sg_dma_len(sg) = sg->length;
	}
	return nelems;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_map_sg_attrs);
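
/*
 * A hypothetical scatter-gather caller (again via the generic DMA API):
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	for_each_sg(sgl, sg, count, i)
 *		program_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *
 * where program_descriptor() stands in for the device-specific setup.
 */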

/*
 * Unmap a set of streaming mode DMA translations.  Again, cpu read rules
 * concerning calls here are the same as for swiotlb_unmap_page() above.
 */
void
xen_swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
			   int nelems, enum dma_data_direction dir,
			   struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(dir == DMA_NONE);

	for_each_sg(sgl, sg, nelems, i)
		xen_unmap_single(hwdev, sg->dma_address, sg_dma_len(sg), dir);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_unmap_sg_attrs);

/*
 * Make physical memory consistent for a set of streaming mode DMA translations
 * after a transfer.
 *
 * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
 * and usage.
 */
static void
xen_swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl,
		    int nelems, enum dma_data_direction dir,
		    enum dma_sync_target target)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		xen_swiotlb_sync_single(hwdev, sg->dma_address,
					sg_dma_len(sg), dir, target);
}

void
xen_swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
			    int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_cpu);

void
xen_swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	xen_swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
}
EXPORT_SYMBOL_GPL(xen_swiotlb_sync_sg_for_device);
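
/*
 * On x86, DMA_ERROR_CODE is 0 and the map routines above hand back 0 on
 * failure, so a zero handle indicates a failed mapping.
 */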
int
xen_swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return !dma_addr;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_mapping_error);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
int
xen_swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	return xen_virt_to_bus(xen_io_tlb_end - 1) <= mask;
}
EXPORT_SYMBOL_GPL(xen_swiotlb_dma_supported);