#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
#define _ASM_ARM_XEN_PAGE_COHERENT_H

#include <asm/page.h>
#include <linux/dma-attrs.h>
#include <linux/dma-mapping.h>

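/*
 * Out-of-line helpers for pages that are foreign to this domain
 * (grant-mapped in dom0): for these the device address does not
 * correspond to a local pfn, so cache maintenance cannot be done
 * through the native dma_ops. Implemented in the ARM Xen support
 * code under arch/arm/xen/.
 */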
void __xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs);
void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs);
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir);

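/*
 * Coherent allocations always come from local (dom0) memory, so they
 * can simply be delegated to the native dma_ops.
 */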
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags,
		struct dma_attrs *attrs)
{
	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}

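/* Free a buffer allocated by xen_alloc_coherent_pages(). */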
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle,
		struct dma_attrs *attrs)
{
	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}

static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
	     dma_addr_t dev_addr, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
	/*
	 * Dom0 is mapped 1:1, and while a Linux page can span across
	 * multiple Xen pages, it cannot contain a mix of local and
	 * foreign Xen pages. So if the first xen_pfn == mfn the page is
	 * local, otherwise it is a foreign page grant-mapped in dom0. If
	 * the page is local we can safely call the native dma_ops
	 * function, otherwise we call the Xen-specific function.
	 */
	if (local)
		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
	else
		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}

static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	unsigned long pfn = PFN_DOWN(handle);
	/*
	 * Dom0 is mapped 1:1, and while a Linux page can span across
	 * multiple Xen pages, it cannot contain a mix of local and
	 * foreign Xen pages. Because of the 1:1 mapping, calling
	 * pfn_valid on a foreign mfn will always return false. If the
	 * page is local we can safely call the native dma_ops function,
	 * otherwise we call the Xen-specific function.
	 */
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->unmap_page)
			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
	} else
		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}

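/*
 * As in xen_dma_unmap_page(), pfn_valid() distinguishes local pages
 * (handled by the native dma_ops, where the hook is implemented) from
 * foreign grant-mapped pages (handled by the Xen-specific helper).
 */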
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}

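/* Same local/foreign dispatch as xen_dma_sync_single_for_cpu() above. */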
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned long pfn = PFN_DOWN(handle);
	if (pfn_valid(pfn)) {
		if (__generic_dma_ops(hwdev)->sync_single_for_device)
			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
	} else
		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}

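/*
 * Usage sketch (illustrative only): the Xen swiotlb layer is the
 * expected caller of these wrappers when setting up a streaming DMA
 * mapping, roughly along these lines, where dev_addr is the bus
 * address and map the machine address produced by that layer:
 *
 *	xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
 *			 dev_addr, map & ~PAGE_MASK, size, dir, attrs);
 *	...
 *	xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
 */
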
#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */