#include <linux/cpu.h>
#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <xen/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/swiotlb-xen.h>

#include <asm/cacheflush.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

enum dma_cache_op {
	DMA_UNMAP,
	DMA_MAP,
};
static bool hypercall_cflush = false;

/* functions called by SWIOTLB */

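/*
 * dma_cache_maint() walks the buffer page by page and issues one
 * GNTTABOP_cache_flush hypercall per page, cleaning or invalidating
 * depending on the DMA direction and on whether we are mapping or
 * unmapping.
 *
 * Illustrative example (assuming 4K pages): an 8K buffer starting at
 * offset 0x800 into its first page is maintained in three hypercalls:
 *   pfn,     offset 0x800, length 0x800
 *   pfn + 1, offset 0x000, length 0x1000
 *   pfn + 2, offset 0x000, length 0x800
 */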
static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
	size_t size, enum dma_data_direction dir, enum dma_cache_op op)
{
	struct gnttab_cache_flush cflush;
	unsigned long pfn;
	size_t left = size;

	pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	do {
		size_t len = left;

		/* buffers in highmem or foreign pages cannot cross page
		 * boundaries */
		if (len + offset > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		cflush.op = 0;
		cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
		cflush.offset = offset;
		cflush.length = len;

		if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
			cflush.op = GNTTAB_CACHE_INVAL;
		if (op == DMA_MAP) {
			if (dir == DMA_FROM_DEVICE)
				cflush.op = GNTTAB_CACHE_INVAL;
			else
				cflush.op = GNTTAB_CACHE_CLEAN;
		}
		if (cflush.op)
			HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);

		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

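/*
 * Helpers that split a DMA handle into its page-aligned base and the
 * offset within the page, then perform unmap-side (dev-to-cpu) or
 * map-side (cpu-to-dev) cache maintenance.
 */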
static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
}

static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
}

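/*
 * Cache maintenance is skipped entirely for coherent devices and when
 * the caller passes DMA_ATTR_SKIP_CPU_SYNC, in which case the caller
 * takes responsibility for syncing the buffer itself.
 */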
void __xen_dma_map_page(struct device *hwdev, struct page *page,
		dma_addr_t dev_addr, unsigned long offset, size_t size,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
}

void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (is_device_dma_coherent(hwdev))
		return;
	if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		return;

	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

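/*
 * The sync_single variants take no attrs argument, so unlike
 * map/unmap they only short-circuit for coherent devices.
 */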
void __xen_dma_sync_single_for_cpu(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
}

void __xen_dma_sync_single_for_device(struct device *hwdev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (is_device_dma_coherent(hwdev))
		return;
	__xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
}

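/*
 * Without the cache flush hypercall, cache maintenance cannot be
 * performed on foreign pages (pfn != mfn), so non-coherent DMA to
 * such pages has to bounce through swiotlb instead.
 */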
bool xen_arch_need_swiotlb(struct device *dev,
			   unsigned long pfn,
			   unsigned long mfn)
{
	return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
}

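/*
 * On ARM, dom0 is assumed to be mapped 1:1 (pseudo-physical ==
 * machine), so memory is already contiguous from the device's point
 * of view and these hooks reduce to handing back the physical
 * address unchanged.
 */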
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
				 unsigned int address_bits,
				 dma_addr_t *dma_handle)
{
	if (!xen_initial_domain())
		return -EINVAL;

	/* we assume that dom0 is mapped 1:1 for now */
	*dma_handle = pstart;
	return 0;
}
EXPORT_SYMBOL_GPL(xen_create_contiguous_region);

void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order)
{
	return;
}
EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);

struct dma_map_ops *xen_dma_ops;
EXPORT_SYMBOL(xen_dma_ops);

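/*
 * DMA operations table used when running on Xen: every operation is
 * routed through the swiotlb-xen implementations, which translate
 * pseudo-physical to machine addresses and bounce when needed.
 */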
static struct dma_map_ops xen_swiotlb_dma_ops = {
	.mapping_error = xen_swiotlb_dma_mapping_error,
	.alloc = xen_swiotlb_alloc_coherent,
	.free = xen_swiotlb_free_coherent,
	.sync_single_for_cpu = xen_swiotlb_sync_single_for_cpu,
	.sync_single_for_device = xen_swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = xen_swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = xen_swiotlb_sync_sg_for_device,
	.map_sg = xen_swiotlb_map_sg_attrs,
	.unmap_sg = xen_swiotlb_unmap_sg_attrs,
	.map_page = xen_swiotlb_map_page,
	.unmap_page = xen_swiotlb_unmap_page,
	.dma_supported = xen_swiotlb_dma_supported,
	.set_dma_mask = xen_swiotlb_set_dma_mask,
};

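/*
 * Initialise swiotlb-xen for the initial domain and probe for the
 * GNTTABOP_cache_flush hypercall with a zero-length flush: the probe
 * assumes -ENOSYS is returned only when the op is not implemented, so
 * any other result marks cache maintenance by hypercall as available.
 */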
int __init xen_mm_init(void)
{
	struct gnttab_cache_flush cflush;
	if (!xen_initial_domain())
		return 0;
	xen_swiotlb_init(1, false);
	xen_dma_ops = &xen_swiotlb_dma_ops;

	cflush.op = 0;
	cflush.a.dev_bus_addr = 0;
	cflush.offset = 0;
	cflush.length = 0;
	if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
		hypercall_cflush = true;
	return 0;
}
arch_initcall(xen_mm_init);