/*
 *  Based on linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/cp15.h>

#include "dma.h"

/*
 * dma_noop_ops is used if
 *  - MMU/MPU is off
 *  - cpu is v7m w/o cache support
 *  - device is coherent
 * otherwise arm_nommu_dma_ops is used.
 *
 * arm_nommu_dma_ops relies on consistent DMA memory (please refer to
 * [1] on how to declare such memory).
 *
 * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */
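
/*
 * A minimal sketch of such a declaration, assuming the generic
 * "shared-dma-pool" reserved-memory binding from [1] (the node name,
 * address and size below are hypothetical, not from any real board):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		coherent_dma: linux,dma@30000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x30000000 0x100000>;
 *			no-map;
 *			linux,dma-default;
 *		};
 *	};
 *
 * "linux,dma-default" marks the pool that backs
 * dma_alloc_from_global_coherent() below.
 */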

static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs)
{
	const struct dma_map_ops *ops = &dma_noop_ops;
	void *ret;

	/*
	 * Try the generic allocator first if the caller has advertised
	 * that consistency is not required.
	 */

	if (attrs & DMA_ATTR_NON_CONSISTENT)
		return ops->alloc(dev, size, dma_handle, gfp, attrs);

	ret = dma_alloc_from_global_coherent(size, dma_handle);

	/*
	 * dma_alloc_from_global_coherent() may fail because:
	 *
	 * - no consistent DMA region has been defined, so we can't
	 *   continue.
	 * - there is no space left in the consistent DMA region, and we
	 *   can only fall back to the generic allocator if the caller
	 *   has advertised that consistency is not required.
	 */

	WARN_ON_ONCE(ret == NULL);
	return ret;
}
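
/*
 * For example (hypothetical driver code, not part of this file), a
 * buffer that tolerates explicit cache maintenance may be requested
 * with
 *
 *	buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
 *			      DMA_ATTR_NON_CONSISTENT);
 *
 * and falls through to the generic allocator above, while a plain
 * dma_alloc_coherent() call must be satisfied from the global
 * coherent pool.
 */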

static void arm_nommu_dma_free(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr,
			       unsigned long attrs)
{
	const struct dma_map_ops *ops = &dma_noop_ops;

	if (attrs & DMA_ATTR_NON_CONSISTENT) {
		ops->free(dev, size, cpu_addr, dma_addr, attrs);
	} else {
		int ret = dma_release_from_global_coherent(get_order(size),
							   cpu_addr);

		WARN_ON_ONCE(ret == 0);
	}
}

static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	int ret;

	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
		return ret;

	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
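
/*
 * Streaming DMA on a non-coherent CPU needs explicit cache maintenance
 * around each transfer: before the device reads memory (DMA_TO_DEVICE)
 * dirty lines are cleaned out to RAM, and before the CPU reads data the
 * device wrote (DMA_FROM_DEVICE) stale lines are invalidated. The two
 * helpers below implement that protocol for the inner (dmac_*) and
 * outer (outer_*) cache levels.
 */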

static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	dmac_map_area(__va(paddr), size, dir);

	if (dir == DMA_FROM_DEVICE)
		outer_inv_range(paddr, paddr + size);
	else
		outer_clean_range(paddr, paddr + size);
}

static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
				  enum dma_data_direction dir)
{
	if (dir != DMA_TO_DEVICE) {
		outer_inv_range(paddr, paddr + size);
		dmac_unmap_area(__va(paddr), size, dir);
	}
}

static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir,
					 unsigned long attrs)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	__dma_page_cpu_to_dev(handle, size, dir);

	return handle;
}

static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
				     size_t size, enum dma_data_direction dir,
				     unsigned long attrs)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}
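
/*
 * In driver terms (a hypothetical example, not part of this file), the
 * map/unmap pair above backs the usual streaming sequence:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	...device DMAs into buf...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *
 * Since there is no MMU, the returned handle is simply the buffer's
 * physical (== virtual) address.
 */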

static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
				int nents, enum dma_data_direction dir,
				unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg_dma_address(sg) = sg_phys(sg);
		sg_dma_len(sg) = sg->length;
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
	}

	return nents;
}

static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}
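
/*
 * A scatterlist is mapped 1:1 (again a hypothetical driver-side
 * sketch, not part of this file):
 *
 *	int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	...program the device with sg_dma_address()/sg_dma_len()...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * With no IOMMU in the picture, map_sg cannot merge entries, so count
 * always equals nents here.
 */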

static void arm_nommu_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(handle, size, dir);
}

static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}
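
/*
 * The sync hooks let a driver hand a still-mapped buffer back and
 * forth (hypothetical example, not part of this file):
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	...CPU inspects the data...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */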

const struct dma_map_ops arm_nommu_dma_ops = {
	.alloc			= arm_nommu_dma_alloc,
	.free			= arm_nommu_dma_free,
	.mmap			= arm_nommu_dma_mmap,
	.map_page		= arm_nommu_dma_map_page,
	.unmap_page		= arm_nommu_dma_unmap_page,
	.map_sg			= arm_nommu_dma_map_sg,
	.unmap_sg		= arm_nommu_dma_unmap_sg,
	.sync_single_for_device	= arm_nommu_dma_sync_single_for_device,
	.sync_single_for_cpu	= arm_nommu_dma_sync_single_for_cpu,
	.sync_sg_for_device	= arm_nommu_dma_sync_sg_for_device,
	.sync_sg_for_cpu	= arm_nommu_dma_sync_sg_for_cpu,
};
EXPORT_SYMBOL(arm_nommu_dma_ops);

static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
{
	return coherent ? &dma_noop_ops : &arm_nommu_dma_ops;
}

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	const struct dma_map_ops *dma_ops;

	if (IS_ENABLED(CONFIG_CPU_V7M)) {
		/*
		 * Cache support for v7m is optional, so the CPU can be
		 * treated as coherent if no cache has been detected.
		 * Note that it is not enough to check whether the MPU
		 * is in use, since in its absence the system memory map
		 * is used.
		 */
		dev->archdata.dma_coherent = (cacheid) ? coherent : true;
	} else {
		/*
		 * Assume coherent DMA in case the MMU/MPU has not been
		 * set up.
		 */
		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
	}

	dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);

	set_dma_ops(dev, dma_ops);
}
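
/*
 * The coherent flag above ultimately comes from the firmware: with
 * device tree, of_dma_configure() sets it when the device node carries
 * the standard "dma-coherent" property, e.g. (hypothetical node):
 *
 *	dma@40000000 {
 *		...
 *		dma-coherent;
 *	};
 */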

void arch_teardown_dma_ops(struct device *dev)
{
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
core_initcall(dma_debug_do_init);