/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

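/*
 * Added note (not in the original file): dma_alloc_coherent() below hands
 * back an uncached alias of the buffer.  On 32-bit MIPS, UNCAC_ADDR()
 * translates a cached KSEG0 address to its uncached KSEG1 counterpart, so
 * after the writeback-invalidate the CPU and the device observe the same
 * memory contents with no further cache maintenance.
 */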
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (ret) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	addr = CAC_ADDR(addr);
	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

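/*
 * Added summary (not in the original file) of the cache maintenance that
 * __dma_sync() below performs for each DMA direction:
 *
 *   DMA_TO_DEVICE:     writeback            - flush dirty CPU cache lines
 *                                             to memory before the device
 *                                             reads the buffer.
 *   DMA_FROM_DEVICE:   invalidate           - discard stale cache lines so
 *                                             the CPU rereads what the
 *                                             device wrote.
 *   DMA_BIDIRECTIONAL: writeback-invalidate - both of the above.
 */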
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	__dma_sync(addr, size, direction);

	return virt_to_phys(ptr);
}

EXPORT_SYMBOL(dma_map_single);

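/*
 * Illustrative use of the streaming pair above/below (a sketch, not part of
 * the original file; "mydev", "buf" and "len" are hypothetical):
 *
 *	dma_addr_t handle = dma_map_single(mydev, buf, len, DMA_TO_DEVICE);
 *	... program the device with "handle" and run the transfer ...
 *	dma_unmap_single(mydev, handle, len, DMA_TO_DEVICE);
 */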
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr;
	addr = dma_addr + PAGE_OFFSET;

	/* __dma_sync(addr, size, direction); */
}

EXPORT_SYMBOL(dma_unmap_single);

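/*
 * Added note (not in the original file): the "if (addr)" test in
 * dma_map_sg() below skips cache maintenance for highmem pages, for which
 * page_address() returns NULL because the page has no permanent kernel
 * mapping.
 */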
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) page_address(sg->page);
		if (addr) {
			__dma_sync(addr + sg->offset, sg->length, direction);
			sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
					  + sg->offset;
		}
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);

	return page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = dma_address + PAGE_OFFSET;
		dma_cache_wback_inv(addr, size);
	}
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	for (i = 0; i < nhwentries; i++, sg++) {
		addr = (unsigned long) page_address(sg->page);
		if (addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

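/*
 * Added note (not in the original file): the dma_sync_*_for_cpu() helpers
 * below hand a mapped buffer back to the CPU, while dma_sync_*_for_device()
 * hands it back to the device.  On this non-coherent implementation both
 * simply apply the cache maintenance matching the DMA direction; adding
 * PAGE_OFFSET turns the physical dma_handle back into a cached kernel
 * virtual address.
 */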
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
			   sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
			   sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

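/*
 * Added note (not in the original file): the 0x00ffffff cutoff in
 * dma_supported() above is presumably the classic 24-bit / 16 MB ISA DMA
 * limit that the GFP_DMA zone is expected to satisfy.
 */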
int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		return;

	dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);

/* The DAC routines are a PCIism.. */

#ifdef CONFIG_PCI

#include <linux/pci.h>

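/*
 * Added note (not in the original file): DAC (Dual Address Cycle) lets a
 * 32-bit PCI bus carry 64-bit DMA addresses in two address phases, which is
 * why the routines below traffic in dma64_addr_t handles.
 */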
dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
	struct page *page, unsigned long offset, int direction)
{
	return (dma64_addr_t)page_to_phys(page) + offset;
}

EXPORT_SYMBOL(pci_dac_page_to_dma);

struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return mem_map + (dma_addr >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);

unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);

void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);

void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);

#endif /* CONFIG_PCI */