/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>
int coherentio = 0;	/* User defined DMA coherency from command line. */
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */
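
/*
 * "coherentio" and "nocoherentio" on the kernel command line force the
 * DMA API into hardware- or software-maintained cache coherency mode,
 * overriding the platform default.
 */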
static int __init setcoherentio(char *str)
{
	coherentio = 1;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);
static int __init setnocoherentio(char *str)
{
	coherentio = 0;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}
/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */
static inline int cpu_needs_post_dma_flush(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
		current_cpu_type() == CPU_R12000 ||
		current_cpu_type() == CPU_BMIPS5000);
}
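
/*
 * Map the device's coherent DMA mask onto the matching GFP zone flag,
 * depending on which DMA zones this kernel was configured with.
 */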
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}
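
/*
 * Allocate cached memory for DMA.  The caller owns all cache
 * maintenance; a typical (hypothetical) driver sequence would be:
 *
 *	buf = dma_alloc_noncoherent(dev, size, &handle, GFP_KERNEL);
 *	... fill buf ...
 *	dma_cache_sync(dev, buf, size, DMA_TO_DEVICE);
 */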
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);
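
/*
 * Allocate memory for coherent DMA.  On platforms without hardware
 * coherency the buffer is flushed from the cache and an uncached
 * alias of it is returned instead.
 */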
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			if (!hw_coherentio)
				ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}
void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);
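
/*
 * Free coherent memory.  The uncached alias handed out by the
 * allocator must be converted back to its cached address before the
 * pages can be returned to the page allocator.
 */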
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev) && !hw_coherentio)
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}
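
/*
 * Cache maintenance for one DMA direction: writeback before the device
 * reads memory, invalidate before the CPU reads what the device wrote,
 * and both for bidirectional transfers.
 */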
static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}
/*
 * A single sg entry may refer to multiple physically contiguous
 * pages.  But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else
			__dma_sync_virtual(page_address(page) + offset,
					   len, direction);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}
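
/*
 * On non-coherent platforms every scatterlist entry must be written
 * back to memory before the device may safely read or overwrite it.
 */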
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}
static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}
static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}
static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}
static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_needs_post_dma_flush(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
	}
}
static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
	}
}
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}
int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);
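
/*
 * Default dma_map_ops for MIPS.  Platform code with its own DMA
 * implementation may repoint the exported mips_dma_map_ops pointer at
 * a different set of operations.
 */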
static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);
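
/* Preallocate tracking entries for the CONFIG_DMA_API_DEBUG facility. */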
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);