/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address which is 0x8000_0000 based.
 */

#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
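
/*
 * Illustrative sketch only (not part of this file's logic): a driver
 * normally reaches arc_dma_alloc()/arc_dma_free() below through the
 * generic dma_alloc_coherent()/dma_free_coherent() wrappers. The pdev
 * pointer and the PAGE_SIZE buffer here are hypothetical.
 *
 *	dma_addr_t dma_handle;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &dma_handle,
 *				 GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, dma_handle);
 */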

static void *arc_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	int need_coh = 1, need_kvaddr = 0;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache
	 * Thus allocate normal cached memory
	 *
	 * The gains with IOC are two pronged:
	 *   -For streaming data, elides need for cache maintenance, saving
	 *    cycles in flush code, and bus bandwidth as all the lines of a
	 *    buffer need to be flushed out to memory
	 *   -For coherent data, Read/Write to buffers terminate early in cache
	 *    (vs. always going to memory - thus are faster)
	 */
	if ((is_isa_arcv2() && ioc_enable) ||
	    (attrs & DMA_ATTR_NON_CONSISTENT))
		need_coh = 0;

	/*
	 * - A coherent buffer needs MMU mapping to enforce non-cachability
	 * - A highmem page needs a virtual handle (hence MMU mapping)
	 *   independent of cachability
	 */
	if (PageHighMem(page) || need_coh)
		need_kvaddr = 1;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = plat_phys_to_dma(dev, paddr);

	/* This is kernel Virtual address (0x7000_0000 based) */
	if (need_kvaddr) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr
	 * Currently flush_cache_vmap nukes the L1 cache completely which
	 * will be optimized as a separate commit
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}

static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	phys_addr_t paddr = plat_dma_to_phys(dev, dma_handle);
	struct page *page = virt_to_page(paddr);
	int is_non_coh;

	is_non_coh = (attrs & DMA_ATTR_NON_CONSISTENT) ||
			(is_isa_arcv2() && ioc_enable);

	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	unsigned long user_count = vma_pages(vma);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(plat_dma_to_phys(dev, dma_addr));
	unsigned long off = vma->vm_pgoff;
	int ret = -ENXIO;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}

/*
 * streaming DMA Mapping API...
 * CPU accesses page via normal paddr, thus needs to be explicitly made
 * consistent before each use
 */
static void _dma_cache_sync(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
	}
}

static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;

	_dma_cache_sync(paddr, size, dir);

	return plat_phys_to_dma(dev, paddr);
}

static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}

static void arc_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
}

static void arc_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
}
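
/*
 * Illustrative sketch only: for streaming DMA a driver maps a normal cached
 * kernel buffer and relies on the sync hooks above for cache maintenance
 * around device accesses. The dev, buf and len names below are hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *
 *	... device writes into the buffer ...
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 */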

static void arc_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}

struct dma_map_ops arc_dma_ops = {
	.alloc			= arc_dma_alloc,
	.free			= arc_dma_free,
	.mmap			= arc_dma_mmap,
	.map_page		= arc_dma_map_page,
	.map_sg			= arc_dma_map_sg,
	.sync_single_for_device	= arc_dma_sync_single_for_device,
	.sync_single_for_cpu	= arc_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= arc_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arc_dma_sync_sg_for_device,
	.dma_supported		= arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);
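
/*
 * Illustrative note: the generic DMA API dispatches to this ops table via
 * get_arch_dma_ops(), which on ARC is assumed (in this kernel vintage) to
 * live in arch/arc/include/asm/dma-mapping.h and simply return &arc_dma_ops
 * for every bus, along the lines of:
 *
 *	static inline struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 *	{
 *		return &arc_dma_ops;
 *	}
 */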