1 #ifndef _ASM_METAG_DMA_MAPPING_H
2 #define _ASM_METAG_DMA_MAPPING_H
8 #include <linux/scatterlist.h>
/*
 * Coherent memory on this platform is simply uncacheable (see the
 * dma_cache_sync() comment below), so the "noncoherent" allocation
 * variants can alias the coherent ones directly.
 */
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
/*
 * Allocate/free a coherent (uncacheable) DMA buffer; implemented elsewhere
 * in the arch DMA support code.  *dma_handle receives the device-side
 * address of the allocation.
 */
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

/*
 * Cache-maintenance primitives used by the inline map/unmap/sync helpers
 * below.  Note these take the direction as a plain int rather than
 * enum dma_data_direction.
 */
void dma_sync_for_device(void *vaddr, size_t size, int dma_direction);
void dma_sync_for_cpu(void *vaddr, size_t size, int dma_direction);

/*
 * Map a coherent (or write-combined) DMA allocation into a user vma.
 */
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size);
29 static inline dma_addr_t
30 dma_map_single(struct device
*dev
, void *ptr
, size_t size
,
31 enum dma_data_direction direction
)
33 BUG_ON(!valid_dma_direction(direction
));
35 dma_sync_for_device(ptr
, size
, direction
);
36 return virt_to_phys(ptr
);
40 dma_unmap_single(struct device
*dev
, dma_addr_t dma_addr
, size_t size
,
41 enum dma_data_direction direction
)
43 BUG_ON(!valid_dma_direction(direction
));
44 dma_sync_for_cpu(phys_to_virt(dma_addr
), size
, direction
);
48 dma_map_sg(struct device
*dev
, struct scatterlist
*sglist
, int nents
,
49 enum dma_data_direction direction
)
51 struct scatterlist
*sg
;
54 BUG_ON(!valid_dma_direction(direction
));
55 WARN_ON(nents
== 0 || sglist
[0].length
== 0);
57 for_each_sg(sglist
, sg
, nents
, i
) {
60 sg
->dma_address
= sg_phys(sg
);
61 dma_sync_for_device(sg_virt(sg
), sg
->length
, direction
);
67 static inline dma_addr_t
68 dma_map_page(struct device
*dev
, struct page
*page
, unsigned long offset
,
69 size_t size
, enum dma_data_direction direction
)
71 BUG_ON(!valid_dma_direction(direction
));
72 dma_sync_for_device((void *)(page_to_phys(page
) + offset
), size
,
74 return page_to_phys(page
) + offset
;
78 dma_unmap_page(struct device
*dev
, dma_addr_t dma_address
, size_t size
,
79 enum dma_data_direction direction
)
81 BUG_ON(!valid_dma_direction(direction
));
82 dma_sync_for_cpu(phys_to_virt(dma_address
), size
, direction
);
87 dma_unmap_sg(struct device
*dev
, struct scatterlist
*sglist
, int nhwentries
,
88 enum dma_data_direction direction
)
90 struct scatterlist
*sg
;
93 BUG_ON(!valid_dma_direction(direction
));
94 WARN_ON(nhwentries
== 0 || sglist
[0].length
== 0);
96 for_each_sg(sglist
, sg
, nhwentries
, i
) {
99 sg
->dma_address
= sg_phys(sg
);
100 dma_sync_for_cpu(sg_virt(sg
), sg
->length
, direction
);
105 dma_sync_single_for_cpu(struct device
*dev
, dma_addr_t dma_handle
, size_t size
,
106 enum dma_data_direction direction
)
108 dma_sync_for_cpu(phys_to_virt(dma_handle
), size
, direction
);
112 dma_sync_single_for_device(struct device
*dev
, dma_addr_t dma_handle
,
113 size_t size
, enum dma_data_direction direction
)
115 dma_sync_for_device(phys_to_virt(dma_handle
), size
, direction
);
119 dma_sync_single_range_for_cpu(struct device
*dev
, dma_addr_t dma_handle
,
120 unsigned long offset
, size_t size
,
121 enum dma_data_direction direction
)
123 dma_sync_for_cpu(phys_to_virt(dma_handle
)+offset
, size
,
128 dma_sync_single_range_for_device(struct device
*dev
, dma_addr_t dma_handle
,
129 unsigned long offset
, size_t size
,
130 enum dma_data_direction direction
)
132 dma_sync_for_device(phys_to_virt(dma_handle
)+offset
, size
,
137 dma_sync_sg_for_cpu(struct device
*dev
, struct scatterlist
*sglist
, int nelems
,
138 enum dma_data_direction direction
)
141 struct scatterlist
*sg
;
143 for_each_sg(sglist
, sg
, nelems
, i
)
144 dma_sync_for_cpu(sg_virt(sg
), sg
->length
, direction
);
148 dma_sync_sg_for_device(struct device
*dev
, struct scatterlist
*sglist
,
149 int nelems
, enum dma_data_direction direction
)
152 struct scatterlist
*sg
;
154 for_each_sg(sglist
, sg
, nelems
, i
)
155 dma_sync_for_device(sg_virt(sg
), sg
->length
, direction
);
159 dma_mapping_error(struct device
*dev
, dma_addr_t dma_addr
)
/* Every device is assumed to be able to address all of memory. */
#define dma_supported(dev, mask)	(1)
167 dma_set_mask(struct device
*dev
, u64 mask
)
169 if (!dev
->dma_mask
|| !dma_supported(dev
, mask
))
172 *dev
->dma_mask
= mask
;
178 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no need to
179 * do any flushing here.
182 dma_cache_sync(struct device
*dev
, void *vaddr
, size_t size
,
183 enum dma_data_direction direction
)
187 /* drivers/base/dma-mapping.c */
188 extern int dma_common_get_sgtable(struct device
*dev
, struct sg_table
*sgt
,
189 void *cpu_addr
, dma_addr_t dma_addr
,
192 #define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)