/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
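/*
 * Illustrative sketch, not part of the original file: platform setup code
 * could record such an offset before any mapping takes place. The helper
 * name and the 0x80000000 offset are hypothetical; the field is stored as
 * a pointer and read back as an unsigned long below.
 *
 *	static void __init plat_set_dma_offset(struct device *dev)
 *	{
 *		dev->archdata.dma_data = (void *)0x80000000UL;
 *	}
 */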
static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (likely(dev))
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET; /* FIXME Not sure if this is correct */
}
#define NOT_COHERENT_CACHE
static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}
static void dma_direct_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle)
{
#ifdef NOT_COHERENT_CACHE
	consistent_free(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}
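/*
 * Hedged driver-side sketch, not part of the original file: the two
 * callbacks above back the generic dma_alloc_coherent()/dma_free_coherent()
 * entry points. The buffer size and error value are made up.
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	(program the device with "handle", touch "buf" from the CPU)
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */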
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of the code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
						sg->length, direction);
	}

	return nents;
}
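/*
 * Hedged sketch, not part of the original file: dma_direct_map_sg() is
 * reached through dma_map_sg(). A caller maps a table, checks for failure
 * and later unmaps with the original nents. "sgt" is a hypothetical,
 * already initialised struct sg_table.
 *
 *	int count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -EIO;
 *	(hand the mapped entries to the device)
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 */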
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* nothing to undo: dma_direct_map_sg() allocates no resources */
}
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	return 1;
}
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	__dma_sync(page_to_phys(page) + offset, size, direction);
	return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}
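/*
 * Hedged sketch, not part of the original file: streaming mappings made
 * with dma_map_single() funnel into dma_direct_map_page() above. "data"
 * and "len" are hypothetical.
 *
 *	dma_addr_t busaddr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, busaddr))
 *		return -EIO;
 *	(start the transfer)
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */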
static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/*
	 * No further cache cleanup is necessary here. Note that
	 * dma_address is already a physical address, so it is passed
	 * to __dma_sync() directly, whereas __dma_sync_page() would
	 * apply __virt_to_phys() itself.
	 */
	__dma_sync(dma_address, size, direction);
}
static inline void
dma_direct_sync_single_for_cpu(struct device *dev,
			       dma_addr_t dma_handle, size_t size,
			       enum dma_data_direction direction)
{
	/*
	 * It's pointless to flush the cache as the memory segment
	 * is given to the CPU
	 */
	if (direction == DMA_FROM_DEVICE)
		__dma_sync(dma_handle, size, direction);
}
static inline void
dma_direct_sync_single_for_device(struct device *dev,
				  dma_addr_t dma_handle, size_t size,
				  enum dma_data_direction direction)
{
	/*
	 * It's pointless to invalidate the cache if the device isn't
	 * supposed to write to the relevant region
	 */
	if (direction == DMA_TO_DEVICE)
		__dma_sync(dma_handle, size, direction);
}
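/*
 * Hedged sketch, not part of the original file: a driver that inspects a
 * DMA_FROM_DEVICE buffer between transfers hands it to the CPU, reads it,
 * then gives it back to the device. "busaddr" and "len" refer to an
 * existing streaming mapping.
 *
 *	dma_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
 *	(CPU reads the freshly received data)
 *	dma_sync_single_for_device(dev, busaddr, len, DMA_FROM_DEVICE);
 */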
static inline void
dma_direct_sync_sg_for_cpu(struct device *dev,
			   struct scatterlist *sgl, int nents,
			   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of the code is untested */
	if (direction == DMA_FROM_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}
static inline void
dma_direct_sync_sg_for_device(struct device *dev,
			      struct scatterlist *sgl, int nents,
			      enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of the code is untested */
	if (direction == DMA_TO_DEVICE)
		for_each_sg(sgl, sg, nents, i)
			__dma_sync(sg->dma_address, sg->length, direction);
}
struct dma_map_ops dma_direct_ops = {
	.alloc_coherent		= dma_direct_alloc_coherent,
	.free_coherent		= dma_direct_free_coherent,
	.map_sg			= dma_direct_map_sg,
	.unmap_sg		= dma_direct_unmap_sg,
	.dma_supported		= dma_direct_dma_supported,
	.map_page		= dma_direct_map_page,
	.unmap_page		= dma_direct_unmap_page,
	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
	.sync_single_for_device	= dma_direct_sync_single_for_device,
	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
};
EXPORT_SYMBOL(dma_direct_ops);
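/*
 * Hedged sketch, not part of the original file: generic DMA calls reach
 * this table through the device's dma_map_ops pointer, roughly as in the
 * hypothetical helper below (debug hooks and error handling omitted).
 *
 *	static dma_addr_t example_map_single(struct device *dev, void *ptr,
 *					     size_t size,
 *					     enum dma_data_direction dir)
 *	{
 *		struct dma_map_ops *ops = get_dma_ops(dev);
 *
 *		return ops->map_page(dev, virt_to_page(ptr),
 *				     offset_in_page(ptr), size, dir, NULL);
 *	}
 */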
/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);