/*
 * Copyright (C) 2009-2010 PetaLogix
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/dma-debug.h>
#include <linux/export.h>
#include <asm/bug.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */
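
/*
 * Illustrative sketch (not part of the original file): platform setup
 * code could install a per-device bus offset by pointing
 * archdata.dma_data at it before the device performs any DMA. The
 * helper name and the 0x10000000 offset below are hypothetical.
 *
 *	static void example_set_dma_offset(struct device *dev)
 *	{
 *		dev->archdata.dma_data = (void *)0x10000000UL;
 *	}
 *
 * get_dma_direct_offset() below then adds this value to every bus
 * address handed back to drivers.
 */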

static unsigned long get_dma_direct_offset(struct device *dev)
{
	if (likely(dev))
		return (unsigned long)dev->archdata.dma_data;

	return PCI_DRAM_OFFSET; /* FIXME: not sure whether this is correct */
}

/* The cache is not coherent with DMA here, so use the consistent_* paths */
#define NOT_COHERENT_CACHE

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef NOT_COHERENT_CACHE
	return consistent_alloc(flag, size, dma_handle);
#else
	void *ret;
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_phys(ret) + get_dma_direct_offset(dev);

	return ret;
#endif
}
58 | ||
1be53e08 | 59 | static void dma_direct_free_coherent(struct device *dev, size_t size, |
ccfe27d7 MS |
60 | void *vaddr, dma_addr_t dma_handle) |
61 | { | |
1be53e08 | 62 | #ifdef NOT_COHERENT_CACHE |
f1525765 | 63 | consistent_free(size, vaddr); |
1be53e08 | 64 | #else |
ccfe27d7 | 65 | free_pages((unsigned long)vaddr, get_order(size)); |
1be53e08 | 66 | #endif |
ccfe27d7 MS |
67 | } |
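
/*
 * Illustrative sketch (not part of the original file): drivers reach the
 * two callbacks above through the generic DMA API. The buffer size and
 * variable names below are hypothetical.
 *
 *	dma_addr_t bus_addr;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &bus_addr, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	(program bus_addr into the device; use cpu_addr from the CPU)
 *	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, bus_addr);
 */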
68 | ||
69 | static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, | |
70 | int nents, enum dma_data_direction direction, | |
71 | struct dma_attrs *attrs) | |
72 | { | |
73 | struct scatterlist *sg; | |
74 | int i; | |
75 | ||
d79f3b06 | 76 | /* FIXME this part of code is untested */ |
ccfe27d7 MS |
77 | for_each_sg(sgl, sg, nents, i) { |
78 | sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev); | |
cf560c18 | 79 | __dma_sync(page_to_phys(sg_page(sg)) + sg->offset, |
d79f3b06 | 80 | sg->length, direction); |
ccfe27d7 MS |
81 | } |
82 | ||
83 | return nents; | |
84 | } | |
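
/*
 * Illustrative sketch (not part of the original file): how a driver
 * would exercise the scatterlist path above via dma_map_sg(). The
 * buffers, lengths and entry count are hypothetical.
 *
 *	struct scatterlist sgl[2];
 *	int mapped;
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_buf(&sgl[0], buf0, len0);
 *	sg_set_buf(&sgl[1], buf1, len1);
 *	mapped = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
 *	if (!mapped)
 *		return -EIO;
 *	(hand sg_dma_address()/sg_dma_len() of each mapped entry to the
 *	 device, then:)
 *	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
 */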
85 | ||
86 | static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg, | |
87 | int nents, enum dma_data_direction direction, | |
88 | struct dma_attrs *attrs) | |
89 | { | |
90 | } | |
91 | ||
92 | static int dma_direct_dma_supported(struct device *dev, u64 mask) | |
93 | { | |
94 | return 1; | |
95 | } | |
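
/*
 * Illustrative sketch (not part of the original file): .dma_supported
 * above is consulted when a driver negotiates its DMA mask, so with
 * these ops a call such as the one below always succeeds.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		dev_warn(dev, "no suitable DMA available\n");
 */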
96 | ||
97 | static inline dma_addr_t dma_direct_map_page(struct device *dev, | |
98 | struct page *page, | |
99 | unsigned long offset, | |
100 | size_t size, | |
2549edd3 | 101 | enum dma_data_direction direction, |
ccfe27d7 MS |
102 | struct dma_attrs *attrs) |
103 | { | |
cf560c18 | 104 | __dma_sync(page_to_phys(page) + offset, size, direction); |
ccfe27d7 MS |
105 | return page_to_phys(page) + offset + get_dma_direct_offset(dev); |
106 | } | |
107 | ||
108 | static inline void dma_direct_unmap_page(struct device *dev, | |
109 | dma_addr_t dma_address, | |
110 | size_t size, | |
111 | enum dma_data_direction direction, | |
112 | struct dma_attrs *attrs) | |
113 | { | |
d79f3b06 MS |
114 | /* There is not necessary to do cache cleanup |
115 | * | |
116 | * phys_to_virt is here because in __dma_sync_page is __virt_to_phys and | |
117 | * dma_address is physical address | |
118 | */ | |
cf560c18 | 119 | __dma_sync(dma_address, size, direction); |
ccfe27d7 MS |
120 | } |
121 | ||
0fb2a6f2 EB |
122 | static inline void |
123 | dma_direct_sync_single_for_cpu(struct device *dev, | |
124 | dma_addr_t dma_handle, size_t size, | |
125 | enum dma_data_direction direction) | |
126 | { | |
127 | /* | |
128 | * It's pointless to flush the cache as the memory segment | |
129 | * is given to the CPU | |
130 | */ | |
131 | ||
132 | if (direction == DMA_FROM_DEVICE) | |
133 | __dma_sync(dma_handle, size, direction); | |
134 | } | |
135 | ||
136 | static inline void | |
137 | dma_direct_sync_single_for_device(struct device *dev, | |
138 | dma_addr_t dma_handle, size_t size, | |
139 | enum dma_data_direction direction) | |
140 | { | |
141 | /* | |
142 | * It's pointless to invalidate the cache if the device isn't | |
143 | * supposed to write to the relevant region | |
144 | */ | |
145 | ||
146 | if (direction == DMA_TO_DEVICE) | |
147 | __dma_sync(dma_handle, size, direction); | |
148 | } | |
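
/*
 * Illustrative sketch (not part of the original file): the two sync
 * callbacks above back the usual streaming-DMA handshake for a buffer
 * that stays mapped while ownership alternates between device and CPU.
 * buf and len are hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	(device writes into the buffer)
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	(CPU may now read buf safely)
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	(device may write again)
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 */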
149 | ||
150 | static inline void | |
151 | dma_direct_sync_sg_for_cpu(struct device *dev, | |
152 | struct scatterlist *sgl, int nents, | |
153 | enum dma_data_direction direction) | |
154 | { | |
155 | struct scatterlist *sg; | |
156 | int i; | |
157 | ||
158 | /* FIXME this part of code is untested */ | |
159 | if (direction == DMA_FROM_DEVICE) | |
160 | for_each_sg(sgl, sg, nents, i) | |
161 | __dma_sync(sg->dma_address, sg->length, direction); | |
162 | } | |
163 | ||
164 | static inline void | |
165 | dma_direct_sync_sg_for_device(struct device *dev, | |
166 | struct scatterlist *sgl, int nents, | |
167 | enum dma_data_direction direction) | |
168 | { | |
169 | struct scatterlist *sg; | |
170 | int i; | |
171 | ||
172 | /* FIXME this part of code is untested */ | |
173 | if (direction == DMA_TO_DEVICE) | |
174 | for_each_sg(sgl, sg, nents, i) | |
175 | __dma_sync(sg->dma_address, sg->length, direction); | |
176 | } | |
177 | ||
ccfe27d7 MS |
178 | struct dma_map_ops dma_direct_ops = { |
179 | .alloc_coherent = dma_direct_alloc_coherent, | |
180 | .free_coherent = dma_direct_free_coherent, | |
181 | .map_sg = dma_direct_map_sg, | |
182 | .unmap_sg = dma_direct_unmap_sg, | |
183 | .dma_supported = dma_direct_dma_supported, | |
184 | .map_page = dma_direct_map_page, | |
185 | .unmap_page = dma_direct_unmap_page, | |
0fb2a6f2 EB |
186 | .sync_single_for_cpu = dma_direct_sync_single_for_cpu, |
187 | .sync_single_for_device = dma_direct_sync_single_for_device, | |
188 | .sync_sg_for_cpu = dma_direct_sync_sg_for_cpu, | |
189 | .sync_sg_for_device = dma_direct_sync_sg_for_device, | |
ccfe27d7 MS |
190 | }; |
191 | EXPORT_SYMBOL(dma_direct_ops); | |
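
/*
 * Illustrative sketch (not part of the original file): platform code
 * would attach the exported ops to each device before use. Whether this
 * goes through a set_dma_ops() helper, as on powerpc, or by assigning
 * dev->archdata directly is an assumption here; the hook name is
 * hypothetical.
 *
 *	static void example_bus_setup(struct device *dev)
 *	{
 *		set_dma_ops(dev, &dma_direct_ops);
 *	}
 */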
192 | ||
193 | /* Number of entries preallocated for DMA-API debugging */ | |
194 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | |
195 | ||
196 | static int __init dma_init(void) | |
197 | { | |
198 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | |
199 | ||
200 | return 0; | |
201 | } | |
202 | fs_initcall(dma_init); |