/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>
#include <asm/swiotlb.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

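/*
 * A minimal usage sketch (the "pdev" device is hypothetical, not from this
 * file): platform code typically installs these ops and records the bus
 * offset, e.g.
 *
 *        set_dma_ops(&pdev->dev, &dma_direct_ops);
 *        set_dma_offset(&pdev->dev, PCI_DRAM_OFFSET);
 *
 * after which dma_map_page() on that device resolves to
 * dma_direct_map_page() below and returns physical address + offset.
 */

/*
 * Upper bound (exclusive) on the page frame numbers this device can address
 * coherently. When SWIOTLB handles the device, the limit is further clamped
 * to its direct-mapping window.
 */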
static u64 __maybe_unused get_pfn_limit(struct device *dev)
{
        u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
        struct dev_archdata __maybe_unused *sd = &dev->archdata;

#ifdef CONFIG_SWIOTLB
        if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops)
                pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif

        return pfn;
}

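/*
 * Allocate coherent memory. On cache-coherent platforms this is a plain
 * page allocation on the device's NUMA node; on non-coherent ones it defers
 * to __dma_alloc_coherent(), which provides an uncached mapping. Either
 * way the returned handle is the physical address plus the per-device
 * offset.
 */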
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t flag,
                                struct dma_attrs *attrs)
{
        void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
        ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
        if (ret == NULL)
                return NULL;
        *dma_handle += get_dma_offset(dev);
        return ret;
#else
        struct page *page;
        int node = dev_to_node(dev);
#ifdef CONFIG_FSL_SOC
        u64 pfn = get_pfn_limit(dev);
        int zone;

        /*
         * This code should be OK on other platforms, but we have drivers that
         * don't set coherent_dma_mask. As a workaround we just ifdef it. This
         * whole routine needs some serious cleanup.
         */

        zone = dma_pfn_limit_to_zone(pfn);
        if (zone < 0) {
                dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
                        __func__, pfn);
                return NULL;
        }

        switch (zone) {
        case ZONE_DMA:
                flag |= GFP_DMA;
                break;
#ifdef CONFIG_ZONE_DMA32
        case ZONE_DMA32:
                flag |= GFP_DMA32;
                break;
#endif
        }
#endif /* CONFIG_FSL_SOC */

        /* ignore region specifiers */
        flag &= ~(__GFP_HIGHMEM);

        page = alloc_pages_node(node, flag, get_order(size));
        if (page == NULL)
                return NULL;
        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = __pa(ret) + get_dma_offset(dev);

        return ret;
#endif
}

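/*
 * Free memory obtained from dma_direct_alloc_coherent(), using the matching
 * counterpart of whichever path allocated it.
 */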
void dma_direct_free_coherent(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle,
                              struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}

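/*
 * Map a coherent buffer into userspace. On non-coherent platforms the
 * mapping must be uncached and the pfn cannot be derived with
 * virt_to_page(), so it is looked up via __dma_get_coherent_pfn();
 * otherwise the kernel virtual address translates directly to a pfn.
 */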
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                             void *cpu_addr, dma_addr_t handle, size_t size,
                             struct dma_attrs *attrs)
{
        unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
        pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
        return remap_pfn_range(vma, vma->vm_start,
                               pfn + vma->vm_pgoff,
                               vma->vm_end - vma->vm_start,
                               vma->vm_page_prot);
}

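/*
 * Direct scatter/gather map: no IOMMU is involved, so each segment's DMA
 * address is simply its physical address plus the device offset. Each page
 * is synced for non-coherent caches.
 */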
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
                             int nents, enum dma_data_direction direction,
                             struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
                sg->dma_length = sg->length;
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
        }

        return nents;
}

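/* Nothing to undo for a direct mapping; present to satisfy dma_map_ops. */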
static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nents, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
}

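/*
 * On PPC64 a mask is usable only if it covers all of RAM as seen from the
 * device, i.e. up to the highest RAM address plus the per-device offset;
 * 32-bit platforms always claim support.
 */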
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
        /* Could be improved so platforms can set the limit in case
         * they have limited DMA windows
         */
        return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
        return 1;
#endif
}

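/*
 * Smallest mask covering the highest bus address. Worked example (values
 * illustrative): with 4.5GB of RAM and no offset, end = 0x120000000 and
 * fls64(end) = 33, so mask = 1ULL << 32, and the final "mask += mask - 1"
 * yields 0x1ffffffff, i.e. a 33-bit mask.
 */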
static u64 dma_direct_get_required_mask(struct device *dev)
{
        u64 end, mask;

        end = memblock_end_of_DRAM() + get_dma_offset(dev);

        mask = 1ULL << (fls64(end) - 1);
        mask += mask - 1;

        return mask;
}

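/*
 * Map a single page: sync it for non-coherent caches and return its
 * physical address plus the device offset. The unmap side has nothing
 * to do.
 */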
static inline dma_addr_t dma_direct_map_page(struct device *dev,
                                             struct page *page,
                                             unsigned long offset,
                                             size_t size,
                                             enum dma_data_direction dir,
                                             struct dma_attrs *attrs)
{
        BUG_ON(dir == DMA_NONE);
        __dma_sync_page(page, offset, size, dir);
        return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
                                         dma_addr_t dma_address,
                                         size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
}

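/*
 * Cache maintenance hooks, only needed when the CPU caches are not
 * DMA-coherent. bus_to_virt() suffices here because the mapping is direct.
 */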
#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
                struct scatterlist *sgl, int nents,
                enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
                                          dma_addr_t dma_handle, size_t size,
                                          enum dma_data_direction direction)
{
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

struct dma_map_ops dma_direct_ops = {
        .alloc                  = dma_direct_alloc_coherent,
        .free                   = dma_direct_free_coherent,
        .mmap                   = dma_direct_mmap_coherent,
        .map_sg                 = dma_direct_map_sg,
        .unmap_sg               = dma_direct_unmap_sg,
        .dma_supported          = dma_direct_dma_supported,
        .map_page               = dma_direct_map_page,
        .unmap_page             = dma_direct_unmap_page,
        .get_required_mask      = dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
        .sync_single_for_cpu    = dma_direct_sync_single,
        .sync_single_for_device = dma_direct_sync_single,
        .sync_sg_for_cpu        = dma_direct_sync_sg,
        .sync_sg_for_device     = dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

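/* Number of dma-debug tracking entries preallocated at boot: 1 << 16 = 65536. */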
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

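/*
 * Set a device's DMA mask. dma_set_mask() gives the platform (ppc_md) hook
 * first refusal, then the bus ops' set_dma_mask callback, then the generic
 * dma_supported() check. A typical driver call (standard kernel API, "pdev"
 * hypothetical, shown for illustration):
 *
 *        if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *                return -EIO;
 */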
int __dma_set_mask(struct device *dev, u64 dma_mask)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
                return dma_ops->set_dma_mask(dev, dma_mask);
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;
        *dev->dma_mask = dma_mask;
        return 0;
}

int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (ppc_md.dma_set_mask)
                return ppc_md.dma_set_mask(dev, dma_mask);
        return __dma_set_mask(dev, dma_mask);
}
EXPORT_SYMBOL(dma_set_mask);

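/*
 * Report the mask a driver would need for this device to reach all memory:
 * the platform hook wins, then the bus ops' callback, with a full
 * dma_addr_t-width mask as the fallback.
 */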
u64 __dma_get_required_mask(struct device *dev)
{
        struct dma_map_ops *dma_ops = get_dma_ops(dev);

        if (unlikely(dma_ops == NULL))
                return 0;

        if (dma_ops->get_required_mask)
                return dma_ops->get_required_mask(dev);

        return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}

u64 dma_get_required_mask(struct device *dev)
{
        if (ppc_md.dma_get_required_mask)
                return ppc_md.dma_get_required_mask(dev);

        return __dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

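/*
 * Boot-time setup for the dma-debug facility; registering the PCI and VIO
 * bus types lets it track and check mappings on those busses.
 */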
static int __init dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
        dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
        dma_debug_add_bus(&vio_bus_type);
#endif

        return 0;
}
fs_initcall(dma_init);