/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/pci.h>
#include <asm/vio.h>
#include <asm/bug.h>
#include <asm/machdep.h>
#include <asm/swiotlb.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

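/*
 * Illustrative only: platform setup code would typically install the offset
 * before drivers start mapping. set_dma_offset() is the powerpc helper
 * assumed here; "myboard" is a hypothetical platform.
 *
 *	static void myboard_fixup_dma(struct pci_dev *pdev)
 *	{
 *		set_dma_offset(&pdev->dev, PCI_DRAM_OFFSET);
 *	}
 */
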
/*
 * Return the number of page frames the device can address with its
 * coherent DMA mask (an exclusive pfn limit), clamped to the swiotlb
 * direct-mapping limit when the swiotlb ops are in use.
 */
static u64 __maybe_unused get_pfn_limit(struct device *dev)
{
	u64 pfn = (dev->coherent_dma_mask >> PAGE_SHIFT) + 1;
	struct dev_archdata __maybe_unused *sd = &dev->archdata;

#ifdef CONFIG_SWIOTLB
	if (sd->max_direct_dma_addr && sd->dma_ops == &swiotlb_dma_ops)
		pfn = min_t(u64, pfn, sd->max_direct_dma_addr >> PAGE_SHIFT);
#endif

	return pfn;
}

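/*
 * Worked example (illustrative numbers): with 4K pages (PAGE_SHIFT == 12)
 * and coherent_dma_mask == DMA_BIT_MASK(31) == 0x7fffffff, the limit is
 * (0x7fffffff >> 12) + 1 == 0x80000 pfns, i.e. the first 2GB of memory.
 */
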
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag,
				struct dma_attrs *attrs)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);
	u64 pfn = get_pfn_limit(dev);
	int zone;

	zone = dma_pfn_limit_to_zone(pfn);
	if (zone < 0) {
		dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
			__func__, pfn);
		return NULL;
	}

	switch (zone) {
	case ZONE_DMA:
		flag |= GFP_DMA;
		break;
#ifdef CONFIG_ZONE_DMA32
	case ZONE_DMA32:
		flag |= GFP_DMA32;
		break;
#endif
	}

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = __pa(ret) + get_dma_offset(dev);

	return ret;
#endif
}

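/*
 * Illustrative driver-side usage: drivers do not call
 * dma_direct_alloc_coherent() directly; they go through the generic
 * dma_alloc_coherent() wrapper, which dispatches here via the device's
 * dma_map_ops.
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 */
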
void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle,
			      struct dma_attrs *attrs)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t handle, size_t size,
			     struct dma_attrs *attrs)
{
	unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
	pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
	return remap_pfn_range(vma, vma->vm_start,
			       pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

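/*
 * Illustrative only: a driver exporting the buffer to user space would
 * reach this callback through the generic dma_mmap_coherent() helper from
 * its ->mmap file operation ("foo" is a hypothetical driver context):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->handle,
 *					 vma->vm_end - vma->vm_start);
 *	}
 */
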
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* Nothing to do: a direct mapping holds no per-mapping state. */
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so that platforms can set the limit if
	 * they have limited DMA windows.
	 */
	return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
	return 1;
#endif
}

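/*
 * Worked example (illustrative numbers): with get_dma_offset() == 0x80000000
 * and memblock_end_of_DRAM() == 0x40000000, the highest DMA address is
 * 0xbfffffff, so a 32-bit mask (0xffffffff) passes while a 30-bit mask
 * (0x3fffffff) is rejected.
 */
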
static u64 dma_direct_get_required_mask(struct device *dev)
{
	u64 end, mask;

	end = memblock_end_of_DRAM() + get_dma_offset(dev);

	mask = 1ULL << (fls64(end) - 1);
	mask += mask - 1;

	return mask;
}

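/*
 * Worked example (illustrative numbers): for end == 0x88000000,
 * fls64(end) == 32, so mask == 1ULL << 31 == 0x80000000; after
 * mask += mask - 1 the result is 0xffffffff, the smallest all-ones
 * mask that covers the highest DMA address.
 */
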
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/* Nothing to do: the direct mapping keeps no per-page state. */
}

#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

struct dma_map_ops dma_direct_ops = {
	.alloc				= dma_direct_alloc_coherent,
	.free				= dma_direct_free_coherent,
	.mmap				= dma_direct_mmap_coherent,
	.map_sg				= dma_direct_map_sg,
	.unmap_sg			= dma_direct_unmap_sg,
	.dma_supported			= dma_direct_dma_supported,
	.map_page			= dma_direct_map_page,
	.unmap_page			= dma_direct_unmap_page,
	.get_required_mask		= dma_direct_get_required_mask,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu		= dma_direct_sync_single,
	.sync_single_for_device		= dma_direct_sync_single,
	.sync_sg_for_cpu		= dma_direct_sync_sg,
	.sync_sg_for_device		= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

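/*
 * Illustrative only: platform or bus code attaches these ops to a device
 * with the powerpc set_dma_ops() helper, e.g. while fixing up a
 * directly-mapped PCI bus:
 *
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 */
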
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int __dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);
	return __dma_set_mask(dev, dma_mask);
}
EXPORT_SYMBOL(dma_set_mask);

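/*
 * Illustrative driver usage: negotiate the widest mask the hardware
 * supports, falling back to 32-bit addressing.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
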
u64 __dma_get_required_mask(struct device *dev)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;

	if (dma_ops->get_required_mask)
		return dma_ops->get_required_mask(dev);

	return DMA_BIT_MASK(8 * sizeof(dma_addr_t));
}

u64 dma_get_required_mask(struct device *dev)
{
	if (ppc_md.dma_get_required_mask)
		return ppc_md.dma_get_required_mask(dev);

	return __dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
#ifdef CONFIG_PCI
	dma_debug_add_bus(&pci_bus_type);
#endif
#ifdef CONFIG_IBMVIO
	dma_debug_add_bus(&vio_bus_type);
#endif

	return 0;
}
fs_initcall(dma_init);