arch/m68knommu/kernel/dma.c
/*
 * Dynamic DMA mapping support.
 *
 * We never have any address translations to worry about, so this
 * is just alloc/free.
 */

#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/cacheflush.h>

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}
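/*
 * Usage sketch (not part of this file; all names below are hypothetical):
 * a driver on this port would grab a device-visible buffer with
 * dma_alloc_coherent() and release it with dma_free_coherent().  The CPU
 * gets a zeroed kernel pointer back, the device gets the matching bus
 * address in *dma_handle.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/errno.h>

#define MY_RING_BYTES	4096		/* illustrative size only */

static void *my_ring;			/* CPU view of the buffer */
static dma_addr_t my_ring_dma;		/* device view of the buffer */

static int my_ring_setup(struct device *dev)
{
	my_ring = dma_alloc_coherent(dev, MY_RING_BYTES, &my_ring_dma,
				     GFP_KERNEL);
	if (my_ring == NULL)
		return -ENOMEM;
	return 0;
}

static void my_ring_teardown(struct device *dev)
{
	dma_free_coherent(dev, MY_RING_BYTES, my_ring, my_ring_dma);
}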

void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
				size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		flush_dcache_range(handle, size);
		break;
	case DMA_FROM_DEVICE:
		/* Should be clear already */
		break;
	default:
		if (printk_ratelimit())
			printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
		break;
	}
}
EXPORT_SYMBOL(dma_sync_single_for_device);

dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
			  enum dma_data_direction dir)
{
	dma_addr_t handle = virt_to_phys(addr);
	flush_dcache_range(handle, size);
	return handle;
}
EXPORT_SYMBOL(dma_map_single);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir)
{
	dma_addr_t handle = page_to_phys(page) + offset;
	dma_sync_single_for_device(dev, handle, size, dir);
	return handle;
}
EXPORT_SYMBOL(dma_map_page);
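
/*
 * Usage sketch (not part of this file; names below are hypothetical): for
 * a streaming transfer the buffer is mapped just before the device uses
 * it.  On this port dma_map_single() simply writes the data cache back
 * and returns the physical address, so the device sees exactly the bytes
 * the CPU wrote.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static dma_addr_t my_start_tx(struct device *dev, void *buf, size_t len)
{
	/* flush the CPU cache, then hand the bus address to the device */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* ... program the device's DMA registers with 'handle' here ... */
	return handle;
}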