/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#undef DEBUG

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/pgalloc.h>

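/*
 * Coherent allocation comes in two flavours: classic m68k CPUs with an
 * MMU get their buffers remapped non-cacheable through vmap(), while
 * nommu and ColdFire configurations hand out pages straight from the
 * kernel's linear mapping.
 */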
#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)

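/*
 * Allocate 2^order pages, keep just enough of them to back 'size' bytes,
 * and remap those through vmap() with caching disabled; the physical
 * address of the first page becomes the DMA handle.
 */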
static void *m68k_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		gfp_t flag, unsigned long attrs)
{
	struct page *page, **map;
	pgprot_t pgprot;
	void *addr;
	int i, order;

	pr_debug("dma_alloc_coherent: %zu,%x\n", size, flag);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(flag, order);
	if (!page)
		return NULL;

	*handle = page_to_phys(page);
	map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
	if (!map) {
		__free_pages(page, order);
		return NULL;
	}
	split_page(page, order);

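	/*
	 * split_page() above turned the allocation into individual pages;
	 * record the ones backing the buffer in 'map' and give the surplus
	 * tail of the 2^order block back to the page allocator.
	 */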
	order = 1 << order;
	size >>= PAGE_SHIFT;
	map[0] = page;
	for (i = 1; i < size; i++)
		map[i] = page + i;
	for (; i < order; i++)
		__free_page(page + i);
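	/*
	 * Build a non-cacheable pgprot for the vmap: '040/'060 use the
	 * serialized non-cacheable mode plus the global bit, while the '030
	 * has a single nocache bit.
	 */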
	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
	if (CPU_IS_040_OR_060)
		pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	else
		pgprot_val(pgprot) |= _PAGE_NOCACHE030;
	addr = vmap(map, size, VM_MAP, pgprot);
	kfree(map);

	return addr;
}

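/* Release a coherent buffer allocated by m68k_dma_alloc(). */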
static void m68k_dma_free(struct device *dev, size_t size, void *addr,
		dma_addr_t handle, unsigned long attrs)
{
	pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
	vfree(addr);
}

#else

#include <asm/cacheflush.h>

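/*
 * No remapping here: allocate from ZONE_DMA when the device is unknown
 * or cannot reach all of memory, clear the buffer, and derive the DMA
 * handle straight from the virtual address.
 */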
static void *m68k_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}

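/* Release pages handed out by __get_free_pages() above. */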
static void m68k_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */

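/*
 * Make a mapped buffer visible to the device: for writes headed to the
 * device, push dirty cache lines back to memory; for buffers the device
 * will fill, invalidate the lines so the CPU rereads the fresh data
 * afterwards.
 */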
static void m68k_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		cache_clear(handle, size);
		break;
	default:
		if (printk_ratelimit())
			pr_err("dma_sync_single_for_device: unsupported dir %u\n", dir);
		break;
	}
}

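/* Sync every scatterlist entry for the device in turn. */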
static void m68k_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nents, enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		dma_sync_single_for_device(dev, sg->dma_address, sg->length,
					   dir);
	}
}

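/*
 * There is no IOMMU on m68k: the bus address is simply the physical
 * address, so mapping a page reduces to a cache sync, which the caller
 * may suppress with DMA_ATTR_SKIP_CPU_SYNC.
 */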
static dma_addr_t m68k_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t handle = page_to_phys(page) + offset;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_sync_single_for_device(dev, handle, size, dir);

	return handle;
}

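/*
 * Map a scatterlist by filling in the physical address of each entry,
 * syncing it for the device unless the caller asked to skip that.
 */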
static int m68k_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		dma_sync_single_for_device(dev, sg->dma_address, sg->length,
					   dir);
	}
	return nents;
}
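
/*
 * Only the alloc/map/sync-for-device hooks are needed; the DMA core
 * treats the absent unmap and *_for_cpu callbacks as no-ops.
 */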
struct dma_map_ops m68k_dma_ops = {
	.alloc			= m68k_dma_alloc,
	.free			= m68k_dma_free,
	.map_page		= m68k_dma_map_page,
	.map_sg			= m68k_dma_map_sg,
	.sync_single_for_device	= m68k_dma_sync_single_for_device,
	.sync_sg_for_device	= m68k_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(m68k_dma_ops);