/*
 * DMA implementation for Hexagon
 *
 * Copyright (c) 2010-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/dma-mapping.h>
#include <linux/bootmem.h>
#include <linux/genalloc.h>
#include <linux/module.h>
#include <asm/dma-mapping.h>
#include <asm/page.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

int bad_dma_address;	/* globals are automatically initialized to zero */

/*
 * DMA addresses are the same as physical addresses on Hexagon, so
 * mapping one back to a kernel virtual address is a plain
 * phys_to_virt().
 */
static inline void *dma_addr_to_virt(dma_addr_t dma_addr)
{
	return phys_to_virt((unsigned long) dma_addr);
}

/* Only a full 32-bit DMA mask is supported. */
int dma_supported(struct device *dev, u64 mask)
{
	if (mask == DMA_BIT_MASK(32))
		return 1;
	else
		return 0;
}
EXPORT_SYMBOL(dma_supported);

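/*
 * Illustrative sketch, not part of the original file: a driver would
 * negotiate its mask through the generic helper, which consults
 * dma_supported() above.  "example_dev" is a hypothetical device.
 */
static int __maybe_unused example_set_mask(struct device *example_dev)
{
	/* Only an exact 32-bit mask succeeds on this platform */
	return dma_set_mask(example_dev, DMA_BIT_MASK(32));
}
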
static struct gen_pool *coherent_pool;


/* Allocates from a pool of uncached memory that was reserved at boot time */

static void *hexagon_dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag,
					unsigned long attrs)
{
	void *ret;

	/*
	 * Our max_low_pfn should have been backed off by 16MB in
	 * mm/init.c to create DMA coherent space.  Use that as the VA
	 * for the pool.
	 */

	if (coherent_pool == NULL) {
		coherent_pool = gen_pool_create(PAGE_SHIFT, -1);

		if (coherent_pool == NULL)
			panic("Can't create %s() memory pool!", __func__);
		else
			gen_pool_add(coherent_pool,
				(unsigned long) pfn_to_virt(max_low_pfn),
				hexagon_coherent_pool_size, -1);
	}

	ret = (void *) gen_pool_alloc(coherent_pool, size);

	if (ret) {
		memset(ret, 0, size);
		*dma_addr = (dma_addr_t) virt_to_phys(ret);
	} else
		*dma_addr = ~0;

	return ret;
}

static void hexagon_free_coherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_addr, unsigned long attrs)
{
	gen_pool_free(coherent_pool, (unsigned long) vaddr, size);
}

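/*
 * Illustrative sketch, not part of the original file: a driver reaches
 * the pool allocator above through the generic DMA API.  "example_dev"
 * is a hypothetical device pointer.
 */
static void __maybe_unused example_coherent_usage(struct device *example_dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	/* Dispatches to hexagon_dma_alloc_coherent() via the ops table */
	cpu_addr = dma_alloc_coherent(example_dev, PAGE_SIZE, &dma_handle,
				      GFP_KERNEL);
	if (cpu_addr)
		dma_free_coherent(example_dev, PAGE_SIZE, cpu_addr,
				  dma_handle);
}
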
/*
 * Complain if a bus address falls outside the device's DMA mask;
 * returns 1 if the address is usable, 0 if not.
 */
static int check_addr(const char *name, struct device *hwdev,
		      dma_addr_t bus, size_t size)
{
	if (hwdev && hwdev->dma_mask && !dma_capable(hwdev, bus, size)) {
		if (*hwdev->dma_mask >= DMA_BIT_MASK(32))
			printk(KERN_ERR
				"%s: overflow %Lx+%zu of device mask %Lx\n",
				name, (long long)bus, size,
				(long long)*hwdev->dma_mask);
		return 0;
	}
	return 1;
}

static int hexagon_map_sg(struct device *hwdev, struct scatterlist *sg,
			  int nents, enum dma_data_direction dir,
			  unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = sg_phys(s);
		if (!check_addr("map_sg", hwdev, s->dma_address, s->length))
			return 0;

		s->dma_length = s->length;

		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
			continue;

		flush_dcache_range((unsigned long) dma_addr_to_virt(s->dma_address),
				   (unsigned long) dma_addr_to_virt(s->dma_address +
								    s->length));
	}

	return nents;
}

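/*
 * Illustrative sketch, not part of the original file: mapping a
 * scatter-gather list lands in hexagon_map_sg() above.  The device
 * and sg table are hypothetical caller state.
 */
static int __maybe_unused example_sg_usage(struct device *example_dev,
					   struct scatterlist *sgl, int nents)
{
	int mapped;

	/* Flushes the CPU dcache per entry unless skipped via attrs */
	mapped = dma_map_sg(example_dev, sgl, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return -EIO;

	/* ... program the device using sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sg(example_dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}
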
/*
 * The address passed in is a kernel virtual address; do the cache
 * maintenance appropriate to the transfer direction.
 */
static inline void dma_sync(void *addr, size_t size,
			    enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		hexagon_clean_dcache_range((unsigned long) addr,
					   (unsigned long) addr + size);
		break;
	case DMA_FROM_DEVICE:
		hexagon_inv_dcache_range((unsigned long) addr,
					 (unsigned long) addr + size);
		break;
	case DMA_BIDIRECTIONAL:
		flush_dcache_range((unsigned long) addr,
				   (unsigned long) addr + size);
		break;
	default:
		BUG();
	}
}

/**
 * hexagon_map_page() - maps an address for device DMA
 * @dev: pointer to DMA device
 * @page: pointer to page struct of DMA memory
 * @offset: offset within page
 * @size: size of memory to map
 * @dir: transfer direction
 * @attrs: DMA attributes (DMA_ATTR_SKIP_CPU_SYNC skips the cache sync)
 *
 * Called to map a memory address to a DMA address prior
 * to accesses to/from device.
 *
 * We don't particularly have many hoops to jump through
 * so far.  Straight translation between phys and virtual.
 *
 * DMA is not cache coherent so sync is necessary; this
 * seems to be a convenient place to do it.
 *
 */
static dma_addr_t hexagon_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	dma_addr_t bus = page_to_phys(page) + offset;

	WARN_ON(size == 0);

	if (!check_addr("map_single", dev, bus, size))
		return bad_dma_address;

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_sync(dma_addr_to_virt(bus), size, dir);

	return bus;
}

/*
 * On this non-coherent architecture both sync directions map onto
 * dma_sync(), which cleans, invalidates or flushes the dcache range
 * according to @dir.
 */
static void hexagon_sync_single_for_cpu(struct device *dev,
					dma_addr_t dma_handle, size_t size,
					enum dma_data_direction dir)
{
	dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}

static void hexagon_sync_single_for_device(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync(dma_addr_to_virt(dma_handle), size, dir);
}

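/*
 * Illustrative sketch, not part of the original file: a streaming map
 * of a kernel buffer goes through hexagon_map_page() and the sync
 * hooks above.  "example_dev" and "buf" are hypothetical.
 */
static void __maybe_unused example_streaming_usage(struct device *example_dev,
						   void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(example_dev, buf, len, DMA_FROM_DEVICE);
	if (handle == bad_dma_address)
		return;

	/* ... the device writes into the buffer here ... */

	/* Invalidate the dcache range before the CPU reads the data */
	dma_sync_single_for_cpu(example_dev, handle, len, DMA_FROM_DEVICE);
	dma_unmap_single(example_dev, handle, len, DMA_FROM_DEVICE);
}
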
struct dma_map_ops hexagon_dma_ops = {
	.alloc = hexagon_dma_alloc_coherent,
	.free = hexagon_free_coherent,
	.map_sg = hexagon_map_sg,
	.map_page = hexagon_map_page,
	.sync_single_for_cpu = hexagon_sync_single_for_cpu,
	.sync_single_for_device = hexagon_sync_single_for_device,
	.is_phys = 1,
};

/* Install the Hexagon DMA ops unless something already set dma_ops. */
void __init hexagon_dma_init(void)
{
	if (dma_ops)
		return;

	dma_ops = &hexagon_dma_ops;
}