arch/openrisc/kernel/dma.c
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/export.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

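/*
 * The two helpers below are pte_entry callbacks for walk_page_range():
 * the page-table walker invokes them once per PTE covering the requested
 * kernel virtual range, letting us toggle the cache-inhibit (CI) bit
 * page by page.
 */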
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls alloc_pages_exact(), sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cacheable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All currently known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
static void *
or1k_dma_alloc(struct device *dev, size_t size,
	       dma_addr_t *dma_handle, gfp_t gfp,
	       unsigned long attrs)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/*
		 * We need to iterate through the pages, clearing the
		 * dcache for them and setting the cache-inhibit bit.
		 */
		if (walk_page_range(va, va + size, &walk)) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return (void *)va;
}

static void
or1k_dma_free(struct device *dev, size_t size, void *vaddr,
	      dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/* walk_page_range shouldn't be able to fail here */
		WARN_ON(walk_page_range(va, va + size, &walk));
	}

	free_pages_exact(vaddr, size);
}
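
/*
 * Usage sketch (hypothetical, not part of this file): drivers never call
 * or1k_dma_alloc()/or1k_dma_free() directly; they go through the generic
 * DMA API, which dispatches here via the dma_map_ops table below.  With
 * "dev" standing in for the driver's struct device:
 *
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... program "handle" into the device, touch "buf" from the CPU ...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 */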

static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
	      unsigned long offset, size_t size,
	      enum dma_data_direction dir,
	      unsigned long attrs)
{
	unsigned long cl;
	dma_addr_t addr = page_to_phys(page) + offset;

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo.dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo.dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}

	return addr;
}

static void
or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	/* Nothing special to do here... */
}
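
/*
 * Usage sketch (hypothetical): streaming mappings reach or1k_map_page()
 * through dma_map_single() or dma_map_page().  For a buffer the CPU
 * fills and the device then reads:
 *
 *	dma_addr_t d = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, d))
 *		return -EIO;
 *	// ... start the device on address "d" and wait for completion ...
 *	dma_unmap_single(dev, d, len, DMA_TO_DEVICE);
 */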

static int
or1k_map_sg(struct device *dev, struct scatterlist *sg,
	    int nents, enum dma_data_direction dir,
	    unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
					       s->length, dir, 0);
	}

	return nents;
}

static void
or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
	      int nents, enum dma_data_direction dir,
	      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0);
	}
}
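
/*
 * Usage sketch (hypothetical): scatter-gather callers use dma_map_sg(),
 * which returns the number of entries actually mapped (here always
 * nents), then walk the mapped list with for_each_sg().  Note that
 * dma_unmap_sg() takes the original nents, not the returned count;
 * "queue_to_device" stands in for whatever the driver does per entry:
 *
 *	struct scatterlist *s;
 *	int i;
 *	int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (n == 0)
 *		return -EIO;
 *	for_each_sg(sgl, s, n, i)
 *		queue_to_device(sg_dma_address(s), sg_dma_len(s));
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */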

static void
or1k_sync_single_for_cpu(struct device *dev,
			 dma_addr_t dma_handle, size_t size,
			 enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;

	/* Invalidate the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBIR, cl);
}

static void
or1k_sync_single_for_device(struct device *dev,
			    dma_addr_t dma_handle, size_t size,
			    enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;

	/* Flush the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBFR, cl);
}
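
/*
 * Usage sketch (hypothetical): the sync hooks let a driver hand a
 * streaming buffer back and forth without remapping it.  For a
 * DMA_BIDIRECTIONAL mapping "d" of length "len":
 *
 *	dma_sync_single_for_cpu(dev, d, len, DMA_BIDIRECTIONAL);
 *	// ... CPU inspects and updates the buffer ...
 *	dma_sync_single_for_device(dev, d, len, DMA_BIDIRECTIONAL);
 *	// ... device may access the buffer again ...
 */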

struct dma_map_ops or1k_dma_map_ops = {
	.alloc = or1k_dma_alloc,
	.free = or1k_dma_free,
	.map_page = or1k_map_page,
	.unmap_page = or1k_unmap_page,
	.map_sg = or1k_map_sg,
	.unmap_sg = or1k_unmap_sg,
	.sync_single_for_cpu = or1k_sync_single_for_cpu,
	.sync_single_for_device = or1k_sync_single_for_device,
};
EXPORT_SYMBOL(or1k_dma_map_ops);
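
/*
 * Note (assumption about code outside this file): the architecture's
 * asm/dma-mapping.h is expected to return &or1k_dma_map_ops from
 * get_dma_ops(), which is how the generic dma_*() wrappers used in the
 * sketches above end up calling into this file.
 */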

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);