arch/tile/kernel/pci-dma.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>

/* Generic DMA mapping functions: */

/*
 * Allocate what Linux calls "coherent" memory, which for us just
 * means uncached.
 */
void *dma_alloc_coherent(struct device *dev,
			 size_t size,
			 dma_addr_t *dma_handle,
			 gfp_t gfp)
{
	u64 dma_mask = dev->coherent_dma_mask ?: DMA_BIT_MASK(32);
	int node = dev_to_node(dev);
	int order = get_order(size);
	struct page *pg;
	dma_addr_t addr;

	gfp |= __GFP_ZERO;

	/*
	 * By forcing NUMA node 0 for 32-bit masks we ensure that the
	 * high 32 bits of the resulting PA will be zero.  If the mask
	 * size is, e.g., 24, we may still not be able to guarantee a
	 * suitable memory address, in which case we will return NULL.
	 * But such devices are uncommon.
	 */
	if (dma_mask <= DMA_BIT_MASK(32))
		node = 0;

	pg = homecache_alloc_pages_node(node, gfp, order, PAGE_HOME_UNCACHED);
	if (pg == NULL)
		return NULL;

	addr = page_to_phys(pg);
	if (addr + size > dma_mask) {
		/* Free via the kernel VA, as homecache_free_pages() expects. */
		homecache_free_pages((unsigned long)page_address(pg), order);
		return NULL;
	}

	*dma_handle = addr;
	return page_address(pg);
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Free memory that was allocated with dma_alloc_coherent.
 */
void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle)
{
	homecache_free_pages((unsigned long)vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
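
/*
 * Illustrative sketch (not part of the original file): how a driver
 * would typically use the coherent allocator above for a descriptor
 * ring.  The device pointer, ring size, and function names are
 * hypothetical; dma_alloc_coherent()/dma_free_coherent() and
 * GFP_KERNEL are the real interfaces.
 *
 *	static void *ring_va;		// CPU view (uncached on tile)
 *	static dma_addr_t ring_dma;	// bus address handed to the device
 *
 *	static int example_alloc_ring(struct device *dev, size_t ring_bytes)
 *	{
 *		ring_va = dma_alloc_coherent(dev, ring_bytes, &ring_dma,
 *					     GFP_KERNEL);
 *		if (!ring_va)
 *			return -ENOMEM;
 *		// Program ring_dma into the device; no cache flushes are
 *		// needed, since the pages are allocated PAGE_HOME_UNCACHED.
 *		return 0;
 *	}
 *
 *	static void example_free_ring(struct device *dev, size_t ring_bytes)
 *	{
 *		dma_free_coherent(dev, ring_bytes, ring_va, ring_dma);
 *	}
 */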

/*
 * The map routines "map" the specified address range for DMA
 * accesses.  The memory belongs to the device after this call is
 * issued, until it is unmapped with dma_unmap_single.
 *
 * We don't need to do any mapping, we just flush the address range
 * out of the cache and return a DMA address.
 *
 * The unmap routines do whatever is necessary before the processor
 * accesses the memory again, and must be called before the driver
 * touches the memory.  We can get away with a cache invalidate if we
 * can count on nothing having been touched.
 */

/* Flush a PA range from cache page by page. */
static void __dma_map_pa_range(dma_addr_t dma_addr, size_t size)
{
	struct page *page = pfn_to_page(PFN_DOWN(dma_addr));
	size_t bytesleft = PAGE_SIZE - (dma_addr & (PAGE_SIZE - 1));

	while ((ssize_t)size > 0) {
		/* Flush the page. */
		homecache_flush_cache(page++, 0);

		/* Figure out if we need to continue on the next page. */
		size -= bytesleft;
		bytesleft = PAGE_SIZE;
	}
}

/*
 * dma_map_single can be passed any memory address, and there appear
 * to be no alignment constraints.
 *
 * There is a chance that the start of the buffer will share a cache
 * line with some other data that has been touched in the meantime.
 */
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
			  enum dma_data_direction direction)
{
	dma_addr_t dma_addr = __pa(ptr);

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(size == 0);

	__dma_map_pa_range(dma_addr, size);

	return dma_addr;
}
EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		      enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_single);
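
/*
 * Illustrative sketch (not part of the original file): the streaming
 * pattern described by the comments above, for a buffer the device
 * writes and the CPU then reads.  The device pointer, buffer, and
 * length are hypothetical; dma_map_single()/dma_unmap_single() and
 * DMA_FROM_DEVICE are the real interfaces.
 *
 *	static void example_rx(struct device *dev, void *buf, size_t len)
 *	{
 *		dma_addr_t handle;
 *
 *		handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *		// ... program "handle" into the device and wait for the
 *		// DMA to complete; the memory belongs to the device until
 *		// it is unmapped, so the CPU must not touch buf here ...
 *		dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *		// Only now is it safe for the CPU to read buf.
 *	}
 *
 * Because mapping only flushes whole cache lines, it is safest if buf
 * does not share its first and last cache lines with unrelated data
 * that other code may touch while the mapping is live.
 */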

int dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	       enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));

	WARN_ON(nents == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
		__dma_map_pa_range(sg->dma_address, sg->length);
	}

	return nents;
}
EXPORT_SYMBOL(dma_map_sg);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
		  enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_sg);
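
/*
 * Illustrative sketch (not part of the original file): mapping a small
 * scatter/gather list.  The two buffers and their lengths are
 * hypothetical; sg_init_table(), sg_set_buf(), dma_map_sg() and
 * dma_unmap_sg() are the real interfaces (see <linux/scatterlist.h>).
 *
 *	static int example_map_two_buffers(struct device *dev,
 *					   void *a, size_t a_len,
 *					   void *b, size_t b_len)
 *	{
 *		struct scatterlist sgl[2];
 *		int count;
 *
 *		sg_init_table(sgl, 2);
 *		sg_set_buf(&sgl[0], a, a_len);
 *		sg_set_buf(&sgl[1], b, b_len);
 *
 *		count = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
 *		if (count == 0)
 *			return -ENOMEM;
 *		// ... hand sg_dma_address()/sg_dma_len() of each of the
 *		// "count" entries to the device, wait for completion ...
 *		dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
 *		return 0;
 *	}
 */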

dma_addr_t dma_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));

	BUG_ON(offset + size > PAGE_SIZE);
	homecache_flush_cache(page, 0);

	return page_to_pa(page) + offset;
}
EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
		    enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_unmap_page);
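
/*
 * Illustrative sketch (not part of the original file): mapping part of
 * a page, e.g. a receive fragment.  Note that this implementation
 * requires offset + size <= PAGE_SIZE.  The page and fragment sizes
 * are hypothetical; dma_map_page() is the real interface.
 *
 *	static dma_addr_t example_map_fragment(struct device *dev,
 *					       struct page *pg,
 *					       unsigned int frag_off,
 *					       unsigned int frag_len)
 *	{
 *		// Flushes the whole page, then returns the PA of the
 *		// fragment for the device to use.
 *		return dma_map_page(dev, pg, frag_off, frag_len,
 *				    DMA_FROM_DEVICE);
 *	}
 */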

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			     size_t size, enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
				size_t size, enum dma_data_direction direction)
{
	unsigned long start = PFN_DOWN(dma_handle);
	unsigned long end = PFN_DOWN(dma_handle + size - 1);
	unsigned long i;

	BUG_ON(!valid_dma_direction(direction));
	for (i = start; i <= end; ++i)
		homecache_flush_cache(pfn_to_page(i), 0);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
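
/*
 * Illustrative sketch (not part of the original file): reusing one
 * long-lived streaming mapping across many transfers with the sync
 * calls above, instead of mapping and unmapping every time.  The
 * device pointer, buffer, and length are hypothetical.
 *
 *	// Set up once, e.g. at probe time:
 *	//	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *	static void example_reuse(struct device *dev, dma_addr_t handle,
 *				  void *buf, size_t len)
 *	{
 *		// Give the buffer back to the device before it DMAs again
 *		// (on tile this flushes every page covering the range).
 *		dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *		// ... device DMA happens here ...
 *		// Reclaim the buffer for the CPU before reading the data.
 *		dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	}
 */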

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
			 enum dma_data_direction direction)
{
	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sg[0].length == 0);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/*
 * Flush and invalidate cache for scatterlist.
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(direction));
	WARN_ON(nelems == 0 || sglist->length == 0);

	for_each_sg(sglist, sg, nelems, i) {
		dma_sync_single_for_device(dev, sg->dma_address,
					   sg_dma_len(sg), direction);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
				   unsigned long offset, size_t size,
				   enum dma_data_direction direction)
{
	dma_sync_single_for_cpu(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev,
				      dma_addr_t dma_handle,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	dma_sync_single_for_device(dev, dma_handle + offset, size, direction);
}
EXPORT_SYMBOL(dma_sync_single_range_for_device);

/*
 * dma_alloc_noncoherent() returns non-cacheable memory, so there's no
 * need to do any flushing here.
 */
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction direction)
{
}
EXPORT_SYMBOL(dma_cache_sync);