arch/xtensa/kernel/pci-dma.c

/*
 * DMA coherent memory allocation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Based on version for i386.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/dma-contiguous.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

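/*
 * Perform the cache maintenance that matches the DMA direction: flush
 * dirty lines before the device reads the buffer, invalidate stale
 * lines after the device has written it, or both for bidirectional
 * transfers.
 */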
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
		__flush_invalidate_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_FROM_DEVICE:
		__invalidate_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_TO_DEVICE:
		__flush_dcache_range((unsigned long)vaddr, size);
		break;

	case DMA_NONE:
		BUG();
		break;
	}
}
EXPORT_SYMBOL(dma_cache_sync);

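/*
 * Apply a cache maintenance function to a DMA buffer. Lowmem pages are
 * reached through their permanent kernel mapping; highmem pages must be
 * mapped temporarily, one page at a time, with kmap_atomic().
 */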
static void do_cache_op(dma_addr_t dma_handle, size_t size,
			void (*fn)(unsigned long, unsigned long))
{
	unsigned long off = dma_handle & (PAGE_SIZE - 1);
	unsigned long pfn = PFN_DOWN(dma_handle);
	struct page *page = pfn_to_page(pfn);

	if (!PageHighMem(page))
		fn((unsigned long)bus_to_virt(dma_handle), size);
	else
		while (size > 0) {
			size_t sz = min_t(size_t, size, PAGE_SIZE - off);
			void *vaddr = kmap_atomic(page);

			fn((unsigned long)vaddr + off, sz);
			kunmap_atomic(vaddr);
			off = 0;
			++page;
			size -= sz;
		}
}

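/*
 * Make device-written data visible to the CPU by invalidating any
 * cache lines that cover the buffer.
 */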
static void xtensa_sync_single_for_cpu(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		do_cache_op(dma_handle, size, __invalidate_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

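/*
 * Make CPU-written data visible to the device. A writeback is only
 * needed when the data cache operates in write-back mode.
 */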
static void xtensa_sync_single_for_device(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		if (XCHAL_DCACHE_IS_WRITEBACK)
			do_cache_op(dma_handle, size, __flush_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

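/* Scatterlist variant: sync every entry for CPU access. */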
static void xtensa_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		xtensa_sync_single_for_cpu(dev, sg_dma_address(s),
					   sg_dma_len(s), dir);
	}
}

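/* Scatterlist variant: sync every entry for device access. */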
static void xtensa_sync_sg_for_device(struct device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		xtensa_sync_single_for_device(dev, sg_dma_address(s),
					      sg_dma_len(s), dir);
	}
}

/*
 * Note: We assume that the full memory space is always mapped to 'kseg';
 * otherwise we would have to use page attributes (not implemented).
 */

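/*
 * Allocate coherent memory: try the CMA area first when the caller is
 * allowed to block, fall back to the page allocator, and hand out the
 * uncached (KSEG bypass) alias of the allocation after invalidating
 * the cached alias, so neither CPU nor device sees stale cache lines.
 */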
static void *xtensa_dma_alloc(struct device *dev, size_t size,
			      dma_addr_t *handle, gfp_t flag,
			      unsigned long attrs)
{
	unsigned long ret;
	unsigned long uncached = 0;
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	/* ignore region specifiers */

	flag &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		flag |= GFP_DMA;

	if (gfpflags_allow_blocking(flag))
		page = dma_alloc_from_contiguous(dev, count, get_order(size));

	if (!page)
		page = alloc_pages(flag, get_order(size));

	if (!page)
		return NULL;

	ret = (unsigned long)page_address(page);

	/* We currently don't support coherent memory outside KSEG */

	BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR ||
	       ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

	uncached = ret + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
	*handle = virt_to_bus((void *)ret);
	__invalidate_dcache_range(ret, size);

	return (void *)uncached;
}

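/*
 * Undo xtensa_dma_alloc(): translate the uncached alias back to the
 * cached one and return the pages to CMA or the page allocator.
 */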
static void xtensa_dma_free(struct device *dev, size_t size, void *vaddr,
			    dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long)vaddr +
		XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR;
	struct page *page = virt_to_page(addr);
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR ||
	       addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1);

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}

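/*
 * Streaming mappings are identity mappings on this platform, so
 * mapping a page only requires syncing it for the device.
 */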
static dma_addr_t xtensa_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t size,
				  enum dma_data_direction dir,
				  unsigned long attrs)
{
	dma_addr_t dma_handle = page_to_phys(page) + offset;

	xtensa_sync_single_for_device(dev, dma_handle, size, dir);
	return dma_handle;
}

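/* Unmapping only requires syncing the buffer back for the CPU. */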
static void xtensa_unmap_page(struct device *dev, dma_addr_t dma_handle,
			      size_t size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	xtensa_sync_single_for_cpu(dev, dma_handle, size, dir);
}

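/* Map each scatterlist entry individually via xtensa_map_page(). */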
static int xtensa_map_sg(struct device *dev, struct scatterlist *sg,
			 int nents, enum dma_data_direction dir,
			 unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = xtensa_map_page(dev, sg_page(s), s->offset,
						 s->length, dir, attrs);
	}
	return nents;
}

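/* Unmap each scatterlist entry individually via xtensa_unmap_page(). */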
static void xtensa_unmap_sg(struct device *dev,
			    struct scatterlist *sg, int nents,
			    enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		xtensa_unmap_page(dev, sg_dma_address(s),
				  sg_dma_len(s), dir, attrs);
	}
}

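/* DMA addresses are physical addresses here, so a mapping can never fail. */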
int xtensa_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

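/* The DMA mapping operations exported for use by the generic DMA API. */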
struct dma_map_ops xtensa_dma_map_ops = {
	.alloc = xtensa_dma_alloc,
	.free = xtensa_dma_free,
	.map_page = xtensa_map_page,
	.unmap_page = xtensa_unmap_page,
	.map_sg = xtensa_map_sg,
	.unmap_sg = xtensa_unmap_sg,
	.sync_single_for_cpu = xtensa_sync_single_for_cpu,
	.sync_single_for_device = xtensa_sync_single_for_device,
	.sync_sg_for_cpu = xtensa_sync_sg_for_cpu,
	.sync_sg_for_device = xtensa_sync_sg_for_device,
	.mapping_error = xtensa_dma_mapping_error,
};
EXPORT_SYMBOL(xtensa_dma_map_ops);

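/* Preallocate entries for the DMA debugging facility at boot. */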
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init xtensa_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(xtensa_dma_init);