/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/addrspace.h>

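/*
 * Synchronize a kernel virtual buffer with main memory for the given
 * DMA direction.  Buffers in the uncached P2 segment never hit the
 * cache, so they need no maintenance; everything else gets a dcache
 * invalidate, clean, or flush depending on the transfer direction.
 *
 * Illustrative use from a driver about to let a device read "buf"
 * ("buf" and "len" are example names, not part of this file):
 *
 *	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 */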
void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
	/*
	 * No need to sync an uncached area
	 */
	if (PXSEG(vaddr) == P2SEG)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		invalidate_dcache_region(vaddr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_region(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_region(vaddr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);

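/*
 * Allocate enough whole pages to cover "size" bytes and return the
 * first page, storing the matching bus address in *handle.  The
 * underlying allocation is rounded up to a power-of-two page order,
 * so any tail pages beyond the aligned size are split off and freed
 * again before returning.
 */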
static struct page *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t gfp)
{
	struct page *page, *free, *end;
	int order;

	/* Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on AVR32 as it is not supported on this
	 * platform--see CONFIG_HUGETLB_PAGE. */
	gfp &= ~(__GFP_COMP);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);

	/*
	 * When accessing physical memory with valid cache data, we
	 * get a cache hit even if the virtual memory region is marked
	 * as uncached.
	 *
	 * Since the memory is newly allocated, there is no point in
	 * doing a writeback. If the previous owner cares, he should
	 * have flushed the cache before releasing the memory.
	 */
	invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

	*handle = page_to_bus(page);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	/*
	 * Free any unused pages
	 */
	while (free < end) {
		__free_page(free);
		free++;
	}

	return page;
}

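/*
 * Counterpart to __dma_alloc(): the buffer was split into order-0
 * pages at allocation time, so release it page by page.
 */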
static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	while (page < end)
		__free_page(page++);
}

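/*
 * The .alloc hook of avr32_dma_ops.  Backing pages come from
 * __dma_alloc(); with DMA_ATTR_WRITE_COMBINE the buffer is remapped
 * into P3 with write-combining enabled, otherwise the uncached P2
 * alias of the physical address is returned.
 *
 * Drivers reach this through the generic DMA API, e.g. (a sketch with
 * made-up variable names):
 *
 *	dma_addr_t handle;
 *	void *vaddr = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 */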
static void *avr32_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
	struct page *page;
	dma_addr_t phys;

	page = __dma_alloc(dev, size, handle, gfp);
	if (!page)
		return NULL;
	phys = page_to_phys(page);

	if (attrs & DMA_ATTR_WRITE_COMBINE) {
		/* Now, map the page into P3 with write-combining turned on */
		*handle = phys;
		return __ioremap(phys, size, _PAGE_BUFFER);
	} else {
		return phys_to_uncached(phys);
	}
}

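/*
 * The .free hook: undo avr32_dma_alloc().  Tear down the
 * write-combining mapping if one was set up, recover the struct page
 * behind the CPU address, and hand the pages back to __dma_free().
 */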
static void avr32_dma_free(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle, unsigned long attrs)
{
	struct page *page;

	if (attrs & DMA_ATTR_WRITE_COMBINE) {
		iounmap(cpu_addr);

		page = phys_to_page(handle);
	} else {
		void *addr = phys_to_cached(uncached_to_phys(cpu_addr));

		pr_debug("avr32_dma_free addr %p (phys %08lx) size %u\n",
			 cpu_addr, (unsigned long)handle, (unsigned)size);

		BUG_ON(!virt_addr_valid(addr));
		page = virt_to_page(addr);
	}

	__dma_free(dev, size, page, handle);
}

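/*
 * Streaming mapping of a single page: perform the cache maintenance
 * required by "direction", then return the bus address, which on this
 * port comes straight from virt_to_bus().
 */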
static dma_addr_t avr32_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, unsigned long attrs)
{
	void *cpu_addr = page_address(page) + offset;

	dma_cache_sync(dev, cpu_addr, size, direction);
	return virt_to_bus(cpu_addr);
}

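/*
 * Streaming mapping of a scatterlist: the same maintenance as
 * avr32_dma_map_page(), applied to each entry in turn.  Entries are
 * mapped one to one and never merged, so nents is returned unchanged.
 */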
static int avr32_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		char *virt;

		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
		virt = sg_virt(sg);
		dma_cache_sync(dev, virt, sg->length, direction);
	}

	return nents;
}

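/*
 * Re-sync a mapped buffer before handing it back to the device.  Note
 * that avr32_dma_ops below provides no sync_*_for_cpu hooks; CPU-side
 * visibility of device writes relies on the invalidation done here
 * and at map time for DMA_FROM_DEVICE buffers.
 */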
static void avr32_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	dma_cache_sync(dev, bus_to_virt(dma_handle), size, direction);
}

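/*
 * Scatterlist variant of the sync above: each entry is synced through
 * its kernel virtual address.
 */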
static void avr32_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nents,
		enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i)
		dma_cache_sync(dev, sg_virt(sg), sg->length, direction);
}

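/*
 * The operations exported to the generic DMA layer.  Hooks left NULL
 * here (unmap_page, unmap_sg, the sync_*_for_cpu pair, ...) are
 * treated as no-ops by the generic dma-mapping wrappers, which fits
 * this port's direct-mapped, software-maintained cache model.
 */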
struct dma_map_ops avr32_dma_ops = {
	.alloc			= avr32_dma_alloc,
	.free			= avr32_dma_free,
	.map_page		= avr32_dma_map_page,
	.map_sg			= avr32_dma_map_sg,
	.sync_single_for_device	= avr32_dma_sync_single_for_device,
	.sync_sg_for_device	= avr32_dma_sync_sg_for_device,
};
EXPORT_SYMBOL(avr32_dma_ops);