/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses and busses using the iommu infrastructure
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/bug.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>

/*
 * Generic iommu implementation
 */

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}
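
/*
 * Illustrative sketch (an assumption, not part of the original file): a
 * driver that can address more than 32 bits must publish its mask before
 * mapping anything, otherwise the fallback above silently confines it to
 * 32 bit DMA:
 *
 *	if (dma_set_mask(dev, DMA_64BIT_MASK) == 0)
 *		// device_to_mask(dev) now returns the full 64 bit mask
 *
 * dma_set_mask() and DMA_64BIT_MASK are the generic kernel API of this
 * era (DMA_64BIT_MASK was later superseded by DMA_BIT_MASK(64)).
 */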

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag)
{
	return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
				    dma_handle, device_to_mask(dev), flag,
				    dev->archdata.numa_node);
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
				    void *vaddr, dma_addr_t dma_handle)
{
	iommu_free_coherent(dev->archdata.dma_data, size, vaddr, dma_handle);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address of the buffer
 * passed here is the kernel (virtual) address of the buffer. The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
static dma_addr_t dma_iommu_map_single(struct device *dev, void *vaddr,
				       size_t size,
				       enum dma_data_direction direction)
{
	return iommu_map_single(dev, dev->archdata.dma_data, vaddr, size,
				device_to_mask(dev), direction);
}

static void dma_iommu_unmap_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size,
				   enum dma_data_direction direction)
{
	iommu_unmap_single(dev->archdata.dma_data, dma_handle, size, direction);
}

static int dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction)
{
	return iommu_map_sg(dev, sglist, nelems,
			    device_to_mask(dev), direction);
}

static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       int nelems, enum dma_data_direction direction)
{
	iommu_unmap_sg(dev->archdata.dma_data, sglist, nelems, direction);
}

/* We support DMA to/from any memory page via the iommu */
static int dma_iommu_dma_supported(struct device *dev, u64 mask)
{
	struct iommu_table *tbl = dev->archdata.dma_data;

	if (!tbl || tbl->it_offset > mask) {
		printk(KERN_INFO
		       "Warning: IOMMU offset too big for device mask\n");
		if (tbl)
			printk(KERN_INFO
			       "mask: 0x%08lx, table offset: 0x%08lx\n",
			       mask, tbl->it_offset);
		else
			printk(KERN_INFO "mask: 0x%08lx, table unavailable\n",
			       mask);
		return 0;
	} else
		return 1;
}

struct dma_mapping_ops dma_iommu_ops = {
	.alloc_coherent	= dma_iommu_alloc_coherent,
	.free_coherent	= dma_iommu_free_coherent,
	.map_single	= dma_iommu_map_single,
	.unmap_single	= dma_iommu_unmap_single,
	.map_sg		= dma_iommu_map_sg,
	.unmap_sg	= dma_iommu_unmap_sg,
	.dma_supported	= dma_iommu_dma_supported,
};
EXPORT_SYMBOL(dma_iommu_ops);
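
/*
 * Illustrative sketch (an assumption, not code from this file): platform
 * code selects the iommu path for a device by stashing its iommu_table in
 * archdata and pointing the device at dma_iommu_ops, roughly:
 *
 *	static void example_pci_dma_dev_setup(struct pci_dev *pdev,
 *					      struct iommu_table *tbl)
 *	{
 *		pdev->dev.archdata.dma_data = tbl;
 *		pdev->dev.archdata.dma_ops = &dma_iommu_ops;
 *	}
 *
 * example_pci_dma_dev_setup() is a hypothetical name; the archdata fields
 * are the ones this file already relies on.
 */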

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is zero.
 */

static unsigned long get_dma_direct_offset(struct device *dev)
{
	return (unsigned long)dev->archdata.dma_data;
}
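
/*
 * Illustrative sketch (an assumption, not code from this file): a platform
 * whose devices see system memory at a fixed bus offset would publish that
 * offset through the same archdata slot, e.g.
 *
 *	#define EXAMPLE_DMA_OFFSET	0x80000000ul	(hypothetical value)
 *
 *	dev->archdata.dma_data = (void *)EXAMPLE_DMA_OFFSET;
 *	dev->archdata.dma_ops = &dma_direct_ops;
 *
 * after which every mapping below adds EXAMPLE_DMA_OFFSET to the physical
 * address it hands to the device.
 */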

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct page *page;
	void *ret;
	int node = dev->archdata.numa_node;

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

	return ret;
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
					size_t size,
					enum dma_data_direction direction)
{
	return virt_to_abs(ptr) + get_dma_direct_offset(dev);
}

static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		sg->dma_length = sg->length;
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction)
{
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
	/* Could be improved to check for memory though it better be
	 * done via some global so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= DMA_32BIT_MASK;
}

struct dma_mapping_ops dma_direct_ops = {
	.alloc_coherent	= dma_direct_alloc_coherent,
	.free_coherent	= dma_direct_free_coherent,
	.map_single	= dma_direct_map_single,
	.unmap_single	= dma_direct_unmap_single,
	.map_sg		= dma_direct_map_sg,
	.unmap_sg	= dma_direct_unmap_sg,
	.dma_supported	= dma_direct_dma_supported,
};
EXPORT_SYMBOL(dma_direct_ops);
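
/*
 * Illustrative sketch (an assumption, paraphrased from the era's
 * asm-powerpc/dma-mapping.h rather than quoted): the generic DMA API
 * reaches whichever ops structure the platform installed via a small
 * per-device dispatch, roughly:
 *
 *	static inline dma_addr_t dma_map_single(struct device *dev,
 *						void *cpu_addr, size_t size,
 *						enum dma_data_direction dir)
 *	{
 *		struct dma_mapping_ops *ops = dev->archdata.dma_ops;
 *
 *		BUG_ON(ops == NULL);
 *		return ops->map_single(dev, cpu_addr, size, dir);
 *	}
 *
 * so a driver's dma_map_single() call lands in dma_iommu_map_single() or
 * dma_direct_map_single() depending on how the device was wired up.
 */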