/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */
9 | ||
10 | #ifndef DMA_H | |
11 | #define DMA_H | |
12 | ||
13 | #include <stdio.h> | |
022c62cb | 14 | #include "exec/memory.h" |
1ad2134f | 15 | #include "hw/hw.h" |
737e150e | 16 | #include "block/block.h" |
9c17d615 | 17 | #include "sysemu/kvm.h" |
typedef struct DMAContext DMAContext;
typedef struct ScatterGatherEntry ScatterGatherEntry;

/* Direction of a DMA transfer, named from the device's point of view:
 * TO_DEVICE moves data from guest memory into the device (a device
 * "read", used by dma_memory_read()); FROM_DEVICE moves data from the
 * device out to guest memory (used by dma_memory_write()).
 */
typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;
/* A scatter/gather list: a growable array of (base, len) bus-address
 * regions plus the DMAContext they belong to.  Managed by the
 * qemu_sglist_*() functions declared below.
 */
struct QEMUSGList {
    ScatterGatherEntry *sg;  /* entry array (heap-allocated) */
    int nsg;                 /* number of entries in use */
    int nalloc;              /* number of entries allocated */
    size_t size;             /* presumably total bytes across entries --
                              * maintained by qemu_sglist_add(); confirm
                              * at the definition */
    DMAContext *dma;         /* context used to access the regions */
};
4be403c8 | 35 | #ifndef CONFIG_USER_ONLY |
d9d1055e | 36 | |
/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
/* printf conversion for dma_addr_t; callers supply any "0x"/width. */
#define DMA_ADDR_FMT "%" PRIx64
/* IOMMU hook: translate bus address @addr into a CPU physical address
 * (*paddr) and clamp *len to the contiguous region; returns an int
 * status -- NOTE(review): 0-on-success/negative-on-error is presumed,
 * confirm against the implementations. */
typedef int DMATranslateFunc(DMAContext *dma,
                             dma_addr_t addr,
                             hwaddr *paddr,
                             hwaddr *len,
                             DMADirection dir);
/* IOMMU hook: map a bus-address range for direct access, in the mold
 * of the address_space_map() fast path in dma_memory_map() below;
 * presumably may shrink *len to what could be mapped. */
typedef void* DMAMapFunc(DMAContext *dma,
                         dma_addr_t addr,
                         dma_addr_t *len,
                         DMADirection dir);
/* IOMMU hook: undo a DMAMapFunc mapping; @access_len is the number of
 * bytes actually accessed through the mapping. */
typedef void DMAUnmapFunc(DMAContext *dma,
                          void *buffer,
                          dma_addr_t len,
                          DMADirection dir,
                          dma_addr_t access_len);
/* A DMA translation context: the address space accesses target, plus
 * IOMMU callbacks.  A NULL @translate marks a direct-mapped context
 * (see dma_has_iommu()). */
struct DMAContext {
    AddressSpace *as;            /* target address space for DMA */
    DMATranslateFunc *translate; /* NULL => no IOMMU, fast paths used */
    DMAMapFunc *map;             /* IOMMU map hook */
    DMAUnmapFunc *unmap;         /* IOMMU unmap hook */
};
/* A global DMA context corresponding to the address_space_memory
 * AddressSpace, for sysbus devices which do DMA.
 * Defined in a .c file elsewhere (this is only the declaration).
 */
extern DMAContext dma_context_memory;
/* Memory barrier issued before ordered DMA accesses (dma_memory_rw()
 * and friends); see the body comment for the ordering contract. */
static inline void dma_barrier(DMAContext *dma, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    /* Only KVM runs VCPUs truly concurrently, so the barrier is
     * skipped otherwise; @dma and @dir are currently unused. */
    if (kvm_enabled()) {
        smp_mb();
    }
}
e5332e63 DG |
101 | static inline bool dma_has_iommu(DMAContext *dma) |
102 | { | |
b90600ee | 103 | return dma && dma->translate; |
e5332e63 | 104 | } |
d9d1055e | 105 | |
d86a77f8 DG |
106 | /* Checks that the given range of addresses is valid for DMA. This is |
107 | * useful for certain cases, but usually you should just use | |
108 | * dma_memory_{read,write}() and check for errors */ | |
e5332e63 DG |
109 | bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len, |
110 | DMADirection dir); | |
111 | static inline bool dma_memory_valid(DMAContext *dma, | |
112 | dma_addr_t addr, dma_addr_t len, | |
113 | DMADirection dir) | |
d86a77f8 | 114 | { |
e5332e63 | 115 | if (!dma_has_iommu(dma)) { |
51644ab7 PB |
116 | return address_space_access_valid(dma->as, addr, len, |
117 | dir == DMA_DIRECTION_FROM_DEVICE); | |
e5332e63 DG |
118 | } else { |
119 | return iommu_dma_memory_valid(dma, addr, len, dir); | |
120 | } | |
d86a77f8 DG |
121 | } |
122 | ||
e5332e63 DG |
123 | int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr, |
124 | void *buf, dma_addr_t len, DMADirection dir); | |
7a0bac4d BH |
125 | static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr, |
126 | void *buf, dma_addr_t len, | |
127 | DMADirection dir) | |
d86a77f8 | 128 | { |
e5332e63 DG |
129 | if (!dma_has_iommu(dma)) { |
130 | /* Fast-path for no IOMMU */ | |
b90600ee | 131 | address_space_rw(dma->as, addr, buf, len, dir == DMA_DIRECTION_FROM_DEVICE); |
e5332e63 DG |
132 | return 0; |
133 | } else { | |
134 | return iommu_dma_memory_rw(dma, addr, buf, len, dir); | |
135 | } | |
d86a77f8 DG |
136 | } |
137 | ||
/* Read @len bytes of guest memory at @addr into @buf, without a
 * preceding dma_barrier(); caller provides its own ordering. */
static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

/* Write @len bytes from @buf to guest memory at @addr, without a
 * preceding dma_barrier().  The const cast is safe: a FROM_DEVICE
 * transfer only reads from @buf. */
static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
                                           const void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, (void *)buf, len,
                                 DMA_DIRECTION_FROM_DEVICE);
}
/* Ordered DMA transfer: issue dma_barrier() and then delegate to
 * dma_memory_rw_relaxed().  This is the variant most device code
 * should use. */
static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                                void *buf, dma_addr_t len,
                                DMADirection dir)
{
    dma_barrier(dma, dir);

    return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
}
/* Ordered read of @len bytes of guest memory at @addr into @buf. */
static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

/* Ordered write of @len bytes from @buf to guest memory at @addr.
 * The const cast is safe: FROM_DEVICE transfers only read @buf. */
static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr,
                                   const void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, (void *)buf, len,
                         DMA_DIRECTION_FROM_DEVICE);
}
int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len);

/* Fill @len bytes of guest memory at @addr with byte @c -- the DMA
 * analogue of memset().  Implemented out of line; NOTE(review): the
 * int return is presumably 0/error like dma_memory_rw(), confirm at
 * the definition. */
int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);
e5332e63 DG |
178 | void *iommu_dma_memory_map(DMAContext *dma, |
179 | dma_addr_t addr, dma_addr_t *len, | |
180 | DMADirection dir); | |
d86a77f8 DG |
181 | static inline void *dma_memory_map(DMAContext *dma, |
182 | dma_addr_t addr, dma_addr_t *len, | |
183 | DMADirection dir) | |
184 | { | |
e5332e63 | 185 | if (!dma_has_iommu(dma)) { |
a8170e5e | 186 | hwaddr xlen = *len; |
e5332e63 DG |
187 | void *p; |
188 | ||
b90600ee | 189 | p = address_space_map(dma->as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE); |
e5332e63 DG |
190 | *len = xlen; |
191 | return p; | |
192 | } else { | |
193 | return iommu_dma_memory_map(dma, addr, len, dir); | |
194 | } | |
d86a77f8 DG |
195 | } |
196 | ||
e5332e63 DG |
197 | void iommu_dma_memory_unmap(DMAContext *dma, |
198 | void *buffer, dma_addr_t len, | |
199 | DMADirection dir, dma_addr_t access_len); | |
d86a77f8 DG |
200 | static inline void dma_memory_unmap(DMAContext *dma, |
201 | void *buffer, dma_addr_t len, | |
202 | DMADirection dir, dma_addr_t access_len) | |
203 | { | |
e5332e63 | 204 | if (!dma_has_iommu(dma)) { |
a8170e5e | 205 | address_space_unmap(dma->as, buffer, (hwaddr)len, |
b90600ee | 206 | dir == DMA_DIRECTION_FROM_DEVICE, access_len); |
e5332e63 DG |
207 | } else { |
208 | iommu_dma_memory_unmap(dma, buffer, len, dir, access_len); | |
209 | } | |
d86a77f8 DG |
210 | } |
211 | ||
/* Generate endian-converting load/store accessors on top of
 * dma_memory_read()/dma_memory_write():
 *   uintN_t ld<_lname>_<_end>_dma(DMAContext *, dma_addr_t)
 *   void    st<_sname>_<_end>_dma(DMAContext *, dma_addr_t, uintN_t)
 * _end##_bits##_to_cpu / cpu_to_##_end##_bits resolve to QEMU's
 * byte-swap helpers (e.g. le16_to_cpu), pulled in via the includes
 * at the top of this header. */
#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(DMAContext *dma, \
                                                            dma_addr_t addr) \
    { \
        uint##_bits##_t val; \
        dma_memory_read(dma, addr, &val, (_bits) / 8); \
        return _end##_bits##_to_cpu(val); \
    } \
    static inline void st##_sname##_##_end##_dma(DMAContext *dma, \
                                                 dma_addr_t addr, \
                                                 uint##_bits##_t val) \
    { \
        val = cpu_to_##_end##_bits(val); \
        dma_memory_write(dma, addr, &val, (_bits) / 8); \
    }
228 | static inline uint8_t ldub_dma(DMAContext *dma, dma_addr_t addr) | |
229 | { | |
230 | uint8_t val; | |
231 | ||
232 | dma_memory_read(dma, addr, &val, 1); | |
233 | return val; | |
234 | } | |
235 | ||
236 | static inline void stb_dma(DMAContext *dma, dma_addr_t addr, uint8_t val) | |
237 | { | |
238 | dma_memory_write(dma, addr, &val, 1); | |
239 | } | |
240 | ||
/* Instantiate the 16/32/64-bit little- and big-endian accessors:
 * ld{uw,l,q}_{le,be}_dma() and st{w,l,q}_{le,be}_dma(). */
DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
/* Initialize @dma to target @as with the given IOMMU hooks; a NULL
 * @translate leaves the context direct-mapped (see dma_has_iommu()). */
void dma_context_init(DMAContext *dma, AddressSpace *as, DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap);

/* One contiguous bus-address region in a QEMUSGList. */
struct ScatterGatherEntry {
    dma_addr_t base;
    dma_addr_t len;
};

/* Lifecycle of a QEMUSGList: init with a capacity hint, append
 * regions, destroy to free the entry array. */
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
#endif /* !CONFIG_USER_ONLY */
/* Signature of the block-layer AIO primitive dma_bdrv_io() drives
 * (e.g. bdrv_aio_readv/bdrv_aio_writev-shaped functions). */
typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
                                    QEMUIOVector *iov, int nb_sectors,
                                    BlockDriverCompletionFunc *cb, void *opaque);

/* Start asynchronous block I/O covering scatter/gather list @sg via
 * @io_func; @cb(opaque, ...) runs on completion.  @dir selects
 * whether guest memory is the source or destination. */
BlockDriverAIOCB *dma_bdrv_io(BlockDriverState *bs,
                              QEMUSGList *sg, uint64_t sector_num,
                              DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
                              void *opaque, DMADirection dir);
/* Convenience wrappers around dma_bdrv_io() for plain reads/writes. */
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 BlockDriverCompletionFunc *cb, void *opaque);
/* Copy between a linear host buffer and an sg list; NOTE(review):
 * the uint64_t return is presumably the residual/transferred byte
 * count -- confirm against the definitions in dma-helpers.c. */
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

/* Begin block-accounting for an sg-list transfer of the given type. */
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);

#endif /* DMA_H */