/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#ifndef DMA_H
#define DMA_H

#include <stdio.h>
#include "memory.h"
#include "hw/hw.h"
#include "block.h"
#include "kvm.h"

typedef struct DMAContext DMAContext;
typedef struct ScatterGatherEntry ScatterGatherEntry;

typedef enum {
    DMA_DIRECTION_TO_DEVICE = 0,
    DMA_DIRECTION_FROM_DEVICE = 1,
} DMADirection;

struct QEMUSGList {
    ScatterGatherEntry *sg;
    int nsg;
    int nalloc;
    size_t size;
    DMAContext *dma;
};

#ifndef CONFIG_USER_ONLY

/*
 * When an IOMMU is present, bus addresses become distinct from
 * CPU/memory physical addresses and may be a different size.  Because
 * the IOVA size depends more on the bus than on the platform, we more
 * or less have to treat these as 64-bit always to cover all (or at
 * least most) cases.
 */
typedef uint64_t dma_addr_t;

#define DMA_ADDR_BITS 64
#define DMA_ADDR_FMT "%" PRIx64

typedef int DMATranslateFunc(DMAContext *dma,
                             dma_addr_t addr,
                             hwaddr *paddr,
                             hwaddr *len,
                             DMADirection dir);
typedef void *DMAMapFunc(DMAContext *dma,
                         dma_addr_t addr,
                         dma_addr_t *len,
                         DMADirection dir);
typedef void DMAUnmapFunc(DMAContext *dma,
                          void *buffer,
                          dma_addr_t len,
                          DMADirection dir,
                          dma_addr_t access_len);

struct DMAContext {
    AddressSpace *as;
    DMATranslateFunc *translate;
    DMAMapFunc *map;
    DMAUnmapFunc *unmap;
};

static inline void dma_barrier(DMAContext *dma, DMADirection dir)
{
    /*
     * This is called before DMA read and write operations
     * unless the _relaxed form is used and is responsible
     * for providing some sane ordering of accesses vs
     * concurrently running VCPUs.
     *
     * Users of map(), unmap() or lower level st/ld_*
     * operations are responsible for providing their own
     * ordering via barriers.
     *
     * This primitive implementation does a simple smp_mb()
     * before each operation which provides pretty much full
     * ordering.
     *
     * A smarter implementation can be devised if needed to
     * use lighter barriers based on the direction of the
     * transfer, the DMA context, etc...
     */
    if (kvm_enabled()) {
        smp_mb();
    }
}

static inline bool dma_has_iommu(DMAContext *dma)
{
    return dma && dma->translate;
}

/* Checks that the given range of addresses is valid for DMA.  This is
 * useful for certain cases, but usually you should just use
 * dma_memory_{read,write}() and check for errors */
bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                            DMADirection dir);
static inline bool dma_memory_valid(DMAContext *dma,
                                    dma_addr_t addr, dma_addr_t len,
                                    DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        return true;
    } else {
        return iommu_dma_memory_valid(dma, addr, len, dir);
    }
}

int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                        void *buf, dma_addr_t len, DMADirection dir);
static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
                                        void *buf, dma_addr_t len,
                                        DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        /* Fast-path for no IOMMU */
        address_space_rw(dma->as, addr, buf, len,
                         dir == DMA_DIRECTION_FROM_DEVICE);
        return 0;
    } else {
        return iommu_dma_memory_rw(dma, addr, buf, len, dir);
    }
}

static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
                                          void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
                                           const void *buf, dma_addr_t len)
{
    return dma_memory_rw_relaxed(dma, addr, (void *)buf, len,
                                 DMA_DIRECTION_FROM_DEVICE);
}

static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                                void *buf, dma_addr_t len,
                                DMADirection dir)
{
    dma_barrier(dma, dir);

    return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
}

static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
                                  void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
}

static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr,
                                   const void *buf, dma_addr_t len)
{
    return dma_memory_rw(dma, addr, (void *)buf, len,
                         DMA_DIRECTION_FROM_DEVICE);
}
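
/*
 * Illustrative sketch (not part of the original header): a device model
 * typically issues guest-memory accesses through dma_memory_read()/
 * dma_memory_write() and treats a non-zero return value as a failed
 * transfer.  The DescriptorExample struct and fetch_descriptor() helper
 * below are hypothetical names used only for the example:
 *
 *     typedef struct DescriptorExample {
 *         uint64_t buf_addr;
 *         uint32_t buf_len;
 *         uint32_t flags;
 *     } DescriptorExample;
 *
 *     static int fetch_descriptor(DMAContext *dma, dma_addr_t addr,
 *                                 DescriptorExample *desc)
 *     {
 *         if (dma_memory_read(dma, addr, desc, sizeof(*desc))) {
 *             return -1;   // unmapped address or IOMMU fault
 *         }
 *         return 0;
 *     }
 */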

int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                         dma_addr_t len);

int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);

void *iommu_dma_memory_map(DMAContext *dma,
                           dma_addr_t addr, dma_addr_t *len,
                           DMADirection dir);
static inline void *dma_memory_map(DMAContext *dma,
                                   dma_addr_t addr, dma_addr_t *len,
                                   DMADirection dir)
{
    if (!dma_has_iommu(dma)) {
        hwaddr xlen = *len;
        void *p;

        p = address_space_map(dma->as, addr, &xlen,
                              dir == DMA_DIRECTION_FROM_DEVICE);
        *len = xlen;
        return p;
    } else {
        return iommu_dma_memory_map(dma, addr, len, dir);
    }
}

void iommu_dma_memory_unmap(DMAContext *dma,
                            void *buffer, dma_addr_t len,
                            DMADirection dir, dma_addr_t access_len);
static inline void dma_memory_unmap(DMAContext *dma,
                                    void *buffer, dma_addr_t len,
                                    DMADirection dir, dma_addr_t access_len)
{
    if (!dma_has_iommu(dma)) {
        address_space_unmap(dma->as, buffer, (hwaddr)len,
                            dir == DMA_DIRECTION_FROM_DEVICE, access_len);
    } else {
        iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
    }
}
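
/*
 * Illustrative sketch (not part of the original header): the map/unmap pair
 * gives zero-copy access to a guest buffer when possible.  The length
 * actually mapped may be smaller than requested, so callers should loop or
 * fall back to dma_memory_read()/dma_memory_write().  process() below is a
 * hypothetical placeholder for whatever the device does with the data:
 *
 *     dma_addr_t maplen = len;
 *     void *p = dma_memory_map(dma, addr, &maplen, DMA_DIRECTION_TO_DEVICE);
 *     if (p) {
 *         process(p, maplen);
 *         dma_memory_unmap(dma, p, maplen, DMA_DIRECTION_TO_DEVICE, maplen);
 *     }
 */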

#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
    static inline uint##_bits##_t ld##_lname##_##_end##_dma(DMAContext *dma, \
                                                            dma_addr_t addr) \
    { \
        uint##_bits##_t val; \
        dma_memory_read(dma, addr, &val, (_bits) / 8); \
        return _end##_bits##_to_cpu(val); \
    } \
    static inline void st##_sname##_##_end##_dma(DMAContext *dma, \
                                                 dma_addr_t addr, \
                                                 uint##_bits##_t val) \
    { \
        val = cpu_to_##_end##_bits(val); \
        dma_memory_write(dma, addr, &val, (_bits) / 8); \
    }

static inline uint8_t ldub_dma(DMAContext *dma, dma_addr_t addr)
{
    uint8_t val;

    dma_memory_read(dma, addr, &val, 1);
    return val;
}

static inline void stb_dma(DMAContext *dma, dma_addr_t addr, uint8_t val)
{
    dma_memory_write(dma, addr, &val, 1);
}

DEFINE_LDST_DMA(uw, w, 16, le);
DEFINE_LDST_DMA(l, l, 32, le);
DEFINE_LDST_DMA(q, q, 64, le);
DEFINE_LDST_DMA(uw, w, 16, be);
DEFINE_LDST_DMA(l, l, 32, be);
DEFINE_LDST_DMA(q, q, 64, be);

#undef DEFINE_LDST_DMA
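
/*
 * The macro above expands to lduw/ldl/ldq and stw/stl/stq accessors in both
 * endiannesses (e.g. lduw_le_dma(), ldl_le_dma(), stq_be_dma()).
 * Illustrative sketch (not part of the original header): reading and
 * updating a 32-bit little-endian field in a descriptor ring, where
 * REG_STATUS_OFFSET and DONE_BIT are placeholder names for the example:
 *
 *     uint32_t status = ldl_le_dma(dma, ring_base + REG_STATUS_OFFSET);
 *     stl_le_dma(dma, ring_base + REG_STATUS_OFFSET, status | DONE_BIT);
 */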

void dma_context_init(DMAContext *dma, AddressSpace *as,
                      DMATranslateFunc translate,
                      DMAMapFunc map, DMAUnmapFunc unmap);
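
/*
 * Illustrative sketch (not part of the original header): a bus with no IOMMU
 * can pass NULL for the translate/map/unmap hooks, so dma_has_iommu() is
 * false and all accesses take the direct AddressSpace fast path above.
 * 'as' stands in for whatever address space the bus in question uses:
 *
 *     DMAContext ctx;
 *     dma_context_init(&ctx, as, NULL, NULL, NULL);
 */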

struct ScatterGatherEntry {
    dma_addr_t base;
    dma_addr_t len;
};

void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
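
/*
 * Illustrative sketch (not part of the original header): building a
 * scatter-gather list from two hypothetical descriptor entries before
 * handing it to one of the dma_bdrv_*() helpers declared below:
 *
 *     QEMUSGList qsg;
 *     qemu_sglist_init(&qsg, 2, dma);
 *     qemu_sglist_add(&qsg, desc0_addr, desc0_len);
 *     qemu_sglist_add(&qsg, desc1_addr, desc1_len);
 *     ... submit the I/O, then, once it has completed ...
 *     qemu_sglist_destroy(&qsg);
 */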
#endif

typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
                                    QEMUIOVector *iov, int nb_sectors,
                                    BlockDriverCompletionFunc *cb, void *opaque);

BlockDriverAIOCB *dma_bdrv_io(BlockDriverState *bs,
                              QEMUSGList *sg, uint64_t sector_num,
                              DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
                              void *opaque, DMADirection dir);
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 BlockDriverCompletionFunc *cb, void *opaque);
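
/*
 * Illustrative sketch (not part of the original header): a storage device
 * model submits a scatter-gather list straight to the block layer;
 * 'dma_complete_cb' and 's' are hypothetical names for the completion
 * callback and its opaque state:
 *
 *     BlockDriverAIOCB *aiocb;
 *     aiocb = dma_bdrv_read(bs, &qsg, sector_num, dma_complete_cb, s);
 */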
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg);
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg);

void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type);

#endif