/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "qemu/osdep.h"
#include "sysemu/block-backend.h"
#include "sysemu/dma.h"
#include "trace/trace-root.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "sysemu/cpu-timers.h"
#include "qemu/range.h"

/* #define DEBUG_IOMMU */

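/*
 * Fill @len bytes of guest memory at @addr in @as with the constant
 * byte @c, after a DMA barrier.  Returns nonzero if any write failed.
 */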
int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED,
                                     fillbuf, l);
        len -= l;
        addr += l;
    }

    return error;
}

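/*
 * Initialize an empty scatter/gather list with room for @alloc_hint
 * entries.  A reference to @dev is held until qemu_sglist_destroy().
 */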
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

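/* Append a (base, len) entry, growing the entry array as needed. */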
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

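/*
 * State of one in-flight scatter/gather block I/O request.  The
 * transfer proceeds in chunks: each pass of dma_blk_cb() maps part of
 * the sglist into @iov and submits it through @io_func.
 */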
typedef struct {
    BlockAIOCB common;
    AioContext *ctx;
    BlockAIOCB *acb;
    QEMUSGList *sg;
    uint32_t align;
    uint64_t offset;
    DMADirection dir;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
    void *io_func_opaque;
} DMAAIOCB;

static void dma_blk_cb(void *opaque, int ret);

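/*
 * Bottom-half handler: retry dma_blk_cb() once dma_memory_map()
 * resources have been released (see cpu_register_map_client() below).
 */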
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    assert(!dbs->acb && dbs->bh);
    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_blk_cb(dbs, 0);
}

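/* Unmap every buffer of the current iovec and reset it. */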
static void dma_blk_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

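/* Finish the request: unmap, invoke the completion callback, release dbs. */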
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    assert(!dbs->acb && !dbs->bh);
    dma_blk_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    qemu_aio_unref(dbs);
}

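/*
 * Completion callback for each chunk, and the engine of the transfer:
 * unmap the buffers of the finished chunk, map as much of the
 * remaining sglist as possible, and submit the next chunk through
 * dbs->io_func.
 */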
static void dma_blk_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_blk_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->offset += dbs->iov.size;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_blk_unmap(dbs);

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        /*
         * Make reads deterministic in icount mode.  Windows sometimes
         * issues disk read requests with overlapping SGs, which leads to
         * non-determinism because the resulting buffer contents may be
         * mixed from several sectors.  This code splits all SGs into
         * several groups so that the SGs within each group do not overlap.
         */
        if (mem && icount_enabled() && dbs->dir == DMA_DIRECTION_FROM_DEVICE) {
            int i;
            for (i = 0; i < dbs->iov.niov; ++i) {
                if (ranges_overlap((intptr_t)dbs->iov.iov[i].iov_base,
                                   dbs->iov.iov[i].iov_len, (intptr_t)mem,
                                   cur_len)) {
                    dma_memory_unmap(dbs->sg->as, mem, cur_len,
                                     dbs->dir, cur_len);
                    mem = NULL;
                    break;
                }
            }
        }
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped; retry once map resources are freed. */
        trace_dma_map_wait(dbs);
        dbs->bh = aio_bh_new(dbs->ctx, reschedule_dma, dbs);
        cpu_register_map_client(dbs->bh);
        return;
    }

    if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) {
        qemu_iovec_discard_back(&dbs->iov,
                                QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
    }

    aio_context_acquire(dbs->ctx);
    dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
                            dma_blk_cb, dbs, dbs->io_func_opaque);
    aio_context_release(dbs->ctx);
    assert(dbs->acb);
}

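/*
 * Cancel a request: if a chunk is in flight, cancel the underlying
 * AIOCB (its callback completes the request); otherwise drop any
 * pending retry BH and report -ECANCELED.
 */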
static void dma_aio_cancel(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    assert(!(dbs->acb && dbs->bh));
    if (dbs->acb) {
        /* This will invoke dma_blk_cb. */
        blk_aio_cancel_async(dbs->acb);
        return;
    }

    if (dbs->bh) {
        cpu_unregister_map_client(dbs->bh);
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, -ECANCELED);
    }
}

static AioContext *dma_get_aio_context(BlockAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    return dbs->ctx;
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size      = sizeof(DMAAIOCB),
    .cancel_async    = dma_aio_cancel,
    .get_aio_context = dma_get_aio_context,
};

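/*
 * Start an asynchronous scatter/gather transfer: each mapped chunk is
 * submitted through @io_func (called with @io_func_opaque), and @cb is
 * invoked once the whole sglist has been processed or on error.
 */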
BlockAIOCB *dma_blk_io(AioContext *ctx,
                       QEMUSGList *sg, uint64_t offset, uint32_t align,
                       DMAIOFunc *io_func, void *io_func_opaque,
                       BlockCompletionFunc *cb,
                       void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, NULL, cb, opaque);

    trace_dma_blk_io(dbs, io_func_opaque, offset, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->sg = sg;
    dbs->ctx = ctx;
    dbs->offset = offset;
    dbs->align = align;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->io_func_opaque = io_func_opaque;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_blk_cb(dbs, 0);
    return &dbs->common;
}


static
BlockAIOCB *dma_blk_read_io_func(int64_t offset, QEMUIOVector *iov,
                                 BlockCompletionFunc *cb, void *cb_opaque,
                                 void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_preadv(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_read(BlockBackend *blk,
                         QEMUSGList *sg, uint64_t offset, uint32_t align,
                         void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
                      dma_blk_read_io_func, blk, cb, opaque,
                      DMA_DIRECTION_FROM_DEVICE);
}

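/*
 * A minimal usage sketch (hypothetical device code; names such as
 * desc0_addr and my_dma_done are illustrative only):
 *
 *     QEMUSGList qsg;
 *     qemu_sglist_init(&qsg, DEVICE(mydev), 2, as);
 *     qemu_sglist_add(&qsg, desc0_addr, desc0_len);
 *     qemu_sglist_add(&qsg, desc1_addr, desc1_len);
 *     dma_blk_read(blk, &qsg, sector << BDRV_SECTOR_BITS,
 *                  BDRV_SECTOR_SIZE, my_dma_done, mydev);
 *
 * my_dma_done(opaque, ret) runs once the whole list has been read;
 * qemu_sglist_destroy(&qsg) should be called there.
 */
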
static
BlockAIOCB *dma_blk_write_io_func(int64_t offset, QEMUIOVector *iov,
                                  BlockCompletionFunc *cb, void *cb_opaque,
                                  void *opaque)
{
    BlockBackend *blk = opaque;
    return blk_aio_pwritev(blk, offset, iov, 0, cb, cb_opaque);
}

BlockAIOCB *dma_blk_write(BlockBackend *blk,
                          QEMUSGList *sg, uint64_t offset, uint32_t align,
                          void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_blk_io(blk_get_aio_context(blk), sg, offset, align,
                      dma_blk_write_io_func, blk, cb, opaque,
                      DMA_DIRECTION_TO_DEVICE);
}


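/*
 * Copy between a linear buffer @ptr and an sglist, entry by entry,
 * bounded by both @len and the sglist size.  Returns the number of
 * sglist bytes that were not transferred.
 */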
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    block_acct_start(blk_get_stats(blk), cookie, sg->size, type);
}