/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "sysemu/dma.h"
#include "trace.h"
#include "qemu/range.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"

/* #define DEBUG_IOMMU */

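/*
 * Fill a guest DMA region with a constant byte: the range [addr, addr + len)
 * in address space @as is written with @c, FILLBUF_SIZE bytes at a time
 * through a stack bounce buffer.  Returns non-zero if any part of the range
 * could not be written.
 */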
int dma_memory_set(AddressSpace *as, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    dma_barrier(as, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_rw(as, addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }

    return error;
}

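/*
 * Scatter/gather list helpers.  qemu_sglist_init() sizes the entry array for
 * @alloc_hint entries, records the AddressSpace used for later mapping, and
 * takes a reference on the owning device so it cannot go away while DMA is
 * in flight; qemu_sglist_add() appends an entry, growing the array as
 * needed; qemu_sglist_destroy() drops the reference and frees the list.
 *
 * A minimal usage sketch (the device, addresses and lengths below are made
 * up for illustration; real callers take them from guest-programmed
 * descriptors):
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, DEVICE(mydev), 2, &address_space_memory);
 *     qemu_sglist_add(&qsg, 0x1000, 512);
 *     qemu_sglist_add(&qsg, 0x3000, 1024);
 *     ... submit the list, e.g. with dma_bdrv_read() ...
 *     qemu_sglist_destroy(&qsg);
 */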
void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint,
                      AddressSpace *as)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}

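/*
 * State of one in-flight scatter/gather request: the common AIOCB header,
 * the target BlockDriverState and current sector, the caller's QEMUSGList
 * with a cursor into it (sg_cur_index/sg_cur_byte), the QEMUIOVector holding
 * whatever part of the list is currently mapped, a bottom half used to retry
 * after a failed mapping, and the bdrv_aio_readv/writev-style function used
 * to submit each chunk.
 */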
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

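/*
 * Retry path for dma_memory_map() failures: when no mapping can be obtained
 * (typically because the bounce buffer is already in use), dma_bdrv_cb()
 * registers with cpu_register_map_client().  Once mapping becomes possible
 * again, continue_after_map_failure() schedules a bottom half which
 * re-enters dma_bdrv_cb() to pick up where the transfer left off.
 */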
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    qemu_aio_unref(dbs);
}

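/*
 * Heart of the state machine, used both to start a transfer and as the
 * completion callback of each submitted chunk.  It unmaps the previous
 * chunk, maps as many of the remaining scatter/gather entries as possible
 * into dbs->iov, trims the iovec back to a whole number of sectors, and
 * submits the next chunk through dbs->io_func.  If nothing could be mapped
 * it waits for a map client notification; once the whole list has been
 * transferred, or on error, it completes the request.
 */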
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }
    dma_bdrv_unmap(dbs);

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->iov.size & ~BDRV_SECTOR_MASK) {
        qemu_iovec_discard_back(&dbs->iov, dbs->iov.size & ~BDRV_SECTOR_MASK);
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}

static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        bdrv_aio_cancel_async(dbs->acb);
    }
}

static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size   = sizeof(DMAAIOCB),
    .cancel_async = dma_aio_cancel,
};

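/*
 * Start scatter/gather I/O on @bs.  A DMAAIOCB is allocated from
 * dma_aiocb_info, the parameters are recorded, and the state machine is
 * kicked off by calling dma_bdrv_cb() with ret == 0.  @io_func is the
 * bdrv_aio_readv/bdrv_aio_writev-style function used for each chunk, and
 * @dir tells the memory API which way data flows so that mapping and
 * unmapping are done correctly.  @cb runs once the whole list has been
 * transferred or an error occurred.
 */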
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

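/*
 * Convenience wrappers: dma_bdrv_read() transfers from disk into the guest
 * memory described by @sg, dma_bdrv_write() transfers from guest memory to
 * disk.  A sketch of a hypothetical device model issuing a read (the names
 * are illustrative only):
 *
 *     s->acb = dma_bdrv_read(s->bs, &s->sg, s->sector_num,
 *                            mydev_dma_complete, s);
 *
 * where mydev_dma_complete(opaque, ret) is invoked after the final chunk
 * finishes and would typically raise the device's completion interrupt and
 * tear down the scatter/gather list.
 */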
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}

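/*
 * Synchronous bounce-buffer transfers between a linear buffer and a
 * scatter/gather list, one entry at a time via dma_memory_rw().
 * dma_buf_read() copies from @ptr into the guest memory described by @sg
 * (data the guest reads from the device); dma_buf_write() copies from guest
 * memory into @ptr.  Both return the residual, i.e. the number of bytes of
 * the scatter/gather list that were not transferred.
 */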
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->as, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}

uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}

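/*
 * Start block-layer accounting for a scatter/gather transfer: the cookie is
 * charged with sg->size bytes against @bs's statistics and is later
 * completed by the caller (e.g. with block_acct_done()) when the request
 * finishes.
 */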
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    block_acct_start(bdrv_get_stats(bs), cookie, sg->size, type);
}