/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "trace.h"

int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        cpu_physical_memory_rw(addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }
    return 0;
}
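
/* A minimal usage sketch for dma_memory_set() above (caller and addresses
 * are hypothetical): a device model clearing a 4 KiB guest buffer before
 * filling in a response could do
 *
 *     dma_memory_set(dma, buf_addr, 0, 4096);
 *
 * where "dma" is the DMAContext of the device's bus and "buf_addr" is a
 * guest address handed to the device; both names are illustrative only.
 */
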
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->dma = dma;
}

void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
}
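
/* Typical use of the QEMUSGList helpers above (addresses and lengths are
 * illustrative):
 *
 *     QEMUSGList qsg;
 *
 *     qemu_sglist_init(&qsg, 2, dma);
 *     qemu_sglist_add(&qsg, desc0_addr, desc0_len);
 *     qemu_sglist_add(&qsg, desc1_addr, desc1_len);
 *     ... issue I/O, e.g. via dma_bdrv_read() below ...
 *     qemu_sglist_destroy(&qsg);
 *
 * The alloc_hint only presizes the entry array; qemu_sglist_add() grows
 * it as needed.
 */
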
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
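
/* Note on the two helpers above: when dma_memory_map() fails for lack of
 * bounce-buffer space, dma_bdrv_cb() registers continue_after_map_failure()
 * as a map client.  Once mappings are released, that callback schedules a
 * bottom half so the transfer is retried from a clean call context rather
 * than from inside the unmap path.
 */
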
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}

static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}
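
/* dma_bdrv_cb() above is both the completion callback and the engine of
 * the transfer: each invocation unmaps the chunk it just finished, maps
 * as much of the remaining scatter-gather list as it can, and submits
 * one more request through io_func, until the list is exhausted or an
 * error ends the loop via dma_complete().
 */
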
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}

static AIOPool dma_aio_pool = {
    .aiocb_size = sizeof(DMAAIOCB),
    .cancel = dma_aio_cancel,
};
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}
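
/* Sketch of a caller of dma_bdrv_read() above (device and names are
 * hypothetical): an IDE-style device that has built a QEMUSGList from
 * its descriptor table might start a read with
 *
 *     s->acb = dma_bdrv_read(s->bs, &s->sg, sector_num, ide_dma_cb, s);
 *
 * where ide_dma_cb() receives the final return code once every segment
 * has been transferred or an error occurred.
 */
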
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}

uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}
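
/* Usage sketch for the buffer helpers above (buffer and sizes are
 * illustrative): copy a guest scatter-gather list into a local buffer
 * and check for a short transfer:
 *
 *     uint8_t buf[512];
 *     uint64_t resid = dma_buf_read(buf, sizeof(buf), &qsg);
 *     if (resid) {
 *         ... the list held more data than the buffer ...
 *     }
 *
 * The return value is the number of scatter-gather bytes left
 * untransferred.
 */
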
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}