/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */
#include "sysemu/dma.h"
#include "trace.h"
#include "qemu/range.h"
#include "qemu/thread.h"

/* #define DEBUG_IOMMU */
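
/* Fill a guest-physical address range with the constant byte 'c'.  The
 * range is written through a small on-stack bounce buffer, FILLBUF_SIZE
 * bytes at a time, so arbitrarily large lengths never need a large
 * allocation. */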
int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
    AddressSpace *as = dma->as;

    dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE);

#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;
    bool error = false;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        error |= address_space_rw(as, addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }

    return error;
}
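
/* Scatter-gather list construction.  A QEMUSGList records (base, len)
 * pairs of guest addresses; the backing array starts at 'alloc_hint'
 * entries and grows geometrically on demand, so repeated adds stay
 * amortized O(1). */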
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->dma = dma;
}
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
}
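
/* Per-request state for scatter-gather block I/O.  One DMAAIOCB tracks
 * the position reached in the QEMUSGList (sg_cur_index/sg_cur_byte), the
 * iovec of host mappings built so far, and the underlying block-layer
 * AIOCB currently in flight. */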
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;
static void dma_bdrv_cb(void *opaque, int ret);
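
/* Retry path for dma_memory_map() failures: when no bounce buffer is
 * available, dma_bdrv_cb() registers a map client and the transfer is
 * rescheduled from a bottom half once a mapping is released. */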
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}
static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}
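
/* Finish a request: unmap any remaining host mappings, invoke the
 * caller's completion callback, and release the AIOCB unless a
 * concurrent dma_aio_cancel() still holds a reference to it. */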
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}
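
/* Core of the scatter-gather state machine.  Each invocation unmaps the
 * iovec from the previous chunk, maps as many further scatter-gather
 * elements as dma_memory_map() allows, and issues one block-layer AIO
 * request covering them; the request's completion re-enters this
 * function until the list is exhausted or an error is reported. */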
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    dma_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
    assert(dbs->acb);
}
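
/* Cancel an in-flight request.  The in_cancel flag keeps dma_complete()
 * from releasing the AIOCB while bdrv_aio_cancel() may still complete
 * it; the completion callback is then suppressed and the request is
 * torn down with ret == 0. */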
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}
static const AIOCBInfo dma_aiocb_info = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};
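
/* Public entry point: allocate a DMAAIOCB, initialize the cursor over
 * the scatter-gather list, and kick the state machine with a synthetic
 * ret of 0.  io_func is bdrv_aio_readv or bdrv_aio_writev for the
 * wrappers below. */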
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}
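
/* Synchronous bounce-buffer path: copy between a contiguous host buffer
 * and the scatter-gather list one element at a time with dma_memory_rw().
 * Returns the number of bytes of 'sg' that were not transferred. */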
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
                           DMADirection dir)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}
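
/* Start block-layer accounting for a scatter-gather transfer; the byte
 * count is taken from the list's accumulated size. */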
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}
void dma_context_init(DMAContext *dma, AddressSpace *as)
{
#ifdef DEBUG_IOMMU
    fprintf(stderr, "dma_context_init(%p -> %p)\n", dma, as);
#endif
    dma->as = as;
}