/*
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */
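/*
 * Fill @len bytes of guest memory at @addr with the constant byte @c,
 * staging the writes through a small on-stack buffer.
 */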
int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
#define FILLBUF_SIZE 512
    uint8_t fillbuf[FILLBUF_SIZE];
    int l;

    memset(fillbuf, c, FILLBUF_SIZE);
    while (len > 0) {
        l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
        cpu_physical_memory_rw(addr, fillbuf, l, true);
        len -= l;
        addr += l;
    }

    return 0;
}
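/* Start a scatter/gather list with room for @alloc_hint entries. */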
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}
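/*
 * Append the guest range [base, base + len) to the list, growing the
 * entry array geometrically (2n + 1) when it is full.
 */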
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}
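/* Free the entry array; the QEMUSGList header itself is owned by the caller. */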
void qemu_sglist_destroy(QEMUSGList *qsg)
{
    g_free(qsg->sg);
}

/* Per-request state for scatter/gather block I/O. */
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    DMADirection dir;
    bool in_cancel;
    int sg_cur_index;
    dma_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
    DMAIOFunc *io_func;
} DMAAIOCB;
static void dma_bdrv_cb(void *opaque, int ret);
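/*
 * If cpu_physical_memory_map() cannot map a segment right away, the
 * request registers itself as a map client and is restarted from a
 * bottom half once buffer space becomes available again.
 */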
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(dbs, 0);
}
static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
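/* Unmap every segment of the current iovec and reset it for reuse. */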
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len,
                                  dbs->dir != DMA_DIRECTION_TO_DEVICE,
                                  dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);
}
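/*
 * Finish the request: unmap the buffers, invoke the caller's completion
 * callback, and release the AIOCB unless a cancellation still holds a
 * reference to it.
 */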
static void dma_complete(DMAAIOCB *dbs, int ret)
{
    trace_dma_complete(dbs, ret, dbs->common.cb);

    dma_bdrv_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    if (dbs->bh) {
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (!dbs->in_cancel) {
        /* Requests may complete while dma_aio_cancel is in progress.  In
         * this case, the AIOCB should not be released because it is still
         * referenced by dma_aio_cancel.  */
        qemu_aio_release(dbs);
    }
}
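/*
 * Completion callback and main loop: account for the sectors just
 * transferred, map as many of the remaining scatter/gather entries as
 * possible into a single iovec, and submit the next chunk of I/O.  A
 * partial mapping is fine; the unmapped tail is handled on the next
 * invocation.
 */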
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    trace_dma_bdrv_cb(dbs, ret);

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len,
                                      dbs->dir != DMA_DIRECTION_TO_DEVICE);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        trace_dma_map_wait(dbs);
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
}
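/*
 * Cancel the in-flight block request, then complete with the user
 * callback suppressed.  The in_cancel flag keeps dma_complete() from
 * releasing the AIOCB while bdrv_aio_cancel() may still reference it.
 */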
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    trace_dma_aio_cancel(dbs);

    if (dbs->acb) {
        BlockDriverAIOCB *acb = dbs->acb;
        dbs->acb = NULL;
        dbs->in_cancel = true;
        bdrv_aio_cancel(acb);
        dbs->in_cancel = false;
    }
    dbs->common.cb = NULL;
    dma_complete(dbs, 0);
}
static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};
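/*
 * Common entry point for scatter/gather block I/O; dma_bdrv_read() and
 * dma_bdrv_write() below are thin wrappers that select the block-layer
 * function and the transfer direction.  A sketch of typical device-side
 * use (the variable names here are hypothetical):
 *
 *     qemu_sglist_init(&s->sg, 2);
 *     qemu_sglist_add(&s->sg, prd_addr, prd_len);
 *     s->aiocb = dma_bdrv_read(s->bs, &s->sg, sector, dma_done_cb, s);
 */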
BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
    void *opaque, DMADirection dir)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    return &dbs->common;
}
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
                       DMA_DIRECTION_FROM_DEVICE);
}
BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
                       DMA_DIRECTION_TO_DEVICE);
}
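/*
 * Synchronous transfer between a linear buffer and a scatter/gather
 * list, bounded by the shorter of the two; returns the number of list
 * bytes left untransferred.
 */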
static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, bool to_dev)
{
    uint64_t resid;
    int sg_cur_index;

    resid = sg->size;
    sg_cur_index = 0;
    len = MIN(len, resid);
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        int32_t xfer = MIN(len, entry.len);
        cpu_physical_memory_rw(entry.base, ptr, xfer, !to_dev);
        ptr += xfer;
        len -= xfer;
        resid -= xfer;
    }

    return resid;
}
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, 0);
}
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
    return dma_buf_rw(ptr, len, sg, 1);
}
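/* Start I/O accounting for a transfer covering the whole list (sg->size bytes). */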
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
                    QEMUSGList *sg, enum BlockAcctType type)
{
    bdrv_acct_start(bs, cookie, sg->size, type);
}