/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "block_int.h"

static AIOPool dma_aio_pool;
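
/*
 * Scatter-gather list handling.  A QEMUSGList records the guest-physical
 * (base, length) pairs that together describe one DMA transfer.
 */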
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}

void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                     target_phys_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        /* Grow geometrically (2n + 1) so repeated adds stay amortized O(1). */
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = qemu_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    qemu_free(qsg->sg);
}
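
/*
 * A minimal usage sketch (hypothetical device code, not part of this file);
 * 'MyDeviceState', 'guest_addr0/1' and 'my_dma_complete' are illustrative
 * names, not real QEMU identifiers.  Entry lengths are kept sector-sized
 * because the transfer size is computed in 512-byte sectors below.
 *
 *     static void my_dma_complete(void *opaque, int ret)
 *     {
 *         MyDeviceState *s = opaque;
 *         qemu_sglist_destroy(&s->qsg);
 *     }
 *
 *     qemu_sglist_init(&s->qsg, 2);
 *     qemu_sglist_add(&s->qsg, guest_addr0, 512);
 *     qemu_sglist_add(&s->qsg, guest_addr1, 512);
 *     dma_bdrv_read(s->bs, &s->qsg, sector_num, my_dma_complete, s);
 */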
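/*
 * State for one in-flight scatter-gather request.  'common' embeds the
 * BlockDriverAIOCB that is handed back to the caller; 'acb' tracks the
 * block-layer request currently in flight.
 */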
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    int is_write;
    int sg_cur_index;
    target_phys_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

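/*
 * cpu_physical_memory_map() can fail when bounce buffers are exhausted.
 * In that case the request is parked with cpu_register_map_client() and
 * resumed from a bottom half once a mapping becomes available again.
 */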
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(opaque, 0);
}

static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}
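/*
 * Unmap everything mapped for the current iovec.  The mappings are marked
 * written-to (third argument) when the disk operation is a read, since the
 * data was transferred into guest memory.
 */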
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
                                  dbs->iov.iov[i].iov_len);
    }
}
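/*
 * Completion callback and driver of the state machine: unmap the segment
 * that just finished, then map as much of the remaining scatter-gather
 * list as possible and issue the next block-layer request.
 */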
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);
    qemu_iovec_reset(&dbs->iov);

    /* All segments done, or the block layer reported an error: finish up. */
    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dbs->common.cb(dbs->common.opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs);
        return;
    }

    /* Map as many of the remaining scatter-gather entries as possible. */
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    /* Nothing could be mapped: retry from a bottom half once buffers free up. */
    if (dbs->iov.size == 0) {
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->is_write) {
        dbs->acb = bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
                                   dbs->iov.size / 512, dma_bdrv_cb, dbs);
    } else {
        dbs->acb = bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
                                  dbs->iov.size / 512, dma_bdrv_cb, dbs);
    }
    if (!dbs->acb) {
        /* Immediate failure from the block layer: undo the mappings. */
        dma_bdrv_unmap(dbs);
        qemu_iovec_destroy(&dbs->iov);
        return;
    }
}
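/*
 * Common implementation of dma_bdrv_read() and dma_bdrv_write(): allocate
 * a DMAAIOCB from the pool and start the first dma_bdrv_cb() iteration.
 */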
static BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    BlockDriverCompletionFunc *cb, void *opaque,
    int is_write)
{
    DMAAIOCB *dbs = qemu_aio_get_pool(&dma_aio_pool, bs, cb, opaque);

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->is_write = is_write;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    if (!dbs->acb) {
        qemu_aio_release(dbs);
        return NULL;
    }
    return &dbs->common;
}

BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 0);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 1);
}
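/*
 * Cancellation forwards to the block-layer request currently in flight,
 * if any; the pool installs this as the cancel hook for every DMAAIOCB.
 */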
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    if (dbs->acb) {
        bdrv_aio_cancel(dbs->acb);
    }
}
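/*
 * Must be called once at startup, before the first dma_bdrv_read() or
 * dma_bdrv_write() request is issued, so the AIOCB pool exists.
 */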
void dma_helper_init(void)
{
    aio_pool_init(&dma_aio_pool, sizeof(DMAAIOCB), dma_aio_cancel);
}