dma-helpers: Add trace events
[qemu.git] / dma-helpers.c
index 96a120cf3c2ea9d53ea8e75c43e5424b5a1158da..9d6b6fa1fdfd4265e7141c7c09c6835905380d92 100644 (file)
@@ -9,23 +9,21 @@
 
 #include "dma.h"
 #include "block_int.h"
-
-static AIOPool dma_aio_pool;
+#include "trace.h"
 
 void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
 {
-    qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
+    qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
     qsg->nsg = 0;
     qsg->nalloc = alloc_hint;
     qsg->size = 0;
 }
 
-void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
-                     target_phys_addr_t len)
+void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
 {
     if (qsg->nsg == qsg->nalloc) {
         qsg->nalloc = 2 * qsg->nalloc + 1;
-        qsg->sg = qemu_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
+        qsg->sg = g_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
     }
     qsg->sg[qsg->nsg].base = base;
     qsg->sg[qsg->nsg].len = len;
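
For context, a device model builds its scatter-gather list with these helpers before handing it to the DMA code. A minimal sketch, assuming a two-entry list; the guest-physical addresses and lengths below are placeholders for illustration, not values taken from this patch:

    /* Hedged sketch: build a small scatter-gather list (needs "dma.h").
     * Addresses and lengths are made-up example values. */
    QEMUSGList qsg;

    qemu_sglist_init(&qsg, 2);            /* pre-allocate room for two entries */
    qemu_sglist_add(&qsg, 0x1000, 512);   /* first guest-physical region */
    qemu_sglist_add(&qsg, 0x3000, 512);   /* second region; array grows if needed */
    /* ... submit via dma_bdrv_read()/dma_bdrv_write(), then on completion: */
    qemu_sglist_destroy(&qsg);            /* frees qsg->sg */
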
@@ -35,7 +33,7 @@ void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
 
 void qemu_sglist_destroy(QEMUSGList *qsg)
 {
-    qemu_free(qsg->sg);
+    g_free(qsg->sg);
 }
 
 typedef struct {
@@ -44,11 +42,13 @@ typedef struct {
     BlockDriverAIOCB *acb;
     QEMUSGList *sg;
     uint64_t sector_num;
-    int is_write;
+    bool to_dev;
+    bool in_cancel;
     int sg_cur_index;
-    target_phys_addr_t sg_cur_byte;
+    dma_addr_t sg_cur_byte;
     QEMUIOVector iov;
     QEMUBH *bh;
+    DMAIOFunc *io_func;
 } DMAAIOCB;
 
 static void dma_bdrv_cb(void *opaque, int ret);
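
The new io_func member lets reads and writes share one submission path. Judging by how bdrv_aio_readv and bdrv_aio_writev are passed in directly at the bottom of this patch, the DMAIOFunc typedef presumably mirrors their signature; a sketch of the assumed declaration (the authoritative version lives in dma.h, not in this file):

    /* Assumed shape of DMAIOFunc, matching bdrv_aio_readv()/bdrv_aio_writev();
     * the real typedef is declared in dma.h. */
    typedef BlockDriverAIOCB *DMAIOFunc(BlockDriverState *bs, int64_t sector_num,
                                        QEMUIOVector *iov, int nb_sectors,
                                        BlockDriverCompletionFunc *cb, void *opaque);
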
@@ -59,7 +59,7 @@ static void reschedule_dma(void *opaque)
 
     qemu_bh_delete(dbs->bh);
     dbs->bh = NULL;
-    dma_bdrv_cb(opaque, 0);
+    dma_bdrv_cb(dbs, 0);
 }
 
 static void continue_after_map_failure(void *opaque)
@@ -70,33 +70,60 @@ static void continue_after_map_failure(void *opaque)
     qemu_bh_schedule(dbs->bh);
 }
 
-static void dma_bdrv_cb(void *opaque, int ret)
+static void dma_bdrv_unmap(DMAAIOCB *dbs)
 {
-    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
-    target_phys_addr_t cur_addr, cur_len;
-    void *mem;
     int i;
 
-    dbs->acb = NULL;
-    dbs->sector_num += dbs->iov.size / 512;
     for (i = 0; i < dbs->iov.niov; ++i) {
         cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
-                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
+                                  dbs->iov.iov[i].iov_len, !dbs->to_dev,
                                   dbs->iov.iov[i].iov_len);
     }
     qemu_iovec_reset(&dbs->iov);
+}
 
-    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
+static void dma_complete(DMAAIOCB *dbs, int ret)
+{
+    trace_dma_complete(dbs, ret, dbs->common.cb);
+
+    dma_bdrv_unmap(dbs);
+    if (dbs->common.cb) {
         dbs->common.cb(dbs->common.opaque, ret);
-        qemu_iovec_destroy(&dbs->iov);
+    }
+    qemu_iovec_destroy(&dbs->iov);
+    if (dbs->bh) {
+        qemu_bh_delete(dbs->bh);
+        dbs->bh = NULL;
+    }
+    if (!dbs->in_cancel) {
+        /* Requests may complete while dma_aio_cancel is in progress.  In
+         * this case, the AIOCB should not be released because it is still
+         * referenced by dma_aio_cancel.  */
         qemu_aio_release(dbs);
+    }
+}
+
+static void dma_bdrv_cb(void *opaque, int ret)
+{
+    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
+    target_phys_addr_t cur_addr, cur_len;
+    void *mem;
+
+    trace_dma_bdrv_cb(dbs, ret);
+
+    dbs->acb = NULL;
+    dbs->sector_num += dbs->iov.size / 512;
+    dma_bdrv_unmap(dbs);
+
+    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
+        dma_complete(dbs, ret);
         return;
     }
 
     while (dbs->sg_cur_index < dbs->sg->nsg) {
         cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
         cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
-        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
+        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->to_dev);
         if (!mem)
             break;
         qemu_iovec_add(&dbs->iov, mem, cur_len);
@@ -108,25 +135,48 @@ static void dma_bdrv_cb(void *opaque, int ret)
     }
 
     if (dbs->iov.size == 0) {
+        trace_dma_map_wait(dbs);
         cpu_register_map_client(dbs, continue_after_map_failure);
         return;
     }
 
-    if (dbs->is_write) {
-        dbs->acb = bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
-                                   dbs->iov.size / 512, dma_bdrv_cb, dbs);
-    } else {
-        dbs->acb = bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
-                                  dbs->iov.size / 512, dma_bdrv_cb, dbs);
+    dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
+                            dbs->iov.size / 512, dma_bdrv_cb, dbs);
+    if (!dbs->acb) {
+        dma_complete(dbs, -EIO);
     }
 }
 
-static BlockDriverAIOCB *dma_bdrv_io(
+static void dma_aio_cancel(BlockDriverAIOCB *acb)
+{
+    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
+
+    trace_dma_aio_cancel(dbs);
+
+    if (dbs->acb) {
+        BlockDriverAIOCB *acb = dbs->acb;
+        dbs->acb = NULL;
+        dbs->in_cancel = true;
+        bdrv_aio_cancel(acb);
+        dbs->in_cancel = false;
+    }
+    dbs->common.cb = NULL;
+    dma_complete(dbs, 0);
+}
+
+static AIOPool dma_aio_pool = {
+    .aiocb_size         = sizeof(DMAAIOCB),
+    .cancel             = dma_aio_cancel,
+};
+
+BlockDriverAIOCB *dma_bdrv_io(
     BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
-    BlockDriverCompletionFunc *cb, void *opaque,
-    int is_write)
+    DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
+    void *opaque, bool to_dev)
 {
-    DMAAIOCB *dbs =  qemu_aio_get_pool(&dma_aio_pool, bs, cb, opaque);
+    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);
+
+    trace_dma_bdrv_io(dbs, bs, sector_num, to_dev);
 
     dbs->acb = NULL;
     dbs->bs = bs;
@@ -134,7 +184,8 @@ static BlockDriverAIOCB *dma_bdrv_io(
     dbs->sector_num = sector_num;
     dbs->sg_cur_index = 0;
     dbs->sg_cur_byte = 0;
-    dbs->is_write = is_write;
+    dbs->to_dev = to_dev;
+    dbs->io_func = io_func;
     dbs->bh = NULL;
     qemu_iovec_init(&dbs->iov, sg->nsg);
     dma_bdrv_cb(dbs, 0);
@@ -146,26 +197,12 @@ BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
 {
-    return dma_bdrv_io(bs, sg, sector, cb, opaque, 0);
+    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque, false);
 }
 
 BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                  QEMUSGList *sg, uint64_t sector,
                                  void (*cb)(void *opaque, int ret), void *opaque)
 {
-    return dma_bdrv_io(bs, sg, sector, cb, opaque, 1);
-}
-
-static void dma_aio_cancel(BlockDriverAIOCB *acb)
-{
-    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
-
-    if (dbs->acb) {
-        bdrv_aio_cancel(dbs->acb);
-    }
-}
-
-void dma_helper_init(void)
-{
-    aio_pool_init(&dma_aio_pool, sizeof(DMAAIOCB), dma_aio_cancel);
+    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque, true);
 }