diff --git a/dma-helpers.c b/dma-helpers.c
index 4610ea0420465ce1e566fc5ac378f18cc1a0aa6b..7e7e415cf9f1d065da43aec6bc60288a5cabe336 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -8,7 +8,7 @@
  */
 
 #include "dma.h"
-#include "block_int.h"
+#include "trace.h"
 
 void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
 {
@@ -18,8 +18,7 @@ void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
     qsg->size = 0;
 }
 
-void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
-                     target_phys_addr_t len)
+void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
 {
     if (qsg->nsg == qsg->nalloc) {
         qsg->nalloc = 2 * qsg->nalloc + 1;
@@ -34,6 +33,7 @@ void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
 void qemu_sglist_destroy(QEMUSGList *qsg)
 {
     g_free(qsg->sg);
+    memset(qsg, 0, sizeof(*qsg));
 }
 
 typedef struct {
@@ -42,9 +42,10 @@ typedef struct {
     BlockDriverAIOCB *acb;
     QEMUSGList *sg;
     uint64_t sector_num;
-    int is_write;
+    DMADirection dir;
+    bool in_cancel;
     int sg_cur_index;
-    target_phys_addr_t sg_cur_byte;
+    dma_addr_t sg_cur_byte;
     QEMUIOVector iov;
     QEMUBH *bh;
     DMAIOFunc *io_func;
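
The new "dir" field uses the DMADirection enum that the companion dma.h
change introduces; that header is not part of this blobdiff. A sketch of
the definition this file assumes, where the two constant names are
confirmed by the code below but the numeric values are an inference:

    /* Assumed shape of the enum from dma.h (not shown in this diff). */
    typedef enum {
        DMA_DIRECTION_TO_DEVICE = 0,    /* guest memory -> device */
        DMA_DIRECTION_FROM_DEVICE = 1,  /* device -> guest memory */
    } DMADirection;

The tests below are written as "dir != DMA_DIRECTION_TO_DEVICE", so only
transfers from the device map guest memory for writing.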
@@ -58,7 +59,7 @@ static void reschedule_dma(void *opaque)
 
     qemu_bh_delete(dbs->bh);
     dbs->bh = NULL;
-    dma_bdrv_cb(opaque, 0);
+    dma_bdrv_cb(dbs, 0);
 }
 
 static void continue_after_map_failure(void *opaque)
@@ -75,9 +76,32 @@ static void dma_bdrv_unmap(DMAAIOCB *dbs)
 
     for (i = 0; i < dbs->iov.niov; ++i) {
         cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
-                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
+                                  dbs->iov.iov[i].iov_len,
+                                  dbs->dir != DMA_DIRECTION_TO_DEVICE,
                                   dbs->iov.iov[i].iov_len);
     }
+    qemu_iovec_reset(&dbs->iov);
+}
+
+static void dma_complete(DMAAIOCB *dbs, int ret)
+{
+    trace_dma_complete(dbs, ret, dbs->common.cb);
+
+    dma_bdrv_unmap(dbs);
+    if (dbs->common.cb) {
+        dbs->common.cb(dbs->common.opaque, ret);
+    }
+    qemu_iovec_destroy(&dbs->iov);
+    if (dbs->bh) {
+        qemu_bh_delete(dbs->bh);
+        dbs->bh = NULL;
+    }
+    if (!dbs->in_cancel) {
+        /* Requests may complete while dma_aio_cancel is in progress.  In
+         * this case, the AIOCB should not be released because it is still
+         * referenced by dma_aio_cancel.  */
+        qemu_aio_release(dbs);
+    }
 }
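
dma_complete() above funnels the success, error, and cancellation paths
through one cleanup point; the in_cancel flag keeps qemu_aio_release()
from freeing an AIOCB that dma_aio_cancel() below still references. The
callback it invokes is the ordinary completion callback a device passed
to dma_bdrv_io(). A hedged sketch of such a callback, where MyDevState,
mydev_report_error and mydev_raise_irq are hypothetical stand-ins:

    /* Hypothetical device completion callback, not part of this patch.
     * dma_complete() runs it once, with ret < 0 on error; after a cancel,
     * common.cb has been cleared to NULL, so it is never run at all. */
    static void mydev_dma_done(void *opaque, int ret)
    {
        MyDevState *s = opaque;        /* hypothetical device state */

        s->acb = NULL;                 /* request no longer in flight */
        if (ret < 0) {
            mydev_report_error(s);     /* hypothetical error handling */
        }
        qemu_sglist_destroy(&s->sg);   /* drop the scatter/gather list */
        mydev_raise_irq(s);            /* hypothetical completion IRQ */
    }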
 
 static void dma_bdrv_cb(void *opaque, int ret)
@@ -86,22 +110,22 @@ static void dma_bdrv_cb(void *opaque, int ret)
-    target_phys_addr_t cur_addr, cur_len;
+    dma_addr_t cur_addr, cur_len;
     void *mem;
 
+    trace_dma_bdrv_cb(dbs, ret);
+
     dbs->acb = NULL;
     dbs->sector_num += dbs->iov.size / 512;
     dma_bdrv_unmap(dbs);
-    qemu_iovec_reset(&dbs->iov);
 
     if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
-        dbs->common.cb(dbs->common.opaque, ret);
-        qemu_iovec_destroy(&dbs->iov);
-        qemu_aio_release(dbs);
+        dma_complete(dbs, ret);
         return;
     }
 
     while (dbs->sg_cur_index < dbs->sg->nsg) {
         cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
         cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
-        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
+        mem = cpu_physical_memory_map(cur_addr, &cur_len,
+                                      dbs->dir != DMA_DIRECTION_TO_DEVICE);
         if (!mem)
             break;
         qemu_iovec_add(&dbs->iov, mem, cur_len);
@@ -113,26 +137,31 @@ static void dma_bdrv_cb(void *opaque, int ret)
     }
 
     if (dbs->iov.size == 0) {
+        trace_dma_map_wait(dbs);
         cpu_register_map_client(dbs, continue_after_map_failure);
         return;
     }
 
     dbs->acb = dbs->io_func(dbs->bs, dbs->sector_num, &dbs->iov,
                             dbs->iov.size / 512, dma_bdrv_cb, dbs);
-    if (!dbs->acb) {
-        dma_bdrv_unmap(dbs);
-        qemu_iovec_destroy(&dbs->iov);
-        return;
-    }
+    assert(dbs->acb);
 }
 
 static void dma_aio_cancel(BlockDriverAIOCB *acb)
 {
     DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);
 
+    trace_dma_aio_cancel(dbs);
+
     if (dbs->acb) {
-        bdrv_aio_cancel(dbs->acb);
+        BlockDriverAIOCB *acb = dbs->acb;
+        dbs->acb = NULL;
+        dbs->in_cancel = true;
+        bdrv_aio_cancel(acb);
+        dbs->in_cancel = false;
     }
+    dbs->common.cb = NULL;
+    dma_complete(dbs, 0);
 }
 
 static AIOPool dma_aio_pool = {
@@ -143,25 +172,23 @@ static AIOPool dma_aio_pool = {
 BlockDriverAIOCB *dma_bdrv_io(
     BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
     DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
-    void *opaque, int is_write)
+    void *opaque, DMADirection dir)
 {
     DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);
 
+    trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));
+
     dbs->acb = NULL;
     dbs->bs = bs;
     dbs->sg = sg;
     dbs->sector_num = sector_num;
     dbs->sg_cur_index = 0;
     dbs->sg_cur_byte = 0;
-    dbs->is_write = is_write;
+    dbs->dir = dir;
     dbs->io_func = io_func;
     dbs->bh = NULL;
     qemu_iovec_init(&dbs->iov, sg->nsg);
     dma_bdrv_cb(dbs, 0);
-    if (!dbs->acb) {
-        qemu_aio_release(dbs);
-        return NULL;
-    }
     return &dbs->common;
 }
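
Since dma_bdrv_cb() now asserts that io_func returned a valid AIOCB,
dma_bdrv_io() can no longer return NULL, and the caller-side failure
check that the deleted lines handled goes away. A hedged sketch of how a
device drives this interface through the dma_bdrv_read() wrapper in the
next hunk; MyDevState, the prd_* fields and mydev_dma_done are
hypothetical stand-ins:

    /* Hypothetical sketch: start a scatter/gather disk read into the
     * guest buffers described by a PRD-style table. */
    static void mydev_start_read(MyDevState *s, uint64_t sector_num)
    {
        qemu_sglist_init(&s->sg, 2);    /* hint: expect about 2 entries */
        qemu_sglist_add(&s->sg, s->prd_base, s->prd_len);

        /* dma_bdrv_read() is dma_bdrv_io() with DMA_DIRECTION_FROM_DEVICE. */
        s->acb = dma_bdrv_read(s->bs, &s->sg, sector_num, mydev_dma_done, s);
    }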
 
@@ -170,12 +197,51 @@ BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
 {
-    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque, 0);
+    return dma_bdrv_io(bs, sg, sector, bdrv_aio_readv, cb, opaque,
+                       DMA_DIRECTION_FROM_DEVICE);
 }
 
 BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                  QEMUSGList *sg, uint64_t sector,
                                  void (*cb)(void *opaque, int ret), void *opaque)
 {
-    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque, 1);
+    return dma_bdrv_io(bs, sg, sector, bdrv_aio_writev, cb, opaque,
+                       DMA_DIRECTION_TO_DEVICE);
+}
+
+
+static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, bool to_dev)
+{
+    uint64_t resid;
+    int sg_cur_index;
+
+    resid = sg->size;
+    sg_cur_index = 0;
+    len = MIN(len, resid);
+    while (len > 0) {
+        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
+        int32_t xfer = MIN(len, entry.len);
+        cpu_physical_memory_rw(entry.base, ptr, xfer, !to_dev);
+        ptr += xfer;
+        len -= xfer;
+        resid -= xfer;
+    }
+
+    return resid;
+}
+
+uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
+{
+    return dma_buf_rw(ptr, len, sg, 0);
+}
+
+uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
+{
+    return dma_buf_rw(ptr, len, sg, 1);
+}
+
+void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
+                    QEMUSGList *sg, enum BlockAcctType type)
+{
+    bdrv_acct_start(bs, cookie, sg->size, type);
 }
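
dma_buf_read() and dma_buf_write() cover the bounce-buffer case, where
the data lives in a device-private buffer rather than on disk (identify
pages, sense data and the like); both return the number of scatter/gather
bytes left untouched. dma_acct_start() merely forwards the sglist size to
bdrv_acct_start(), to be closed out with bdrv_acct_done() on completion.
A hedged usage sketch, with MyDevState, s->sg and mydev_fill_identify as
hypothetical stand-ins:

    /* Hypothetical sketch: answer an identify-style request straight
     * from a bounce buffer instead of going through the block layer. */
    static void mydev_identify(MyDevState *s)
    {
        uint8_t page[512];
        uint64_t resid;

        mydev_fill_identify(page, sizeof(page));  /* hypothetical helper */

        /* "read" is from the guest's point of view: copy page[] into
         * the guest memory described by s->sg. */
        resid = dma_buf_read(page, sizeof(page), &s->sg);
        if (resid != 0) {
            /* the guest mapped more bytes than the page provides */
        }
    }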