X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=dma-helpers.c;h=272632f3671988e479d0c3473efbb8c12b1bb075;hb=3568ac2a6e6df89cea52a56f70b95af9e1251bbd;hp=35cb500581ebb62db151ea0566c98d2accec0496;hpb=23797df3d9f08031d19aaaa1d2863d5feebe3d8b;p=qemu.git

diff --git a/dma-helpers.c b/dma-helpers.c
index 35cb50058..272632f36 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -7,14 +7,15 @@
  * (GNU GPL), version 2 or later.
  */
 
-#include "dma.h"
+#include "sysemu/dma.h"
 #include "trace.h"
-#include "range.h"
-#include "qemu-thread.h"
+#include "qemu/range.h"
+#include "qemu/thread.h"
 
 /* #define DEBUG_IOMMU */
 
-static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
+static void do_dma_memory_set(AddressSpace *as,
+                              dma_addr_t addr, uint8_t c, dma_addr_t len)
 {
 #define FILLBUF_SIZE 512
     uint8_t fillbuf[FILLBUF_SIZE];
@@ -23,9 +24,9 @@ static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
     memset(fillbuf, c, FILLBUF_SIZE);
     while (len > 0) {
         l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
-        cpu_physical_memory_rw(addr, fillbuf, l, true);
-        len -= len;
-        addr += len;
+        address_space_rw(as, addr, fillbuf, l, true);
+        len -= l;
+        addr += l;
     }
 }
 
@@ -36,7 +37,7 @@ int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
     if (dma_has_iommu(dma)) {
         return iommu_dma_memory_set(dma, addr, c, len);
     }
-    do_dma_memory_set(addr, c, len);
+    do_dma_memory_set(dma->as, addr, c, len);
 
     return 0;
 }
@@ -65,6 +66,7 @@ void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
 void qemu_sglist_destroy(QEMUSGList *qsg)
 {
     g_free(qsg->sg);
+    memset(qsg, 0, sizeof(*qsg));
 }
 
 typedef struct {
@@ -193,7 +195,7 @@ static void dma_aio_cancel(BlockDriverAIOCB *acb)
     dma_complete(dbs, 0);
 }
 
-static AIOPool dma_aio_pool = {
+static const AIOCBInfo dma_aiocb_info = {
     .aiocb_size         = sizeof(DMAAIOCB),
     .cancel             = dma_aio_cancel,
 };
@@ -203,7 +205,7 @@ BlockDriverAIOCB *dma_bdrv_io(
     DMAIOFunc *io_func, BlockDriverCompletionFunc *cb,
     void *opaque, DMADirection dir)
 {
-    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);
+    DMAAIOCB *dbs = qemu_aio_get(&dma_aiocb_info, bs, cb, opaque);
 
     trace_dma_bdrv_io(dbs, bs, sector_num, (dir == DMA_DIRECTION_TO_DEVICE));
 
@@ -279,7 +281,7 @@ void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
 bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
                             DMADirection dir)
 {
-    target_phys_addr_t paddr, plen;
+    hwaddr paddr, plen;
 
 #ifdef DEBUG_IOMMU
     fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
@@ -306,7 +308,7 @@ bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
 int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                         void *buf, dma_addr_t len, DMADirection dir)
 {
-    target_phys_addr_t paddr, plen;
+    hwaddr paddr, plen;
     int err;
 
 #ifdef DEBUG_IOMMU
@@ -331,8 +333,7 @@ int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
             plen = len;
         }
 
-        cpu_physical_memory_rw(paddr, buf, plen,
-                               dir == DMA_DIRECTION_FROM_DEVICE);
+        address_space_rw(dma->as, paddr, buf, plen, dir == DMA_DIRECTION_FROM_DEVICE);
 
         len -= plen;
         addr += plen;
@@ -345,7 +346,7 @@ int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
 int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
                          dma_addr_t len)
 {
-    target_phys_addr_t paddr, plen;
+    hwaddr paddr, plen;
     int err;
 
 #ifdef DEBUG_IOMMU
@@ -365,7 +366,7 @@ int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
             plen = len;
         }
 
-        do_dma_memory_set(paddr, c, plen);
+        do_dma_memory_set(dma->as, paddr, c, plen);
 
         len -= plen;
         addr += plen;
@@ -374,13 +375,14 @@ int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
     return 0;
 }
 
-void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
+void dma_context_init(DMAContext *dma, AddressSpace *as, DMATranslateFunc translate,
                       DMAMapFunc map, DMAUnmapFunc unmap)
 {
 #ifdef DEBUG_IOMMU
     fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
             dma, translate, map, unmap);
 #endif
+    dma->as = as;
     dma->translate = translate;
     dma->map = map;
     dma->unmap = unmap;
@@ -390,7 +392,7 @@ void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
                            DMADirection dir)
 {
     int err;
-    target_phys_addr_t paddr, plen;
+    hwaddr paddr, plen;
     void *buf;
 
     if (dma->map) {
@@ -406,14 +408,13 @@ void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
     /*
      * If this is true, the virtual region is contiguous,
      * but the translated physical region isn't. We just
-     * clamp *len, much like cpu_physical_memory_map() does.
+     * clamp *len, much like address_space_map() does.
      */
     if (plen < *len) {
         *len = plen;
     }
 
-    buf = cpu_physical_memory_map(paddr, &plen,
-                                  dir == DMA_DIRECTION_FROM_DEVICE);
+    buf = address_space_map(dma->as, paddr, &plen, dir == DMA_DIRECTION_FROM_DEVICE);
     *len = plen;
 
     return buf;
@@ -427,8 +428,7 @@ void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
         return;
     }
 
-    cpu_physical_memory_unmap(buffer, len,
-                              dir == DMA_DIRECTION_FROM_DEVICE,
-                              access_len);
+    address_space_unmap(dma->as, buffer, len, dir == DMA_DIRECTION_FROM_DEVICE,
+                        access_len);
 }
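
Usage note (not part of the patch): the hunks above thread an AddressSpace through DMAContext, so dma_context_init() now records which address space the helpers target and the cpu_physical_memory_*() calls become address_space_rw()/address_space_map()/address_space_unmap(). A minimal sketch of how a caller might set up such a context is below. The device struct and callbacks (MyIOMMUState, my_iommu_translate, my_iommu_init) are hypothetical, and the DMATranslateFunc prototype is assumed to match the declaration in this tree's sysemu/dma.h.

#include "sysemu/dma.h"

typedef struct MyIOMMUState {
    DMAContext dma;     /* context handed to devices sitting behind this IOMMU */
    /* per-device translation state would live here */
} MyIOMMUState;

/* Hypothetical translate hook: identity mapping that never faults.
 * A real IOMMU would consult its tables and could shrink *plen to the
 * size of the contiguous mapped region. */
static int my_iommu_translate(DMAContext *dma, dma_addr_t addr,
                              hwaddr *paddr, hwaddr *plen, DMADirection dir)
{
    *paddr = addr;      /* no remapping in this sketch */
    return 0;           /* 0 == translation succeeded */
}

static void my_iommu_init(MyIOMMUState *s, AddressSpace *as)
{
    /* New with this patch: the context carries the AddressSpace that
     * address_space_rw()/map()/unmap() in dma-helpers.c will target.
     * map/unmap hooks are optional; passing NULL falls back to the
     * translate-based path above. */
    dma_context_init(&s->dma, as, my_iommu_translate, NULL, NULL);
}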