Rename target_phys_addr_t to hwaddr
diff --git a/dma.h b/dma.h
index f52a656fd5caf2a91325082a28fdfe16a1de86fd..91ccdb5eac7923350218defcc1c5f6d5bea8b9cf 100644 (file)
--- a/dma.h
+++ b/dma.h
 #define DMA_H
 
 #include <stdio.h>
+#include "memory.h"
 #include "hw/hw.h"
 #include "block.h"
+#include "kvm.h"
 
 typedef struct DMAContext DMAContext;
 typedef struct ScatterGatherEntry ScatterGatherEntry;
@@ -30,7 +32,7 @@ struct QEMUSGList {
     DMAContext *dma;
 };
 
-#if defined(TARGET_PHYS_ADDR_BITS)
+#ifndef CONFIG_USER_ONLY
 
 /*
  * When an IOMMU is present, bus addresses become distinct from
@@ -46,8 +48,8 @@ typedef uint64_t dma_addr_t;
 
 typedef int DMATranslateFunc(DMAContext *dma,
                              dma_addr_t addr,
-                             target_phys_addr_t *paddr,
-                             target_phys_addr_t *len,
+                             hwaddr *paddr,
+                             hwaddr *len,
                              DMADirection dir);
 typedef void* DMAMapFunc(DMAContext *dma,
                          dma_addr_t addr,
@@ -60,14 +62,40 @@ typedef void DMAUnmapFunc(DMAContext *dma,
                           dma_addr_t access_len);
 
 struct DMAContext {
+    AddressSpace *as;
     DMATranslateFunc *translate;
     DMAMapFunc *map;
     DMAUnmapFunc *unmap;
 };
 
+static inline void dma_barrier(DMAContext *dma, DMADirection dir)
+{
+    /*
+     * This is called before DMA read and write operations
+     * unless the _relaxed form is used, and is responsible
+     * for providing some sane ordering of accesses versus
+     * concurrently running VCPUs.
+     *
+     * Users of map(), unmap() or lower level st/ld_*
+     * operations are responsible for providing their own
+     * ordering via barriers.
+     *
+     * This primitive implementation does a simple smp_mb()
+     * before each operation, which provides pretty much full
+     * ordering.
+     *
+     * A smarter implementation can be devised if needed to
+     * use lighter barriers based on the direction of the
+     * transfer, the DMA context, etc...
+     */
+    if (kvm_enabled()) {
+        smp_mb();
+    }
+}
+
 static inline bool dma_has_iommu(DMAContext *dma)
 {
-    return !!dma;
+    return dma && dma->translate;
 }
 
 /* Checks that the given range of addresses is valid for DMA.  This is
@@ -88,19 +116,41 @@ static inline bool dma_memory_valid(DMAContext *dma,
 
 int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
                         void *buf, dma_addr_t len, DMADirection dir);
-static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
-                                void *buf, dma_addr_t len, DMADirection dir)
+static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
+                                        void *buf, dma_addr_t len,
+                                        DMADirection dir)
 {
     if (!dma_has_iommu(dma)) {
         /* Fast-path for no IOMMU */
-        cpu_physical_memory_rw(addr, buf, len,
-                               dir == DMA_DIRECTION_FROM_DEVICE);
+        address_space_rw(dma->as, addr, buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
         return 0;
     } else {
         return iommu_dma_memory_rw(dma, addr, buf, len, dir);
     }
 }
 
+static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
+                                          void *buf, dma_addr_t len)
+{
+    return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
+}
+
+static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
+                                           const void *buf, dma_addr_t len)
+{
+    return dma_memory_rw_relaxed(dma, addr, (void *)buf, len,
+                                 DMA_DIRECTION_FROM_DEVICE);
+}
+
+static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
+                                void *buf, dma_addr_t len,
+                                DMADirection dir)
+{
+    dma_barrier(dma, dir);
+
+    return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
+}
+
 static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
                                   void *buf, dma_addr_t len)
 {
@@ -127,11 +177,10 @@ static inline void *dma_memory_map(DMAContext *dma,
                                    DMADirection dir)
 {
     if (!dma_has_iommu(dma)) {
-        target_phys_addr_t xlen = *len;
+        hwaddr xlen = *len;
         void *p;
 
-        p = cpu_physical_memory_map(addr, &xlen,
-                                    dir == DMA_DIRECTION_FROM_DEVICE);
+        p = address_space_map(dma->as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE);
         *len = xlen;
         return p;
     } else {
@@ -147,9 +196,8 @@ static inline void dma_memory_unmap(DMAContext *dma,
                                     DMADirection dir, dma_addr_t access_len)
 {
     if (!dma_has_iommu(dma)) {
-        return cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len,
-                                         dir == DMA_DIRECTION_FROM_DEVICE,
-                                         access_len);
+        address_space_unmap(dma->as, buffer, (hwaddr)len,
+                            dir == DMA_DIRECTION_FROM_DEVICE, access_len);
     } else {
         iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
     }
@@ -193,7 +241,7 @@ DEFINE_LDST_DMA(q, q, 64, be);
 
 #undef DEFINE_LDST_DMA
 
-void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
+void dma_context_init(DMAContext *dma, AddressSpace *as, DMATranslateFunc translate,
                       DMAMapFunc map, DMAUnmapFunc unmap);
 
 struct ScatterGatherEntry {
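
To illustrate the split between the barrier-full and _relaxed accessors introduced above, here is a minimal sketch of a device model using the reworked API. Everything named my_* or MyDeviceDesc, the descriptor layout, and the use of &address_space_memory as the backing AddressSpace are hypothetical and only for illustration; just dma_context_init(), dma_memory_read(), the _relaxed variants and dma_barrier() come from this header.

#include <stdint.h>
#include <stddef.h>     /* offsetof */
#include "dma.h"

/* Hypothetical descriptor layout, for illustration only. */
typedef struct MyDeviceDesc {
    uint64_t buf_addr;   /* guest-physical address of the data buffer */
    uint32_t len;        /* length of the data buffer */
    uint32_t status;     /* written back by the device when done */
} MyDeviceDesc;

static DMAContext my_dma_ctx;

static void my_device_init(void)
{
    /* No translate/map/unmap callbacks: dma_has_iommu() stays false and
     * every access takes the direct address_space_rw() fast path.
     * Assumes &address_space_memory is the system memory AddressSpace. */
    dma_context_init(&my_dma_ctx, &address_space_memory, NULL, NULL, NULL);
}

static void my_device_process(dma_addr_t desc_addr)
{
    MyDeviceDesc desc;
    uint8_t payload[512];
    dma_addr_t copy_len;

    /* Barrier-full read: dma_memory_read() goes through dma_barrier(),
     * so the descriptor fetch is ordered against the VCPU that queued it. */
    dma_memory_read(&my_dma_ctx, desc_addr, &desc, sizeof(desc));

    copy_len = desc.len < sizeof(payload) ? desc.len : sizeof(payload);

    /* Bulk transfers within the same transaction can use the _relaxed
     * variants and provide a single explicit barrier at the end. */
    dma_memory_read_relaxed(&my_dma_ctx, desc.buf_addr, payload, copy_len);

    /* ... process payload ... */

    desc.status = 1;
    dma_memory_write_relaxed(&my_dma_ctx,
                             desc_addr + offsetof(MyDeviceDesc, status),
                             &desc.status, sizeof(desc.status));

    /* Order the status write-back against whatever the device does next
     * (e.g. raising an interrupt), as the dma_barrier() comment requires
     * of users of the relaxed and map/unmap interfaces. */
    dma_barrier(&my_dma_ctx, DMA_DIRECTION_FROM_DEVICE);
}

With an IOMMU-capable bus the same device code would work unchanged: dma_context_init() would be passed non-NULL translate/map/unmap callbacks, dma_has_iommu() then returns true, and the accessors route through iommu_dma_memory_rw() instead of the fast path.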