exec: make iotlb RCU-friendly
author    Paolo Bonzini <pbonzini@redhat.com>
Fri, 16 Aug 2013 06:26:30 +0000 (08:26 +0200)
committer Paolo Bonzini <pbonzini@redhat.com>
Mon, 16 Feb 2015 16:30:19 +0000 (17:30 +0100)
After the previous patch, TLBs will be flushed on every change to
the memory mapping.  This patch augments that with synchronization
of the MemoryRegionSections referred to in the iotlb array.

With this change, it is guaranteed that iotlb_to_region will access
the correct memory map, even once the TLB is accessed outside
the BQL.
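
In outline, the change amounts to the pattern below. This is a
simplified, self-contained sketch with made-up type names (Section,
Dispatch, AddrSpace, CPU), not the actual QEMU code: the CPU caches
the dispatch pointer it is currently using, that cache is refreshed
under the iothread lock on every memory map change, and every iotlb
lookup resolves through the cached pointer rather than through
as->dispatch.

    /* Simplified sketch of the pattern (made-up type names, not the actual
     * QEMU structures): the CPU keeps its own pointer to the dispatch table
     * in use; it is refreshed under the iothread lock whenever the memory
     * map changes, so iotlb indices and the sections they refer to stay
     * mutually consistent. */
    #include <stddef.h>

    typedef struct Section   { int region_id; /* stand-in for MemoryRegion */ } Section;
    typedef struct Dispatch  { Section *sections; size_t n_sections; } Dispatch;
    typedef struct AddrSpace { Dispatch *dispatch; } AddrSpace;
    typedef struct CPU       { AddrSpace *as; Dispatch *memory_dispatch; } CPU;

    static void tlb_flush(CPU *cpu) { (void)cpu; /* would drop cached translations */ }

    /* Writer side: called with the iothread (BQL) lock held. */
    static void cpu_reload_memory_map(CPU *cpu)
    {
        cpu->memory_dispatch = cpu->as->dispatch;  /* publish the new snapshot      */
        tlb_flush(cpu);                            /* invalidate stale iotlb entries */
    }

    /* Reader side: resolve through the cached snapshot, never as->dispatch,
     * so a lookup cannot mix an old iotlb index with a newer sections array. */
    static Section *iotlb_to_section(CPU *cpu, size_t index)
    {
        return &cpu->memory_dispatch->sections[index];
    }

    int main(void)
    {
        Section secs[1] = { { 42 } };
        Dispatch d = { secs, 1 };
        AddrSpace as = { &d };
        CPU cpu = { &as, NULL };

        cpu_reload_memory_map(&cpu);               /* take the initial snapshot */
        return iotlb_to_section(&cpu, 0)->region_id == 42 ? 0 : 1;
    }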

Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
cpu-exec.c
cputlb.c
exec.c
include/exec/cputlb.h
include/exec/exec-all.h
include/qom/cpu.h
softmmu_template.h

diff --git a/cpu-exec.c b/cpu-exec.c
index 78fe382162fc68f95e97107b6eb0f159932e274f..98f968df607832b31a3eb081eb9f66681ac736e7 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -24,6 +24,8 @@
 #include "qemu/atomic.h"
 #include "sysemu/qtest.h"
 #include "qemu/timer.h"
+#include "exec/address-spaces.h"
+#include "exec/memory-internal.h"
 
 /* -icount align implementation. */
 
@@ -144,7 +146,9 @@ void cpu_resume_from_signal(CPUState *cpu, void *puc)
 
 void cpu_reload_memory_map(CPUState *cpu)
 {
-    /* The TLB is protected by the iothread lock.  */
+    /* The CPU and TLB are protected by the iothread lock.  */
+    AddressSpaceDispatch *d = cpu->as->dispatch;
+    cpu->memory_dispatch = d;
     tlb_flush(cpu, 1);
 }
 #endif
diff --git a/cputlb.c b/cputlb.c
index 3b271d44d9d41ed17fa859b0bdab91cf5c78c790..f92db5e18341fa78195b1ca3153249a6dda703d3 100644
--- a/cputlb.c
+++ b/cputlb.c
@@ -265,8 +265,7 @@ void tlb_set_page(CPUState *cpu, target_ulong vaddr,
     }
 
     sz = size;
-    section = address_space_translate_for_iotlb(cpu->as, paddr,
-                                                &xlat, &sz);
+    section = address_space_translate_for_iotlb(cpu, paddr, &xlat, &sz);
     assert(sz >= TARGET_PAGE_SIZE);
 
 #if defined(DEBUG_TLB)
@@ -347,7 +346,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
         cpu_ldub_code(env1, addr);
     }
     pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
-    mr = iotlb_to_region(cpu->as, pd);
+    mr = iotlb_to_region(cpu, pd);
     if (memory_region_is_unassigned(mr)) {
         CPUClass *cc = CPU_GET_CLASS(cpu);
 
diff --git a/exec.c b/exec.c
index 2bfb4d361f297ec561287fcebaaa636184d84d89..fe1e60a3b893e5aa28575f277cf56b2af3d00967 100644
--- a/exec.c
+++ b/exec.c
@@ -401,11 +401,12 @@ MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
 }
 
 MemoryRegionSection *
-address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
-                                  hwaddr *plen)
+address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
+                                  hwaddr *xlat, hwaddr *plen)
 {
     MemoryRegionSection *section;
-    section = address_space_translate_internal(as->dispatch, addr, xlat, plen, false);
+    section = address_space_translate_internal(cpu->memory_dispatch,
+                                               addr, xlat, plen, false);
 
     assert(!section->mr->iommu_ops);
     return section;
@@ -1961,9 +1962,11 @@ static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
     return phys_section_add(map, &section);
 }
 
-MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index)
+MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
 {
-    return as->dispatch->map.sections[index & ~TARGET_PAGE_MASK].mr;
+    MemoryRegionSection *sections = cpu->memory_dispatch->map.sections;
+
+    return sections[index & ~TARGET_PAGE_MASK].mr;
 }
 
 static void io_mem_init(void)
diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h
index b8ecd6f68d6e118d19e7a5fcf2878b3d354402ae..e0da9d7ad39def63da2e53c7a6e81c290d39cca6 100644
--- a/include/exec/cputlb.h
+++ b/include/exec/cputlb.h
@@ -34,7 +34,7 @@ extern int tlb_flush_count;
 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);
 
 MemoryRegionSection *
-address_space_translate_for_iotlb(AddressSpace *as, hwaddr addr, hwaddr *xlat,
+address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr, hwaddr *xlat,
                                   hwaddr *plen);
 hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                        MemoryRegionSection *section,
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 1b30813449dd6a2eb04f4ba800f41f16a183a033..bb3fd37dc653e2a1c13504cf2fe5d37e678fb567 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -338,7 +338,8 @@ extern uintptr_t tci_tb_ptr;
 
 void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align));
 
-struct MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index);
+struct MemoryRegion *iotlb_to_region(CPUState *cpu,
+                                     hwaddr index);
 bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
                  uint64_t *pvalue, unsigned size);
 bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index 2098f1cb50573eded54f8a6cf3a8a98cd09c1619..48fd6fb1d2131d5e8ed8c9fac14b4d3206d969e9 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -256,6 +256,7 @@ struct CPUState {
     sigjmp_buf jmp_env;
 
     AddressSpace *as;
+    struct AddressSpaceDispatch *memory_dispatch;
     MemoryListener *tcg_as_listener;
 
     void *env_ptr; /* CPUArchState */
diff --git a/softmmu_template.h b/softmmu_template.h
index 6b4e615dbf284c6536f0b0d46b5e8cff25552e6b..0e3dd35fe1e45d9e107da7d4496e1c5c37bb3930 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -149,7 +149,7 @@ static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
 {
     uint64_t val;
     CPUState *cpu = ENV_GET_CPU(env);
-    MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
+    MemoryRegion *mr = iotlb_to_region(cpu, physaddr);
 
     physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
     cpu->mem_io_pc = retaddr;
@@ -369,7 +369,7 @@ static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                           uintptr_t retaddr)
 {
     CPUState *cpu = ENV_GET_CPU(env);
-    MemoryRegion *mr = iotlb_to_region(cpu->as, physaddr);
+    MemoryRegion *mr = iotlb_to_region(cpu, physaddr);
 
     physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
     if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu_can_do_io(cpu)) {