git.proxmox.com Git - mirror_qemu.git/blobdiff - exec.c
softmmu: move more files to softmmu/
[mirror_qemu.git] / exec.c
diff --git a/exec.c b/exec.c
index 7683afb6a8e1d48e85aa0bb220328f9b5d9bd99b..bca441f7fd358bfbf003b01f2095db512be3de4f 100644 (file)
--- a/exec.c
+++ b/exec.c
@@ -102,10 +102,6 @@ uintptr_t qemu_host_page_size;
 intptr_t qemu_host_page_mask;
 
 #if !defined(CONFIG_USER_ONLY)
-/* 0 = Do not count executed instructions.
-   1 = Precise instruction counting.
-   2 = Adaptive rate instruction counting.  */
-int use_icount;
 
 typedef struct PhysPageEntry PhysPageEntry;
 
@@ -353,13 +349,13 @@ static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                         hwaddr addr,
                                                         bool resolve_subpage)
 {
-    MemoryRegionSection *section = atomic_read(&d->mru_section);
+    MemoryRegionSection *section = qatomic_read(&d->mru_section);
     subpage_t *subpage;
 
     if (!section || section == &d->map.sections[PHYS_SECTION_UNASSIGNED] ||
         !section_covers_addr(section, addr)) {
         section = phys_page_find(d, addr);
-        atomic_set(&d->mru_section, section);
+        qatomic_set(&d->mru_section, section);
     }
     if (resolve_subpage && section->mr->subpage) {
         subpage = container_of(section->mr, subpage_t, iomem);
@@ -627,8 +623,7 @@ static void tcg_register_iommu_notifier(CPUState *cpu,
      */
     MemoryRegion *mr = MEMORY_REGION(iommu_mr);
     TCGIOMMUNotifier *notifier;
-    Error *err = NULL;
-    int i, ret;
+    int i;
 
     for (i = 0; i < cpu->iommu_notifiers->len; i++) {
         notifier = g_array_index(cpu->iommu_notifiers, TCGIOMMUNotifier *, i);
@@ -657,12 +652,8 @@ static void tcg_register_iommu_notifier(CPUState *cpu,
                             0,
                             HWADDR_MAX,
                             iommu_idx);
-        ret = memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
-                                                    &err);
-        if (ret) {
-            error_report_err(err);
-            exit(1);
-        }
+        memory_region_register_iommu_notifier(notifier->mr, &notifier->n,
+                                              &error_fatal);
     }
 
     if (!notifier->active) {
@@ -695,7 +686,8 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
     IOMMUMemoryRegionClass *imrc;
     IOMMUTLBEntry iotlb;
     int iommu_idx;
-    AddressSpaceDispatch *d = atomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
+    AddressSpaceDispatch *d =
+        qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch);
 
     for (;;) {
         section = address_space_translate_internal(d, addr, &addr, plen, false);
@@ -899,6 +891,7 @@ Property cpu_common_props[] = {
     DEFINE_PROP_LINK("memory", CPUState, memory, TYPE_MEMORY_REGION,
                      MemoryRegion *),
 #endif
+    DEFINE_PROP_BOOL("start-powered-off", CPUState, start_powered_off, false),
     DEFINE_PROP_END_OF_LIST(),
 };
 
@@ -1246,7 +1239,7 @@ static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
 {
     RAMBlock *block;
 
-    block = atomic_rcu_read(&ram_list.mru_block);
+    block = qatomic_rcu_read(&ram_list.mru_block);
     if (block && addr - block->offset < block->max_length) {
         return block;
     }
@@ -1272,7 +1265,7 @@ found:
      *                                        call_rcu(reclaim_ramblock, xxx);
      *                  rcu_read_unlock()
      *
-     * atomic_rcu_set is not needed here.  The block was already published
+     * qatomic_rcu_set is not needed here.  The block was already published
      * when it was placed into the list.  Here we're just making an extra
      * copy of the pointer.
      */
@@ -1320,7 +1313,7 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
     page = start_page;
 
     WITH_RCU_READ_LOCK_GUARD() {
-        blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
         ramblock = qemu_get_ram_block(start);
         /* Range sanity check on the ramblock */
         assert(start >= ramblock->offset &&
@@ -1370,7 +1363,7 @@ DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
     dest = 0;
 
     WITH_RCU_READ_LOCK_GUARD() {
-        blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+        blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]);
 
         while (page < end) {
             unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
@@ -2206,7 +2199,7 @@ static void dirty_memory_extend(ram_addr_t old_ram_size,
         DirtyMemoryBlocks *new_blocks;
         int j;
 
-        old_blocks = atomic_rcu_read(&ram_list.dirty_memory[i]);
+        old_blocks = qatomic_rcu_read(&ram_list.dirty_memory[i]);
         new_blocks = g_malloc(sizeof(*new_blocks) +
                               sizeof(new_blocks->blocks[0]) * new_num_blocks);
 
@@ -2219,7 +2212,7 @@ static void dirty_memory_extend(ram_addr_t old_ram_size,
             new_blocks->blocks[j] = bitmap_new(DIRTY_MEMORY_BLOCK_SIZE);
         }
 
-        atomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
+        qatomic_rcu_set(&ram_list.dirty_memory[i], new_blocks);
 
         if (old_blocks) {
             g_free_rcu(old_blocks, rcu);
@@ -2666,7 +2659,7 @@ RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
     }
 
     RCU_READ_LOCK_GUARD();
-    block = atomic_rcu_read(&ram_list.mru_block);
+    block = qatomic_rcu_read(&ram_list.mru_block);
     if (block && block->host && host - block->host < block->max_length) {
         goto found;
     }
@@ -2750,6 +2743,14 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
     QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
         if (watchpoint_address_matches(wp, addr, len)
             && (wp->flags & flags)) {
+            if (replay_running_debug()) {
+                /*
+                 * Don't process the watchpoints when we are
+                 * in a reverse debugging operation.
+                 */
+                replay_breakpoint();
+                return;
+            }
             if (flags == BP_MEM_READ) {
                 wp->flags |= BP_WATCHPOINT_HIT_READ;
             } else {
@@ -2911,7 +2912,7 @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu,
 {
     int asidx = cpu_asidx_from_attrs(cpu, attrs);
     CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx];
-    AddressSpaceDispatch *d = atomic_rcu_read(&cpuas->memory_dispatch);
+    AddressSpaceDispatch *d = qatomic_rcu_read(&cpuas->memory_dispatch);
     MemoryRegionSection *sections = d->map.sections;
 
     return &sections[index & ~TARGET_PAGE_MASK];
@@ -2995,7 +2996,7 @@ static void tcg_commit(MemoryListener *listener)
      * may have split the RCU critical section.
      */
     d = address_space_to_dispatch(cpuas->as);
-    atomic_rcu_set(&cpuas->memory_dispatch, d);
+    qatomic_rcu_set(&cpuas->memory_dispatch, d);
     tlb_flush(cpuas->cpu);
 }
 
@@ -3135,7 +3136,7 @@ static bool prepare_mmio_access(MemoryRegion *mr)
     bool unlocked = !qemu_mutex_iothread_locked();
     bool release_lock = false;
 
-    if (unlocked && mr->global_locking) {
+    if (unlocked) {
         qemu_mutex_lock_iothread();
         unlocked = false;
         release_lock = true;
@@ -3442,7 +3443,7 @@ void cpu_register_map_client(QEMUBH *bh)
     qemu_mutex_lock(&map_client_list_lock);
     client->bh = bh;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
-    if (!atomic_read(&bounce.in_use)) {
+    if (!qatomic_read(&bounce.in_use)) {
         cpu_notify_map_clients_locked();
     }
     qemu_mutex_unlock(&map_client_list_lock);
@@ -3576,7 +3577,7 @@ void *address_space_map(AddressSpace *as,
     mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs);
 
     if (!memory_access_is_direct(mr, is_write)) {
-        if (atomic_xchg(&bounce.in_use, true)) {
+        if (qatomic_xchg(&bounce.in_use, true)) {
             *plen = 0;
             return NULL;
         }
@@ -3635,7 +3636,7 @@ void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
     qemu_vfree(bounce.buffer);
     bounce.buffer = NULL;
     memory_region_unref(bounce.mr);
-    atomic_mb_set(&bounce.in_use, false);
+    qatomic_mb_set(&bounce.in_use, false);
     cpu_notify_map_clients();
 }
 
@@ -4104,16 +4105,17 @@ int ram_block_discard_disable(bool state)
     int old;
 
     if (!state) {
-        atomic_dec(&ram_block_discard_disabled);
+        qatomic_dec(&ram_block_discard_disabled);
         return 0;
     }
 
     do {
-        old = atomic_read(&ram_block_discard_disabled);
+        old = qatomic_read(&ram_block_discard_disabled);
         if (old < 0) {
             return -EBUSY;
         }
-    } while (atomic_cmpxchg(&ram_block_discard_disabled, old, old + 1) != old);
+    } while (qatomic_cmpxchg(&ram_block_discard_disabled,
+                             old, old + 1) != old);
     return 0;
 }
 
@@ -4122,27 +4124,28 @@ int ram_block_discard_require(bool state)
     int old;
 
     if (!state) {
-        atomic_inc(&ram_block_discard_disabled);
+        qatomic_inc(&ram_block_discard_disabled);
         return 0;
     }
 
     do {
-        old = atomic_read(&ram_block_discard_disabled);
+        old = qatomic_read(&ram_block_discard_disabled);
         if (old > 0) {
             return -EBUSY;
         }
-    } while (atomic_cmpxchg(&ram_block_discard_disabled, old, old - 1) != old);
+    } while (qatomic_cmpxchg(&ram_block_discard_disabled,
+                             old, old - 1) != old);
     return 0;
 }
 
 bool ram_block_discard_is_disabled(void)
 {
-    return atomic_read(&ram_block_discard_disabled) > 0;
+    return qatomic_read(&ram_block_discard_disabled) > 0;
 }
 
 bool ram_block_discard_is_required(void)
 {
-    return atomic_read(&ram_block_discard_disabled) < 0;
+    return qatomic_read(&ram_block_discard_disabled) < 0;
 }
 
 #endif