index 9a822ed88c3b60bbd95655828ad288e430cc0475..15ec38e631f08301b1112c6a997a457fd6d8cba3 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -57,9 +57,11 @@ struct KVMState
     KVMSlot slots[32];
     int fd;
     int vmfd;
+    int regs_modified;
     int coalesced_mmio;
     int broken_set_mem_region;
     int migration_log;
+    int vcpu_events;
 #ifdef KVM_CAP_SET_GUEST_DEBUG
     struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
 #endif
@@ -149,6 +151,7 @@ static void kvm_reset_vcpu(void *opaque)
 {
     CPUState *env = opaque;
 
+    kvm_arch_reset_vcpu(env);
     if (kvm_arch_put_registers(env)) {
         fprintf(stderr, "Fatal: kvm vcpu reset failed\n");
         abort();
@@ -200,32 +203,13 @@ int kvm_init_vcpu(CPUState *env)
     ret = kvm_arch_init_vcpu(env);
     if (ret == 0) {
         qemu_register_reset(kvm_reset_vcpu, env);
+        kvm_arch_reset_vcpu(env);
         ret = kvm_arch_put_registers(env);
     }
 err:
     return ret;
 }
 
-int kvm_put_mp_state(CPUState *env)
-{
-    struct kvm_mp_state mp_state = { .mp_state = env->mp_state };
-
-    return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
-}
-
-int kvm_get_mp_state(CPUState *env)
-{
-    struct kvm_mp_state mp_state;
-    int ret;
-
-    ret = kvm_vcpu_ioctl(env, KVM_GET_MP_STATE, &mp_state);
-    if (ret < 0) {
-        return ret;
-    }
-    env->mp_state = mp_state.mp_state;
-    return 0;
-}
-
 /*
  * dirty pages logging control
  */
@@ -318,7 +302,6 @@ int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
     KVMDirtyLog d;
     KVMSlot *mem;
     int ret = 0;
-    int r;
 
     d.dirty_bitmap = NULL;
     while (start_addr < end_addr) {
@@ -327,11 +310,6 @@ int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
             break;
         }
 
-        /* We didn't activate dirty logging? Don't care then. */
-        if(!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
-            continue;
-        }
-
         size = ((mem->memory_size >> TARGET_PAGE_BITS) + 7) / 8;
         if (!d.dirty_bitmap) {
             d.dirty_bitmap = qemu_malloc(size);
@@ -343,8 +321,7 @@ int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
 
         d.slot = mem->slot;
 
-        r = kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d);
-        if (r == -EINVAL) {
+        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
             dprintf("ioctl failed %d\n", errno);
             ret = -1;
             break;
@@ -358,10 +335,6 @@ int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
 
             if (test_le_bit(nr, bitmap)) {
                 cpu_physical_memory_set_dirty(addr);
-            } else if (r < 0) {
-                /* When our KVM implementation doesn't know about dirty logging
-                 * we can just assume it's always dirty and be fine. */
-                cpu_physical_memory_set_dirty(addr);
             }
         }
         start_addr = phys_addr;
@@ -438,13 +411,13 @@ int kvm_init(int smp_cpus)
     s = qemu_mallocz(sizeof(KVMState));
 
 #ifdef KVM_CAP_SET_GUEST_DEBUG
-    TAILQ_INIT(&s->kvm_sw_breakpoints);
+    QTAILQ_INIT(&s->kvm_sw_breakpoints);
 #endif
     for (i = 0; i < ARRAY_SIZE(s->slots); i++)
         s->slots[i].slot = i;
 
     s->vmfd = -1;
-    s->fd = open("/dev/kvm", O_RDWR);
+    s->fd = qemu_open("/dev/kvm", O_RDWR);
     if (s->fd == -1) {
         fprintf(stderr, "Could not access KVM kernel module: %m\n");
         ret = -errno;
@@ -507,6 +480,11 @@ int kvm_init(int smp_cpus)
     }
 #endif
 
+    s->vcpu_events = 0;
+#ifdef KVM_CAP_VCPU_EVENTS
+    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
+#endif
+
     ret = kvm_arch_init(s, smp_cpus);
     if (ret < 0)
         goto err;
@@ -527,8 +505,8 @@ err:
     return ret;
 }
 
-static int kvm_handle_io(CPUState *env, uint16_t port, void *data,
-                         int direction, int size, uint32_t count)
+static int kvm_handle_io(uint16_t port, void *data, int direction, int size,
+                         uint32_t count)
 {
     int i;
     uint8_t *ptr = data;
@@ -537,25 +515,25 @@ static int kvm_handle_io(CPUState *env, uint16_t port, void *data,
         if (direction == KVM_EXIT_IO_IN) {
             switch (size) {
             case 1:
-                stb_p(ptr, cpu_inb(env, port));
+                stb_p(ptr, cpu_inb(port));
                 break;
             case 2:
-                stw_p(ptr, cpu_inw(env, port));
+                stw_p(ptr, cpu_inw(port));
                 break;
             case 4:
-                stl_p(ptr, cpu_inl(env, port));
+                stl_p(ptr, cpu_inl(port));
                 break;
             }
         } else {
             switch (size) {
             case 1:
-                cpu_outb(env, port, ldub_p(ptr));
+                cpu_outb(port, ldub_p(ptr));
                 break;
             case 2:
-                cpu_outw(env, port, lduw_p(ptr));
+                cpu_outw(port, lduw_p(ptr));
                 break;
             case 4:
-                cpu_outl(env, port, ldl_p(ptr));
+                cpu_outl(port, ldl_p(ptr));
                 break;
             }
         }
@@ -587,6 +565,14 @@ static void kvm_run_coalesced_mmio(CPUState *env, struct kvm_run *run)
 #endif
 }
 
+void kvm_cpu_synchronize_state(CPUState *env)
+{
+    if (!env->kvm_state->regs_modified) {
+        kvm_arch_get_registers(env);
+        env->kvm_state->regs_modified = 1;
+    }
+}
+
 int kvm_cpu_exec(CPUState *env)
 {
     struct kvm_run *run = env->kvm_run;
@@ -601,8 +587,15 @@ int kvm_cpu_exec(CPUState *env)
             break;
         }
 
+        if (env->kvm_state->regs_modified) {
+            kvm_arch_put_registers(env);
+            env->kvm_state->regs_modified = 0;
+        }
+
         kvm_arch_pre_run(env, run);
+        qemu_mutex_unlock_iothread();
         ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
+        qemu_mutex_lock_iothread();
         kvm_arch_post_run(env, run);
 
         if (ret == -EINTR || ret == -EAGAIN) {
@@ -622,7 +615,7 @@ int kvm_cpu_exec(CPUState *env)
         switch (run->exit_reason) {
         case KVM_EXIT_IO:
             dprintf("handle_io\n");
-            ret = kvm_handle_io(env, run->io.port,
+            ret = kvm_handle_io(run->io.port,
                                 (uint8_t *)run + run->io.data_offset,
                                 run->io.direction,
                                 run->io.size,
@@ -881,6 +874,11 @@ int kvm_has_sync_mmu(void)
 #endif
 }
 
+int kvm_has_vcpu_events(void)
+{
+    return kvm_state->vcpu_events;
+}
+
 void kvm_setup_guest_memory(void *start, size_t size)
 {
     if (!kvm_has_sync_mmu()) {
@@ -902,11 +900,15 @@ void kvm_setup_guest_memory(void *start, size_t size)
 #ifdef KVM_CAP_SET_GUEST_DEBUG
 static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
 {
+#ifdef CONFIG_IOTHREAD
     if (env == cpu_single_env) {
         func(data);
         return;
     }
     abort();
+#else
+    func(data);
+#endif
 }
 
 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
@@ -914,7 +916,7 @@ struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
 {
     struct kvm_sw_breakpoint *bp;
 
-    TAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
+    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
         if (bp->pc == pc)
             return bp;
     }
@@ -923,7 +925,7 @@ struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
 
 int kvm_sw_breakpoints_active(CPUState *env)
 {
-    return !TAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
+    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
 }
 
 struct kvm_set_guest_debug_data {
@@ -935,7 +937,13 @@ struct kvm_set_guest_debug_data {
 static void kvm_invoke_set_guest_debug(void *data)
 {
     struct kvm_set_guest_debug_data *dbg_data = data;
-    dbg_data->err = kvm_vcpu_ioctl(dbg_data->env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
+    CPUState *env = dbg_data->env;
+
+    if (env->kvm_state->regs_modified) {
+        kvm_arch_put_registers(env);
+        env->kvm_state->regs_modified = 0;
+    }
+    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
 }
 
 int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
@@ -980,7 +988,7 @@ int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
             return err;
         }
 
-        TAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
+        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
     } else {
         err = kvm_arch_insert_hw_breakpoint(addr, len, type);
@@ -1017,7 +1025,7 @@ int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
         if (err)
             return err;
 
-        TAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
+        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
         qemu_free(bp);
     } else {
         err = kvm_arch_remove_hw_breakpoint(addr, len, type);
@@ -1039,7 +1047,7 @@ void kvm_remove_all_breakpoints(CPUState *current_env)
     KVMState *s = current_env->kvm_state;
     CPUState *env;
 
-    TAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
+    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
         if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
             /* Try harder to find a CPU that currently sees the breakpoint. */
             for (env = first_cpu; env != NULL; env = env->next_cpu) {