kvm: Convert kvm_lock to a mutex
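kvm_lock protects vm_list and the VM creation/activity counters. Converting it from a spinlock to a mutex allows sleeping functions to be called while the lock is held; the hunks below change the definition from DEFINE_SPINLOCK to DEFINE_MUTEX and mechanically replace spin_lock()/spin_unlock() with mutex_lock()/mutex_unlock() at each kvm_lock site shown in virt/kvm/kvm_main.c.

As a hedged illustration (not part of this patch), a walker over vm_list may now yield the CPU mid-iteration, which the old spinlock did not permit; for_each_vm_sketch() below is a hypothetical helper, not a kvm_main.c function:

/*
 * Illustrative sketch only -- not taken from this patch. kvm_lock,
 * vm_list and struct kvm are the real kvm_main.c symbols;
 * for_each_vm_sketch() is hypothetical. With kvm_lock as a mutex,
 * sleeping calls such as cond_resched() are legal inside the
 * critical section, which a spinlock would rule out.
 */
static void for_each_vm_sketch(void (*fn)(struct kvm *kvm))
{
	struct kvm *kvm;

	mutex_lock(&kvm_lock);
	list_for_each_entry(kvm, &vm_list, vm_list) {
		fn(kvm);
		cond_resched();	/* allowed now that kvm_lock is a mutex */
	}
	mutex_unlock(&kvm_lock);
}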
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3d73a3d28d6341512763f90488ab88a2c5cb0a8e..0c55d766d5ede5338e3a35a98403bc1cfc6cae8e 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
  *     kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_MUTEX(kvm_lock);
 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
 
@@ -666,9 +666,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
        if (r)
                goto out_err;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        preempt_notifier_inc();
 
@@ -714,9 +714,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
        kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
        kvm_destroy_vm_debugfs(kvm);
        kvm_arch_sync_events(kvm);
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_del(&kvm->vm_list);
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
        kvm_free_irq_routing(kvm);
        for (i = 0; i < KVM_NR_BUSES; i++) {
                struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
@@ -3824,13 +3824,13 @@ static int vm_stat_get(void *_offset, u64 *val)
        u64 tmp_val;
 
        *val = 0;
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
                *val += tmp_val;
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
        return 0;
 }
 
@@ -3843,12 +3843,12 @@ static int vm_stat_clear(void *_offset, u64 val)
        if (val)
                return -EINVAL;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vm_stat_clear_per_vm((void *)&stat_tmp, 0);
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        return 0;
 }
@@ -3863,13 +3863,13 @@ static int vcpu_stat_get(void *_offset, u64 *val)
        u64 tmp_val;
 
        *val = 0;
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
                *val += tmp_val;
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
        return 0;
 }
 
@@ -3882,12 +3882,12 @@ static int vcpu_stat_clear(void *_offset, u64 val)
        if (val)
                return -EINVAL;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        return 0;
 }
@@ -3908,7 +3908,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
        if (!kvm_dev.this_device || !kvm)
                return;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        if (type == KVM_EVENT_CREATE_VM) {
                kvm_createvm_count++;
                kvm_active_vms++;
@@ -3917,7 +3917,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
        }
        created = kvm_createvm_count;
        active = kvm_active_vms;
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        env = kzalloc(sizeof(*env), GFP_KERNEL);
        if (!env)