git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
kvm: Convert kvm_lock to a mutex
author Junaid Shahid <junaids@google.com>
Fri, 4 Jan 2019 01:14:28 +0000 (17:14 -0800)
committer Stefan Bader <stefan.bader@canonical.com>
Wed, 6 Nov 2019 09:27:24 +0000 (10:27 +0100)
It doesn't seem as if there is any particular need for kvm_lock to be a
spinlock, so convert the lock to a mutex so that sleepable functions (in
particular cond_resched()) can be called while holding it.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
CVE-2018-12207

(backported from commit 0d9ce162cf46c99628cc5da9510b959c7976735b)
[tyhicks: Backport to 4.15
 - kvm_hyperv_tsc_notifier() does not exist]
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
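
For context only (editorial note, not part of the commit): a minimal sketch of the pattern this conversion enables, assuming the post-patch declarations of kvm_lock and vm_list from include/linux/kvm_host.h. walk_all_vms() is a hypothetical helper used for illustration; it is not a function touched by this patch.

/*
 * Sketch: iterate every VM while holding kvm_lock and voluntarily
 * reschedule between entries.  This is only legal once kvm_lock is a
 * mutex; with the old spinlock, cond_resched() inside the critical
 * section would be a sleep-in-atomic bug.
 */
#include <linux/kvm_host.h>
#include <linux/sched.h>

static void walk_all_vms(void)          /* hypothetical helper */
{
        struct kvm *kvm;

        mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                /* ... per-VM work that may take a while ... */
                cond_resched();         /* allowed under a mutex */
        }
        mutex_unlock(&kvm_lock);
}

The vm_list walks converted below (mmu_shrink_scan(), kvmclock_cpufreq_notifier(), vm_stat_get() and friends) all follow this same lock/iterate/unlock shape.
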
Documentation/virtual/kvm/locking.txt
arch/s390/kvm/kvm-s390.c
arch/x86/kvm/mmu.c
arch/x86/kvm/x86.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

diff --git a/Documentation/virtual/kvm/locking.txt b/Documentation/virtual/kvm/locking.txt
index 1bb8bcaf8497703f7cdd61538ca1374f0e8ac622..635cd6eaf71495e081de44774e489d622323fcf4 100644 (file)
--- a/Documentation/virtual/kvm/locking.txt
+++ b/Documentation/virtual/kvm/locking.txt
@@ -15,8 +15,6 @@ The acquisition orders for mutexes are as follows:
 
 On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
 
-For spinlocks, kvm_lock is taken outside kvm->mmu_lock.
-
 Everything else is a leaf: no other lock is taken inside the critical
 sections.
 
@@ -169,7 +167,7 @@ which time it will be set using the Dirty tracking mechanism described above.
 ------------
 
 Name:          kvm_lock
-Type:          spinlock_t
+Type:          mutex
 Arch:          any
 Protects:      - vm_list
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 80f80f9d93376aa54efc305b02a61683b73415f0..5b7a165a98897aa8082e581db558dff2f20bf085 100644 (file)
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -2260,13 +2260,13 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
        if (!kvm->arch.sca)
                goto out_err;
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        sca_offset += 16;
        if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
                sca_offset = 0;
        kvm->arch.sca = (struct bsca_block *)
                        ((char *) kvm->arch.sca + sca_offset);
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        sprintf(debug_name, "kvm-%u", current->pid);
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 295c678b37f559be6ea03ef814d004c8585cfc14..dc3792e7092c7159b5581d02360335b5a94691ef 100644 (file)
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -5474,7 +5474,7 @@ mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        int nr_to_scan = sc->nr_to_scan;
        unsigned long freed = 0;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
 
        list_for_each_entry(kvm, &vm_list, vm_list) {
                int idx;
@@ -5524,7 +5524,7 @@ unlock:
                break;
        }
 
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
        return freed;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 176c54b0a03f1bd35e3b1e4f201457ede96256d9..747151ff129314c1ac6c39d6909b5829ce01c282 100644 (file)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6184,17 +6184,17 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
 
        smp_call_function_single(freq->cpu, tsc_khz_changed, freq, 1);
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        if (vcpu->cpu != freq->cpu)
                                continue;
                        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
-                       if (vcpu->cpu != smp_processor_id())
+                       if (vcpu->cpu != raw_smp_processor_id())
                                send_ipi = 1;
                }
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        if (freq->old < freq->new && send_ipi) {
                /*
@@ -6331,12 +6331,12 @@ static void pvclock_gtod_update_fn(struct work_struct *work)
        struct kvm_vcpu *vcpu;
        int i;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list)
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
        atomic_set(&kvm_guest_has_master_clock, 0);
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 }
 
 static DECLARE_WORK(pvclock_gtod_work, pvclock_gtod_update_fn);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9c51dd3ade6ad72dce5e3408258eb4e605666e63..04114962a8940f4bdd7a419ef244121956a9eac2 100644 (file)
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -140,7 +140,7 @@ static inline bool is_error_page(struct page *page)
 
 extern struct kmem_cache *kvm_vcpu_cache;
 
-extern spinlock_t kvm_lock;
+extern struct mutex kvm_lock;
 extern struct list_head vm_list;
 
 struct kvm_io_range {
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3d73a3d28d6341512763f90488ab88a2c5cb0a8e..0c55d766d5ede5338e3a35a98403bc1cfc6cae8e 100644 (file)
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -92,7 +92,7 @@ EXPORT_SYMBOL_GPL(halt_poll_ns_shrink);
  *     kvm->lock --> kvm->slots_lock --> kvm->irq_lock
  */
 
-DEFINE_SPINLOCK(kvm_lock);
+DEFINE_MUTEX(kvm_lock);
 static DEFINE_RAW_SPINLOCK(kvm_count_lock);
 LIST_HEAD(vm_list);
 
@@ -666,9 +666,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
        if (r)
                goto out_err;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_add(&kvm->vm_list, &vm_list);
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        preempt_notifier_inc();
 
@@ -714,9 +714,9 @@ static void kvm_destroy_vm(struct kvm *kvm)
        kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm);
        kvm_destroy_vm_debugfs(kvm);
        kvm_arch_sync_events(kvm);
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_del(&kvm->vm_list);
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
        kvm_free_irq_routing(kvm);
        for (i = 0; i < KVM_NR_BUSES; i++) {
                struct kvm_io_bus *bus = kvm_get_bus(kvm, i);
@@ -3824,13 +3824,13 @@ static int vm_stat_get(void *_offset, u64 *val)
        u64 tmp_val;
 
        *val = 0;
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vm_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
                *val += tmp_val;
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
        return 0;
 }
 
@@ -3843,12 +3843,12 @@ static int vm_stat_clear(void *_offset, u64 val)
        if (val)
                return -EINVAL;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vm_stat_clear_per_vm((void *)&stat_tmp, 0);
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        return 0;
 }
@@ -3863,13 +3863,13 @@ static int vcpu_stat_get(void *_offset, u64 *val)
        u64 tmp_val;
 
        *val = 0;
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vcpu_stat_get_per_vm((void *)&stat_tmp, &tmp_val);
                *val += tmp_val;
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
        return 0;
 }
 
@@ -3882,12 +3882,12 @@ static int vcpu_stat_clear(void *_offset, u64 val)
        if (val)
                return -EINVAL;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        list_for_each_entry(kvm, &vm_list, vm_list) {
                stat_tmp.kvm = kvm;
                vcpu_stat_clear_per_vm((void *)&stat_tmp, 0);
        }
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        return 0;
 }
@@ -3908,7 +3908,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
        if (!kvm_dev.this_device || !kvm)
                return;
 
-       spin_lock(&kvm_lock);
+       mutex_lock(&kvm_lock);
        if (type == KVM_EVENT_CREATE_VM) {
                kvm_createvm_count++;
                kvm_active_vms++;
@@ -3917,7 +3917,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
        }
        created = kvm_createvm_count;
        active = kvm_active_vms;
-       spin_unlock(&kvm_lock);
+       mutex_unlock(&kvm_lock);
 
        env = kzalloc(sizeof(*env), GFP_KERNEL);
        if (!env)