2 files changed, 111 insertions(+)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index 76e82a38ba09..0c9417dab9d9 100644
+index a0e45b4c7a08..d22b0fac61c0 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -3389,6 +3389,15 @@
+@@ -3395,6 +3395,15 @@
Also, it enforces the PCI Local Bus spec
rule that those bits should be 0 in system reset
events (useful for kexec/kdump cases).
Safety option to keep boot IRQs enabled. This
should never be necessary.
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
-index 4558d1192817..22104194fc8d 100644
+index 66cd9678c672..7f589736a49e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -193,6 +193,106 @@ static int __init pci_apply_final_quirks(void)
/*
* Decoding should be disabled for a PCI device during BAR sizing to avoid
* conflict. But doing so may cause problems on host bridge and perhaps other
-@@ -4567,6 +4667,8 @@ static const struct pci_dev_acs_enabled {
+@@ -4573,6 +4673,8 @@ static const struct pci_dev_acs_enabled {
{ PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
/* APM X-Gene */
{ PCI_VENDOR_ID_AMCC, 0xE004, pci_quirk_xgene_acs },
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
-index 2a0e281542cc..257896531139 100644
+index 9e4b0036141f..70a2c4c27bed 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -103,7 +103,7 @@ module_param(enable_apicv, bool, S_IRUGO);
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Imre Deak <imre.deak@intel.com>
+Date: Mon, 28 Oct 2019 20:15:17 +0200
+Subject: [PATCH] drm/i915: Avoid HPD poll detect triggering a new detect cycle
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+For the HPD interrupt functionality the HW depends on power wells in the
+display core domain being on. Accordingly, when enabling these power
+wells the HPD polling logic will force an HPD detection cycle to account
+for hotplug events that may have happened when such a power well was
+off.
+
+Thus a detect cycle started by polling could start a new detect cycle if
+a power well in the display core domain gets enabled during detect and
+stays enabled after detect completes. That in turn can lead to a
+detection cycle runaway.
+
+To prevent re-triggering a poll-detect cycle, make sure we drop all power
+references we acquired during detect synchronously by the end of detect.
+This will let the poll-detect logic continue with polling (matching the
+off state of the corresponding power wells) instead of scheduling a new
+detection cycle.
+
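+A minimal sketch of the resulting detect-callback pattern (hypothetical
+helper names, locals assumed; simplified from the three hunks below):
+
+  static int xxx_detect(struct drm_connector *connector, bool force)
+  {
+          intel_wakeref_t wakeref;
+          int status;
+
+          wakeref = intel_display_power_get(dev_priv, power_domain);
+          status = probe_connector(connector); /* may enable more wells */
+          intel_display_power_put(dev_priv, power_domain, wakeref);
+
+          /*
+           * Power refs are normally released from delayed work; flush it
+           * so every ref taken during detect is dropped before HPD
+           * polling can observe the wells as still enabled.
+           */
+          intel_display_power_flush_work(dev_priv);
+
+          return status;
+  }
+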
+Fixes: 6cfe7ec02e85 ("drm/i915: Remove the unneeded AUX power ref from intel_dp_detect()")
+Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=112125
+Reported-and-tested-by: Val Kulkov <val.kulkov@gmail.com>
+Reported-and-tested-by: wangqr <wqr.prg@gmail.com>
+Cc: Val Kulkov <val.kulkov@gmail.com>
+Cc: wangqr <wqr.prg@gmail.com>
+Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Signed-off-by: Imre Deak <imre.deak@intel.com>
+Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191028181517.22602-1-imre.deak@intel.com
+(cherry picked from commit a8ddac7c9f06a12227a4f5febd1cbe0575a33179)
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ drivers/gpu/drm/i915/display/intel_crt.c | 7 +++++++
+ drivers/gpu/drm/i915/display/intel_dp.c | 6 ++++++
+ drivers/gpu/drm/i915/display/intel_hdmi.c | 6 ++++++
+ 3 files changed, 19 insertions(+)
+
+diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
+index 3fcf2f84bcce..da1d6be46a0c 100644
+--- a/drivers/gpu/drm/i915/display/intel_crt.c
++++ b/drivers/gpu/drm/i915/display/intel_crt.c
+@@ -867,6 +867,13 @@ intel_crt_detect(struct drm_connector *connector,
+
+ out:
+ intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
++
++ /*
++ * Make sure the refs for power wells enabled during detect are
++ * dropped to avoid a new detect cycle triggered by HPD polling.
++ */
++ intel_display_power_flush_work(dev_priv);
++
+ return status;
+ }
+
+diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
+index 305abddc274a..dbdd46ba9bfe 100644
+--- a/drivers/gpu/drm/i915/display/intel_dp.c
++++ b/drivers/gpu/drm/i915/display/intel_dp.c
+@@ -5649,6 +5649,12 @@ intel_dp_detect(struct drm_connector *connector,
+ if (status != connector_status_connected && !intel_dp->is_mst)
+ intel_dp_unset_edid(intel_dp);
+
++ /*
++ * Make sure the refs for power wells enabled during detect are
++ * dropped to avoid a new detect cycle triggered by HPD polling.
++ */
++ intel_display_power_flush_work(dev_priv);
++
+ return status;
+ }
+
+diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
+index 7ffdfaae7188..5d5453461a6f 100644
+--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
++++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
+@@ -2571,6 +2571,12 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
+ if (status != connector_status_connected)
+ cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
+
++ /*
++ * Make sure the refs for power wells enabled during detect are
++ * dropped to avoid a new detect cycle triggered by HPD polling.
++ */
++ intel_display_power_flush_work(dev_priv);
++
+ return status;
+ }
+
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Yazen Ghannam <yazen.ghannam@amd.com>
-Date: Thu, 21 Nov 2019 08:15:08 -0600
-Subject: [PATCH] x86/MCE/AMD: Allow Reserved types to be overwritten in
- smca_banks[]
-
-Each logical CPU in Scalable MCA systems controls a unique set of MCA
-banks in the system. These banks are not shared between CPUs. The bank
-types and ordering will be the same across CPUs on currently available
-systems.
-
-However, some CPUs may see a bank as Reserved/Read-as-Zero (RAZ) while
-other CPUs do not. In this case, the bank seen as Reserved on one CPU is
-assumed to be the same type as the bank seen as a known type on another
-CPU.
-
-In general, this occurs when the hardware represented by the MCA bank
-is disabled, e.g. disabled memory controllers on certain models, etc.
-The MCA bank is disabled in the hardware, so there is no possibility of
-getting an MCA/MCE from it even if it is assumed to have a known type.
-
-For example:
-
-Full system:
- Bank | Type seen on CPU0 | Type seen on CPU1
- ------------------------------------------------
- 0 | LS | LS
- 1 | UMC | UMC
- 2 | CS | CS
-
-System with hardware disabled:
- Bank | Type seen on CPU0 | Type seen on CPU1
- ------------------------------------------------
- 0 | LS | LS
- 1 | UMC | RAZ
- 2 | CS | CS
-
-For this reason, there is a single, global struct smca_banks[] that is
-initialized at boot time. This array is initialized on each CPU as it
-comes online. However, the array will not be updated if an entry already
-exists.
-
-This works as expected when the first CPU (usually CPU0) has all
-possible MCA banks enabled. But if the first CPU has a subset, then it
-will save a "Reserved" type in smca_banks[]. Successive CPUs will then
-not be able to update smca_banks[] even if they encounter a known bank
-type.
-
-This may result in unexpected behavior. Depending on the system
-configuration, a user may observe issues enumerating the MCA
-thresholding sysfs interface. The issues may be as trivial as sysfs
-entries not being available, or as severe as system hangs.
-
-For example:
-
- Bank | Type seen on CPU0 | Type seen on CPU1
- ------------------------------------------------
- 0 | LS | LS
- 1 | RAZ | UMC
- 2 | CS | CS
-
-Extend the smca_banks[] entry check to return if the entry is a
-non-reserved type. Otherwise, continue so that CPUs that encounter a
-known bank type can update smca_banks[].
-
-Fixes: 68627a697c19 ("x86/mce/AMD, EDAC/mce_amd: Enumerate Reserved SMCA bank type")
-Signed-off-by: Yazen Ghannam <yazen.ghannam@amd.com>
-Signed-off-by: Borislav Petkov <bp@suse.de>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kernel/cpu/mce/amd.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
-index 6ea7fdc82f3c..08e09c8c269f 100644
---- a/arch/x86/kernel/cpu/mce/amd.c
-+++ b/arch/x86/kernel/cpu/mce/amd.c
-@@ -266,7 +266,7 @@ static void smca_configure(unsigned int bank, unsigned int cpu)
- smca_set_misc_banks_map(bank, cpu);
-
- /* Return early if this bank was already initialized. */
-- if (smca_banks[bank].hwid)
-+ if (smca_banks[bank].hwid && smca_banks[bank].hwid->hwid_mcatype != 0)
- return;
-
- if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_IPID(bank), &low, &high)) {
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Imre Deak <imre.deak@intel.com>
-Date: Mon, 28 Oct 2019 20:15:17 +0200
-Subject: [PATCH] drm/i915: Avoid HPD poll detect triggering a new detect cycle
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-For the HPD interrupt functionality the HW depends on power wells in the
-display core domain being on. Accordingly, when enabling these power
-wells the HPD polling logic will force an HPD detection cycle to account
-for hotplug events that may have happened when such a power well was
-off.
-
-Thus a detect cycle started by polling could start a new detect cycle if
-a power well in the display core domain gets enabled during detect and
-stays enabled after detect completes. That in turn can lead to a
-detection cycle runaway.
-
-To prevent re-triggering a poll-detect cycle, make sure we drop all power
-references we acquired during detect synchronously by the end of detect.
-This will let the poll-detect logic continue with polling (matching the
-off state of the corresponding power wells) instead of scheduling a new
-detection cycle.
-
-Fixes: 6cfe7ec02e85 ("drm/i915: Remove the unneeded AUX power ref from intel_dp_detect()")
-Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=112125
-Reported-and-tested-by: Val Kulkov <val.kulkov@gmail.com>
-Reported-and-tested-by: wangqr <wqr.prg@gmail.com>
-Cc: Val Kulkov <val.kulkov@gmail.com>
-Cc: wangqr <wqr.prg@gmail.com>
-Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
-Signed-off-by: Imre Deak <imre.deak@intel.com>
-Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
-Link: https://patchwork.freedesktop.org/patch/msgid/20191028181517.22602-1-imre.deak@intel.com
-(cherry picked from commit a8ddac7c9f06a12227a4f5febd1cbe0575a33179)
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- drivers/gpu/drm/i915/display/intel_crt.c | 7 +++++++
- drivers/gpu/drm/i915/display/intel_dp.c | 6 ++++++
- drivers/gpu/drm/i915/display/intel_hdmi.c | 6 ++++++
- 3 files changed, 19 insertions(+)
-
-diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
-index 3fcf2f84bcce..da1d6be46a0c 100644
---- a/drivers/gpu/drm/i915/display/intel_crt.c
-+++ b/drivers/gpu/drm/i915/display/intel_crt.c
-@@ -867,6 +867,13 @@ intel_crt_detect(struct drm_connector *connector,
-
- out:
- intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
-+
-+ /*
-+ * Make sure the refs for power wells enabled during detect are
-+ * dropped to avoid a new detect cycle triggered by HPD polling.
-+ */
-+ intel_display_power_flush_work(dev_priv);
-+
- return status;
- }
-
-diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
-index 4b4d516b15e0..106b666a2cc3 100644
---- a/drivers/gpu/drm/i915/display/intel_dp.c
-+++ b/drivers/gpu/drm/i915/display/intel_dp.c
-@@ -5649,6 +5649,12 @@ intel_dp_detect(struct drm_connector *connector,
- if (status != connector_status_connected && !intel_dp->is_mst)
- intel_dp_unset_edid(intel_dp);
-
-+ /*
-+ * Make sure the refs for power wells enabled during detect are
-+ * dropped to avoid a new detect cycle triggered by HPD polling.
-+ */
-+ intel_display_power_flush_work(dev_priv);
-+
- return status;
- }
-
-diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
-index 7ffdfaae7188..5d5453461a6f 100644
---- a/drivers/gpu/drm/i915/display/intel_hdmi.c
-+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
-@@ -2571,6 +2571,12 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
- if (status != connector_status_connected)
- cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
-
-+ /*
-+ * Make sure the refs for power wells enabled during detect are
-+ * dropped to avoid a new detect cycle triggered by HPD polling.
-+ */
-+ intel_display_power_flush_work(dev_priv);
-+
- return status;
- }
-
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Date: Fri, 31 Jan 2020 08:06:40 -0300
+Subject: [PATCH] x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit
+
+CVE-2019-3016
+CVE-2020-3016
+
+kvm_steal_time_set_preempted() may accidentally clear the
+KVM_VCPU_FLUSH_TLB bit if it is called more than once while the vCPU
+is preempted.
+
+This is part of CVE-2019-3016.
+
+(This bug was also independently discovered by Jim Mattson
+<jmattson@google.com>)
+
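+The lost update, step by step (st.steal.preempted holds flag bits, not
+a boolean; the sequence below is illustrative):
+
+  st->preempted = KVM_VCPU_PREEMPTED;        /* first preemption       */
+  /* guest, still preempted, requests a TLB flush: */
+  st->preempted |= KVM_VCPU_FLUSH_TLB;
+  st->preempted = KVM_VCPU_PREEMPTED;        /* second, unguarded call */
+                                             /* FLUSH_TLB bit is lost  */
+
+The early return added below turns the second call into a no-op.
+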
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ arch/x86/kvm/x86.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 9c45e6ca30fd..80e860bd39d5 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3399,6 +3399,9 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
+ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ return;
+
++ if (vcpu->arch.st.steal.preempted)
++ return;
++
+ vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
+
+ kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Date: Fri, 31 Jan 2020 08:06:40 -0300
-Subject: [PATCH] x86/kvm: Be careful not to clear KVM_VCPU_FLUSH_TLB bit
-
-CVE-2019-3016
-CVE-2020-3016
-
-kvm_steal_time_set_preempted() may accidentally clear the
-KVM_VCPU_FLUSH_TLB bit if it is called more than once while the vCPU
-is preempted.
-
-This is part of CVE-2019-3016.
-
-(This bug was also independently discovered by Jim Mattson
-<jmattson@google.com>)
-
-Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
-Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/x86.c | 3 +++
- 1 file changed, 3 insertions(+)
-
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 778b3a899769..92d8e4ebba16 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -3393,6 +3393,9 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
- if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
- return;
-
-+ if (vcpu->arch.st.steal.preempted)
-+ return;
-+
- vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
-
- kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Date: Fri, 31 Jan 2020 08:06:41 -0300
+Subject: [PATCH] x86/kvm: Introduce kvm_(un)map_gfn()
+
+CVE-2019-3016
+CVE-2020-3016
+
+kvm_vcpu_(un)map operates on gfns from any current address space. In
+certain cases we want to make sure we are not mapping SMRAM, and for
+that we can use the kvm_(un)map_gfn() helpers that this patch
+introduces.
+
+This is part of CVE-2019-3016.
+
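+Illustrative call sequence (hypothetical caller; the real users are
+wired up in the follow-up patches):
+
+  struct kvm_host_map map;
+
+  /* Resolve the gfn in the default address space (kvm_memslots()),
+   * so an SMM vcpu cannot end up mapping SMRAM. */
+  if (kvm_map_gfn(vcpu, gpa >> PAGE_SHIFT, &map))
+          return;
+
+  /* ... access guest data through map.hva ... */
+
+  kvm_unmap_gfn(vcpu, &map, true);           /* true: page was dirtied */
+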
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ include/linux/kvm_host.h | 2 ++
+ virt/kvm/kvm_main.c | 29 ++++++++++++++++++++++++-----
+ 2 files changed, 26 insertions(+), 5 deletions(-)
+
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index d41c521a39da..df4cc0ead363 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -758,8 +758,10 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
+ kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
+ kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+ int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
++int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
+ struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
+ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
++int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+ unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
+ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
+ int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 91e56a9b0661..6614e030ae75 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1792,12 +1792,13 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+ }
+ EXPORT_SYMBOL_GPL(gfn_to_page);
+
+-static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
++static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
+ struct kvm_host_map *map)
+ {
+ kvm_pfn_t pfn;
+ void *hva = NULL;
+ struct page *page = KVM_UNMAPPED_PAGE;
++ struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
+
+ if (!map)
+ return -EINVAL;
+@@ -1826,14 +1827,20 @@ static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
+ return 0;
+ }
+
++int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
++{
++ return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
++}
++EXPORT_SYMBOL_GPL(kvm_map_gfn);
++
+ int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+ {
+- return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
++ return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
+ }
+ EXPORT_SYMBOL_GPL(kvm_vcpu_map);
+
+-void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+- bool dirty)
++static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
++ struct kvm_host_map *map, bool dirty)
+ {
+ if (!map)
+ return;
+@@ -1849,7 +1856,7 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+ #endif
+
+ if (dirty) {
+- kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
++ mark_page_dirty_in_slot(memslot, map->gfn);
+ kvm_release_pfn_dirty(map->pfn);
+ } else {
+ kvm_release_pfn_clean(map->pfn);
+@@ -1858,6 +1865,18 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+ map->hva = NULL;
+ map->page = NULL;
+ }
++
++int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
++{
++ __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
++
++void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
++{
++ __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
++}
+ EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
+
+ struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Date: Fri, 31 Jan 2020 08:06:42 -0300
+Subject: [PATCH] x86/kvm: Cache gfn to pfn translation
+
+CVE-2019-3016
+CVE-2020-3016
+
+__kvm_map_gfn()'s call to gfn_to_pfn_memslot() is
+* relatively expensive
+* not callable in certain cases (such as from atomic context)
+
+Stashing the gfn-to-pfn mapping should help in both cases.
+
+This is part of CVE-2019-3016.
+
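+The cache lookup added to __kvm_map_gfn() boils down to (from the
+kvm_main.c hunk below):
+
+  if (!cache->pfn || cache->gfn != gfn || cache->generation != gen) {
+          if (atomic)
+                  return -EAGAIN;    /* refilling the cache may sleep */
+          kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
+  }
+  pfn = cache->pfn;                  /* hit, or freshly refilled      */
+
+A memslot generation bump invalidates the entry, and
+kvm_arch_memslots_updated() kicks every vcpu so the stale cache is
+re-initialized.
+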
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ arch/x86/include/asm/kvm_host.h | 1 +
+ arch/x86/kvm/x86.c | 10 ++++
+ include/linux/kvm_host.h | 7 ++-
+ include/linux/kvm_types.h | 9 ++-
+ virt/kvm/kvm_main.c | 98 ++++++++++++++++++++++++++-------
+ 5 files changed, 103 insertions(+), 22 deletions(-)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index f68e174f452f..7c06343614a4 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -678,6 +678,7 @@ struct kvm_vcpu_arch {
+ u64 last_steal;
+ struct gfn_to_hva_cache stime;
+ struct kvm_steal_time steal;
++ struct gfn_to_pfn_cache cache;
+ } st;
+
+ u64 tsc_offset;
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 80e860bd39d5..cb18560b07bc 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -8945,6 +8945,9 @@ static void fx_init(struct kvm_vcpu *vcpu)
+ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
+ {
+ void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
++ struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
++
++ kvm_release_pfn(cache->pfn, cache->dirty, cache);
+
+ kvmclock_reset(vcpu);
+
+@@ -9611,11 +9614,18 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
+
+ void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
+ {
++ struct kvm_vcpu *vcpu;
++ int i;
++
+ /*
+ * memslots->generation has been incremented.
+ * mmio generation may have reached its maximum value.
+ */
+ kvm_mmu_invalidate_mmio_sptes(kvm, gen);
++
++ /* Force re-initialization of steal_time cache */
++ kvm_for_each_vcpu(i, vcpu, kvm)
++ kvm_vcpu_kick(vcpu);
+ }
+
+ int kvm_arch_prepare_memory_region(struct kvm *kvm,
+diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
+index df4cc0ead363..abfc2fbde957 100644
+--- a/include/linux/kvm_host.h
++++ b/include/linux/kvm_host.h
+@@ -728,6 +728,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
+ void kvm_set_pfn_accessed(kvm_pfn_t pfn);
+ void kvm_get_pfn(kvm_pfn_t pfn);
+
++void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
+ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
+ int len);
+ int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
+@@ -758,10 +759,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
+ kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
+ kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+ int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
+-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
++int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
++ struct gfn_to_pfn_cache *cache, bool atomic);
+ struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
+ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
++int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
++ struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
+ unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
+ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
+ int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
+diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
+index bde5374ae021..2382cb58969d 100644
+--- a/include/linux/kvm_types.h
++++ b/include/linux/kvm_types.h
+@@ -18,7 +18,7 @@ struct kvm_memslots;
+
+ enum kvm_mr_change;
+
+-#include <asm/types.h>
++#include <linux/types.h>
+
+ /*
+ * Address types:
+@@ -49,4 +49,11 @@ struct gfn_to_hva_cache {
+ struct kvm_memory_slot *memslot;
+ };
+
++struct gfn_to_pfn_cache {
++ u64 generation;
++ gfn_t gfn;
++ kvm_pfn_t pfn;
++ bool dirty;
++};
++
+ #endif /* __KVM_TYPES_H__ */
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 6614e030ae75..f05e5b5c30e8 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -1792,27 +1792,72 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
+ }
+ EXPORT_SYMBOL_GPL(gfn_to_page);
+
++void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
++{
++ if (pfn == 0)
++ return;
++
++ if (cache)
++ cache->pfn = cache->gfn = 0;
++
++ if (dirty)
++ kvm_release_pfn_dirty(pfn);
++ else
++ kvm_release_pfn_clean(pfn);
++}
++
++static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
++ struct gfn_to_pfn_cache *cache, u64 gen)
++{
++ kvm_release_pfn(cache->pfn, cache->dirty, cache);
++
++ cache->pfn = gfn_to_pfn_memslot(slot, gfn);
++ cache->gfn = gfn;
++ cache->dirty = false;
++ cache->generation = gen;
++}
++
+ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
+- struct kvm_host_map *map)
++ struct kvm_host_map *map,
++ struct gfn_to_pfn_cache *cache,
++ bool atomic)
+ {
+ kvm_pfn_t pfn;
+ void *hva = NULL;
+ struct page *page = KVM_UNMAPPED_PAGE;
+ struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
++ u64 gen = slots->generation;
+
+ if (!map)
+ return -EINVAL;
+
+- pfn = gfn_to_pfn_memslot(slot, gfn);
++ if (cache) {
++ if (!cache->pfn || cache->gfn != gfn ||
++ cache->generation != gen) {
++ if (atomic)
++ return -EAGAIN;
++ kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
++ }
++ pfn = cache->pfn;
++ } else {
++ if (atomic)
++ return -EAGAIN;
++ pfn = gfn_to_pfn_memslot(slot, gfn);
++ }
+ if (is_error_noslot_pfn(pfn))
+ return -EINVAL;
+
+ if (pfn_valid(pfn)) {
+ page = pfn_to_page(pfn);
+- hva = kmap(page);
++ if (atomic)
++ hva = kmap_atomic(page);
++ else
++ hva = kmap(page);
+ #ifdef CONFIG_HAS_IOMEM
+- } else {
++ } else if (!atomic) {
+ hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
++ } else {
++ return -EINVAL;
+ #endif
+ }
+
+@@ -1827,20 +1872,25 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
+ return 0;
+ }
+
+-int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
++int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
++ struct gfn_to_pfn_cache *cache, bool atomic)
+ {
+- return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
++ return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
++ cache, atomic);
+ }
+ EXPORT_SYMBOL_GPL(kvm_map_gfn);
+
+ int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
+ {
+- return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
++ return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
++ NULL, false);
+ }
+ EXPORT_SYMBOL_GPL(kvm_vcpu_map);
+
+ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
+- struct kvm_host_map *map, bool dirty)
++ struct kvm_host_map *map,
++ struct gfn_to_pfn_cache *cache,
++ bool dirty, bool atomic)
+ {
+ if (!map)
+ return;
+@@ -1848,34 +1898,44 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
+ if (!map->hva)
+ return;
+
+- if (map->page != KVM_UNMAPPED_PAGE)
+- kunmap(map->page);
++ if (map->page != KVM_UNMAPPED_PAGE) {
++ if (atomic)
++ kunmap_atomic(map->hva);
++ else
++ kunmap(map->page);
++ }
+ #ifdef CONFIG_HAS_IOMEM
+- else
++ else if (!atomic)
+ memunmap(map->hva);
++ else
++ WARN_ONCE(1, "Unexpected unmapping in atomic context");
+ #endif
+
+- if (dirty) {
++ if (dirty)
+ mark_page_dirty_in_slot(memslot, map->gfn);
+- kvm_release_pfn_dirty(map->pfn);
+- } else {
+- kvm_release_pfn_clean(map->pfn);
+- }
++
++ if (cache)
++ cache->dirty |= dirty;
++ else
++ kvm_release_pfn(map->pfn, dirty, NULL);
+
+ map->hva = NULL;
+ map->page = NULL;
+ }
+
+-int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
++int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
++ struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
+ {
+- __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
++ __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
++ cache, dirty, atomic);
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
+
+ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
+ {
+- __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
++ __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
++ dirty, false);
+ }
+ EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
+
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Date: Fri, 31 Jan 2020 08:06:41 -0300
-Subject: [PATCH] x86/kvm: Introduce kvm_(un)map_gfn()
-
-CVE-2019-3016
-CVE-2020-3016
-
-kvm_vcpu_(un)map operates on gfns from any current address space. In
-certain cases we want to make sure we are not mapping SMRAM, and for
-that we can use the kvm_(un)map_gfn() helpers that this patch
-introduces.
-
-This is part of CVE-2019-3016.
-
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
-Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- include/linux/kvm_host.h | 2 ++
- virt/kvm/kvm_main.c | 29 ++++++++++++++++++++++++-----
- 2 files changed, 26 insertions(+), 5 deletions(-)
-
-diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
-index d41c521a39da..df4cc0ead363 100644
---- a/include/linux/kvm_host.h
-+++ b/include/linux/kvm_host.h
-@@ -758,8 +758,10 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
- kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
- kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
- int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
-+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
- struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
- void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
- unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
- unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
- int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
-diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 91e56a9b0661..6614e030ae75 100644
---- a/virt/kvm/kvm_main.c
-+++ b/virt/kvm/kvm_main.c
-@@ -1792,12 +1792,13 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
- }
- EXPORT_SYMBOL_GPL(gfn_to_page);
-
--static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
-+static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
- struct kvm_host_map *map)
- {
- kvm_pfn_t pfn;
- void *hva = NULL;
- struct page *page = KVM_UNMAPPED_PAGE;
-+ struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
-
- if (!map)
- return -EINVAL;
-@@ -1826,14 +1827,20 @@ static int __kvm_map_gfn(struct kvm_memory_slot *slot, gfn_t gfn,
- return 0;
- }
-
-+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
-+{
-+ return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
-+}
-+EXPORT_SYMBOL_GPL(kvm_map_gfn);
-+
- int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
- {
-- return __kvm_map_gfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn, map);
-+ return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
- }
- EXPORT_SYMBOL_GPL(kvm_vcpu_map);
-
--void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-- bool dirty)
-+static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
-+ struct kvm_host_map *map, bool dirty)
- {
- if (!map)
- return;
-@@ -1849,7 +1856,7 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
- #endif
-
- if (dirty) {
-- kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
-+ mark_page_dirty_in_slot(memslot, map->gfn);
- kvm_release_pfn_dirty(map->pfn);
- } else {
- kvm_release_pfn_clean(map->pfn);
-@@ -1858,6 +1865,18 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
- map->hva = NULL;
- map->page = NULL;
- }
-+
-+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
-+{
-+ __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
-+ return 0;
-+}
-+EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
-+
-+void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
-+{
-+ __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
-+}
- EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
-
- struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Date: Fri, 31 Jan 2020 08:06:43 -0300
+Subject: [PATCH] x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not missed
+
+CVE-2019-3016
+CVE-2020-3016
+
+There is a potential race in record_steal_time() between setting
+host-local vcpu->arch.st.steal.preempted to zero (i.e. clearing
+KVM_VCPU_PREEMPTED) and propagating this value to the guest with
+kvm_write_guest_cached(). Between those two events the guest may
+still see KVM_VCPU_PREEMPTED in its copy of kvm_steal_time, set
+KVM_VCPU_FLUSH_TLB and assume that the hypervisor will do the right
+thing. Which it won't.
+
+Instead of copying, we should map kvm_steal_time and that will
+guarantee atomicity of accesses to @preempted.
+
+This is part of CVE-2019-3016.
+
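+With the mapping in place, the flush hint is consumed with a single
+atomic exchange directly on guest memory (taken from the
+record_steal_time() hunk below), so a bit set by the guest can no
+longer be overwritten by a stale host-side copy:
+
+  if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
+          kvm_vcpu_flush_tlb(vcpu, false);
+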
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ arch/x86/kvm/x86.c | 49 +++++++++++++++++++++++++++-------------------
+ 1 file changed, 29 insertions(+), 20 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index cb18560b07bc..f63fa5846f08 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2488,43 +2488,45 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+
+ static void record_steal_time(struct kvm_vcpu *vcpu)
+ {
++ struct kvm_host_map map;
++ struct kvm_steal_time *st;
++
+ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ return;
+
+- if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
++ /* -EAGAIN is returned in atomic context so we can just return. */
++ if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
++ &map, &vcpu->arch.st.cache, false))
+ return;
+
++ st = map.hva +
++ offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
++
+ /*
+ * Doing a TLB flush here, on the guest's behalf, can avoid
+ * expensive IPIs.
+ */
+- if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
++ if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
+ kvm_vcpu_flush_tlb(vcpu, false);
+
+- if (vcpu->arch.st.steal.version & 1)
+- vcpu->arch.st.steal.version += 1; /* first time write, random junk */
++ vcpu->arch.st.steal.preempted = 0;
+
+- vcpu->arch.st.steal.version += 1;
++ if (st->version & 1)
++ st->version += 1; /* first time write, random junk */
+
+- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
++ st->version += 1;
+
+ smp_wmb();
+
+- vcpu->arch.st.steal.steal += current->sched_info.run_delay -
++ st->steal += current->sched_info.run_delay -
+ vcpu->arch.st.last_steal;
+ vcpu->arch.st.last_steal = current->sched_info.run_delay;
+
+- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
+-
+ smp_wmb();
+
+- vcpu->arch.st.steal.version += 1;
++ st->version += 1;
+
+- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
+- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
++ kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
+ }
+
+ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+@@ -3396,18 +3398,25 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+
+ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
+ {
++ struct kvm_host_map map;
++ struct kvm_steal_time *st;
++
+ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ return;
+
+ if (vcpu->arch.st.steal.preempted)
+ return;
+
+- vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
++ if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
++ &vcpu->arch.st.cache, true))
++ return;
++
++ st = map.hva +
++ offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
++
++ st->preempted = vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
+
+- kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
+- &vcpu->arch.st.steal.preempted,
+- offsetof(struct kvm_steal_time, preempted),
+- sizeof(vcpu->arch.st.steal.preempted));
++ kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
+ }
+
+ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Date: Fri, 31 Jan 2020 08:06:42 -0300
-Subject: [PATCH] x86/kvm: Cache gfn to pfn translation
-
-CVE-2019-3016
-CVE-2020-3016
-
-__kvm_map_gfn()'s call to gfn_to_pfn_memslot() is
-* relatively expensive
-* not callable in certain cases (such as from atomic context)
-
-Stashing the gfn-to-pfn mapping should help in both cases.
-
-This is part of CVE-2019-3016.
-
-Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
-Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/include/asm/kvm_host.h | 1 +
- arch/x86/kvm/x86.c | 10 ++++
- include/linux/kvm_host.h | 7 ++-
- include/linux/kvm_types.h | 9 ++-
- virt/kvm/kvm_main.c | 98 ++++++++++++++++++++++++++-------
- 5 files changed, 103 insertions(+), 22 deletions(-)
-
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index f68e174f452f..7c06343614a4 100644
---- a/arch/x86/include/asm/kvm_host.h
-+++ b/arch/x86/include/asm/kvm_host.h
-@@ -678,6 +678,7 @@ struct kvm_vcpu_arch {
- u64 last_steal;
- struct gfn_to_hva_cache stime;
- struct kvm_steal_time steal;
-+ struct gfn_to_pfn_cache cache;
- } st;
-
- u64 tsc_offset;
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 92d8e4ebba16..41fee3d359ab 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -8936,6 +8936,9 @@ static void fx_init(struct kvm_vcpu *vcpu)
- void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
- {
- void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
-+ struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
-+
-+ kvm_release_pfn(cache->pfn, cache->dirty, cache);
-
- kvmclock_reset(vcpu);
-
-@@ -9602,11 +9605,18 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
-
- void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
- {
-+ struct kvm_vcpu *vcpu;
-+ int i;
-+
- /*
- * memslots->generation has been incremented.
- * mmio generation may have reached its maximum value.
- */
- kvm_mmu_invalidate_mmio_sptes(kvm, gen);
-+
-+ /* Force re-initialization of steal_time cache */
-+ kvm_for_each_vcpu(i, vcpu, kvm)
-+ kvm_vcpu_kick(vcpu);
- }
-
- int kvm_arch_prepare_memory_region(struct kvm *kvm,
-diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
-index df4cc0ead363..abfc2fbde957 100644
---- a/include/linux/kvm_host.h
-+++ b/include/linux/kvm_host.h
-@@ -728,6 +728,7 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
- void kvm_set_pfn_accessed(kvm_pfn_t pfn);
- void kvm_get_pfn(kvm_pfn_t pfn);
-
-+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
- int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
- int len);
- int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
-@@ -758,10 +759,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
- kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
- kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
- int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
--int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map);
-+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-+ struct gfn_to_pfn_cache *cache, bool atomic);
- struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
- void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
--int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
-+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-+ struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
- unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
- unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
- int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
-diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
-index bde5374ae021..2382cb58969d 100644
---- a/include/linux/kvm_types.h
-+++ b/include/linux/kvm_types.h
-@@ -18,7 +18,7 @@ struct kvm_memslots;
-
- enum kvm_mr_change;
-
--#include <asm/types.h>
-+#include <linux/types.h>
-
- /*
- * Address types:
-@@ -49,4 +49,11 @@ struct gfn_to_hva_cache {
- struct kvm_memory_slot *memslot;
- };
-
-+struct gfn_to_pfn_cache {
-+ u64 generation;
-+ gfn_t gfn;
-+ kvm_pfn_t pfn;
-+ bool dirty;
-+};
-+
- #endif /* __KVM_TYPES_H__ */
-diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index 6614e030ae75..f05e5b5c30e8 100644
---- a/virt/kvm/kvm_main.c
-+++ b/virt/kvm/kvm_main.c
-@@ -1792,27 +1792,72 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
- }
- EXPORT_SYMBOL_GPL(gfn_to_page);
-
-+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache)
-+{
-+ if (pfn == 0)
-+ return;
-+
-+ if (cache)
-+ cache->pfn = cache->gfn = 0;
-+
-+ if (dirty)
-+ kvm_release_pfn_dirty(pfn);
-+ else
-+ kvm_release_pfn_clean(pfn);
-+}
-+
-+static void kvm_cache_gfn_to_pfn(struct kvm_memory_slot *slot, gfn_t gfn,
-+ struct gfn_to_pfn_cache *cache, u64 gen)
-+{
-+ kvm_release_pfn(cache->pfn, cache->dirty, cache);
-+
-+ cache->pfn = gfn_to_pfn_memslot(slot, gfn);
-+ cache->gfn = gfn;
-+ cache->dirty = false;
-+ cache->generation = gen;
-+}
-+
- static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
-- struct kvm_host_map *map)
-+ struct kvm_host_map *map,
-+ struct gfn_to_pfn_cache *cache,
-+ bool atomic)
- {
- kvm_pfn_t pfn;
- void *hva = NULL;
- struct page *page = KVM_UNMAPPED_PAGE;
- struct kvm_memory_slot *slot = __gfn_to_memslot(slots, gfn);
-+ u64 gen = slots->generation;
-
- if (!map)
- return -EINVAL;
-
-- pfn = gfn_to_pfn_memslot(slot, gfn);
-+ if (cache) {
-+ if (!cache->pfn || cache->gfn != gfn ||
-+ cache->generation != gen) {
-+ if (atomic)
-+ return -EAGAIN;
-+ kvm_cache_gfn_to_pfn(slot, gfn, cache, gen);
-+ }
-+ pfn = cache->pfn;
-+ } else {
-+ if (atomic)
-+ return -EAGAIN;
-+ pfn = gfn_to_pfn_memslot(slot, gfn);
-+ }
- if (is_error_noslot_pfn(pfn))
- return -EINVAL;
-
- if (pfn_valid(pfn)) {
- page = pfn_to_page(pfn);
-- hva = kmap(page);
-+ if (atomic)
-+ hva = kmap_atomic(page);
-+ else
-+ hva = kmap(page);
- #ifdef CONFIG_HAS_IOMEM
-- } else {
-+ } else if (!atomic) {
- hva = memremap(pfn_to_hpa(pfn), PAGE_SIZE, MEMREMAP_WB);
-+ } else {
-+ return -EINVAL;
- #endif
- }
-
-@@ -1827,20 +1872,25 @@ static int __kvm_map_gfn(struct kvm_memslots *slots, gfn_t gfn,
- return 0;
- }
-
--int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
-+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
-+ struct gfn_to_pfn_cache *cache, bool atomic)
- {
-- return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map);
-+ return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map,
-+ cache, atomic);
- }
- EXPORT_SYMBOL_GPL(kvm_map_gfn);
-
- int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
- {
-- return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map);
-+ return __kvm_map_gfn(kvm_vcpu_memslots(vcpu), gfn, map,
-+ NULL, false);
- }
- EXPORT_SYMBOL_GPL(kvm_vcpu_map);
-
- static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
-- struct kvm_host_map *map, bool dirty)
-+ struct kvm_host_map *map,
-+ struct gfn_to_pfn_cache *cache,
-+ bool dirty, bool atomic)
- {
- if (!map)
- return;
-@@ -1848,34 +1898,44 @@ static void __kvm_unmap_gfn(struct kvm_memory_slot *memslot,
- if (!map->hva)
- return;
-
-- if (map->page != KVM_UNMAPPED_PAGE)
-- kunmap(map->page);
-+ if (map->page != KVM_UNMAPPED_PAGE) {
-+ if (atomic)
-+ kunmap_atomic(map->hva);
-+ else
-+ kunmap(map->page);
-+ }
- #ifdef CONFIG_HAS_IOMEM
-- else
-+ else if (!atomic)
- memunmap(map->hva);
-+ else
-+ WARN_ONCE(1, "Unexpected unmapping in atomic context");
- #endif
-
-- if (dirty) {
-+ if (dirty)
- mark_page_dirty_in_slot(memslot, map->gfn);
-- kvm_release_pfn_dirty(map->pfn);
-- } else {
-- kvm_release_pfn_clean(map->pfn);
-- }
-+
-+ if (cache)
-+ cache->dirty |= dirty;
-+ else
-+ kvm_release_pfn(map->pfn, dirty, NULL);
-
- map->hva = NULL;
- map->page = NULL;
- }
-
--int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
-+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
-+ struct gfn_to_pfn_cache *cache, bool dirty, bool atomic)
- {
-- __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map, dirty);
-+ __kvm_unmap_gfn(gfn_to_memslot(vcpu->kvm, map->gfn), map,
-+ cache, dirty, atomic);
- return 0;
- }
- EXPORT_SYMBOL_GPL(kvm_unmap_gfn);
-
- void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
- {
-- __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, dirty);
-+ __kvm_unmap_gfn(kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), map, NULL,
-+ dirty, false);
- }
- EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
-
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Date: Fri, 31 Jan 2020 08:06:44 -0300
+Subject: [PATCH] x86/KVM: Clean up host's steal time structure
+
+CVE-2019-3016
+CVE-2020-3016
+
+Now that we are mapping kvm_steal_time from the guest directly, we
+don't need to keep a copy of it in kvm_vcpu_arch.st. The same is true
+for the stime field.
+
+This is part of CVE-2019-3016.
+
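+The host-side steal-time state that remains after this patch (from the
+kvm_host.h hunk below):
+
+  struct {
+          u8 preempted;              /* host-local copy of the flag */
+          u64 msr_val;
+          u64 last_steal;
+          struct gfn_to_pfn_cache cache;
+  } st;
+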
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ arch/x86/include/asm/kvm_host.h | 3 +--
+ arch/x86/kvm/x86.c | 11 +++--------
+ 2 files changed, 4 insertions(+), 10 deletions(-)
+
+diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
+index 7c06343614a4..f62f4ff5f4f4 100644
+--- a/arch/x86/include/asm/kvm_host.h
++++ b/arch/x86/include/asm/kvm_host.h
+@@ -674,10 +674,9 @@ struct kvm_vcpu_arch {
+ bool pvclock_set_guest_stopped_request;
+
+ struct {
++ u8 preempted;
+ u64 msr_val;
+ u64 last_steal;
+- struct gfn_to_hva_cache stime;
+- struct kvm_steal_time steal;
+ struct gfn_to_pfn_cache cache;
+ } st;
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index f63fa5846f08..6ce9ace8a801 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -2509,7 +2509,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
+ if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
+ kvm_vcpu_flush_tlb(vcpu, false);
+
+- vcpu->arch.st.steal.preempted = 0;
++ vcpu->arch.st.preempted = 0;
+
+ if (st->version & 1)
+ st->version += 1; /* first time write, random junk */
+@@ -2682,11 +2682,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+ if (data & KVM_STEAL_RESERVED_MASK)
+ return 1;
+
+- if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
+- data & KVM_STEAL_VALID_BITS,
+- sizeof(struct kvm_steal_time)))
+- return 1;
+-
+ vcpu->arch.st.msr_val = data;
+
+ if (!(data & KVM_MSR_ENABLED))
+@@ -3404,7 +3399,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
+ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ return;
+
+- if (vcpu->arch.st.steal.preempted)
++ if (vcpu->arch.st.preempted)
+ return;
+
+ if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
+@@ -3414,7 +3409,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
+ st = map.hva +
+ offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
+
+- st->preempted = vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
++ st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
+
+ kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
+ }
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Date: Fri, 31 Jan 2020 08:06:43 -0300
-Subject: [PATCH] x86/KVM: Make sure KVM_VCPU_FLUSH_TLB flag is not missed
-
-CVE-2019-3016
-CVE-2020-3016
-
-There is a potential race in record_steal_time() between setting
-host-local vcpu->arch.st.steal.preempted to zero (i.e. clearing
-KVM_VCPU_PREEMPTED) and propagating this value to the guest with
-kvm_write_guest_cached(). Between those two events the guest may
-still see KVM_VCPU_PREEMPTED in its copy of kvm_steal_time, set
-KVM_VCPU_FLUSH_TLB and assume that the hypervisor will do the right
-thing. Which it won't.
-
-Instead of copying, we should map kvm_steal_time and that will
-guarantee atomicity of accesses to @preempted.
-
-This is part of CVE-2019-3016.
-
-Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
-Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/x86.c | 49 +++++++++++++++++++++++++++-------------------
- 1 file changed, 29 insertions(+), 20 deletions(-)
-
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 41fee3d359ab..431e34965707 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -2482,43 +2482,45 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
-
- static void record_steal_time(struct kvm_vcpu *vcpu)
- {
-+ struct kvm_host_map map;
-+ struct kvm_steal_time *st;
-+
- if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
- return;
-
-- if (unlikely(kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
-+ /* -EAGAIN is returned in atomic context so we can just return. */
-+ if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT,
-+ &map, &vcpu->arch.st.cache, false))
- return;
-
-+ st = map.hva +
-+ offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
-+
- /*
- * Doing a TLB flush here, on the guest's behalf, can avoid
- * expensive IPIs.
- */
-- if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
-+ if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
- kvm_vcpu_flush_tlb(vcpu, false);
-
-- if (vcpu->arch.st.steal.version & 1)
-- vcpu->arch.st.steal.version += 1; /* first time write, random junk */
-+ vcpu->arch.st.steal.preempted = 0;
-
-- vcpu->arch.st.steal.version += 1;
-+ if (st->version & 1)
-+ st->version += 1; /* first time write, random junk */
-
-- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
-+ st->version += 1;
-
- smp_wmb();
-
-- vcpu->arch.st.steal.steal += current->sched_info.run_delay -
-+ st->steal += current->sched_info.run_delay -
- vcpu->arch.st.last_steal;
- vcpu->arch.st.last_steal = current->sched_info.run_delay;
-
-- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
--
- smp_wmb();
-
-- vcpu->arch.st.steal.version += 1;
-+ st->version += 1;
-
-- kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.st.stime,
-- &vcpu->arch.st.steal, sizeof(struct kvm_steal_time));
-+ kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, false);
- }
-
- int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
-@@ -3390,18 +3392,25 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-
- static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
- {
-+ struct kvm_host_map map;
-+ struct kvm_steal_time *st;
-+
- if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
- return;
-
- if (vcpu->arch.st.steal.preempted)
- return;
-
-- vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
-+ if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
-+ &vcpu->arch.st.cache, true))
-+ return;
-+
-+ st = map.hva +
-+ offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
-+
-+ st->preempted = vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
-
-- kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
-- &vcpu->arch.st.steal.preempted,
-- offsetof(struct kvm_steal_time, preempted),
-- sizeof(vcpu->arch.st.steal.preempted));
-+ kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
- }
-
- void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Date: Fri, 31 Jan 2020 08:06:44 -0300
-Subject: [PATCH] x86/KVM: Clean up host's steal time structure
-
-CVE-2019-3016
-CVE-2020-3016
-
-Now that we are mapping kvm_steal_time from the guest directly, we
-don't need to keep a copy of it in kvm_vcpu_arch.st. The same is true
-for the stime field.
-
-This is part of CVE-2019-3016.
-
-Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
-Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
-Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/include/asm/kvm_host.h | 3 +--
- arch/x86/kvm/x86.c | 11 +++--------
- 2 files changed, 4 insertions(+), 10 deletions(-)
-
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index 7c06343614a4..f62f4ff5f4f4 100644
---- a/arch/x86/include/asm/kvm_host.h
-+++ b/arch/x86/include/asm/kvm_host.h
-@@ -674,10 +674,9 @@ struct kvm_vcpu_arch {
- bool pvclock_set_guest_stopped_request;
-
- struct {
-+ u8 preempted;
- u64 msr_val;
- u64 last_steal;
-- struct gfn_to_hva_cache stime;
-- struct kvm_steal_time steal;
- struct gfn_to_pfn_cache cache;
- } st;
-
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 431e34965707..c059728f8a44 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -2503,7 +2503,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
- if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
- kvm_vcpu_flush_tlb(vcpu, false);
-
-- vcpu->arch.st.steal.preempted = 0;
-+ vcpu->arch.st.preempted = 0;
-
- if (st->version & 1)
- st->version += 1; /* first time write, random junk */
-@@ -2676,11 +2676,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
- if (data & KVM_STEAL_RESERVED_MASK)
- return 1;
-
-- if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
-- data & KVM_STEAL_VALID_BITS,
-- sizeof(struct kvm_steal_time)))
-- return 1;
--
- vcpu->arch.st.msr_val = data;
-
- if (!(data & KVM_MSR_ENABLED))
-@@ -3398,7 +3393,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
- if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
- return;
-
-- if (vcpu->arch.st.steal.preempted)
-+ if (vcpu->arch.st.preempted)
- return;
-
- if (kvm_map_gfn(vcpu, vcpu->arch.st.msr_val >> PAGE_SHIFT, &map,
-@@ -3408,7 +3403,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
- st = map.hva +
- offset_in_page(vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS);
-
-- st->preempted = vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
-+ st->preempted = vcpu->arch.st.preempted = KVM_VCPU_PREEMPTED;
-
- kvm_unmap_gfn(vcpu, &map, &vcpu->arch.st.cache, true, true);
- }