# also bump pve-kernel-meta if either of MAJ.MIN, PATCHLEVEL or KREL change
KERNEL_MAJ=5
-KERNEL_MIN=15
-KERNEL_PATCHLEVEL=53
+KERNEL_MIN=18
+KERNEL_PATCHLEVEL=0
# increment KREL if the ABI changes (abicheck target in debian/rules)
# rebuild packages with new KREL and run 'make abiupdate'
KREL=1
BUILD_DIR=build
-KERNEL_SRC=ubuntu-jammy
+KERNEL_SRC=ubuntu-kinetic
KERNEL_SRC_SUBMODULE=submodules/$(KERNEL_SRC)
KERNEL_CFG_ORG=config-${KERNEL_VER}.org
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
-index 6a2a04d92f42..6c9430ee1a09 100755
+index ca40a5258c87..6ae930a732f0 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
-@@ -22,10 +22,14 @@ else
+@@ -23,10 +23,14 @@ else
VERSION=$KBUILD_BUILD_VERSION
fi
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
-index ba55851fe132..82675e1ecfb8 100644
+index 75204d36d7f9..1fb5ff73ec1e 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -265,10 +265,7 @@ bool br_stp_recalculate_bridge_id(struct net_bridge *br)
2 files changed, 111 insertions(+)
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
-index efb9e8b66652..b2331a9c08dd 100644
+index 56d616a3cf24..fdaa6af6129a 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
-@@ -3943,6 +3943,15 @@
+@@ -4019,6 +4019,15 @@
Also, it enforces the PCI Local Bus spec
rule that those bits should be 0 in system reset
events (useful for kexec/kdump cases).
Safety option to keep boot IRQs enabled. This
should never be necessary.
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
-index 1c566b0cbee9..d49c54c579bb 100644
+index 338c743085d1..5f796b084627 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -193,6 +193,106 @@ static int __init pci_apply_final_quirks(void)
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
-index fefdf3a6dae3..b1f35bc88be5 100644
+index 5ab12214e18d..f19406d0bce6 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -79,7 +79,7 @@ module_param(halt_poll_ns, uint, 0644);
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/net/core/dev.c b/net/core/dev.c
-index 6111506a4105..564801ce82ba 100644
+index 2771fd22dc6a..37e18f93de23 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -10528,7 +10528,7 @@ static void netdev_wait_allrefs(struct net_device *dev)
- if (refcnt != 1 &&
- time_after(jiffies, warning_time +
+@@ -10177,7 +10177,7 @@ static struct net_device *netdev_wait_allrefs_any(struct list_head *list)
+ if (time_after(jiffies, warning_time +
netdev_unregister_timeout_secs * HZ)) {
-- pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
-+ pr_err("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
- dev->name, refcnt);
- warning_time = jiffies;
- }
+ list_for_each_entry(dev, list, todo_list) {
+- pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
++ pr_err("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
+ dev->name, netdev_refcnt_read(dev));
+ ref_tracker_dir_print(&dev->refcnt_tracker, 10);
+ }
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Thomas Lamprecht <t.lamprecht@proxmox.com>
-Date: Mon, 27 Sep 2021 11:28:39 +0200
-Subject: [PATCH] Revert "PCI: Coalesce host bridge contiguous apertures"
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-This reverts commit ab20e43b20b60f5cc8e2ea3763ffa388158469ac.
-
-was reverted upstream because of reports similar to
-
-Link: https://bugzilla.proxmox.com/show_bug.cgi?id=3552
-Link: https://lore.kernel.org/r/20210709231529.GA3270116@roeck-us.net
-Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- drivers/pci/probe.c | 50 ++++-----------------------------------------
- 1 file changed, 4 insertions(+), 46 deletions(-)
-
-diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
-index 4f26c222f5f2..c0abe906b84e 100644
---- a/drivers/pci/probe.c
-+++ b/drivers/pci/probe.c
-@@ -20,7 +20,6 @@
- #include <linux/irqdomain.h>
- #include <linux/pm_runtime.h>
- #include <linux/bitfield.h>
--#include <linux/list_sort.h>
- #include "pci.h"
-
- #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
-@@ -881,31 +880,14 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
- dev_set_msi_domain(&bus->dev, d);
- }
-
--static int res_cmp(void *priv, const struct list_head *a,
-- const struct list_head *b)
--{
-- struct resource_entry *entry1, *entry2;
--
-- entry1 = container_of(a, struct resource_entry, node);
-- entry2 = container_of(b, struct resource_entry, node);
--
-- if (entry1->res->flags != entry2->res->flags)
-- return entry1->res->flags > entry2->res->flags;
--
-- if (entry1->offset != entry2->offset)
-- return entry1->offset > entry2->offset;
--
-- return entry1->res->start > entry2->res->start;
--}
--
- static int pci_register_host_bridge(struct pci_host_bridge *bridge)
- {
- struct device *parent = bridge->dev.parent;
-- struct resource_entry *window, *next, *n;
-+ struct resource_entry *window, *n;
- struct pci_bus *bus, *b;
-- resource_size_t offset, next_offset;
-+ resource_size_t offset;
- LIST_HEAD(resources);
-- struct resource *res, *next_res;
-+ struct resource *res;
- char addr[64], *fmt;
- const char *name;
- int err;
-@@ -988,35 +970,11 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
- if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
- dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n");
-
-- /* Sort and coalesce contiguous windows */
-- list_sort(NULL, &resources, res_cmp);
-- resource_list_for_each_entry_safe(window, n, &resources) {
-- if (list_is_last(&window->node, &resources))
-- break;
--
-- next = list_next_entry(window, node);
-- offset = window->offset;
-- res = window->res;
-- next_offset = next->offset;
-- next_res = next->res;
--
-- if (res->flags != next_res->flags || offset != next_offset)
-- continue;
--
-- if (res->end + 1 == next_res->start) {
-- next_res->start = res->start;
-- res->flags = res->start = res->end = 0;
-- }
-- }
--
- /* Add initial resources to the bus */
- resource_list_for_each_entry_safe(window, n, &resources) {
-+ list_move_tail(&window->node, &bridge->windows);
- offset = window->offset;
- res = window->res;
-- if (!res->end)
-- continue;
--
-- list_move_tail(&window->node, &bridge->windows);
-
- if (res->flags & IORESOURCE_BUS)
- pci_bus_insert_busn_res(bus, bus->number, res->end);
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= <f.gruenbichler@proxmox.com>
+Date: Thu, 14 Sep 2017 11:09:58 +0200
+Subject: [PATCH] do not generate split BTF type info per default
+
+This reverts commit a8ed1a0607cfa5478ff6009539f44790c4d0956d.
+
+It breaks ZFS sometimes:
+https://github.com/openzfs/zfs/issues/12301#issuecomment-873303739
+
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ lib/Kconfig.debug | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
+index 075cd25363ac..8d55eb706a52 100644
+--- a/lib/Kconfig.debug
++++ b/lib/Kconfig.debug
+@@ -357,7 +357,7 @@ config PAHOLE_HAS_BTF_TAG
+ these attributes, so make the config depend on CC_IS_CLANG.
+
+ config DEBUG_INFO_BTF_MODULES
+- def_bool y
++ def_bool n
+ depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
+ help
+ Generate compact split BTF type information for kernel modules.
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Kai-Heng Feng <kai.heng.feng@canonical.com>
-Date: Tue, 13 Jul 2021 20:50:07 +0800
-Subject: [PATCH] PCI: Reinstate "PCI: Coalesce host bridge contiguous
- apertures"
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-Built-in graphics on HP EliteDesk 805 G6 doesn't work because graphics
-can't get the BAR it needs:
- pci_bus 0000:00: root bus resource [mem 0x10020200000-0x100303fffff window]
- pci_bus 0000:00: root bus resource [mem 0x10030400000-0x100401fffff window]
-
- pci 0000:00:08.1: bridge window [mem 0xd2000000-0xd23fffff]
- pci 0000:00:08.1: bridge window [mem 0x10030000000-0x100401fffff 64bit pref]
- pci 0000:00:08.1: can't claim BAR 15 [mem 0x10030000000-0x100401fffff 64bit pref]: no compatible bridge window
- pci 0000:00:08.1: [mem 0x10030000000-0x100401fffff 64bit pref] clipped to [mem 0x10030000000-0x100303fffff 64bit pref]
- pci 0000:00:08.1: bridge window [mem 0x10030000000-0x100303fffff 64bit pref]
- pci 0000:07:00.0: can't claim BAR 0 [mem 0x10030000000-0x1003fffffff 64bit pref]: no compatible bridge window
- pci 0000:07:00.0: can't claim BAR 2 [mem 0x10040000000-0x100401fffff 64bit pref]: no compatible bridge window
-
-However, the root bus has two contiguous apertures that can contain the
-child resource requested.
-
-Coalesce contiguous apertures so we can allocate from the entire contiguous
-region.
-
-This is the second take of commit 65db04053efe ("PCI: Coalesce host
-bridge contiguous apertures"). The original approach sorts the apertures
-by address, but that makes NVMe stop working on QEMU ppc:sam460ex:
- PCI host bridge to bus 0002:00
- pci_bus 0002:00: root bus resource [io 0x0000-0xffff]
- pci_bus 0002:00: root bus resource [mem 0xd80000000-0xdffffffff] (bus address [0x80000000-0xffffffff])
- pci_bus 0002:00: root bus resource [mem 0xc0ee00000-0xc0eefffff] (bus address [0x00000000-0x000fffff])
-
-After the offending commit:
- PCI host bridge to bus 0002:00
- pci_bus 0002:00: root bus resource [io 0x0000-0xffff]
- pci_bus 0002:00: root bus resource [mem 0xc0ee00000-0xc0eefffff] (bus address [0x00000000-0x000fffff])
- pci_bus 0002:00: root bus resource [mem 0xd80000000-0xdffffffff] (bus address [0x80000000-0xffffffff])
-
-Since the apertures on HP EliteDesk 805 G6 are already in ascending
-order, doing a precautious sorting is not necessary.
-
-Remove the sorting part to avoid the regression on ppc:sam460ex.
-
-Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=212013
-Cc: Guenter Roeck <linux@roeck-us.net>
-Suggested-by: Bjorn Helgaas <bhelgaas@google.com>
-Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
-Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- drivers/pci/probe.c | 31 +++++++++++++++++++++++++++----
- 1 file changed, 27 insertions(+), 4 deletions(-)
-
-diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
-index c0abe906b84e..8b7c58dec9e4 100644
---- a/drivers/pci/probe.c
-+++ b/drivers/pci/probe.c
-@@ -883,11 +883,11 @@ static void pci_set_bus_msi_domain(struct pci_bus *bus)
- static int pci_register_host_bridge(struct pci_host_bridge *bridge)
- {
- struct device *parent = bridge->dev.parent;
-- struct resource_entry *window, *n;
-+ struct resource_entry *window, *next, *n;
- struct pci_bus *bus, *b;
-- resource_size_t offset;
-+ resource_size_t offset, next_offset;
- LIST_HEAD(resources);
-- struct resource *res;
-+ struct resource *res, *next_res;
- char addr[64], *fmt;
- const char *name;
- int err;
-@@ -970,11 +970,34 @@ static int pci_register_host_bridge(struct pci_host_bridge *bridge)
- if (nr_node_ids > 1 && pcibus_to_node(bus) == NUMA_NO_NODE)
- dev_warn(&bus->dev, "Unknown NUMA node; performance will be reduced\n");
-
-+ /* Coalesce contiguous windows */
-+ resource_list_for_each_entry_safe(window, n, &resources) {
-+ if (list_is_last(&window->node, &resources))
-+ break;
-+
-+ next = list_next_entry(window, node);
-+ offset = window->offset;
-+ res = window->res;
-+ next_offset = next->offset;
-+ next_res = next->res;
-+
-+ if (res->flags != next_res->flags || offset != next_offset)
-+ continue;
-+
-+ if (res->end + 1 == next_res->start) {
-+ next_res->start = res->start;
-+ res->flags = res->start = res->end = 0;
-+ }
-+ }
-+
- /* Add initial resources to the bus */
- resource_list_for_each_entry_safe(window, n, &resources) {
-- list_move_tail(&window->node, &bridge->windows);
- offset = window->offset;
- res = window->res;
-+ if (!res->end)
-+ continue;
-+
-+ list_move_tail(&window->node, &bridge->windows);
-
- if (res->flags & IORESOURCE_BUS)
- pci_bus_insert_busn_res(bus, bus->number, res->end);
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Wed, 3 Aug 2022 18:49:59 +0300
+Subject: [PATCH] bug: introduce ASSERT_STRUCT_OFFSET
+
+ASSERT_STRUCT_OFFSET allows asserting during the build of
+the kernel that a field in a struct has an expected offset.
+
+KVM used to have such macro, but there is almost nothing KVM specific
+in it so move it to build_bug.h, so that it can be used in other
+places in KVM.
+
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ arch/x86/kvm/vmx/vmcs12.h | 5 ++---
+ include/linux/build_bug.h | 9 +++++++++
+ 2 files changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h
+index 746129ddd5ae..01936013428b 100644
+--- a/arch/x86/kvm/vmx/vmcs12.h
++++ b/arch/x86/kvm/vmx/vmcs12.h
+@@ -208,9 +208,8 @@ struct __packed vmcs12 {
+ /*
+ * For save/restore compatibility, the vmcs12 field offsets must not change.
+ */
+-#define CHECK_OFFSET(field, loc) \
+- BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc), \
+- "Offset of " #field " in struct vmcs12 has changed.")
++#define CHECK_OFFSET(field, loc) \
++ ASSERT_STRUCT_OFFSET(struct vmcs12, field, loc)
+
+ static inline void vmx_check_vmcs12_offsets(void)
+ {
+diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
+index e3a0be2c90ad..3aa3640f8c18 100644
+--- a/include/linux/build_bug.h
++++ b/include/linux/build_bug.h
+@@ -77,4 +77,13 @@
+ #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
+ #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
+
++
++/*
++ * Compile time check that field has an expected offset
++ */
++#define ASSERT_STRUCT_OFFSET(type, field, expected_offset) \
++ BUILD_BUG_ON_MSG(offsetof(type, field) != (expected_offset), \
++ "Offset of " #field " in " #type " has changed.")
++
++
+ #endif /* _LINUX_BUILD_BUG_H */
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Wed, 3 Aug 2022 18:50:00 +0300
+Subject: [PATCH] KVM: x86: emulator: em_sysexit should update ctxt->mode
+
+This is one of the instructions that can change the
+processor mode.
+
+Note that this is likely a benign bug, because the only problematic
+mode change is from 32 bit to 64 bit which can lead to truncation of RIP,
+and it is not possible to do with sysexit,
+since sysexit running in 32 bit mode will be limited to 32 bit version.
+
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ arch/x86/kvm/emulate.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 89b11e7dca8a..93349b54ef56 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2875,6 +2875,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
+ ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
+
+ ctxt->_eip = rdx;
++ ctxt->mode = usermode;
+ *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
+
+ return X86EMUL_CONTINUE;
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: =?UTF-8?q?Fabian=20Gr=C3=BCnbichler?= <f.gruenbichler@proxmox.com>
-Date: Thu, 14 Sep 2017 11:09:58 +0200
-Subject: [PATCH] do not generate split BTF type info per default
-
-This reverts commit a8ed1a0607cfa5478ff6009539f44790c4d0956d.
-
-It breaks ZFS sometimes:
-https://github.com/openzfs/zfs/issues/12301#issuecomment-873303739
-
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- lib/Kconfig.debug | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
-index ead81fad883c..9d76f3c39735 100644
---- a/lib/Kconfig.debug
-+++ b/lib/Kconfig.debug
-@@ -325,7 +325,7 @@ config PAHOLE_HAS_SPLIT_BTF
- def_bool PAHOLE_VERSION >= 119
-
- config DEBUG_INFO_BTF_MODULES
-- def_bool y
-+ def_bool n
- depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
- help
- Generate compact split BTF type information for kernel modules.
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Wed, 3 Aug 2022 18:50:01 +0300
+Subject: [PATCH] KVM: x86: emulator: introduce emulator_recalc_and_set_mode
+
+Some instructions update the cpu execution mode, which needs
+to update the emulation mode.
+
+Extract this code, and make assign_eip_far use it.
+
+assign_eip_far now reads CS, instead of getting it via a parameter,
+which is ok, because callers always assign CS to the
+same value before calling it.
+
+No functional change is intended.
+
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ arch/x86/kvm/emulate.c | 85 ++++++++++++++++++++++++++++--------------
+ 1 file changed, 57 insertions(+), 28 deletions(-)
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 93349b54ef56..61b38c03606a 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -792,8 +792,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
+ ctxt->mode, linear);
+ }
+
+-static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
+- enum x86emul_mode mode)
++static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
+ {
+ ulong linear;
+ int rc;
+@@ -803,41 +802,71 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
+
+ if (ctxt->op_bytes != sizeof(unsigned long))
+ addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
+- rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
++ rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
+ if (rc == X86EMUL_CONTINUE)
+ ctxt->_eip = addr.ea;
+ return rc;
+ }
+
++static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
++{
++ u64 efer;
++ struct desc_struct cs;
++ u16 selector;
++ u32 base3;
++
++ ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
++
++	if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
++ /* Real mode. cpu must not have long mode active */
++ if (efer & EFER_LMA)
++ return X86EMUL_UNHANDLEABLE;
++ ctxt->mode = X86EMUL_MODE_REAL;
++ return X86EMUL_CONTINUE;
++ }
++
++ if (ctxt->eflags & X86_EFLAGS_VM) {
++ /* Protected/VM86 mode. cpu must not have long mode active */
++ if (efer & EFER_LMA)
++ return X86EMUL_UNHANDLEABLE;
++ ctxt->mode = X86EMUL_MODE_VM86;
++ return X86EMUL_CONTINUE;
++ }
++
++ if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
++ return X86EMUL_UNHANDLEABLE;
++
++ if (efer & EFER_LMA) {
++ if (cs.l) {
++ /* Proper long mode */
++ ctxt->mode = X86EMUL_MODE_PROT64;
++ } else if (cs.d) {
++ /* 32 bit compatibility mode*/
++ ctxt->mode = X86EMUL_MODE_PROT32;
++ } else {
++ ctxt->mode = X86EMUL_MODE_PROT16;
++ }
++ } else {
++ /* Legacy 32 bit / 16 bit mode */
++ ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
++ }
++
++ return X86EMUL_CONTINUE;
++}
++
+ static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
+ {
+- return assign_eip(ctxt, dst, ctxt->mode);
++ return assign_eip(ctxt, dst);
+ }
+
+-static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
+- const struct desc_struct *cs_desc)
++static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
+ {
+- enum x86emul_mode mode = ctxt->mode;
+- int rc;
++ int rc = emulator_recalc_and_set_mode(ctxt);
+
+-#ifdef CONFIG_X86_64
+- if (ctxt->mode >= X86EMUL_MODE_PROT16) {
+- if (cs_desc->l) {
+- u64 efer = 0;
++ if (rc != X86EMUL_CONTINUE)
++ return rc;
+
+- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+- if (efer & EFER_LMA)
+- mode = X86EMUL_MODE_PROT64;
+- } else
+- mode = X86EMUL_MODE_PROT32; /* temporary value */
+- }
+-#endif
+- if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
+- mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+- rc = assign_eip(ctxt, dst, mode);
+- if (rc == X86EMUL_CONTINUE)
+- ctxt->mode = mode;
+- return rc;
++ return assign_eip(ctxt, dst);
+ }
+
+ static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+@@ -2171,7 +2200,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+- rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
++ rc = assign_eip_far(ctxt, ctxt->src.val);
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+@@ -2249,7 +2278,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
+ &new_desc);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+- rc = assign_eip_far(ctxt, eip, &new_desc);
++ rc = assign_eip_far(ctxt, eip);
+ /* Error handling is not implemented. */
+ if (rc != X86EMUL_CONTINUE)
+ return X86EMUL_UNHANDLEABLE;
+@@ -3469,7 +3498,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
+- rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
++ rc = assign_eip_far(ctxt, ctxt->src.val);
+ if (rc != X86EMUL_CONTINUE)
+ goto fail;
+
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Wolfgang Bumiller <w.bumiller@proxmox.com>
-Date: Tue, 11 Jan 2022 09:31:59 +0100
-Subject: [PATCH] blk-cgroup: always terminate io.stat lines
-
-With the removal of seq_get_buf in blkcg_print_one_stat, we
-cannot make adding the newline conditional on there being
-relevant stats because the name was already written out
-unconditionally.
-Otherwise we may end up with multiple device names in one
-line which is confusing and doesn't follow the nested-keyed
-file format.
-
-Signed-off-by: Wolfgang Bumiller <w.bumiller@proxmox.com>
-Fixes: 252c651a4c85 ("blk-cgroup: stop using seq_get_buf")
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- block/blk-cgroup.c | 9 ++-------
- block/blk-iocost.c | 5 ++---
- block/blk-iolatency.c | 8 +++-----
- include/linux/blk-cgroup.h | 2 +-
- 4 files changed, 8 insertions(+), 16 deletions(-)
-
-diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
-index ce5858dadca5..aa43ed94deb6 100644
---- a/block/blk-cgroup.c
-+++ b/block/blk-cgroup.c
-@@ -887,7 +887,6 @@ static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
- {
- struct blkg_iostat_set *bis = &blkg->iostat;
- u64 rbytes, wbytes, rios, wios, dbytes, dios;
-- bool has_stats = false;
- const char *dname;
- unsigned seq;
- int i;
-@@ -913,14 +912,12 @@ static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
- } while (u64_stats_fetch_retry(&bis->sync, seq));
-
- if (rbytes || wbytes || rios || wios) {
-- has_stats = true;
- seq_printf(s, "rbytes=%llu wbytes=%llu rios=%llu wios=%llu dbytes=%llu dios=%llu",
- rbytes, wbytes, rios, wios,
- dbytes, dios);
- }
-
- if (blkcg_debug_stats && atomic_read(&blkg->use_delay)) {
-- has_stats = true;
- seq_printf(s, " use_delay=%d delay_nsec=%llu",
- atomic_read(&blkg->use_delay),
- atomic64_read(&blkg->delay_nsec));
-@@ -932,12 +929,10 @@ static void blkcg_print_one_stat(struct blkcg_gq *blkg, struct seq_file *s)
- if (!blkg->pd[i] || !pol->pd_stat_fn)
- continue;
-
-- if (pol->pd_stat_fn(blkg->pd[i], s))
-- has_stats = true;
-+ pol->pd_stat_fn(blkg->pd[i], s);
- }
-
-- if (has_stats)
-- seq_printf(s, "\n");
-+ seq_puts(s, "\n");
- }
-
- static int blkcg_print_stat(struct seq_file *sf, void *v)
-diff --git a/block/blk-iocost.c b/block/blk-iocost.c
-index 10851493940c..21db328c0bcc 100644
---- a/block/blk-iocost.c
-+++ b/block/blk-iocost.c
-@@ -3005,13 +3005,13 @@ static void ioc_pd_free(struct blkg_policy_data *pd)
- kfree(iocg);
- }
-
--static bool ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
-+static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
- {
- struct ioc_gq *iocg = pd_to_iocg(pd);
- struct ioc *ioc = iocg->ioc;
-
- if (!ioc->enabled)
-- return false;
-+ return;
-
- if (iocg->level == 0) {
- unsigned vp10k = DIV64_U64_ROUND_CLOSEST(
-@@ -3027,7 +3027,6 @@ static bool ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
- iocg->last_stat.wait_us,
- iocg->last_stat.indebt_us,
- iocg->last_stat.indelay_us);
-- return true;
- }
-
- static u64 ioc_weight_prfill(struct seq_file *sf, struct blkg_policy_data *pd,
-diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
-index ce3847499d85..0bac2c9ebb4a 100644
---- a/block/blk-iolatency.c
-+++ b/block/blk-iolatency.c
-@@ -902,7 +902,7 @@ static int iolatency_print_limit(struct seq_file *sf, void *v)
- return 0;
- }
-
--static bool iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
-+static void iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
- {
- struct latency_stat stat;
- int cpu;
-@@ -925,17 +925,16 @@ static bool iolatency_ssd_stat(struct iolatency_grp *iolat, struct seq_file *s)
- (unsigned long long)stat.ps.missed,
- (unsigned long long)stat.ps.total,
- iolat->rq_depth.max_depth);
-- return true;
- }
-
--static bool iolatency_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
-+static void iolatency_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
- {
- struct iolatency_grp *iolat = pd_to_lat(pd);
- unsigned long long avg_lat;
- unsigned long long cur_win;
-
- if (!blkcg_debug_stats)
-- return false;
-+ return;
-
- if (iolat->ssd)
- return iolatency_ssd_stat(iolat, s);
-@@ -948,7 +947,6 @@ static bool iolatency_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
- else
- seq_printf(s, " depth=%u avg_lat=%llu win=%llu",
- iolat->rq_depth.max_depth, avg_lat, cur_win);
-- return true;
- }
-
- static struct blkg_policy_data *iolatency_pd_alloc(gfp_t gfp,
-diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
-index bc5c04d711bb..618359e3beca 100644
---- a/include/linux/blk-cgroup.h
-+++ b/include/linux/blk-cgroup.h
-@@ -153,7 +153,7 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
- typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
- typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
- typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
--typedef bool (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
-+typedef void (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd,
- struct seq_file *s);
-
- struct blkcg_policy {
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Wed, 3 Aug 2022 18:50:02 +0300
+Subject: [PATCH] KVM: x86: emulator: update the emulation mode after rsm
+
+This ensures that RIP will be correctly written back,
+because the RSM instruction can switch the CPU mode from
+32 bit (or less) to 64 bit.
+
+This fixes a guest crash in case the #SMI is received
+while the guest runs a code from an address > 32 bit.
+
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ arch/x86/kvm/emulate.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 61b38c03606a..f2a0a34f4687 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -2653,6 +2653,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
+ if (ret != X86EMUL_CONTINUE)
+ goto emulate_shutdown;
+
++
++ ret = emulator_recalc_and_set_mode(ctxt);
++ if (ret != X86EMUL_CONTINUE)
++ goto emulate_shutdown;
++
+ /*
+ * Note, the ctxt->ops callbacks are responsible for handling side
+ * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Javier Martinez Canillas <javierm@redhat.com>
-Date: Tue, 25 Jan 2022 10:12:19 +0100
-Subject: [PATCH] drivers/firmware: Don't mark as busy the simple-framebuffer
- IO resource
-
-The sysfb_create_simplefb() function requests a IO memory resource for the
-simple-framebuffer platform device, but it also marks it as busy which can
-lead to drivers requesting the same memory resource to fail.
-
-Let's drop the IORESOURCE_BUSY flag and let drivers to request it as busy
-instead.
-
-Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
-Reviewed-by: Zack Rusin <zackr@vmware.com>
-Reviewed-by: Thomas Zimmermann <tzimmermann@suse.de>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- drivers/firmware/sysfb_simplefb.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/firmware/sysfb_simplefb.c b/drivers/firmware/sysfb_simplefb.c
-index 757cc8b9f3de..bda8712bfd8c 100644
---- a/drivers/firmware/sysfb_simplefb.c
-+++ b/drivers/firmware/sysfb_simplefb.c
-@@ -99,7 +99,7 @@ __init int sysfb_create_simplefb(const struct screen_info *si,
-
- /* setup IORESOURCE_MEM as framebuffer memory */
- memset(&res, 0, sizeof(res));
-- res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
-+ res.flags = IORESOURCE_MEM;
- res.name = simplefb_resname;
- res.start = base;
- res.end = res.start + length - 1;
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Wed, 3 Aug 2022 18:50:03 +0300
+Subject: [PATCH] KVM: x86: emulator: update the emulation mode after CR0 write
+
+CR0.PE toggles real/protected mode, thus its update
+should update the emulation mode.
+
+This is likely a benign bug because there is no writeback
+of state, other than the RIP increment, and when toggling
+CR0.PE, the CPU has to execute code from a very low memory address.
+
+Also CR0.PG toggle when EFER.LMA is set, toggles the long mode.
+
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ arch/x86/kvm/emulate.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index f2a0a34f4687..874d124438d1 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -3645,11 +3645,23 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt)
+
+ static int em_cr_write(struct x86_emulate_ctxt *ctxt)
+ {
+- if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
++ int cr_num = ctxt->modrm_reg;
++ int r;
++
++ if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
+ return emulate_gp(ctxt, 0);
+
+ /* Disable writeback. */
+ ctxt->dst.type = OP_NONE;
++
++ if (cr_num == 0) {
++ /* CR0 write might have updated CR0.PE and/or CR0.PG
++ * which can affect the cpu execution mode */
++ r = emulator_recalc_and_set_mode(ctxt);
++ if (r != X86EMUL_CONTINUE)
++ return r;
++ }
++
+ return X86EMUL_CONTINUE;
+ }
+
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Thomas Zimmermann <tzimmermann@suse.de>
-Date: Tue, 25 Jan 2022 10:12:20 +0100
-Subject: [PATCH] drm/simpledrm: Request memory region in driver
-
-Requesting the framebuffer memory in simpledrm marks the memory
-range as busy. This used to be done by the firmware sysfb code,
-but the driver is the correct place.
-
-v2:
- * use I/O memory if request_mem_region() fails (Jocelyn)
-
-Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
-Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
-Reviewed-by: Jocelyn Falempe <jfalempe@redhat.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- drivers/gpu/drm/tiny/simpledrm.c | 22 +++++++++++++++++-----
- 1 file changed, 17 insertions(+), 5 deletions(-)
-
-diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
-index 3e3f9ba1e885..806fdc3237b3 100644
---- a/drivers/gpu/drm/tiny/simpledrm.c
-+++ b/drivers/gpu/drm/tiny/simpledrm.c
-@@ -525,21 +525,33 @@ static int simpledrm_device_init_mm(struct simpledrm_device *sdev)
- {
- struct drm_device *dev = &sdev->dev;
- struct platform_device *pdev = sdev->pdev;
-- struct resource *mem;
-+ struct resource *res, *mem;
- void __iomem *screen_base;
- int ret;
-
-- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-- if (!mem)
-+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-+ if (!res)
- return -EINVAL;
-
-- ret = devm_aperture_acquire_from_firmware(dev, mem->start, resource_size(mem));
-+ ret = devm_aperture_acquire_from_firmware(dev, res->start, resource_size(res));
- if (ret) {
- drm_err(dev, "could not acquire memory range %pr: error %d\n",
-- mem, ret);
-+ res, ret);
- return ret;
- }
-
-+ mem = devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
-+ sdev->dev.driver->name);
-+ if (!mem) {
-+ /*
-+ * We cannot make this fatal. Sometimes this comes from magic
-+ * spaces our resource handlers simply don't know about. Use
-+ * the I/O-memory resource as-is and try to map that instead.
-+ */
-+ drm_warn(dev, "could not acquire memory region %pr\n", res);
-+ mem = res;
-+ }
-+
- screen_base = devm_ioremap_wc(&pdev->dev, mem->start,
- resource_size(mem));
- if (!screen_base)
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Maxim Levitsky <mlevitsk@redhat.com>
+Date: Wed, 3 Aug 2022 18:50:05 +0300
+Subject: [PATCH] KVM: x86: emulator/smm: add structs for KVM's smram layout
+
+Those structs will be used to read/write the smram state image.
+
+Also document the differences between KVM's SMRAM layout and SMRAM
+layout that is used by real Intel/AMD cpus.
+
+Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ arch/x86/kvm/emulate.c | 6 +
+ arch/x86/kvm/kvm_emulate.h | 218 +++++++++++++++++++++++++++++++++++++
+ arch/x86/kvm/x86.c | 1 +
+ 3 files changed, 225 insertions(+)
+
+diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
+index 874d124438d1..bf1238152318 100644
+--- a/arch/x86/kvm/emulate.c
++++ b/arch/x86/kvm/emulate.c
+@@ -5850,3 +5850,9 @@ bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
+
+ return true;
+ }
++
++void __init kvm_emulator_init(void)
++{
++ __check_smram32_offsets();
++ __check_smram64_offsets();
++}
+diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
+index 8dff25d267b7..0eb13204bbc2 100644
+--- a/arch/x86/kvm/kvm_emulate.h
++++ b/arch/x86/kvm/kvm_emulate.h
+@@ -13,6 +13,7 @@
+ #define _ASM_X86_KVM_X86_EMULATE_H
+
+ #include <asm/desc_defs.h>
++#include <linux/build_bug.h>
+ #include "fpu.h"
+
+ struct x86_emulate_ctxt;
+@@ -481,6 +482,223 @@ enum x86_intercept {
+ nr_x86_intercepts
+ };
+
++
++/* 32 bit KVM's emulated SMM layout. Loosely based on Intel's layout */
++
++struct kvm_smm_seg_state_32 {
++ u32 flags;
++ u32 limit;
++ u32 base;
++} __packed;
++
++struct kvm_smram_state_32 {
++ u32 reserved1[62];
++ u32 smbase;
++ u32 smm_revision;
++ u32 reserved2[5];
++ u32 cr4; /* CR4 is not present in Intel/AMD SMRAM image */
++ u32 reserved3[5];
++
++ /*
++ * Segment state is not present/documented in the Intel/AMD SMRAM image
++ * Instead this area on Intel/AMD contains IO/HLT restart flags.
++ */
++ struct kvm_smm_seg_state_32 ds;
++ struct kvm_smm_seg_state_32 fs;
++ struct kvm_smm_seg_state_32 gs;
++ struct kvm_smm_seg_state_32 idtr; /* IDTR has only base and limit */
++ struct kvm_smm_seg_state_32 tr;
++ u32 reserved;
++ struct kvm_smm_seg_state_32 gdtr; /* GDTR has only base and limit */
++ struct kvm_smm_seg_state_32 ldtr;
++ struct kvm_smm_seg_state_32 es;
++ struct kvm_smm_seg_state_32 cs;
++ struct kvm_smm_seg_state_32 ss;
++
++ u32 es_sel;
++ u32 cs_sel;
++ u32 ss_sel;
++ u32 ds_sel;
++ u32 fs_sel;
++ u32 gs_sel;
++ u32 ldtr_sel;
++ u32 tr_sel;
++
++ u32 dr7;
++ u32 dr6;
++ u32 gprs[8]; /* GPRS in the "natural" X86 order (EAX/ECX/EDX.../EDI) */
++ u32 eip;
++ u32 eflags;
++ u32 cr3;
++ u32 cr0;
++} __packed;
++
++
++static inline void __check_smram32_offsets(void)
++{
++#define __CHECK_SMRAM32_OFFSET(field, offset) \
++ ASSERT_STRUCT_OFFSET(struct kvm_smram_state_32, field, offset - 0xFE00)
++
++ __CHECK_SMRAM32_OFFSET(reserved1, 0xFE00);
++ __CHECK_SMRAM32_OFFSET(smbase, 0xFEF8);
++ __CHECK_SMRAM32_OFFSET(smm_revision, 0xFEFC);
++ __CHECK_SMRAM32_OFFSET(reserved2, 0xFF00);
++ __CHECK_SMRAM32_OFFSET(cr4, 0xFF14);
++ __CHECK_SMRAM32_OFFSET(reserved3, 0xFF18);
++ __CHECK_SMRAM32_OFFSET(ds, 0xFF2C);
++ __CHECK_SMRAM32_OFFSET(fs, 0xFF38);
++ __CHECK_SMRAM32_OFFSET(gs, 0xFF44);
++ __CHECK_SMRAM32_OFFSET(idtr, 0xFF50);
++ __CHECK_SMRAM32_OFFSET(tr, 0xFF5C);
++ __CHECK_SMRAM32_OFFSET(gdtr, 0xFF6C);
++ __CHECK_SMRAM32_OFFSET(ldtr, 0xFF78);
++ __CHECK_SMRAM32_OFFSET(es, 0xFF84);
++ __CHECK_SMRAM32_OFFSET(cs, 0xFF90);
++ __CHECK_SMRAM32_OFFSET(ss, 0xFF9C);
++ __CHECK_SMRAM32_OFFSET(es_sel, 0xFFA8);
++ __CHECK_SMRAM32_OFFSET(cs_sel, 0xFFAC);
++ __CHECK_SMRAM32_OFFSET(ss_sel, 0xFFB0);
++ __CHECK_SMRAM32_OFFSET(ds_sel, 0xFFB4);
++ __CHECK_SMRAM32_OFFSET(fs_sel, 0xFFB8);
++ __CHECK_SMRAM32_OFFSET(gs_sel, 0xFFBC);
++ __CHECK_SMRAM32_OFFSET(ldtr_sel, 0xFFC0);
++ __CHECK_SMRAM32_OFFSET(tr_sel, 0xFFC4);
++ __CHECK_SMRAM32_OFFSET(dr7, 0xFFC8);
++ __CHECK_SMRAM32_OFFSET(dr6, 0xFFCC);
++ __CHECK_SMRAM32_OFFSET(gprs, 0xFFD0);
++ __CHECK_SMRAM32_OFFSET(eip, 0xFFF0);
++ __CHECK_SMRAM32_OFFSET(eflags, 0xFFF4);
++ __CHECK_SMRAM32_OFFSET(cr3, 0xFFF8);
++ __CHECK_SMRAM32_OFFSET(cr0, 0xFFFC);
++#undef __CHECK_SMRAM32_OFFSET
++}
++
++
++/* 64 bit KVM's emulated SMM layout. Based on AMD64 layout */
++
++struct kvm_smm_seg_state_64 {
++ u16 selector;
++ u16 attributes;
++ u32 limit;
++ u64 base;
++};
++
++struct kvm_smram_state_64 {
++
++ struct kvm_smm_seg_state_64 es;
++ struct kvm_smm_seg_state_64 cs;
++ struct kvm_smm_seg_state_64 ss;
++ struct kvm_smm_seg_state_64 ds;
++ struct kvm_smm_seg_state_64 fs;
++ struct kvm_smm_seg_state_64 gs;
++ struct kvm_smm_seg_state_64 gdtr; /* GDTR has only base and limit*/
++ struct kvm_smm_seg_state_64 ldtr;
++ struct kvm_smm_seg_state_64 idtr; /* IDTR has only base and limit*/
++ struct kvm_smm_seg_state_64 tr;
++
++ /* I/O restart and auto halt restart are not implemented by KVM */
++ u64 io_restart_rip;
++ u64 io_restart_rcx;
++ u64 io_restart_rsi;
++ u64 io_restart_rdi;
++ u32 io_restart_dword;
++ u32 reserved1;
++ u8 io_inst_restart;
++ u8 auto_hlt_restart;
++ u8 reserved2[6];
++
++ u64 efer;
++
++ /*
++ * Two fields below are implemented on AMD only, to store
++ * SVM guest vmcb address if the #SMI was received while in the guest mode.
++ */
++ u64 svm_guest_flag;
++ u64 svm_guest_vmcb_gpa;
++ u64 svm_guest_virtual_int; /* unknown purpose, not implemented */
++
++ u32 reserved3[3];
++ u32 smm_revison;
++ u32 smbase;
++ u32 reserved4[5];
++
++ /* ssp and svm_* fields below are not implemented by KVM */
++ u64 ssp;
++ u64 svm_guest_pat;
++ u64 svm_host_efer;
++ u64 svm_host_cr4;
++ u64 svm_host_cr3;
++ u64 svm_host_cr0;
++
++ u64 cr4;
++ u64 cr3;
++ u64 cr0;
++ u64 dr7;
++ u64 dr6;
++ u64 rflags;
++ u64 rip;
++ u64 gprs[16]; /* GPRS in a reversed "natural" X86 order (R15/R14/../RCX/RAX.) */
++};
++
++
++static inline void __check_smram64_offsets(void)
++{
++#define __CHECK_SMRAM64_OFFSET(field, offset) \
++ ASSERT_STRUCT_OFFSET(struct kvm_smram_state_64, field, offset - 0xFE00)
++
++ __CHECK_SMRAM64_OFFSET(es, 0xFE00);
++ __CHECK_SMRAM64_OFFSET(cs, 0xFE10);
++ __CHECK_SMRAM64_OFFSET(ss, 0xFE20);
++ __CHECK_SMRAM64_OFFSET(ds, 0xFE30);
++ __CHECK_SMRAM64_OFFSET(fs, 0xFE40);
++ __CHECK_SMRAM64_OFFSET(gs, 0xFE50);
++ __CHECK_SMRAM64_OFFSET(gdtr, 0xFE60);
++ __CHECK_SMRAM64_OFFSET(ldtr, 0xFE70);
++ __CHECK_SMRAM64_OFFSET(idtr, 0xFE80);
++ __CHECK_SMRAM64_OFFSET(tr, 0xFE90);
++ __CHECK_SMRAM64_OFFSET(io_restart_rip, 0xFEA0);
++ __CHECK_SMRAM64_OFFSET(io_restart_rcx, 0xFEA8);
++ __CHECK_SMRAM64_OFFSET(io_restart_rsi, 0xFEB0);
++ __CHECK_SMRAM64_OFFSET(io_restart_rdi, 0xFEB8);
++ __CHECK_SMRAM64_OFFSET(io_restart_dword, 0xFEC0);
++ __CHECK_SMRAM64_OFFSET(reserved1, 0xFEC4);
++ __CHECK_SMRAM64_OFFSET(io_inst_restart, 0xFEC8);
++ __CHECK_SMRAM64_OFFSET(auto_hlt_restart, 0xFEC9);
++ __CHECK_SMRAM64_OFFSET(reserved2, 0xFECA);
++ __CHECK_SMRAM64_OFFSET(efer, 0xFED0);
++ __CHECK_SMRAM64_OFFSET(svm_guest_flag, 0xFED8);
++ __CHECK_SMRAM64_OFFSET(svm_guest_vmcb_gpa, 0xFEE0);
++ __CHECK_SMRAM64_OFFSET(svm_guest_virtual_int, 0xFEE8);
++ __CHECK_SMRAM64_OFFSET(reserved3, 0xFEF0);
++ __CHECK_SMRAM64_OFFSET(smm_revison, 0xFEFC);
++ __CHECK_SMRAM64_OFFSET(smbase, 0xFF00);
++ __CHECK_SMRAM64_OFFSET(reserved4, 0xFF04);
++ __CHECK_SMRAM64_OFFSET(ssp, 0xFF18);
++ __CHECK_SMRAM64_OFFSET(svm_guest_pat, 0xFF20);
++ __CHECK_SMRAM64_OFFSET(svm_host_efer, 0xFF28);
++ __CHECK_SMRAM64_OFFSET(svm_host_cr4, 0xFF30);
++ __CHECK_SMRAM64_OFFSET(svm_host_cr3, 0xFF38);
++ __CHECK_SMRAM64_OFFSET(svm_host_cr0, 0xFF40);
++ __CHECK_SMRAM64_OFFSET(cr4, 0xFF48);
++ __CHECK_SMRAM64_OFFSET(cr3, 0xFF50);
++ __CHECK_SMRAM64_OFFSET(cr0, 0xFF58);
++ __CHECK_SMRAM64_OFFSET(dr7, 0xFF60);
++ __CHECK_SMRAM64_OFFSET(dr6, 0xFF68);
++ __CHECK_SMRAM64_OFFSET(rflags, 0xFF70);
++ __CHECK_SMRAM64_OFFSET(rip, 0xFF78);
++ __CHECK_SMRAM64_OFFSET(gprs, 0xFF80);
++#undef __CHECK_SMRAM64_OFFSET
++}
++
++union kvm_smram {
++ struct kvm_smram_state_64 smram64;
++ struct kvm_smram_state_32 smram32;
++ u8 bytes[512];
++};
++
++void __init kvm_emulator_init(void);
++
++
+ /* Host execution mode. */
+ #if defined(CONFIG_X86_32)
+ #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index dbaff0c7c8c2..aec63cebe0b7 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -13009,6 +13009,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
+ static int __init kvm_x86_init(void)
+ {
+ kvm_mmu_x86_module_init();
++ kvm_emulator_init();
+ return 0;
+ }
+ module_init(kvm_x86_init);
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Thomas Zimmermann <tzimmermann@suse.de>
-Date: Tue, 25 Jan 2022 10:12:21 +0100
-Subject: [PATCH] fbdev/simplefb: Request memory region in driver
-
-Requesting the framebuffer memory in simpledrm marks the memory
-range as busy. This used to be done by the firmware sysfb code,
-but the driver is the correct place.
-
-v2:
- * store memory region in struct for later cleanup (Javier)
-
-Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
-Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- drivers/video/fbdev/simplefb.c | 65 +++++++++++++++++++++++-----------
- 1 file changed, 45 insertions(+), 20 deletions(-)
-
-diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
-index a2e3a4690025..8acfb12abfee 100644
---- a/drivers/video/fbdev/simplefb.c
-+++ b/drivers/video/fbdev/simplefb.c
-@@ -66,7 +66,21 @@ static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- return 0;
- }
-
--struct simplefb_par;
-+struct simplefb_par {
-+ u32 palette[PSEUDO_PALETTE_SIZE];
-+ struct resource *mem;
-+#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
-+ bool clks_enabled;
-+ unsigned int clk_count;
-+ struct clk **clks;
-+#endif
-+#if defined CONFIG_OF && defined CONFIG_REGULATOR
-+ bool regulators_enabled;
-+ u32 regulator_count;
-+ struct regulator **regulators;
-+#endif
-+};
-+
- static void simplefb_clocks_destroy(struct simplefb_par *par);
- static void simplefb_regulators_destroy(struct simplefb_par *par);
-
-@@ -76,12 +90,18 @@ static void simplefb_regulators_destroy(struct simplefb_par *par);
- */
- static void simplefb_destroy(struct fb_info *info)
- {
-+ struct simplefb_par *par = info->par;
-+ struct resource *mem = par->mem;
-+
- simplefb_regulators_destroy(info->par);
- simplefb_clocks_destroy(info->par);
- if (info->screen_base)
- iounmap(info->screen_base);
-
- framebuffer_release(info);
-+
-+ if (mem)
-+ release_mem_region(mem->start, resource_size(mem));
- }
-
- static const struct fb_ops simplefb_ops = {
-@@ -175,20 +195,6 @@ static int simplefb_parse_pd(struct platform_device *pdev,
- return 0;
- }
-
--struct simplefb_par {
-- u32 palette[PSEUDO_PALETTE_SIZE];
--#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
-- bool clks_enabled;
-- unsigned int clk_count;
-- struct clk **clks;
--#endif
--#if defined CONFIG_OF && defined CONFIG_REGULATOR
-- bool regulators_enabled;
-- u32 regulator_count;
-- struct regulator **regulators;
--#endif
--};
--
- #if defined CONFIG_OF && defined CONFIG_COMMON_CLK
- /*
- * Clock handling code.
-@@ -411,7 +417,7 @@ static int simplefb_probe(struct platform_device *pdev)
- struct simplefb_params params;
- struct fb_info *info;
- struct simplefb_par *par;
-- struct resource *mem;
-+ struct resource *res, *mem;
-
- /*
- * Generic drivers must not be registered if a framebuffer exists.
-@@ -436,15 +442,28 @@ static int simplefb_probe(struct platform_device *pdev)
- if (ret)
- return ret;
-
-- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-- if (!mem) {
-+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-+ if (!res) {
- dev_err(&pdev->dev, "No memory resource\n");
- return -EINVAL;
- }
-
-+ mem = request_mem_region(res->start, resource_size(res), "simplefb");
-+ if (!mem) {
-+ /*
-+ * We cannot make this fatal. Sometimes this comes from magic
-+ * spaces our resource handlers simply don't know about. Use
-+ * the I/O-memory resource as-is and try to map that instead.
-+ */
-+ dev_warn(&pdev->dev, "simplefb: cannot reserve video memory at %pR\n", res);
-+ mem = res;
-+ }
-+
- info = framebuffer_alloc(sizeof(struct simplefb_par), &pdev->dev);
-- if (!info)
-- return -ENOMEM;
-+ if (!info) {
-+ ret = -ENOMEM;
-+ goto error_release_mem_region;
-+ }
- platform_set_drvdata(pdev, info);
-
- par = info->par;
-@@ -501,6 +520,9 @@ static int simplefb_probe(struct platform_device *pdev)
- info->var.xres, info->var.yres,
- info->var.bits_per_pixel, info->fix.line_length);
-
-+ if (mem != res)
-+ par->mem = mem; /* release in clean-up handler */
-+
- ret = register_framebuffer(info);
- if (ret < 0) {
- dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
-@@ -519,6 +541,9 @@ static int simplefb_probe(struct platform_device *pdev)
- iounmap(info->screen_base);
- error_fb_release:
- framebuffer_release(info);
-+error_release_mem_region:
-+ if (mem != res)
-+ release_mem_region(mem->start, resource_size(mem));
- return ret;
- }
-
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Olga Kornievskaia <kolga@netapp.com>
-Date: Wed, 16 Mar 2022 18:24:26 -0400
-Subject: [PATCH] NFSv4.1 provide mount option to toggle trunking discovery
-
-Introduce a new mount option -- trunkdiscovery,notrunkdiscovery -- to
-toggle whether or not the client will engage in actively discovery
-of trunking locations.
-
-v2 make notrunkdiscovery default
-
-Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
-Fixes: 1976b2b31462 ("NFSv4.1 query for fs_location attr on a new file system")
-Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
-(cherry picked from commit a43bf604446414103b7535f38e739b65601c4fb2)
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- fs/nfs/client.c | 3 ++-
- fs/nfs/fs_context.c | 8 ++++++++
- include/linux/nfs_fs_sb.h | 1 +
- 3 files changed, 11 insertions(+), 1 deletion(-)
-
-diff --git a/fs/nfs/client.c b/fs/nfs/client.c
-index 090b16890e3d..f303e96ce165 100644
---- a/fs/nfs/client.c
-+++ b/fs/nfs/client.c
-@@ -861,7 +861,8 @@ int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh, struct nfs
- }
-
- if (clp->rpc_ops->discover_trunking != NULL &&
-- (server->caps & NFS_CAP_FS_LOCATIONS)) {
-+ (server->caps & NFS_CAP_FS_LOCATIONS &&
-+ (server->flags & NFS_MOUNT_TRUNK_DISCOVERY))) {
- error = clp->rpc_ops->discover_trunking(server, mntfh);
- if (error < 0)
- return error;
-diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c
-index fb3cad38b149..0166370f088e 100644
---- a/fs/nfs/fs_context.c
-+++ b/fs/nfs/fs_context.c
-@@ -79,6 +79,7 @@ enum nfs_param {
- Opt_source,
- Opt_tcp,
- Opt_timeo,
-+ Opt_trunkdiscovery,
- Opt_udp,
- Opt_v,
- Opt_vers,
-@@ -179,6 +180,7 @@ static const struct fs_parameter_spec nfs_fs_parameters[] = {
- fsparam_string("source", Opt_source),
- fsparam_flag ("tcp", Opt_tcp),
- fsparam_u32 ("timeo", Opt_timeo),
-+ fsparam_flag_no("trunkdiscovery", Opt_trunkdiscovery),
- fsparam_flag ("udp", Opt_udp),
- fsparam_flag ("v2", Opt_v),
- fsparam_flag ("v3", Opt_v),
-@@ -528,6 +530,12 @@ static int nfs_fs_context_parse_param(struct fs_context *fc,
- else
- ctx->flags &= ~NFS_MOUNT_NOCTO;
- break;
-+ case Opt_trunkdiscovery:
-+ if (result.negated)
-+ ctx->flags &= ~NFS_MOUNT_TRUNK_DISCOVERY;
-+ else
-+ ctx->flags |= NFS_MOUNT_TRUNK_DISCOVERY;
-+ break;
- case Opt_ac:
- if (result.negated)
- ctx->flags |= NFS_MOUNT_NOAC;
-diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
-index da9ef0ab9b4b..5336e494703b 100644
---- a/include/linux/nfs_fs_sb.h
-+++ b/include/linux/nfs_fs_sb.h
-@@ -156,6 +156,7 @@ struct nfs_server {
- #define NFS_MOUNT_SOFTREVAL 0x800000
- #define NFS_MOUNT_WRITE_EAGER 0x01000000
- #define NFS_MOUNT_WRITE_WAIT 0x02000000
-+#define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000
-
- unsigned int fattr_valid; /* Valid attributes */
- unsigned int caps; /* server capabilities */
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Maher Sanalla <msanalla@nvidia.com>
+Date: Sun, 24 Jul 2022 11:28:21 +0300
+Subject: [PATCH] net/mlx5: Adjust log_max_qp to be 18 at most
+
+[ Upstream commit a6e9085d791f8306084fd5bc44dd3fdd4e1ac27b ]
+
+The cited commit limited log_max_qp to be 17 due to FW capabilities.
+Recently, it turned out that there are old FW versions that supported
+more than 17, so the cited commit caused a degradation.
+
+Thus, set the maximum log_max_qp back to 18 as it was before the
+cited commit.
+
+Fixes: 7f839965b2d7 ("net/mlx5: Update log_max_qp value to be 17 at most")
+Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+index ef196cb764e2..2ad8027cb745 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
+@@ -526,7 +526,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
+
+ /* Check log_max_qp from HCA caps to set in current profile */
+ if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
+- prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp));
++ prof->log_max_qp = min_t(u8, 18, MLX5_CAP_GEN_MAX(dev, log_max_qp));
+ } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
+ mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
+ prof->log_max_qp,
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Marc Bevand <m@zorinaq.com>
-Date: Tue, 21 Dec 2021 15:31:12 -0800
-Subject: [PATCH] EDAC/amd64: Add PCI device IDs for family 19h model 50h
-
-Add the new family 19h model 50h PCI IDs (device 18h functions 0 and 6)
-to support Ryzen 5000 APUs ("Cezanne").
-
-Signed-off-by: Marc Bevand <m@zorinaq.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- drivers/edac/amd64_edac.c | 15 +++++++++++++++
- drivers/edac/amd64_edac.h | 3 +++
- 2 files changed, 18 insertions(+)
-
-diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
-index c6c58f01067f..f8ef2edf8abf 100644
---- a/drivers/edac/amd64_edac.c
-+++ b/drivers/edac/amd64_edac.c
-@@ -2660,6 +2660,16 @@ static struct amd64_family_type family_types[] = {
- .dbam_to_cs = f17_addr_mask_to_cs_size,
- }
- },
-+ [F19_M50H_CPUS] = {
-+ .ctl_name = "F19h_M50h",
-+ .f0_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F0,
-+ .f6_id = PCI_DEVICE_ID_AMD_19H_M50H_DF_F6,
-+ .max_mcs = 2,
-+ .ops = {
-+ .early_channel_count = f17_early_channel_count,
-+ .dbam_to_cs = f17_addr_mask_to_cs_size,
-+ }
-+ },
- };
-
- /*
-@@ -3706,6 +3716,11 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt)
- pvt->ops = &family_types[F17_M70H_CPUS].ops;
- fam_type->ctl_name = "F19h_M20h";
- break;
-+ } else if (pvt->model >= 0x50 && pvt->model <= 0x5f) {
-+ fam_type = &family_types[F19_M50H_CPUS];
-+ pvt->ops = &family_types[F19_M50H_CPUS].ops;
-+ fam_type->ctl_name = "F19h_M50h";
-+ break;
- } else if (pvt->model >= 0xa0 && pvt->model <= 0xaf) {
- fam_type = &family_types[F19_M10H_CPUS];
- pvt->ops = &family_types[F19_M10H_CPUS].ops;
-diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
-index 650cab401e21..352bda9803f6 100644
---- a/drivers/edac/amd64_edac.h
-+++ b/drivers/edac/amd64_edac.h
-@@ -128,6 +128,8 @@
- #define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656
- #define PCI_DEVICE_ID_AMD_19H_M10H_DF_F0 0x14ad
- #define PCI_DEVICE_ID_AMD_19H_M10H_DF_F6 0x14b3
-+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F0 0x166a
-+#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F6 0x1670
-
- /*
- * Function 1 - Address Map
-@@ -301,6 +303,7 @@ enum amd_families {
- F17_M70H_CPUS,
- F19_CPUS,
- F19_M10H_CPUS,
-+ F19_M50H_CPUS,
- NUM_FAMILIES,
- };
-
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 4 Aug 2022 15:28:32 +0200
+Subject: [PATCH] KVM: x86: revalidate steal time cache if MSR value changes
+
+commit 901d3765fa804ce42812f1d5b1f3de2dfbb26723 upstream.
+
+Commit 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time
+/ preempted status", 2021-11-11) open coded the previous call to
+kvm_map_gfn, but in doing so it dropped the comparison between the cached
+guest physical address and the one in the MSR. This causes an incorrect
+cache hit if the guest modifies the steal time address while the memslots
+remain the same. This can happen with kexec, in which case the steal
+time data is written at the address used by the old kernel instead of
+the old one.
+
+While at it, rename the variable from gfn to gpa since it is a plain
+physical address and not a right-shifted one.
+
+Reported-by: Dave Young <ruyang@redhat.com>
+Reported-by: Xiaoying Yan <yiyan@redhat.com>
+Analyzed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: stable@vger.kernel.org
+Fixes: 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time / preempted status")
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ arch/x86/kvm/x86.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index aec63cebe0b7..a99eec435652 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -3356,6 +3356,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
+ struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
+ struct kvm_steal_time __user *st;
+ struct kvm_memslots *slots;
++ gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
+ u64 steal;
+ u32 version;
+
+@@ -3373,13 +3374,12 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
+ slots = kvm_memslots(vcpu->kvm);
+
+ if (unlikely(slots->generation != ghc->generation ||
++ gpa != ghc->gpa ||
+ kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
+- gfn_t gfn = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
+-
+ /* We rely on the fact that it fits in a single page. */
+ BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);
+
+- if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gfn, sizeof(*st)) ||
++ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) ||
+ kvm_is_error_hva(ghc->hva) || !ghc->memslot)
+ return;
+ }
--- /dev/null
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: Paolo Bonzini <pbonzini@redhat.com>
+Date: Thu, 4 Aug 2022 15:28:32 +0200
+Subject: [PATCH] KVM: x86: do not report preemption if the steal time cache is
+ stale
+
+commit c3c28d24d910a746b02f496d190e0e8c6560224b upstream.
+
+Commit 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time
+/ preempted status", 2021-11-11) open coded the previous call to
+kvm_map_gfn, but in doing so it dropped the comparison between the cached
+guest physical address and the one in the MSR. This causes an incorrect
+cache hit if the guest modifies the steal time address while the memslots
+remain the same. This can happen with kexec, in which case the preempted
+bit is written at the address used by the old kernel instead of
+the old one.
+
+Cc: David Woodhouse <dwmw@amazon.co.uk>
+Cc: stable@vger.kernel.org
+Fixes: 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time / preempted status")
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
+---
+ arch/x86/kvm/x86.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index a99eec435652..a088f5e76966 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4603,6 +4603,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
+ struct kvm_steal_time __user *st;
+ struct kvm_memslots *slots;
+ static const u8 preempted = KVM_VCPU_PREEMPTED;
++ gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
+
+ if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
+ return;
+@@ -4617,6 +4618,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
+ slots = kvm_memslots(vcpu->kvm);
+
+ if (unlikely(slots->generation != ghc->generation ||
++ gpa != ghc->gpa ||
+ kvm_is_error_hva(ghc->hva) || !ghc->memslot))
+ return;
+
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Maxim Levitsky <mlevitsk@redhat.com>
-Date: Wed, 3 Aug 2022 18:49:59 +0300
-Subject: [PATCH] bug: introduce ASSERT_STRUCT_OFFSET
-
-ASSERT_STRUCT_OFFSET allows to assert during the build of
-the kernel that a field in a struct have an expected offset.
-
-KVM used to have such macro, but there is almost nothing KVM specific
-in it so move it to build_bug.h, so that it can be used in other
-places in KVM.
-
-Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/vmx/vmcs12.h | 5 ++---
- include/linux/build_bug.h | 9 +++++++++
- 2 files changed, 11 insertions(+), 3 deletions(-)
-
-diff --git a/arch/x86/kvm/vmx/vmcs12.h b/arch/x86/kvm/vmx/vmcs12.h
-index 2a45f026ee11..ba8617964982 100644
---- a/arch/x86/kvm/vmx/vmcs12.h
-+++ b/arch/x86/kvm/vmx/vmcs12.h
-@@ -208,9 +208,8 @@ struct __packed vmcs12 {
- /*
- * For save/restore compatibility, the vmcs12 field offsets must not change.
- */
--#define CHECK_OFFSET(field, loc) \
-- BUILD_BUG_ON_MSG(offsetof(struct vmcs12, field) != (loc), \
-- "Offset of " #field " in struct vmcs12 has changed.")
-+#define CHECK_OFFSET(field, loc) \
-+ ASSERT_STRUCT_OFFSET(struct vmcs12, field, loc)
-
- static inline void vmx_check_vmcs12_offsets(void)
- {
-diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
-index e3a0be2c90ad..3aa3640f8c18 100644
---- a/include/linux/build_bug.h
-+++ b/include/linux/build_bug.h
-@@ -77,4 +77,13 @@
- #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr)
- #define __static_assert(expr, msg, ...) _Static_assert(expr, msg)
-
-+
-+/*
-+ * Compile time check that field has an expected offset
-+ */
-+#define ASSERT_STRUCT_OFFSET(type, field, expected_offset) \
-+ BUILD_BUG_ON_MSG(offsetof(type, field) != (expected_offset), \
-+ "Offset of " #field " in " #type " has changed.")
-+
-+
- #endif /* _LINUX_BUILD_BUG_H */
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Maxim Levitsky <mlevitsk@redhat.com>
-Date: Wed, 3 Aug 2022 18:50:00 +0300
-Subject: [PATCH] KVM: x86: emulator: em_sysexit should update ctxt->mode
-
-This is one of the instructions that can change the
-processor mode.
-
-Note that this is likely a benign bug, because the only problematic
-mode change is from 32 bit to 64 bit which can lead to truncation of RIP,
-and it is not possible to do with sysexit,
-since sysexit running in 32 bit mode will be limited to 32 bit version.
-
-Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/emulate.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 318a78379ca6..35b12692739c 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -2862,6 +2862,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
- ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
-
- ctxt->_eip = rdx;
-+ ctxt->mode = usermode;
- *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
-
- return X86EMUL_CONTINUE;
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Maxim Levitsky <mlevitsk@redhat.com>
-Date: Wed, 3 Aug 2022 18:50:01 +0300
-Subject: [PATCH] KVM: x86: emulator: introduce emulator_recalc_and_set_mode
-
-Some instructions update the cpu execution mode, which needs
-to update the emulation mode.
-
-Extract this code, and make assign_eip_far use it.
-
-assign_eip_far now reads CS, instead of getting it via a parameter,
-which is ok, because callers always assign CS to the
-same value before calling it.
-
-No functional change is intended.
-
-Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/emulate.c | 85 ++++++++++++++++++++++++++++--------------
- 1 file changed, 57 insertions(+), 28 deletions(-)
-
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 35b12692739c..6a597d68d456 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -795,8 +795,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
- ctxt->mode, linear);
- }
-
--static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
-- enum x86emul_mode mode)
-+static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
- {
- ulong linear;
- int rc;
-@@ -806,41 +805,71 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
-
- if (ctxt->op_bytes != sizeof(unsigned long))
- addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
-- rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
-+ rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
- if (rc == X86EMUL_CONTINUE)
- ctxt->_eip = addr.ea;
- return rc;
- }
-
-+static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
-+{
-+ u64 efer;
-+ struct desc_struct cs;
-+ u16 selector;
-+ u32 base3;
-+
-+ ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
-+
-+ if (!ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE) {
-+ /* Real mode. cpu must not have long mode active */
-+ if (efer & EFER_LMA)
-+ return X86EMUL_UNHANDLEABLE;
-+ ctxt->mode = X86EMUL_MODE_REAL;
-+ return X86EMUL_CONTINUE;
-+ }
-+
-+ if (ctxt->eflags & X86_EFLAGS_VM) {
-+ /* Protected/VM86 mode. cpu must not have long mode active */
-+ if (efer & EFER_LMA)
-+ return X86EMUL_UNHANDLEABLE;
-+ ctxt->mode = X86EMUL_MODE_VM86;
-+ return X86EMUL_CONTINUE;
-+ }
-+
-+ if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
-+ return X86EMUL_UNHANDLEABLE;
-+
-+ if (efer & EFER_LMA) {
-+ if (cs.l) {
-+ /* Proper long mode */
-+ ctxt->mode = X86EMUL_MODE_PROT64;
-+ } else if (cs.d) {
-+ /* 32 bit compatibility mode*/
-+ ctxt->mode = X86EMUL_MODE_PROT32;
-+ } else {
-+ ctxt->mode = X86EMUL_MODE_PROT16;
-+ }
-+ } else {
-+ /* Legacy 32 bit / 16 bit mode */
-+ ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
-+ }
-+
-+ return X86EMUL_CONTINUE;
-+}
-+
- static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
- {
-- return assign_eip(ctxt, dst, ctxt->mode);
-+ return assign_eip(ctxt, dst);
- }
-
--static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
-- const struct desc_struct *cs_desc)
-+static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
- {
-- enum x86emul_mode mode = ctxt->mode;
-- int rc;
-+ int rc = emulator_recalc_and_set_mode(ctxt);
-
--#ifdef CONFIG_X86_64
-- if (ctxt->mode >= X86EMUL_MODE_PROT16) {
-- if (cs_desc->l) {
-- u64 efer = 0;
-+ if (rc != X86EMUL_CONTINUE)
-+ return rc;
-
-- ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
-- if (efer & EFER_LMA)
-- mode = X86EMUL_MODE_PROT64;
-- } else
-- mode = X86EMUL_MODE_PROT32; /* temporary value */
-- }
--#endif
-- if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
-- mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
-- rc = assign_eip(ctxt, dst, mode);
-- if (rc == X86EMUL_CONTINUE)
-- ctxt->mode = mode;
-- return rc;
-+ return assign_eip(ctxt, dst);
- }
-
- static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
-@@ -2154,7 +2183,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
- if (rc != X86EMUL_CONTINUE)
- return rc;
-
-- rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
-+ rc = assign_eip_far(ctxt, ctxt->src.val);
- /* Error handling is not implemented. */
- if (rc != X86EMUL_CONTINUE)
- return X86EMUL_UNHANDLEABLE;
-@@ -2235,7 +2264,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
- &new_desc);
- if (rc != X86EMUL_CONTINUE)
- return rc;
-- rc = assign_eip_far(ctxt, eip, &new_desc);
-+ rc = assign_eip_far(ctxt, eip);
- /* Error handling is not implemented. */
- if (rc != X86EMUL_CONTINUE)
- return X86EMUL_UNHANDLEABLE;
-@@ -3459,7 +3488,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
- if (rc != X86EMUL_CONTINUE)
- return rc;
-
-- rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
-+ rc = assign_eip_far(ctxt, ctxt->src.val);
- if (rc != X86EMUL_CONTINUE)
- goto fail;
-
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Maxim Levitsky <mlevitsk@redhat.com>
-Date: Wed, 3 Aug 2022 18:50:02 +0300
-Subject: [PATCH] KVM: x86: emulator: update the emulation mode after rsm
-
-This ensures that RIP will be correctly written back,
-because the RSM instruction can switch the CPU mode from
-32 bit (or less) to 64 bit.
-
-This fixes a guest crash in case the #SMI is received
-while the guest runs a code from an address > 32 bit.
-
-Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/emulate.c | 5 +++++
- 1 file changed, 5 insertions(+)
-
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 6a597d68d456..49697d589f87 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -2639,6 +2639,11 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
- if (ret != X86EMUL_CONTINUE)
- goto emulate_shutdown;
-
-+
-+ ret = emulator_recalc_and_set_mode(ctxt);
-+ if (ret != X86EMUL_CONTINUE)
-+ goto emulate_shutdown;
-+
- /*
- * Note, the ctxt->ops callbacks are responsible for handling side
- * effects when writing MSRs and CRs, e.g. MMU context resets, CPUID
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Maxim Levitsky <mlevitsk@redhat.com>
-Date: Wed, 3 Aug 2022 18:50:03 +0300
-Subject: [PATCH] KVM: x86: emulator: update the emulation mode after CR0 write
-
-CR0.PE toggles real/protected mode, thus its update
-should update the emulation mode.
-
-This is likely a benign bug because there is no writeback
-of state, other than the RIP increment, and when toggling
-CR0.PE, the CPU has to execute code from a very low memory address.
-
-Also CR0.PG toggle when EFER.LMA is set, toggles the long mode.
-
-Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/emulate.c | 14 +++++++++++++-
- 1 file changed, 13 insertions(+), 1 deletion(-)
-
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 49697d589f87..89f035fc52e7 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -3635,11 +3635,23 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt)
-
- static int em_cr_write(struct x86_emulate_ctxt *ctxt)
- {
-- if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
-+ int cr_num = ctxt->modrm_reg;
-+ int r;
-+
-+ if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
- return emulate_gp(ctxt, 0);
-
- /* Disable writeback. */
- ctxt->dst.type = OP_NONE;
-+
-+ if (cr_num == 0) {
-+ /* CR0 write might have updated CR0.PE and/or CR0.PG
-+ * which can affect the cpu execution mode */
-+ r = emulator_recalc_and_set_mode(ctxt);
-+ if (r != X86EMUL_CONTINUE)
-+ return r;
-+ }
-+
- return X86EMUL_CONTINUE;
- }
-
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Maxim Levitsky <mlevitsk@redhat.com>
-Date: Wed, 3 Aug 2022 18:50:05 +0300
-Subject: [PATCH] KVM: x86: emulator/smm: add structs for KVM's smram layout
-
-Those structs will be used to read/write the smram state image.
-
-Also document the differences between KVM's SMRAM layout and SMRAM
-layout that is used by real Intel/AMD cpus.
-
-Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/emulate.c | 6 +
- arch/x86/kvm/kvm_emulate.h | 218 +++++++++++++++++++++++++++++++++++++
- arch/x86/kvm/x86.c | 1 +
- 3 files changed, 225 insertions(+)
-
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 89f035fc52e7..bfaf5d24bf1e 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -5825,3 +5825,9 @@ bool emulator_can_use_gpa(struct x86_emulate_ctxt *ctxt)
-
- return true;
- }
-+
-+void __init kvm_emulator_init(void)
-+{
-+ __check_smram32_offsets();
-+ __check_smram64_offsets();
-+}
-diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
-index fb09cd22cb7f..0b2bbcce321a 100644
---- a/arch/x86/kvm/kvm_emulate.h
-+++ b/arch/x86/kvm/kvm_emulate.h
-@@ -13,6 +13,7 @@
- #define _ASM_X86_KVM_X86_EMULATE_H
-
- #include <asm/desc_defs.h>
-+#include <linux/build_bug.h>
- #include "fpu.h"
-
- struct x86_emulate_ctxt;
-@@ -482,6 +483,223 @@ enum x86_intercept {
- nr_x86_intercepts
- };
-
-+
-+/* 32 bit KVM's emulated SMM layout. Loosely based on Intel's layout */
-+
-+struct kvm_smm_seg_state_32 {
-+ u32 flags;
-+ u32 limit;
-+ u32 base;
-+} __packed;
-+
-+struct kvm_smram_state_32 {
-+ u32 reserved1[62];
-+ u32 smbase;
-+ u32 smm_revision;
-+ u32 reserved2[5];
-+ u32 cr4; /* CR4 is not present in Intel/AMD SMRAM image */
-+ u32 reserved3[5];
-+
-+ /*
-+ * Segment state is not present/documented in the Intel/AMD SMRAM image
-+ * Instead this area on Intel/AMD contains IO/HLT restart flags.
-+ */
-+ struct kvm_smm_seg_state_32 ds;
-+ struct kvm_smm_seg_state_32 fs;
-+ struct kvm_smm_seg_state_32 gs;
-+ struct kvm_smm_seg_state_32 idtr; /* IDTR has only base and limit */
-+ struct kvm_smm_seg_state_32 tr;
-+ u32 reserved;
-+ struct kvm_smm_seg_state_32 gdtr; /* GDTR has only base and limit */
-+ struct kvm_smm_seg_state_32 ldtr;
-+ struct kvm_smm_seg_state_32 es;
-+ struct kvm_smm_seg_state_32 cs;
-+ struct kvm_smm_seg_state_32 ss;
-+
-+ u32 es_sel;
-+ u32 cs_sel;
-+ u32 ss_sel;
-+ u32 ds_sel;
-+ u32 fs_sel;
-+ u32 gs_sel;
-+ u32 ldtr_sel;
-+ u32 tr_sel;
-+
-+ u32 dr7;
-+ u32 dr6;
-+ u32 gprs[8]; /* GPRS in the "natural" X86 order (EAX/ECX/EDX.../EDI) */
-+ u32 eip;
-+ u32 eflags;
-+ u32 cr3;
-+ u32 cr0;
-+} __packed;
-+
-+
-+static inline void __check_smram32_offsets(void)
-+{
-+#define __CHECK_SMRAM32_OFFSET(field, offset) \
-+ ASSERT_STRUCT_OFFSET(struct kvm_smram_state_32, field, offset - 0xFE00)
-+
-+ __CHECK_SMRAM32_OFFSET(reserved1, 0xFE00);
-+ __CHECK_SMRAM32_OFFSET(smbase, 0xFEF8);
-+ __CHECK_SMRAM32_OFFSET(smm_revision, 0xFEFC);
-+ __CHECK_SMRAM32_OFFSET(reserved2, 0xFF00);
-+ __CHECK_SMRAM32_OFFSET(cr4, 0xFF14);
-+ __CHECK_SMRAM32_OFFSET(reserved3, 0xFF18);
-+ __CHECK_SMRAM32_OFFSET(ds, 0xFF2C);
-+ __CHECK_SMRAM32_OFFSET(fs, 0xFF38);
-+ __CHECK_SMRAM32_OFFSET(gs, 0xFF44);
-+ __CHECK_SMRAM32_OFFSET(idtr, 0xFF50);
-+ __CHECK_SMRAM32_OFFSET(tr, 0xFF5C);
-+ __CHECK_SMRAM32_OFFSET(gdtr, 0xFF6C);
-+ __CHECK_SMRAM32_OFFSET(ldtr, 0xFF78);
-+ __CHECK_SMRAM32_OFFSET(es, 0xFF84);
-+ __CHECK_SMRAM32_OFFSET(cs, 0xFF90);
-+ __CHECK_SMRAM32_OFFSET(ss, 0xFF9C);
-+ __CHECK_SMRAM32_OFFSET(es_sel, 0xFFA8);
-+ __CHECK_SMRAM32_OFFSET(cs_sel, 0xFFAC);
-+ __CHECK_SMRAM32_OFFSET(ss_sel, 0xFFB0);
-+ __CHECK_SMRAM32_OFFSET(ds_sel, 0xFFB4);
-+ __CHECK_SMRAM32_OFFSET(fs_sel, 0xFFB8);
-+ __CHECK_SMRAM32_OFFSET(gs_sel, 0xFFBC);
-+ __CHECK_SMRAM32_OFFSET(ldtr_sel, 0xFFC0);
-+ __CHECK_SMRAM32_OFFSET(tr_sel, 0xFFC4);
-+ __CHECK_SMRAM32_OFFSET(dr7, 0xFFC8);
-+ __CHECK_SMRAM32_OFFSET(dr6, 0xFFCC);
-+ __CHECK_SMRAM32_OFFSET(gprs, 0xFFD0);
-+ __CHECK_SMRAM32_OFFSET(eip, 0xFFF0);
-+ __CHECK_SMRAM32_OFFSET(eflags, 0xFFF4);
-+ __CHECK_SMRAM32_OFFSET(cr3, 0xFFF8);
-+ __CHECK_SMRAM32_OFFSET(cr0, 0xFFFC);
-+#undef __CHECK_SMRAM32_OFFSET
-+}
-+
-+
-+/* 64 bit KVM's emulated SMM layout. Based on AMD64 layout */
-+
-+struct kvm_smm_seg_state_64 {
-+ u16 selector;
-+ u16 attributes;
-+ u32 limit;
-+ u64 base;
-+};
-+
-+struct kvm_smram_state_64 {
-+
-+ struct kvm_smm_seg_state_64 es;
-+ struct kvm_smm_seg_state_64 cs;
-+ struct kvm_smm_seg_state_64 ss;
-+ struct kvm_smm_seg_state_64 ds;
-+ struct kvm_smm_seg_state_64 fs;
-+ struct kvm_smm_seg_state_64 gs;
-+ struct kvm_smm_seg_state_64 gdtr; /* GDTR has only base and limit*/
-+ struct kvm_smm_seg_state_64 ldtr;
-+ struct kvm_smm_seg_state_64 idtr; /* IDTR has only base and limit*/
-+ struct kvm_smm_seg_state_64 tr;
-+
-+ /* I/O restart and auto halt restart are not implemented by KVM */
-+ u64 io_restart_rip;
-+ u64 io_restart_rcx;
-+ u64 io_restart_rsi;
-+ u64 io_restart_rdi;
-+ u32 io_restart_dword;
-+ u32 reserved1;
-+ u8 io_inst_restart;
-+ u8 auto_hlt_restart;
-+ u8 reserved2[6];
-+
-+ u64 efer;
-+
-+ /*
-+ * Two fields below are implemented on AMD only, to store
-+ * SVM guest vmcb address if the #SMI was received while in the guest mode.
-+ */
-+ u64 svm_guest_flag;
-+ u64 svm_guest_vmcb_gpa;
-+ u64 svm_guest_virtual_int; /* unknown purpose, not implemented */
-+
-+ u32 reserved3[3];
-+ u32 smm_revison;
-+ u32 smbase;
-+ u32 reserved4[5];
-+
-+ /* ssp and svm_* fields below are not implemented by KVM */
-+ u64 ssp;
-+ u64 svm_guest_pat;
-+ u64 svm_host_efer;
-+ u64 svm_host_cr4;
-+ u64 svm_host_cr3;
-+ u64 svm_host_cr0;
-+
-+ u64 cr4;
-+ u64 cr3;
-+ u64 cr0;
-+ u64 dr7;
-+ u64 dr6;
-+ u64 rflags;
-+ u64 rip;
-+ u64 gprs[16]; /* GPRS in a reversed "natural" X86 order (R15/R14/../RCX/RAX.) */
-+};
-+
-+
-+static inline void __check_smram64_offsets(void)
-+{
-+#define __CHECK_SMRAM64_OFFSET(field, offset) \
-+ ASSERT_STRUCT_OFFSET(struct kvm_smram_state_64, field, offset - 0xFE00)
-+
-+ __CHECK_SMRAM64_OFFSET(es, 0xFE00);
-+ __CHECK_SMRAM64_OFFSET(cs, 0xFE10);
-+ __CHECK_SMRAM64_OFFSET(ss, 0xFE20);
-+ __CHECK_SMRAM64_OFFSET(ds, 0xFE30);
-+ __CHECK_SMRAM64_OFFSET(fs, 0xFE40);
-+ __CHECK_SMRAM64_OFFSET(gs, 0xFE50);
-+ __CHECK_SMRAM64_OFFSET(gdtr, 0xFE60);
-+ __CHECK_SMRAM64_OFFSET(ldtr, 0xFE70);
-+ __CHECK_SMRAM64_OFFSET(idtr, 0xFE80);
-+ __CHECK_SMRAM64_OFFSET(tr, 0xFE90);
-+ __CHECK_SMRAM64_OFFSET(io_restart_rip, 0xFEA0);
-+ __CHECK_SMRAM64_OFFSET(io_restart_rcx, 0xFEA8);
-+ __CHECK_SMRAM64_OFFSET(io_restart_rsi, 0xFEB0);
-+ __CHECK_SMRAM64_OFFSET(io_restart_rdi, 0xFEB8);
-+ __CHECK_SMRAM64_OFFSET(io_restart_dword, 0xFEC0);
-+ __CHECK_SMRAM64_OFFSET(reserved1, 0xFEC4);
-+ __CHECK_SMRAM64_OFFSET(io_inst_restart, 0xFEC8);
-+ __CHECK_SMRAM64_OFFSET(auto_hlt_restart, 0xFEC9);
-+ __CHECK_SMRAM64_OFFSET(reserved2, 0xFECA);
-+ __CHECK_SMRAM64_OFFSET(efer, 0xFED0);
-+ __CHECK_SMRAM64_OFFSET(svm_guest_flag, 0xFED8);
-+ __CHECK_SMRAM64_OFFSET(svm_guest_vmcb_gpa, 0xFEE0);
-+ __CHECK_SMRAM64_OFFSET(svm_guest_virtual_int, 0xFEE8);
-+ __CHECK_SMRAM64_OFFSET(reserved3, 0xFEF0);
-+ __CHECK_SMRAM64_OFFSET(smm_revison, 0xFEFC);
-+ __CHECK_SMRAM64_OFFSET(smbase, 0xFF00);
-+ __CHECK_SMRAM64_OFFSET(reserved4, 0xFF04);
-+ __CHECK_SMRAM64_OFFSET(ssp, 0xFF18);
-+ __CHECK_SMRAM64_OFFSET(svm_guest_pat, 0xFF20);
-+ __CHECK_SMRAM64_OFFSET(svm_host_efer, 0xFF28);
-+ __CHECK_SMRAM64_OFFSET(svm_host_cr4, 0xFF30);
-+ __CHECK_SMRAM64_OFFSET(svm_host_cr3, 0xFF38);
-+ __CHECK_SMRAM64_OFFSET(svm_host_cr0, 0xFF40);
-+ __CHECK_SMRAM64_OFFSET(cr4, 0xFF48);
-+ __CHECK_SMRAM64_OFFSET(cr3, 0xFF50);
-+ __CHECK_SMRAM64_OFFSET(cr0, 0xFF58);
-+ __CHECK_SMRAM64_OFFSET(dr7, 0xFF60);
-+ __CHECK_SMRAM64_OFFSET(dr6, 0xFF68);
-+ __CHECK_SMRAM64_OFFSET(rflags, 0xFF70);
-+ __CHECK_SMRAM64_OFFSET(rip, 0xFF78);
-+ __CHECK_SMRAM64_OFFSET(gprs, 0xFF80);
-+#undef __CHECK_SMRAM64_OFFSET
-+}
-+
-+union kvm_smram {
-+ struct kvm_smram_state_64 smram64;
-+ struct kvm_smram_state_32 smram32;
-+ u8 bytes[512];
-+};
-+
-+void __init kvm_emulator_init(void);
-+
-+
- /* Host execution mode. */
- #if defined(CONFIG_X86_32)
- #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 604716996c5d..673262228f3e 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -12449,6 +12449,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_vmgexit_msr_protocol_exit);
- static int __init kvm_x86_init(void)
- {
- kvm_mmu_x86_module_init();
-+ kvm_emulator_init();
- return 0;
- }
- module_init(kvm_x86_init);
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Maxim Levitsky <mlevitsk@redhat.com>
-Date: Wed, 3 Aug 2022 18:50:06 +0300
-Subject: [PATCH] KVM: x86: emulator/smm: use smram structs in the common code
-
-Switch from using a raw array to 'union kvm_smram'.
-
-Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/include/asm/kvm_host.h | 5 +++--
- arch/x86/kvm/emulate.c | 12 +++++++-----
- arch/x86/kvm/kvm_emulate.h | 3 ++-
- arch/x86/kvm/svm/svm.c | 8 ++++++--
- arch/x86/kvm/vmx/vmx.c | 4 ++--
- arch/x86/kvm/x86.c | 16 ++++++++--------
- 6 files changed, 28 insertions(+), 20 deletions(-)
-
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index 867febee8fc3..fb48dd8773e1 100644
---- a/arch/x86/include/asm/kvm_host.h
-+++ b/arch/x86/include/asm/kvm_host.h
-@@ -200,6 +200,7 @@ typedef enum exit_fastpath_completion fastpath_t;
-
- struct x86_emulate_ctxt;
- struct x86_exception;
-+union kvm_smram;
- enum x86_intercept;
- enum x86_intercept_stage;
-
-@@ -1463,8 +1464,8 @@ struct kvm_x86_ops {
- void (*setup_mce)(struct kvm_vcpu *vcpu);
-
- int (*smi_allowed)(struct kvm_vcpu *vcpu, bool for_injection);
-- int (*enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
-- int (*leave_smm)(struct kvm_vcpu *vcpu, const char *smstate);
-+ int (*enter_smm)(struct kvm_vcpu *vcpu, union kvm_smram *smram);
-+ int (*leave_smm)(struct kvm_vcpu *vcpu, const union kvm_smram *smram);
- void (*enable_smi_window)(struct kvm_vcpu *vcpu);
-
- int (*mem_enc_op)(struct kvm *kvm, void __user *argp);
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index bfaf5d24bf1e..730c3e2662d6 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -2567,16 +2567,18 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
- static int em_rsm(struct x86_emulate_ctxt *ctxt)
- {
- unsigned long cr0, cr4, efer;
-- char buf[512];
-+ const union kvm_smram smram;
- u64 smbase;
- int ret;
-
-+ BUILD_BUG_ON(sizeof(smram) != 512);
-+
- if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
- return emulate_ud(ctxt);
-
- smbase = ctxt->ops->get_smbase(ctxt);
-
-- ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, buf, sizeof(buf));
-+ ret = ctxt->ops->read_phys(ctxt, smbase + 0xfe00, (void *)&smram, sizeof(smram));
- if (ret != X86EMUL_CONTINUE)
- return X86EMUL_UNHANDLEABLE;
-
-@@ -2626,15 +2628,15 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
- * state (e.g. enter guest mode) before loading state from the SMM
- * state-save area.
- */
-- if (ctxt->ops->leave_smm(ctxt, buf))
-+ if (ctxt->ops->leave_smm(ctxt, &smram))
- goto emulate_shutdown;
-
- #ifdef CONFIG_X86_64
- if (emulator_has_longmode(ctxt))
-- ret = rsm_load_state_64(ctxt, buf);
-+ ret = rsm_load_state_64(ctxt, (const char *)&smram);
- else
- #endif
-- ret = rsm_load_state_32(ctxt, buf);
-+ ret = rsm_load_state_32(ctxt, (const char *)&smram);
-
- if (ret != X86EMUL_CONTINUE)
- goto emulate_shutdown;
-diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
-index 0b2bbcce321a..3b37b3e17379 100644
---- a/arch/x86/kvm/kvm_emulate.h
-+++ b/arch/x86/kvm/kvm_emulate.h
-@@ -19,6 +19,7 @@
- struct x86_emulate_ctxt;
- enum x86_intercept;
- enum x86_intercept_stage;
-+union kvm_smram;
-
- struct x86_exception {
- u8 vector;
-@@ -233,7 +234,7 @@ struct x86_emulate_ops {
-
- unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
- void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
-- int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const char *smstate);
-+ int (*leave_smm)(struct x86_emulate_ctxt *ctxt, const union kvm_smram *smram);
- void (*triple_fault)(struct x86_emulate_ctxt *ctxt);
- int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr);
- };
-diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
-index 21f747eacc9a..d903120811b9 100644
---- a/arch/x86/kvm/svm/svm.c
-+++ b/arch/x86/kvm/svm/svm.c
-@@ -4302,12 +4302,14 @@ static int svm_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
- return !svm_smi_blocked(vcpu);
- }
-
--static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
-+static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
- {
- struct vcpu_svm *svm = to_svm(vcpu);
- struct kvm_host_map map_save;
- int ret;
-
-+ char *smstate = (char *)smram;
-+
- if (!is_guest_mode(vcpu))
- return 0;
-
-@@ -4349,7 +4351,7 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
- return 0;
- }
-
--static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
-+static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
- {
- struct vcpu_svm *svm = to_svm(vcpu);
- struct kvm_host_map map, map_save;
-@@ -4357,6 +4359,8 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
- struct vmcb *vmcb12;
- int ret;
-
-+ const char *smstate = (const char *)smram;
-+
- if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
- return 0;
-
-diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
-index 417176817d80..a45a43bcc844 100644
---- a/arch/x86/kvm/vmx/vmx.c
-+++ b/arch/x86/kvm/vmx/vmx.c
-@@ -7594,7 +7594,7 @@ static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
- return !is_smm(vcpu);
- }
-
--static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
-+static int vmx_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
- {
- struct vcpu_vmx *vmx = to_vmx(vcpu);
-
-@@ -7608,7 +7608,7 @@ static int vmx_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
- return 0;
- }
-
--static int vmx_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
-+static int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
- {
- struct vcpu_vmx *vmx = to_vmx(vcpu);
- int ret;
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 673262228f3e..37edf00584f8 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -7312,9 +7312,9 @@ static void emulator_exiting_smm(struct x86_emulate_ctxt *ctxt)
- }
-
- static int emulator_leave_smm(struct x86_emulate_ctxt *ctxt,
-- const char *smstate)
-+ const union kvm_smram *smram)
- {
-- return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smstate);
-+ return static_call(kvm_x86_leave_smm)(emul_to_vcpu(ctxt), smram);
- }
-
- static void emulator_triple_fault(struct x86_emulate_ctxt *ctxt)
-@@ -9171,25 +9171,25 @@ static void enter_smm(struct kvm_vcpu *vcpu)
- struct kvm_segment cs, ds;
- struct desc_ptr dt;
- unsigned long cr0;
-- char buf[512];
-+ union kvm_smram smram;
-
-- memset(buf, 0, 512);
-+ memset(smram.bytes, 0, sizeof(smram.bytes));
- #ifdef CONFIG_X86_64
- if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
-- enter_smm_save_state_64(vcpu, buf);
-+ enter_smm_save_state_64(vcpu, (char *)&smram);
- else
- #endif
-- enter_smm_save_state_32(vcpu, buf);
-+ enter_smm_save_state_32(vcpu, (char *)&smram);
-
- /*
- * Give enter_smm() a chance to make ISA-specific changes to the vCPU
- * state (e.g. leave guest mode) after we've saved the state into the
- * SMM state-save area.
- */
-- static_call(kvm_x86_enter_smm)(vcpu, buf);
-+ static_call(kvm_x86_enter_smm)(vcpu, &smram);
-
- kvm_smm_changed(vcpu, true);
-- kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf));
-+ kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram));
-
- if (static_call(kvm_x86_get_nmi_mask)(vcpu))
- vcpu->arch.hflags |= HF_SMM_INSIDE_NMI_MASK;
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Maxim Levitsky <mlevitsk@redhat.com>
-Date: Wed, 3 Aug 2022 18:50:07 +0300
-Subject: [PATCH] KVM: x86: emulator/smm: use smram struct for 32 bit smram
- load/restore
-
-Use kvm_smram_state_32 struct to save/restore 32 bit SMM state
-(used when X86_FEATURE_LM is not present in the guest CPUID).
-
-Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/emulate.c | 81 +++++++++++++++---------------------------
- arch/x86/kvm/x86.c | 75 +++++++++++++++++---------------------
- 2 files changed, 60 insertions(+), 96 deletions(-)
-
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 730c3e2662d6..ad5d2ab9ab84 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -2344,25 +2344,17 @@ static void rsm_set_desc_flags(struct desc_struct *desc, u32 flags)
- desc->type = (flags >> 8) & 15;
- }
-
--static int rsm_load_seg_32(struct x86_emulate_ctxt *ctxt, const char *smstate,
-+static void rsm_load_seg_32(struct x86_emulate_ctxt *ctxt,
-+ const struct kvm_smm_seg_state_32 *state,
-+ u16 selector,
- int n)
- {
- struct desc_struct desc;
-- int offset;
-- u16 selector;
--
-- selector = GET_SMSTATE(u32, smstate, 0x7fa8 + n * 4);
--
-- if (n < 3)
-- offset = 0x7f84 + n * 12;
-- else
-- offset = 0x7f2c + (n - 3) * 12;
-
-- set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
-- set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
-- rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, offset));
-+ set_desc_base(&desc, state->base);
-+ set_desc_limit(&desc, state->limit);
-+ rsm_set_desc_flags(&desc, state->flags);
- ctxt->ops->set_segment(ctxt, selector, &desc, 0, n);
-- return X86EMUL_CONTINUE;
- }
-
- #ifdef CONFIG_X86_64
-@@ -2433,63 +2425,46 @@ static int rsm_enter_protected_mode(struct x86_emulate_ctxt *ctxt,
- }
-
- static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
-- const char *smstate)
-+ const struct kvm_smram_state_32 *smstate)
- {
-- struct desc_struct desc;
- struct desc_ptr dt;
-- u16 selector;
-- u32 val, cr0, cr3, cr4;
- int i;
-
-- cr0 = GET_SMSTATE(u32, smstate, 0x7ffc);
-- cr3 = GET_SMSTATE(u32, smstate, 0x7ff8);
-- ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
-- ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);
-+ ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED;
-+ ctxt->_eip = smstate->eip;
-
- for (i = 0; i < 8; i++)
-- *reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);
--
-- val = GET_SMSTATE(u32, smstate, 0x7fcc);
-+ *reg_write(ctxt, i) = smstate->gprs[i];
-
-- if (ctxt->ops->set_dr(ctxt, 6, val))
-+ if (ctxt->ops->set_dr(ctxt, 6, smstate->dr6))
- return X86EMUL_UNHANDLEABLE;
--
-- val = GET_SMSTATE(u32, smstate, 0x7fc8);
--
-- if (ctxt->ops->set_dr(ctxt, 7, val))
-+ if (ctxt->ops->set_dr(ctxt, 7, smstate->dr7))
- return X86EMUL_UNHANDLEABLE;
-
-- selector = GET_SMSTATE(u32, smstate, 0x7fc4);
-- set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f64));
-- set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f60));
-- rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f5c));
-- ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_TR);
-+ rsm_load_seg_32(ctxt, &smstate->tr, smstate->tr_sel, VCPU_SREG_TR);
-+ rsm_load_seg_32(ctxt, &smstate->ldtr, smstate->ldtr_sel, VCPU_SREG_LDTR);
-
-- selector = GET_SMSTATE(u32, smstate, 0x7fc0);
-- set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7f80));
-- set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7f7c));
-- rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7f78));
-- ctxt->ops->set_segment(ctxt, selector, &desc, 0, VCPU_SREG_LDTR);
-
-- dt.address = GET_SMSTATE(u32, smstate, 0x7f74);
-- dt.size = GET_SMSTATE(u32, smstate, 0x7f70);
-+ dt.address = smstate->gdtr.base;
-+ dt.size = smstate->gdtr.limit;
- ctxt->ops->set_gdt(ctxt, &dt);
-
-- dt.address = GET_SMSTATE(u32, smstate, 0x7f58);
-- dt.size = GET_SMSTATE(u32, smstate, 0x7f54);
-+ dt.address = smstate->idtr.base;
-+ dt.size = smstate->idtr.limit;
- ctxt->ops->set_idt(ctxt, &dt);
-
-- for (i = 0; i < 6; i++) {
-- int r = rsm_load_seg_32(ctxt, smstate, i);
-- if (r != X86EMUL_CONTINUE)
-- return r;
-- }
-+ rsm_load_seg_32(ctxt, &smstate->es, smstate->es_sel, VCPU_SREG_ES);
-+ rsm_load_seg_32(ctxt, &smstate->cs, smstate->cs_sel, VCPU_SREG_CS);
-+ rsm_load_seg_32(ctxt, &smstate->ss, smstate->ss_sel, VCPU_SREG_SS);
-
-- cr4 = GET_SMSTATE(u32, smstate, 0x7f14);
-+ rsm_load_seg_32(ctxt, &smstate->ds, smstate->ds_sel, VCPU_SREG_DS);
-+ rsm_load_seg_32(ctxt, &smstate->fs, smstate->fs_sel, VCPU_SREG_FS);
-+ rsm_load_seg_32(ctxt, &smstate->gs, smstate->gs_sel, VCPU_SREG_GS);
-
-- ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7ef8));
-+ ctxt->ops->set_smbase(ctxt, smstate->smbase);
-
-- return rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
-+ return rsm_enter_protected_mode(ctxt, smstate->cr0,
-+ smstate->cr3, smstate->cr4);
- }
-
- #ifdef CONFIG_X86_64
-@@ -2636,7 +2611,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
- ret = rsm_load_state_64(ctxt, (const char *)&smram);
- else
- #endif
-- ret = rsm_load_state_32(ctxt, (const char *)&smram);
-+ ret = rsm_load_state_32(ctxt, &smram.smram32);
-
- if (ret != X86EMUL_CONTINUE)
- goto emulate_shutdown;
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 37edf00584f8..11e62b1f1764 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -9025,22 +9025,18 @@ static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
- return flags;
- }
-
--static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu, char *buf, int n)
-+static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu,
-+ struct kvm_smm_seg_state_32 *state,
-+ u32 *selector,
-+ int n)
- {
- struct kvm_segment seg;
-- int offset;
-
- kvm_get_segment(vcpu, &seg, n);
-- put_smstate(u32, buf, 0x7fa8 + n * 4, seg.selector);
--
-- if (n < 3)
-- offset = 0x7f84 + n * 12;
-- else
-- offset = 0x7f2c + (n - 3) * 12;
--
-- put_smstate(u32, buf, offset + 8, seg.base);
-- put_smstate(u32, buf, offset + 4, seg.limit);
-- put_smstate(u32, buf, offset, enter_smm_get_segment_flags(&seg));
-+ *selector = seg.selector;
-+ state->base = seg.base;
-+ state->limit = seg.limit;
-+ state->flags = enter_smm_get_segment_flags(&seg);
- }
-
- #ifdef CONFIG_X86_64
-@@ -9061,54 +9057,47 @@ static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
- }
- #endif
-
--static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, char *buf)
-+static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, struct kvm_smram_state_32 *smram)
- {
- struct desc_ptr dt;
-- struct kvm_segment seg;
- unsigned long val;
- int i;
-
-- put_smstate(u32, buf, 0x7ffc, kvm_read_cr0(vcpu));
-- put_smstate(u32, buf, 0x7ff8, kvm_read_cr3(vcpu));
-- put_smstate(u32, buf, 0x7ff4, kvm_get_rflags(vcpu));
-- put_smstate(u32, buf, 0x7ff0, kvm_rip_read(vcpu));
-+ smram->cr0 = kvm_read_cr0(vcpu);
-+ smram->cr3 = kvm_read_cr3(vcpu);
-+ smram->eflags = kvm_get_rflags(vcpu);
-+ smram->eip = kvm_rip_read(vcpu);
-
- for (i = 0; i < 8; i++)
-- put_smstate(u32, buf, 0x7fd0 + i * 4, kvm_register_read_raw(vcpu, i));
-+ smram->gprs[i] = kvm_register_read_raw(vcpu, i);
-
- kvm_get_dr(vcpu, 6, &val);
-- put_smstate(u32, buf, 0x7fcc, (u32)val);
-+ smram->dr6 = (u32)val;
- kvm_get_dr(vcpu, 7, &val);
-- put_smstate(u32, buf, 0x7fc8, (u32)val);
-+ smram->dr7 = (u32)val;
-
-- kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
-- put_smstate(u32, buf, 0x7fc4, seg.selector);
-- put_smstate(u32, buf, 0x7f64, seg.base);
-- put_smstate(u32, buf, 0x7f60, seg.limit);
-- put_smstate(u32, buf, 0x7f5c, enter_smm_get_segment_flags(&seg));
--
-- kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
-- put_smstate(u32, buf, 0x7fc0, seg.selector);
-- put_smstate(u32, buf, 0x7f80, seg.base);
-- put_smstate(u32, buf, 0x7f7c, seg.limit);
-- put_smstate(u32, buf, 0x7f78, enter_smm_get_segment_flags(&seg));
-+ enter_smm_save_seg_32(vcpu, &smram->tr, &smram->tr_sel, VCPU_SREG_TR);
-+ enter_smm_save_seg_32(vcpu, &smram->ldtr, &smram->ldtr_sel, VCPU_SREG_LDTR);
-
- static_call(kvm_x86_get_gdt)(vcpu, &dt);
-- put_smstate(u32, buf, 0x7f74, dt.address);
-- put_smstate(u32, buf, 0x7f70, dt.size);
-+ smram->gdtr.base = dt.address;
-+ smram->gdtr.limit = dt.size;
-
- static_call(kvm_x86_get_idt)(vcpu, &dt);
-- put_smstate(u32, buf, 0x7f58, dt.address);
-- put_smstate(u32, buf, 0x7f54, dt.size);
-+ smram->idtr.base = dt.address;
-+ smram->idtr.limit = dt.size;
-
-- for (i = 0; i < 6; i++)
-- enter_smm_save_seg_32(vcpu, buf, i);
-+ enter_smm_save_seg_32(vcpu, &smram->es, &smram->es_sel, VCPU_SREG_ES);
-+ enter_smm_save_seg_32(vcpu, &smram->cs, &smram->cs_sel, VCPU_SREG_CS);
-+ enter_smm_save_seg_32(vcpu, &smram->ss, &smram->ss_sel, VCPU_SREG_SS);
-
-- put_smstate(u32, buf, 0x7f14, kvm_read_cr4(vcpu));
-+ enter_smm_save_seg_32(vcpu, &smram->ds, &smram->ds_sel, VCPU_SREG_DS);
-+ enter_smm_save_seg_32(vcpu, &smram->fs, &smram->fs_sel, VCPU_SREG_FS);
-+ enter_smm_save_seg_32(vcpu, &smram->gs, &smram->gs_sel, VCPU_SREG_GS);
-
-- /* revision id */
-- put_smstate(u32, buf, 0x7efc, 0x00020000);
-- put_smstate(u32, buf, 0x7ef8, vcpu->arch.smbase);
-+ smram->cr4 = kvm_read_cr4(vcpu);
-+ smram->smm_revision = 0x00020000;
-+ smram->smbase = vcpu->arch.smbase;
- }
-
- #ifdef CONFIG_X86_64
-@@ -9179,7 +9168,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
- enter_smm_save_state_64(vcpu, (char *)&smram);
- else
- #endif
-- enter_smm_save_state_32(vcpu, (char *)&smram);
-+ enter_smm_save_state_32(vcpu, &smram.smram32);
-
- /*
- * Give enter_smm() a chance to make ISA-specific changes to the vCPU
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Maxim Levitsky <mlevitsk@redhat.com>
-Date: Wed, 3 Aug 2022 18:50:08 +0300
-Subject: [PATCH] KVM: x86: emulator/smm: use smram struct for 64 bit smram
- load/restore
-
-Use kvm_smram_state_64 struct to save/restore the 64 bit SMM state
-(used when X86_FEATURE_LM is present in the guest CPUID,
-regardless of 32-bitness of the guest).
-
-Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/emulate.c | 88 ++++++++++++++----------------------------
- arch/x86/kvm/x86.c | 75 ++++++++++++++++-------------------
- 2 files changed, 62 insertions(+), 101 deletions(-)
-
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index ad5d2ab9ab84..4eb35a0a33a5 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -2358,24 +2358,16 @@ static void rsm_load_seg_32(struct x86_emulate_ctxt *ctxt,
- }
-
- #ifdef CONFIG_X86_64
--static int rsm_load_seg_64(struct x86_emulate_ctxt *ctxt, const char *smstate,
-- int n)
-+static void rsm_load_seg_64(struct x86_emulate_ctxt *ctxt,
-+ const struct kvm_smm_seg_state_64 *state,
-+ int n)
- {
- struct desc_struct desc;
-- int offset;
-- u16 selector;
-- u32 base3;
--
-- offset = 0x7e00 + n * 16;
--
-- selector = GET_SMSTATE(u16, smstate, offset);
-- rsm_set_desc_flags(&desc, GET_SMSTATE(u16, smstate, offset + 2) << 8);
-- set_desc_limit(&desc, GET_SMSTATE(u32, smstate, offset + 4));
-- set_desc_base(&desc, GET_SMSTATE(u32, smstate, offset + 8));
-- base3 = GET_SMSTATE(u32, smstate, offset + 12);
-
-- ctxt->ops->set_segment(ctxt, selector, &desc, base3, n);
-- return X86EMUL_CONTINUE;
-+ rsm_set_desc_flags(&desc, state->attributes << 8);
-+ set_desc_limit(&desc, state->limit);
-+ set_desc_base(&desc, (u32)state->base);
-+ ctxt->ops->set_segment(ctxt, state->selector, &desc, state->base >> 32, n);
- }
- #endif
-
-@@ -2469,71 +2461,49 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
-
- #ifdef CONFIG_X86_64
- static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
-- const char *smstate)
-+ const struct kvm_smram_state_64 *smstate)
- {
-- struct desc_struct desc;
- struct desc_ptr dt;
-- u64 val, cr0, cr3, cr4;
-- u32 base3;
-- u16 selector;
- int i, r;
-
- for (i = 0; i < 16; i++)
-- *reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);
-+ *reg_write(ctxt, i) = smstate->gprs[15 - i];
-
-- ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
-- ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7f70) | X86_EFLAGS_FIXED;
-+ ctxt->_eip = smstate->rip;
-+ ctxt->eflags = smstate->rflags | X86_EFLAGS_FIXED;
-
-- val = GET_SMSTATE(u64, smstate, 0x7f68);
--
-- if (ctxt->ops->set_dr(ctxt, 6, val))
-+ if (ctxt->ops->set_dr(ctxt, 6, smstate->dr6))
- return X86EMUL_UNHANDLEABLE;
--
-- val = GET_SMSTATE(u64, smstate, 0x7f60);
--
-- if (ctxt->ops->set_dr(ctxt, 7, val))
-+ if (ctxt->ops->set_dr(ctxt, 7, smstate->dr7))
- return X86EMUL_UNHANDLEABLE;
-
-- cr0 = GET_SMSTATE(u64, smstate, 0x7f58);
-- cr3 = GET_SMSTATE(u64, smstate, 0x7f50);
-- cr4 = GET_SMSTATE(u64, smstate, 0x7f48);
-- ctxt->ops->set_smbase(ctxt, GET_SMSTATE(u32, smstate, 0x7f00));
-- val = GET_SMSTATE(u64, smstate, 0x7ed0);
-+ ctxt->ops->set_smbase(ctxt, smstate->smbase);
-
-- if (ctxt->ops->set_msr(ctxt, MSR_EFER, val & ~EFER_LMA))
-+ if (ctxt->ops->set_msr(ctxt, MSR_EFER, smstate->efer & ~EFER_LMA))
- return X86EMUL_UNHANDLEABLE;
-
-- selector = GET_SMSTATE(u32, smstate, 0x7e90);
-- rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e92) << 8);
-- set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e94));
-- set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e98));
-- base3 = GET_SMSTATE(u32, smstate, 0x7e9c);
-- ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_TR);
-+ rsm_load_seg_64(ctxt, &smstate->tr, VCPU_SREG_TR);
-
-- dt.size = GET_SMSTATE(u32, smstate, 0x7e84);
-- dt.address = GET_SMSTATE(u64, smstate, 0x7e88);
-+ dt.size = smstate->idtr.limit;
-+ dt.address = smstate->idtr.base;
- ctxt->ops->set_idt(ctxt, &dt);
-
-- selector = GET_SMSTATE(u32, smstate, 0x7e70);
-- rsm_set_desc_flags(&desc, GET_SMSTATE(u32, smstate, 0x7e72) << 8);
-- set_desc_limit(&desc, GET_SMSTATE(u32, smstate, 0x7e74));
-- set_desc_base(&desc, GET_SMSTATE(u32, smstate, 0x7e78));
-- base3 = GET_SMSTATE(u32, smstate, 0x7e7c);
-- ctxt->ops->set_segment(ctxt, selector, &desc, base3, VCPU_SREG_LDTR);
-+ rsm_load_seg_64(ctxt, &smstate->ldtr, VCPU_SREG_LDTR);
-
-- dt.size = GET_SMSTATE(u32, smstate, 0x7e64);
-- dt.address = GET_SMSTATE(u64, smstate, 0x7e68);
-+ dt.size = smstate->gdtr.limit;
-+ dt.address = smstate->gdtr.base;
- ctxt->ops->set_gdt(ctxt, &dt);
-
-- r = rsm_enter_protected_mode(ctxt, cr0, cr3, cr4);
-+ r = rsm_enter_protected_mode(ctxt, smstate->cr0, smstate->cr3, smstate->cr4);
- if (r != X86EMUL_CONTINUE)
- return r;
-
-- for (i = 0; i < 6; i++) {
-- r = rsm_load_seg_64(ctxt, smstate, i);
-- if (r != X86EMUL_CONTINUE)
-- return r;
-- }
-+ rsm_load_seg_64(ctxt, &smstate->es, VCPU_SREG_ES);
-+ rsm_load_seg_64(ctxt, &smstate->cs, VCPU_SREG_CS);
-+ rsm_load_seg_64(ctxt, &smstate->ss, VCPU_SREG_SS);
-+ rsm_load_seg_64(ctxt, &smstate->ds, VCPU_SREG_DS);
-+ rsm_load_seg_64(ctxt, &smstate->fs, VCPU_SREG_FS);
-+ rsm_load_seg_64(ctxt, &smstate->gs, VCPU_SREG_GS);
-
- return X86EMUL_CONTINUE;
- }
-@@ -2608,7 +2578,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
-
- #ifdef CONFIG_X86_64
- if (emulator_has_longmode(ctxt))
-- ret = rsm_load_state_64(ctxt, (const char *)&smram);
-+ ret = rsm_load_state_64(ctxt, &smram.smram64);
- else
- #endif
- ret = rsm_load_state_32(ctxt, &smram.smram32);
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 11e62b1f1764..5c4be3873c0d 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -9040,20 +9040,17 @@ static void enter_smm_save_seg_32(struct kvm_vcpu *vcpu,
- }
-
- #ifdef CONFIG_X86_64
--static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu, char *buf, int n)
-+static void enter_smm_save_seg_64(struct kvm_vcpu *vcpu,
-+ struct kvm_smm_seg_state_64 *state,
-+ int n)
- {
- struct kvm_segment seg;
-- int offset;
-- u16 flags;
-
- kvm_get_segment(vcpu, &seg, n);
-- offset = 0x7e00 + n * 16;
--
-- flags = enter_smm_get_segment_flags(&seg) >> 8;
-- put_smstate(u16, buf, offset, seg.selector);
-- put_smstate(u16, buf, offset + 2, flags);
-- put_smstate(u32, buf, offset + 4, seg.limit);
-- put_smstate(u64, buf, offset + 8, seg.base);
-+ state->selector = seg.selector;
-+ state->attributes = enter_smm_get_segment_flags(&seg) >> 8;
-+ state->limit = seg.limit;
-+ state->base = seg.base;
- }
- #endif
-
-@@ -9101,57 +9098,51 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, struct kvm_smram_stat
- }
-
- #ifdef CONFIG_X86_64
--static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, char *buf)
-+static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, struct kvm_smram_state_64 *smram)
- {
- struct desc_ptr dt;
-- struct kvm_segment seg;
- unsigned long val;
- int i;
-
- for (i = 0; i < 16; i++)
-- put_smstate(u64, buf, 0x7ff8 - i * 8, kvm_register_read_raw(vcpu, i));
-+ smram->gprs[15 - i] = kvm_register_read_raw(vcpu, i);
-+
-+ smram->rip = kvm_rip_read(vcpu);
-+ smram->rflags = kvm_get_rflags(vcpu);
-
-- put_smstate(u64, buf, 0x7f78, kvm_rip_read(vcpu));
-- put_smstate(u32, buf, 0x7f70, kvm_get_rflags(vcpu));
-
- kvm_get_dr(vcpu, 6, &val);
-- put_smstate(u64, buf, 0x7f68, val);
-+ smram->dr6 = val;
- kvm_get_dr(vcpu, 7, &val);
-- put_smstate(u64, buf, 0x7f60, val);
--
-- put_smstate(u64, buf, 0x7f58, kvm_read_cr0(vcpu));
-- put_smstate(u64, buf, 0x7f50, kvm_read_cr3(vcpu));
-- put_smstate(u64, buf, 0x7f48, kvm_read_cr4(vcpu));
-+ smram->dr7 = val;
-
-- put_smstate(u32, buf, 0x7f00, vcpu->arch.smbase);
-+ smram->cr0 = kvm_read_cr0(vcpu);
-+ smram->cr3 = kvm_read_cr3(vcpu);
-+ smram->cr4 = kvm_read_cr4(vcpu);
-
-- /* revision id */
-- put_smstate(u32, buf, 0x7efc, 0x00020064);
-+ smram->smbase = vcpu->arch.smbase;
-+ smram->smm_revison = 0x00020064;
-
-- put_smstate(u64, buf, 0x7ed0, vcpu->arch.efer);
-+ smram->efer = vcpu->arch.efer;
-
-- kvm_get_segment(vcpu, &seg, VCPU_SREG_TR);
-- put_smstate(u16, buf, 0x7e90, seg.selector);
-- put_smstate(u16, buf, 0x7e92, enter_smm_get_segment_flags(&seg) >> 8);
-- put_smstate(u32, buf, 0x7e94, seg.limit);
-- put_smstate(u64, buf, 0x7e98, seg.base);
-+ enter_smm_save_seg_64(vcpu, &smram->tr, VCPU_SREG_TR);
-
- static_call(kvm_x86_get_idt)(vcpu, &dt);
-- put_smstate(u32, buf, 0x7e84, dt.size);
-- put_smstate(u64, buf, 0x7e88, dt.address);
-+ smram->idtr.limit = dt.size;
-+ smram->idtr.base = dt.address;
-
-- kvm_get_segment(vcpu, &seg, VCPU_SREG_LDTR);
-- put_smstate(u16, buf, 0x7e70, seg.selector);
-- put_smstate(u16, buf, 0x7e72, enter_smm_get_segment_flags(&seg) >> 8);
-- put_smstate(u32, buf, 0x7e74, seg.limit);
-- put_smstate(u64, buf, 0x7e78, seg.base);
-+ enter_smm_save_seg_64(vcpu, &smram->ldtr, VCPU_SREG_LDTR);
-
- static_call(kvm_x86_get_gdt)(vcpu, &dt);
-- put_smstate(u32, buf, 0x7e64, dt.size);
-- put_smstate(u64, buf, 0x7e68, dt.address);
-+ smram->gdtr.limit = dt.size;
-+ smram->gdtr.base = dt.address;
-
-- for (i = 0; i < 6; i++)
-- enter_smm_save_seg_64(vcpu, buf, i);
-+ enter_smm_save_seg_64(vcpu, &smram->es, VCPU_SREG_ES);
-+ enter_smm_save_seg_64(vcpu, &smram->cs, VCPU_SREG_CS);
-+ enter_smm_save_seg_64(vcpu, &smram->ss, VCPU_SREG_SS);
-+ enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS);
-+ enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS);
-+ enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);
- }
- #endif
-
-@@ -9165,7 +9156,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
- memset(smram.bytes, 0, sizeof(smram.bytes));
- #ifdef CONFIG_X86_64
- if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
-- enter_smm_save_state_64(vcpu, (char *)&smram);
-+ enter_smm_save_state_64(vcpu, &smram.smram64);
- else
- #endif
- enter_smm_save_state_32(vcpu, &smram.smram32);
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Maxim Levitsky <mlevitsk@redhat.com>
-Date: Wed, 3 Aug 2022 18:50:09 +0300
-Subject: [PATCH] KVM: x86: SVM: use smram structs
-
-This removes the last user of put_smstate/GET_SMSTATE so
-remove these functions as well.
-
-Also add a sanity check that we don't attempt to enter the SMM
-on non long mode capable guest CPU with a running nested guest.
-
-Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/include/asm/kvm_host.h | 6 ------
- arch/x86/kvm/svm/svm.c | 21 ++++++---------------
- 2 files changed, 6 insertions(+), 21 deletions(-)
-
-diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
-index fb48dd8773e1..0362d3fba42a 100644
---- a/arch/x86/include/asm/kvm_host.h
-+++ b/arch/x86/include/asm/kvm_host.h
-@@ -1932,12 +1932,6 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
- #endif
- }
-
--#define put_smstate(type, buf, offset, val) \
-- *(type *)((buf) + (offset) - 0x7e00) = val
--
--#define GET_SMSTATE(type, buf, offset) \
-- (*(type *)((buf) + (offset) - 0x7e00))
--
- int kvm_cpu_dirty_log_size(void);
-
- int alloc_all_memslots_rmaps(struct kvm *kvm);
-diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
-index d903120811b9..742497b1d4c3 100644
---- a/arch/x86/kvm/svm/svm.c
-+++ b/arch/x86/kvm/svm/svm.c
-@@ -4308,15 +4308,11 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
- struct kvm_host_map map_save;
- int ret;
-
-- char *smstate = (char *)smram;
--
- if (!is_guest_mode(vcpu))
- return 0;
-
-- /* FED8h - SVM Guest */
-- put_smstate(u64, smstate, 0x7ed8, 1);
-- /* FEE0h - SVM Guest VMCB Physical Address */
-- put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
-+ smram->smram64.svm_guest_flag = 1;
-+ smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa;
-
- svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
- svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-@@ -4355,28 +4351,23 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
- {
- struct vcpu_svm *svm = to_svm(vcpu);
- struct kvm_host_map map, map_save;
-- u64 saved_efer, vmcb12_gpa;
- struct vmcb *vmcb12;
- int ret;
-
-- const char *smstate = (const char *)smram;
--
- if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
- return 0;
-
- /* Non-zero if SMI arrived while vCPU was in guest mode. */
-- if (!GET_SMSTATE(u64, smstate, 0x7ed8))
-+ if (!smram->smram64.svm_guest_flag)
- return 0;
-
- if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
- return 1;
-
-- saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
-- if (!(saved_efer & EFER_SVME))
-+ if (!(smram->smram64.efer & EFER_SVME))
- return 1;
-
-- vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
-- if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
-+ if (kvm_vcpu_map(vcpu, gpa_to_gfn(smram->smram64.svm_guest_vmcb_gpa), &map) == -EINVAL)
- return 1;
-
- ret = 1;
-@@ -4401,7 +4392,7 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
-
- vmcb12 = map.hva;
- nested_load_control_from_vmcb12(svm, &vmcb12->control);
-- ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
-+ ret = enter_svm_guest_mode(vcpu, smram->smram64.svm_guest_vmcb_gpa, vmcb12, false);
-
- if (ret)
- goto unmap_save;
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Maxim Levitsky <mlevitsk@redhat.com>
-Date: Wed, 3 Aug 2022 18:50:10 +0300
-Subject: [PATCH] KVM: x86: SVM: don't save SVM state to SMRAM when VM is not
- long mode capable
-
-When the guest CPUID doesn't have support for long mode, 32 bit SMRAM
-layout is used and it has no support for preserving EFER and/or SVM
-state.
-
-Note that this isn't relevant to running 32 bit guests on VM which is
-long mode capable - such VM can still run 32 bit guests in compatibility
-mode.
-
-Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/svm/svm.c | 9 +++++++++
- 1 file changed, 9 insertions(+)
-
-diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
-index 742497b1d4c3..938b9b24f0ee 100644
---- a/arch/x86/kvm/svm/svm.c
-+++ b/arch/x86/kvm/svm/svm.c
-@@ -4311,6 +4311,15 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, union kvm_smram *smram)
- if (!is_guest_mode(vcpu))
- return 0;
-
-+ /*
-+ * 32 bit SMRAM format doesn't preserve EFER and SVM state.
-+ * SVM should not be enabled by the userspace without marking
-+ * the CPU as at least long mode capable.
-+ */
-+
-+ if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
-+ return 1;
-+
- smram->smram64.svm_guest_flag = 1;
- smram->smram64.svm_guest_vmcb_gpa = svm->nested.vmcb12_gpa;
-
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Maxim Levitsky <mlevitsk@redhat.com>
-Date: Wed, 3 Aug 2022 18:50:11 +0300
-Subject: [PATCH] KVM: x86: emulator/smm: preserve interrupt shadow in SMRAM
-
-When #SMI is asserted, the CPU can be in interrupt shadow
-due to sti or mov ss.
-
-It is not mandatory in Intel/AMD prm to have the #SMI
-blocked during the shadow, and on top of
-that, since neither SVM nor VMX has true support for SMI
-window, waiting for one instruction would mean single stepping
-the guest.
-
-Instead, allow #SMI in this case, but both reset the interrupt
-window and stash its value in SMRAM to restore it on exit
-from SMM.
-
-This fixes rare failures seen mostly on windows guests on VMX,
-when #SMI falls on the sti instruction which mainfest in
-VM entry failure due to EFLAGS.IF not being set, but STI interrupt
-window still being set in the VMCS.
-
-Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/emulate.c | 17 ++++++++++++++---
- arch/x86/kvm/kvm_emulate.h | 10 ++++++----
- arch/x86/kvm/x86.c | 12 ++++++++++++
- 3 files changed, 32 insertions(+), 7 deletions(-)
-
-diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
-index 4eb35a0a33a5..3e6ea2951e2b 100644
---- a/arch/x86/kvm/emulate.c
-+++ b/arch/x86/kvm/emulate.c
-@@ -2420,7 +2420,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
- const struct kvm_smram_state_32 *smstate)
- {
- struct desc_ptr dt;
-- int i;
-+ int i, r;
-
- ctxt->eflags = smstate->eflags | X86_EFLAGS_FIXED;
- ctxt->_eip = smstate->eip;
-@@ -2455,8 +2455,16 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
-
- ctxt->ops->set_smbase(ctxt, smstate->smbase);
-
-- return rsm_enter_protected_mode(ctxt, smstate->cr0,
-- smstate->cr3, smstate->cr4);
-+ r = rsm_enter_protected_mode(ctxt, smstate->cr0,
-+ smstate->cr3, smstate->cr4);
-+
-+ if (r != X86EMUL_CONTINUE)
-+ return r;
-+
-+ ctxt->ops->set_int_shadow(ctxt, 0);
-+ ctxt->interruptibility = (u8)smstate->int_shadow;
-+
-+ return X86EMUL_CONTINUE;
- }
-
- #ifdef CONFIG_X86_64
-@@ -2505,6 +2513,9 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
- rsm_load_seg_64(ctxt, &smstate->fs, VCPU_SREG_FS);
- rsm_load_seg_64(ctxt, &smstate->gs, VCPU_SREG_GS);
-
-+ ctxt->ops->set_int_shadow(ctxt, 0);
-+ ctxt->interruptibility = (u8)smstate->int_shadow;
-+
- return X86EMUL_CONTINUE;
- }
- #endif
-diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h
-index 3b37b3e17379..a64c190abf28 100644
---- a/arch/x86/kvm/kvm_emulate.h
-+++ b/arch/x86/kvm/kvm_emulate.h
-@@ -231,6 +231,7 @@ struct x86_emulate_ops {
- bool (*guest_has_rdpid)(struct x86_emulate_ctxt *ctxt);
-
- void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
-+ void (*set_int_shadow)(struct x86_emulate_ctxt *ctxt, u8 shadow);
-
- unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
- void (*exiting_smm)(struct x86_emulate_ctxt *ctxt);
-@@ -497,7 +498,8 @@ struct kvm_smram_state_32 {
- u32 reserved1[62];
- u32 smbase;
- u32 smm_revision;
-- u32 reserved2[5];
-+ u32 reserved2[4];
-+ u32 int_shadow; /* KVM extension */
- u32 cr4; /* CR4 is not present in Intel/AMD SMRAM image */
- u32 reserved3[5];
-
-@@ -545,6 +547,7 @@ static inline void __check_smram32_offsets(void)
- __CHECK_SMRAM32_OFFSET(smbase, 0xFEF8);
- __CHECK_SMRAM32_OFFSET(smm_revision, 0xFEFC);
- __CHECK_SMRAM32_OFFSET(reserved2, 0xFF00);
-+ __CHECK_SMRAM32_OFFSET(int_shadow, 0xFF10);
- __CHECK_SMRAM32_OFFSET(cr4, 0xFF14);
- __CHECK_SMRAM32_OFFSET(reserved3, 0xFF18);
- __CHECK_SMRAM32_OFFSET(ds, 0xFF2C);
-@@ -604,7 +607,7 @@ struct kvm_smram_state_64 {
- u64 io_restart_rsi;
- u64 io_restart_rdi;
- u32 io_restart_dword;
-- u32 reserved1;
-+ u32 int_shadow;
- u8 io_inst_restart;
- u8 auto_hlt_restart;
- u8 reserved2[6];
-@@ -642,7 +645,6 @@ struct kvm_smram_state_64 {
- u64 gprs[16]; /* GPRS in a reversed "natural" X86 order (R15/R14/../RCX/RAX.) */
- };
-
--
- static inline void __check_smram64_offsets(void)
- {
- #define __CHECK_SMRAM64_OFFSET(field, offset) \
-@@ -663,7 +665,7 @@ static inline void __check_smram64_offsets(void)
- __CHECK_SMRAM64_OFFSET(io_restart_rsi, 0xFEB0);
- __CHECK_SMRAM64_OFFSET(io_restart_rdi, 0xFEB8);
- __CHECK_SMRAM64_OFFSET(io_restart_dword, 0xFEC0);
-- __CHECK_SMRAM64_OFFSET(reserved1, 0xFEC4);
-+ __CHECK_SMRAM64_OFFSET(int_shadow, 0xFEC4);
- __CHECK_SMRAM64_OFFSET(io_inst_restart, 0xFEC8);
- __CHECK_SMRAM64_OFFSET(auto_hlt_restart, 0xFEC9);
- __CHECK_SMRAM64_OFFSET(reserved2, 0xFECA);
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 5c4be3873c0d..461c9d815d6c 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -7299,6 +7299,11 @@ static void emulator_set_nmi_mask(struct x86_emulate_ctxt *ctxt, bool masked)
- static_call(kvm_x86_set_nmi_mask)(emul_to_vcpu(ctxt), masked);
- }
-
-+static void emulator_set_int_shadow(struct x86_emulate_ctxt *ctxt, u8 shadow)
-+{
-+ static_call(kvm_x86_set_interrupt_shadow)(emul_to_vcpu(ctxt), shadow);
-+}
-+
- static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
- {
- return emul_to_vcpu(ctxt)->arch.hflags;
-@@ -7368,6 +7373,7 @@ static const struct x86_emulate_ops emulate_ops = {
- .guest_has_fxsr = emulator_guest_has_fxsr,
- .guest_has_rdpid = emulator_guest_has_rdpid,
- .set_nmi_mask = emulator_set_nmi_mask,
-+ .set_int_shadow = emulator_set_int_shadow,
- .get_hflags = emulator_get_hflags,
- .exiting_smm = emulator_exiting_smm,
- .leave_smm = emulator_leave_smm,
-@@ -9095,6 +9101,8 @@ static void enter_smm_save_state_32(struct kvm_vcpu *vcpu, struct kvm_smram_stat
- smram->cr4 = kvm_read_cr4(vcpu);
- smram->smm_revision = 0x00020000;
- smram->smbase = vcpu->arch.smbase;
-+
-+ smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
- }
-
- #ifdef CONFIG_X86_64
-@@ -9143,6 +9151,8 @@ static void enter_smm_save_state_64(struct kvm_vcpu *vcpu, struct kvm_smram_stat
- enter_smm_save_seg_64(vcpu, &smram->ds, VCPU_SREG_DS);
- enter_smm_save_seg_64(vcpu, &smram->fs, VCPU_SREG_FS);
- enter_smm_save_seg_64(vcpu, &smram->gs, VCPU_SREG_GS);
-+
-+ smram->int_shadow = static_call(kvm_x86_get_interrupt_shadow)(vcpu);
- }
- #endif
-
-@@ -9179,6 +9189,8 @@ static void enter_smm(struct kvm_vcpu *vcpu)
- kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
- kvm_rip_write(vcpu, 0x8000);
-
-+ static_call(kvm_x86_set_interrupt_shadow)(vcpu, 0);
-+
- cr0 = vcpu->arch.cr0 & ~(X86_CR0_PE | X86_CR0_EM | X86_CR0_TS | X86_CR0_PG);
- static_call(kvm_x86_set_cr0)(vcpu, cr0);
- vcpu->arch.cr0 = cr0;
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Maher Sanalla <msanalla@nvidia.com>
-Date: Sun, 24 Jul 2022 11:28:21 +0300
-Subject: [PATCH] net/mlx5: Adjust log_max_qp to be 18 at most
-
-[ Upstream commit a6e9085d791f8306084fd5bc44dd3fdd4e1ac27b ]
-
-The cited commit limited log_max_qp to be 17 due to FW capabilities.
-Recently, it turned out that there are old FW versions that supported
-more than 17, so the cited commit caused a degradation.
-
-Thus, set the maximum log_max_qp back to 18 as it was before the
-cited commit.
-
-Fixes: 7f839965b2d7 ("net/mlx5: Update log_max_qp value to be 17 at most")
-Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
-Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
-Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- drivers/net/ethernet/mellanox/mlx5/core/main.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
-index 4ed740994279..5a6606c843ed 100644
---- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
-+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
-@@ -516,7 +516,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
-
- /* Check log_max_qp from HCA caps to set in current profile */
- if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
-- prof->log_max_qp = min_t(u8, 17, MLX5_CAP_GEN_MAX(dev, log_max_qp));
-+ prof->log_max_qp = min_t(u8, 18, MLX5_CAP_GEN_MAX(dev, log_max_qp));
- } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
- mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
- prof->log_max_qp,
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Paolo Bonzini <pbonzini@redhat.com>
-Date: Thu, 4 Aug 2022 15:28:32 +0200
-Subject: [PATCH] KVM: x86: revalidate steal time cache if MSR value changes
-
-commit 901d3765fa804ce42812f1d5b1f3de2dfbb26723 upstream.
-
-Commit 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time
-/ preempted status", 2021-11-11) open coded the previous call to
-kvm_map_gfn, but in doing so it dropped the comparison between the cached
-guest physical address and the one in the MSR. This cause an incorrect
-cache hit if the guest modifies the steal time address while the memslots
-remain the same. This can happen with kexec, in which case the steal
-time data is written at the address used by the old kernel instead of
-the old one.
-
-While at it, rename the variable from gfn to gpa since it is a plain
-physical address and not a right-shifted one.
-
-Reported-by: Dave Young <ruyang@redhat.com>
-Reported-by: Xiaoying Yan <yiyan@redhat.com>
-Analyzed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
-Cc: David Woodhouse <dwmw@amazon.co.uk>
-Cc: stable@vger.kernel.org
-Fixes: 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time / preempted status")
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/x86.c | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index 461c9d815d6c..b46677baf396 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -3236,6 +3236,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
- struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
- struct kvm_steal_time __user *st;
- struct kvm_memslots *slots;
-+ gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
- u64 steal;
- u32 version;
-
-@@ -3253,13 +3254,12 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
- slots = kvm_memslots(vcpu->kvm);
-
- if (unlikely(slots->generation != ghc->generation ||
-+ gpa != ghc->gpa ||
- kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
-- gfn_t gfn = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
--
- /* We rely on the fact that it fits in a single page. */
- BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);
-
-- if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gfn, sizeof(*st)) ||
-+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) ||
- kvm_is_error_hva(ghc->hva) || !ghc->memslot)
- return;
- }
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Paolo Bonzini <pbonzini@redhat.com>
-Date: Thu, 4 Aug 2022 15:28:32 +0200
-Subject: [PATCH] KVM: x86: do not report preemption if the steal time cache is
- stale
-
-commit c3c28d24d910a746b02f496d190e0e8c6560224b upstream.
-
-Commit 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time
-/ preempted status", 2021-11-11) open coded the previous call to
-kvm_map_gfn, but in doing so it dropped the comparison between the cached
-guest physical address and the one in the MSR. This cause an incorrect
-cache hit if the guest modifies the steal time address while the memslots
-remain the same. This can happen with kexec, in which case the preempted
-bit is written at the address used by the old kernel instead of
-the old one.
-
-Cc: David Woodhouse <dwmw@amazon.co.uk>
-Cc: stable@vger.kernel.org
-Fixes: 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time / preempted status")
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- arch/x86/kvm/x86.c | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
-index b46677baf396..48aaff0ce3b9 100644
---- a/arch/x86/kvm/x86.c
-+++ b/arch/x86/kvm/x86.c
-@@ -4370,6 +4370,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
- struct kvm_steal_time __user *st;
- struct kvm_memslots *slots;
- static const u8 preempted = KVM_VCPU_PREEMPTED;
-+ gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
-
- if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
- return;
-@@ -4384,6 +4385,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
- slots = kvm_memslots(vcpu->kvm);
-
- if (unlikely(slots->generation != ghc->generation ||
-+ gpa != ghc->gpa ||
- kvm_is_error_hva(ghc->hva) || !ghc->memslot))
- return;
-
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Nilesh Javali <njavali@marvell.com>
-Date: Tue, 12 Jul 2022 22:20:36 -0700
-Subject: [PATCH] scsi: Revert "scsi: qla2xxx: Fix disk failure to rediscover"
-
-commit 5bc7b01c513a4a9b4cfe306e8d1720cfcfd3b8a3 upstream.
-
-This fixes the regression of NVMe discovery failure during driver load
-time.
-
-This reverts commit 6a45c8e137d4e2c72eecf1ac7cf64f2fdfcead99.
-
-Link: https://lore.kernel.org/r/20220713052045.10683-2-njavali@marvell.com
-Cc: stable@vger.kernel.org
-Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
-Signed-off-by: Nilesh Javali <njavali@marvell.com>
-Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- drivers/scsi/qla2xxx/qla_init.c | 5 ++---
- drivers/scsi/qla2xxx/qla_nvme.c | 5 -----
- 2 files changed, 2 insertions(+), 8 deletions(-)
-
-diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
-index af8df5a800c6..7caf573af58e 100644
---- a/drivers/scsi/qla2xxx/qla_init.c
-+++ b/drivers/scsi/qla2xxx/qla_init.c
-@@ -5749,8 +5749,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
- if (atomic_read(&fcport->state) == FCS_ONLINE)
- return;
-
-- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
--
- rport_ids.node_name = wwn_to_u64(fcport->node_name);
- rport_ids.port_name = wwn_to_u64(fcport->port_name);
- rport_ids.port_id = fcport->d_id.b.domain << 16 |
-@@ -5858,7 +5856,6 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
- qla2x00_reg_remote_port(vha, fcport);
- break;
- case MODE_TARGET:
-- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
- if (!vha->vha_tgt.qla_tgt->tgt_stop &&
- !vha->vha_tgt.qla_tgt->tgt_stopped)
- qlt_fc_port_added(vha, fcport);
-@@ -5873,6 +5870,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
- break;
- }
-
-+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
-+
- if (IS_IIDMA_CAPABLE(vha->hw) && vha->hw->flags.gpsc_supported) {
- if (fcport->id_changed) {
- fcport->id_changed = 0;
-diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
-index 42b29f4fd937..e63272487788 100644
---- a/drivers/scsi/qla2xxx/qla_nvme.c
-+++ b/drivers/scsi/qla2xxx/qla_nvme.c
-@@ -35,11 +35,6 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
- (fcport->nvme_flag & NVME_FLAG_REGISTERED))
- return 0;
-
-- if (atomic_read(&fcport->state) == FCS_ONLINE)
-- return 0;
--
-- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
--
- fcport->nvme_flag &= ~NVME_FLAG_RESETTING;
-
- memset(&req, 0, sizeof(struct nvme_fc_port_info));
+++ /dev/null
-From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
-From: Andreas Gruenbacher <agruenba@redhat.com>
-Date: Thu, 17 Mar 2022 14:47:24 +0100
-Subject: [PATCH] gfs2: Fix gfs2_file_buffered_write endless loop workaround
-
-[ Upstream commit 46f3e0421ccb5474b5c006b0089b9dfd42534bb6 ]
-
-Since commit 554c577cee95b, gfs2_file_buffered_write() can accidentally
-return a truncated iov_iter, which might confuse callers. Fix that.
-
-Fixes: 554c577cee95b ("gfs2: Prevent endless loops in gfs2_file_buffered_write")
-Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
-Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
----
- fs/gfs2/file.c | 1 +
- 1 file changed, 1 insertion(+)
-
-diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
-index 60390f9dc31f..e93185d804e0 100644
---- a/fs/gfs2/file.c
-+++ b/fs/gfs2/file.c
-@@ -1086,6 +1086,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
- gfs2_holder_uninit(gh);
- if (statfs_gh)
- kfree(statfs_gh);
-+ from->count = orig_count - read;
- return read ? read : ret;
- }
-
-Subproject commit ab2e786e8b1e6690c98424277abe512970850bd6
+Subproject commit 4776867185fb9aaa12a203a3e8f458b45822b5aa