git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
x86/bugs: Rework spec_ctrl base and mask logic
author Thomas Gleixner <tglx@linutronix.de>
Sat, 12 May 2018 18:10:00 +0000 (20:10 +0200)
committer Stefan Bader <stefan.bader@canonical.com>
Thu, 24 May 2018 07:59:19 +0000 (09:59 +0200)
x86_spec_ctrl_mask is intended to mask out bits from an MSR_SPEC_CTRL value
which are not to be modified. However, the implementation is not really used
and the bitmask was inverted to make a check easier; that check was removed in
"x86/bugs: Remove x86_spec_ctrl_set()"

Aside from that, the mask is missing the STIBP bit when the platform supports
it, so if the mask were used in x86_virt_spec_ctrl() it would prevent a guest
from setting STIBP.

Add the STIBP bit if supported and use the mask in x86_virt_spec_ctrl() to
sanitize the value supplied by the guest.
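
To make the sanitizing arithmetic concrete, here is a minimal stand-alone
sketch (illustrative only, not kernel code; the constants use the
architectural MSR_IA32_SPEC_CTRL bit positions IBRS = bit 0, STIBP = bit 1,
SSBD = bit 2, and sanitize_spec_ctrl() is a made-up helper mirroring the
logic added to x86_virt_spec_ctrl()):

    #include <stdint.h>
    #include <stdio.h>

    #define SPEC_CTRL_IBRS   (1ULL << 0)
    #define SPEC_CTRL_STIBP  (1ULL << 1)
    #define SPEC_CTRL_SSBD   (1ULL << 2)

    /* Keep the host's non-modifiable bits, take the modifiable bits from
     * the guest-supplied value. */
    static uint64_t sanitize_spec_ctrl(uint64_t host, uint64_t guest,
                                       uint64_t mask)
    {
            return (host & ~mask) | (guest & mask);
    }

    int main(void)
    {
            uint64_t mask  = SPEC_CTRL_IBRS | SPEC_CTRL_SSBD; /* no STIBP */
            uint64_t host  = SPEC_CTRL_IBRS;
            uint64_t guest = SPEC_CTRL_SSBD | SPEC_CTRL_STIBP;

            /* Prints 0x4: SSBD passes through, STIBP is stripped because
             * it is not in the mask - the situation described above. */
            printf("guestval = %#llx\n",
                   (unsigned long long)sanitize_spec_ctrl(host, guest, mask));
            return 0;
    }

With STIBP added to the mask, as this patch does when the CPU supports it,
the same guest value would keep its STIBP bit.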

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
CVE-2018-3639 (x86)

(cherry-picked from commit be6fcb5478e95bb1c91f489121238deb3abca46a)
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
arch/x86/kernel/cpu/bugs.c

index 2c7206fdc09e4f3200405429d635066624fcfe28..050af4672ca008aa84c61f23b162b98b18d4b52e 100644
@@ -41,7 +41,7 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
  * The vendor and possibly platform specific bits which can be modified in
  * x86_spec_ctrl_base.
  */
-static u64 __ro_after_init x86_spec_ctrl_mask = ~SPEC_CTRL_IBRS;
+static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;
 
 /*
  * AMD specific MSR info for Speculative Store Bypass control.
@@ -135,18 +135,26 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
 void
 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
 {
+       u64 msrval, guestval, hostval = x86_spec_ctrl_base;
        struct thread_info *ti = current_thread_info();
-       u64 msr, host = x86_spec_ctrl_base;
 
        /* Is MSR_SPEC_CTRL implemented ? */
        if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
+               /*
+                * Restrict guest_spec_ctrl to supported values. Clear the
+                * modifiable bits in the host base value and or the
+                * modifiable bits from the guest value.
+                */
+               guestval = hostval & ~x86_spec_ctrl_mask;
+               guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
+
                /* SSBD controlled in MSR_SPEC_CTRL */
                if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
-                       host |= ssbd_tif_to_spec_ctrl(ti->flags);
+                       hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
 
-               if (host != guest_spec_ctrl) {
-                       msr = setguest ? guest_spec_ctrl : host;
-                       wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+               if (hostval != guestval) {
+                       msrval = setguest ? guestval : hostval;
+                       wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
                }
        }
 }
@@ -452,7 +460,7 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
                switch (boot_cpu_data.x86_vendor) {
                case X86_VENDOR_INTEL:
                        x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
-                       x86_spec_ctrl_mask &= ~SPEC_CTRL_SSBD;
+                       x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
                        wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
                        break;
                case X86_VENDOR_AMD: