git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
x86/process: Allow runtime control of Speculative Store Bypass
author: Thomas Gleixner <tglx@linutronix.de>
Sun, 29 Apr 2018 13:21:42 +0000 (15:21 +0200)
committer: Stefan Bader <stefan.bader@canonical.com>
Mon, 14 May 2018 10:09:54 +0000 (12:09 +0200)
The Speculative Store Bypass vulnerability can be mitigated with the
Reduced Data Speculation (RDS) feature. To allow finer grained control of
this eventually expensive mitigation a per task mitigation control is
required.

Add a new TIF_RDS flag and put it into the group of TIF flags which are
evaluated for mismatch in switch_to(). If these bits differ in the previous
and the next task, then the slow path function __switch_to_xtra() is
invoked. Implement the TIF_RDS dependent mitigation control in the slow
path.

If the prctl for controlling Speculative Store Bypass is disabled or no
task uses the prctl then there is no overhead in the switch_to() fast
path.

Update the KVM-related speculation control functions to take TIF_RDS into
account as well.

Based on a patch from Tim Chen. Completely rewritten.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
CVE-2018-3639 (x86)

Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
arch/x86/include/asm/msr-index.h
arch/x86/include/asm/spec-ctrl.h
arch/x86/include/asm/thread_info.h
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/process.c

index ceeeadb38f925148d2a080ac6bb4764873550c7f..7500aa7a603ea146c938a0595a5b4f9caed9150b 100644 (file)
@@ -41,7 +41,8 @@
 #define MSR_IA32_SPEC_CTRL             0x00000048      /* Speculation Control */
 #define SPEC_CTRL_IBRS                 (1 << 0)        /* Indirect Branch Restricted Speculation */
 #define SPEC_CTRL_STIBP                        (1 << 1)        /* Single Thread Indirect Branch Predictors */
-#define SPEC_CTRL_RDS                  (1 << 2)        /* Reduced Data Speculation */
+#define SPEC_CTRL_RDS_SHIFT            2               /* Reduced Data Speculation bit */
+#define SPEC_CTRL_RDS                  (1 << SPEC_CTRL_RDS_SHIFT) /* Reduced Data Speculation */
 
 #define MSR_IA32_PRED_CMD              0x00000049      /* Prediction Command */
 #define PRED_CMD_IBPB                  (1 << 0)        /* Indirect Branch Prediction Barrier */
index 0a9d1e5adb70350a989159a395f93c87c3feb573..4f0072b6b7f1e1598bbf2c383f5de5c238228253 100644 (file)
@@ -2,6 +2,7 @@
 #ifndef _ASM_X86_SPECCTRL_H_
 #define _ASM_X86_SPECCTRL_H_
 
+#include <linux/thread_info.h>
 #include <asm/nospec-branch.h>
 
 /*
@@ -34,4 +35,20 @@ static inline void vmexit_fill_RSB(void)
 extern u64 x86_amd_ls_cfg_base;
 extern u64 x86_amd_ls_cfg_rds_mask;
 
+/* The Intel SPEC CTRL MSR base value cache */
+extern u64 x86_spec_ctrl_base;
+
+static inline u64 rds_tif_to_spec_ctrl(u64 tifn)
+{
+       BUILD_BUG_ON(TIF_RDS < SPEC_CTRL_RDS_SHIFT);
+       return (tifn & _TIF_RDS) >> (TIF_RDS - SPEC_CTRL_RDS_SHIFT);
+}
+
+static inline u64 rds_tif_to_amd_ls_cfg(u64 tifn)
+{
+       return (tifn & _TIF_RDS) ? x86_amd_ls_cfg_rds_mask : 0ULL;
+}
+
+extern void speculative_store_bypass_update(void);
+
 #endif
index edd34398e9b9f6f734f3d603d0dd8c0c3ba7779e..15cb3db26782380494e472361780003364668804 100644 (file)
@@ -90,6 +90,7 @@ struct thread_info {
 #define TIF_SIGPENDING         2       /* signal pending */
 #define TIF_NEED_RESCHED       3       /* rescheduling necessary */
 #define TIF_SINGLESTEP         4       /* reenable singlestep on user return*/
+#define TIF_RDS                        5       /* Reduced data speculation */
 #define TIF_SYSCALL_EMU                6       /* syscall emulation active */
 #define TIF_SYSCALL_AUDIT      7       /* syscall auditing active */
 #define TIF_SECCOMP            8       /* secure computing */
@@ -116,6 +117,7 @@ struct thread_info {
 #define _TIF_SIGPENDING                (1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
 #define _TIF_SINGLESTEP                (1 << TIF_SINGLESTEP)
+#define _TIF_RDS               (1 << TIF_RDS)
 #define _TIF_SYSCALL_EMU       (1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
@@ -155,7 +157,7 @@ struct thread_info {
 
 /* flags to check in __switch_to() */
 #define _TIF_WORK_CTXSW                                                        \
-       (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP)
+       (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_RDS)
 
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
index a4e7da002e958109e325059b8aea51fa5cdff04c..3c906e5e3e691203b16ae6206904b841e8556518 100644 (file)
@@ -32,7 +32,7 @@ static void __init ssb_select_mitigation(void);
  * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
  * writes to SPEC_CTRL contain whatever reserved bits have been set.
  */
-static u64 __ro_after_init x86_spec_ctrl_base;
+u64 __ro_after_init x86_spec_ctrl_base;
 
 /*
  * The vendor and possibly platform specific bits which can be modified in
@@ -139,25 +139,41 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_set);
 
 u64 x86_spec_ctrl_get_default(void)
 {
-       return x86_spec_ctrl_base;
+       u64 msrval = x86_spec_ctrl_base;
+
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+               msrval |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+       return msrval;
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
 
 void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
 {
+       u64 host = x86_spec_ctrl_base;
+
        if (!ibrs_inuse)
                return;
-       if (x86_spec_ctrl_base != guest_spec_ctrl)
+
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+               host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+
+       if (host != guest_spec_ctrl)
                wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
 
 void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
 {
+       u64 host = x86_spec_ctrl_base;
+
        if (!ibrs_inuse)
                return;
-       if (x86_spec_ctrl_base != guest_spec_ctrl)
-               wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+
+       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+               host |= rds_tif_to_spec_ctrl(current_thread_info()->flags);
+
+       if (host != guest_spec_ctrl)
+               wrmsrl(MSR_IA32_SPEC_CTRL, host);
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
 
index 3fdf5358998e3a0b57b3b1996a37317117c73a92..d4dffef4f33dae9d926cbd80480d266c2db1ddd6 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/switch_to.h>
 #include <asm/desc.h>
 #include <asm/prctl.h>
+#include <asm/spec-ctrl.h>
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -278,6 +279,24 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
        }
 }
 
+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+{
+       u64 msr;
+
+       if (static_cpu_has(X86_FEATURE_AMD_RDS)) {
+               msr = x86_amd_ls_cfg_base | rds_tif_to_amd_ls_cfg(tifn);
+               wrmsrl(MSR_AMD64_LS_CFG, msr);
+       } else {
+               msr = x86_spec_ctrl_base | rds_tif_to_spec_ctrl(tifn);
+               wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+       }
+}
+
+void speculative_store_bypass_update(void)
+{
+       __speculative_store_bypass_update(current_thread_info()->flags);
+}
+
 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
 {
@@ -309,6 +328,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 
        if ((tifp ^ tifn) & _TIF_NOCPUID)
                set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
+
+       if ((tifp ^ tifn) & _TIF_RDS)
+               __speculative_store_bypass_update(tifn);
 }
 
 /*