x86/cpufeatures: Disentangle SSBD enumeration
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3ca198080ea9294486ae9a1121e7815dfba7cb19..d8abf4146c3298588b0ec3a3f22bcaec9bf31bec 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -38,6 +38,7 @@
 #include <asm/switch_to.h>
 #include <asm/desc.h>
 #include <asm/prctl.h>
+#include <asm/spec-ctrl.h>
 
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
@@ -46,9 +47,25 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
        .x86_tss = {
-               .sp0 = TOP_OF_INIT_STACK,
+               /*
+                * .sp0 is only used when entering ring 0 from a lower
+                * privilege level.  Since the init task never runs anything
+                * but ring 0 code, there is no need for a valid value here.
+                * Poison it.
+                */
+               .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
+
+#ifdef CONFIG_X86_64
+               /*
+                * .sp1 is cpu_current_top_of_stack.  The init task never
+                * runs user code, but cpu_current_top_of_stack should still
+                * be well defined before the first context switch.
+                */
+               .sp1 = TOP_OF_INIT_STACK,
+#endif
+
 #ifdef CONFIG_X86_32
                .ss0 = __KERNEL_DS,
                .ss1 = __KERNEL_CS,
@@ -64,11 +81,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
          */
        .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
-#ifdef CONFIG_X86_32
-       .SYSENTER_stack_canary  = STACK_END_MAGIC,
-#endif
 };
-EXPORT_PER_CPU_SYMBOL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
 
 DEFINE_PER_CPU(bool, __tss_limit_invalid);
 EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
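
Two things happen in the hunks above: the per-CPU TSS is renamed from cpu_tss to cpu_tss_rw and made page-aligned, and init's .sp0 is poisoned. The poison value (1UL << (BITS_PER_LONG-1)) + 1 evaluates to 0x8000000000000001 on a 64-bit build: sign bit set, bit 0 set. On x86_64 that is a non-canonical (and odd, hence misaligned) address, so any stray use of it as a stack pointer faults immediately instead of silently corrupting memory. A minimal userspace sketch, not kernel code, with BITS_PER_LONG assumed to be 64, just to see the value:

    #include <stdio.h>

    #define BITS_PER_LONG 64	/* assumption: 64-bit build */

    int main(void)
    {
            unsigned long poison = (1UL << (BITS_PER_LONG - 1)) + 1;

            /* Prints 0x8000000000000001: on x86_64 this is a
             * non-canonical address, so using it as a stack pointer
             * faults rather than silently scribbling over memory.
             */
            printf("%#lx\n", poison);
            return 0;
    }
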
@@ -97,7 +111,7 @@ void exit_thread(struct task_struct *tsk)
        struct fpu *fpu = &t->fpu;
 
        if (bp) {
-               struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
+               struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
 
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
@@ -265,6 +279,24 @@ static inline void switch_to_bitmap(struct tss_struct *tss,
        }
 }
 
 
+static __always_inline void __speculative_store_bypass_update(unsigned long tifn)
+{
+       u64 msr;
+
+       if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
+               msr = x86_amd_ls_cfg_base | ssbd_tif_to_amd_ls_cfg(tifn);
+               wrmsrl(MSR_AMD64_LS_CFG, msr);
+       } else {
+               msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn);
+               wrmsrl(MSR_IA32_SPEC_CTRL, msr);
+       }
+}
+
+void speculative_store_bypass_update(void)
+{
+       __speculative_store_bypass_update(current_thread_info()->flags);
+}
+
 void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
                      struct tss_struct *tss)
 {
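
The ssbd_tif_to_spec_ctrl()/ssbd_tif_to_amd_ls_cfg() helpers used above come from the <asm/spec-ctrl.h> header added in the first hunk. Upstream implements them as branch-free shifts that move the _TIF_SSBD bit from its thread-flag position to the corresponding MSR bit position. A sketch of the SPEC_CTRL variant; the bit positions follow the upstream headers but should be treated as illustrative here:

    /* Illustrative TIF -> MSR bit conversion, after the pattern in
     * upstream <asm/spec-ctrl.h>.  Bit positions are assumptions.
     */
    #define TIF_SSBD		5			/* thread-info flag bit */
    #define _TIF_SSBD		(1UL << TIF_SSBD)
    #define SPEC_CTRL_SSBD_SHIFT	2		/* SPEC_CTRL MSR bit 2 */

    static inline unsigned long ssbd_tif_to_spec_ctrl(unsigned long tifn)
    {
            /* Result is either 0 or the SSBD MSR bit, so the caller
             * can OR it into the MSR image without a branch.
             */
            return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT);
    }

Note also that the AMD LS_CFG versus Intel SPEC_CTRL choice is made with static_cpu_has(), which compiles down to boot-time code patching, so the distinction costs no runtime conditional on the hot task-switch path.
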
@@ -296,6 +328,9 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 
        if ((tifp ^ tifn) & _TIF_NOCPUID)
                set_cpuid_faulting(!!(tifn & _TIF_NOCPUID));
+
+       if ((tifp ^ tifn) & _TIF_SSBD)
+               __speculative_store_bypass_update(tifn);
 }
 
 /*
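
The (tifp ^ tifn) & _TIF_SSBD test above is the usual __switch_to_xtra() idiom: XOR-ing the previous and next task's flag words leaves only the bits that differ, so the comparatively expensive, serializing MSR write happens only when the incoming task actually needs a different SSBD state. A standalone illustration, with a hypothetical bit position for _TIF_SSBD:

    #include <stdio.h>

    #define _TIF_SSBD (1UL << 5)	/* hypothetical bit position */

    static void check(unsigned long tifp, unsigned long tifn)
    {
            if ((tifp ^ tifn) & _TIF_SSBD)
                    printf("SSBD state changed: write SPEC_CTRL/LS_CFG\n");
            else
                    printf("SSBD state unchanged: skip the MSR write\n");
    }

    int main(void)
    {
            check(_TIF_SSBD, 0);		/* on  -> off: write */
            check(_TIF_SSBD, _TIF_SSBD);	/* on  -> on:  skip  */
            check(0, 0);			/* off -> off: skip  */
            return 0;
    }
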
@@ -434,11 +469,19 @@ static __cpuidle void mwait_idle(void)
                        mb(); /* quirk */
                }
 
+               if (ibrs_inuse)
+                       native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default());
+
                __monitor((void *)&current_thread_info()->flags, 0, 0);
-               if (!need_resched())
+               if (!need_resched()) {
                        __sti_mwait(0, 0);
-               else
+                       if (ibrs_inuse)
+                               native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS);
+               } else {
+                       if (ibrs_inuse)
+                               native_wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_get_default() | SPEC_CTRL_IBRS);
                        local_irq_enable();
+               }
                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
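
The mwait_idle() changes bracket the MWAIT with IBRS toggling: when IBRS is in use, the CPU drops the IBRS bit (writing the plain x86_spec_ctrl_get_default() value) before arming the monitor, since an idle CPU gains nothing from the barrier while leaving it set penalizes the hyperthread sibling, and the bit is OR-ed back in on both the slept and the did-not-sleep paths before the CPU resumes work. A hypothetical standalone model of that ordering, with the MSR writes replaced by prints; ibrs_inuse and SPEC_CTRL_IBRS mirror the names in the patch, and SPEC_CTRL_IBRS is bit 0 of MSR_IA32_SPEC_CTRL:

    #include <stdbool.h>
    #include <stdio.h>

    #define SPEC_CTRL_IBRS (1UL << 0)	/* IBRS is bit 0 of the MSR */

    static bool ibrs_inuse = true;
    static unsigned long spec_ctrl_base;	/* x86_spec_ctrl_get_default() stand-in */

    static void wrmsrl_model(const char *when, unsigned long val)
    {
            printf("%-5s: SPEC_CTRL <- %#lx\n", when, val);
    }

    static void mwait_idle_model(bool resched_pending)
    {
            /* Entering idle: drop IBRS so the sibling thread is not
             * slowed down while this CPU sleeps.
             */
            if (ibrs_inuse)
                    wrmsrl_model("enter", spec_ctrl_base);

            /* __monitor()/__sti_mwait() would go here. */
            if (!resched_pending) {
                    if (ibrs_inuse)	/* woke from MWAIT: restore IBRS */
                            wrmsrl_model("wake", spec_ctrl_base | SPEC_CTRL_IBRS);
            } else {
                    if (ibrs_inuse)	/* never slept: still restore IBRS */
                            wrmsrl_model("abort", spec_ctrl_base | SPEC_CTRL_IBRS);
            }
    }

    int main(void)
    {
            mwait_idle_model(false);
            mwait_idle_model(true);
            return 0;
    }
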