x86/bugs/AMD: Add support to disable RDS on Fam[15,16,17]h if requested

diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 3ca198080ea9294486ae9a1121e7815dfba7cb19..3fdf5358998e3a0b57b3b1996a37317117c73a92 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+__visible DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw) = {
        .x86_tss = {
-               .sp0 = TOP_OF_INIT_STACK,
+               /*
+                * .sp0 is only used when entering ring 0 from a lower
+                * privilege level.  Since the init task never runs anything
+                * but ring 0 code, there is no need for a valid value here.
+                * Poison it.
+                */
+               .sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
+
+#ifdef CONFIG_X86_64
+               /*
+                * .sp1 is cpu_current_top_of_stack.  The init task never
+                * runs user code, but cpu_current_top_of_stack should still
+                * be well defined before the first context switch.
+                */
+               .sp1 = TOP_OF_INIT_STACK,
+#endif
+
 #ifdef CONFIG_X86_32
                .ss0 = __KERNEL_DS,
                .ss1 = __KERNEL_CS,
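
The poison value above is worth unpacking: with BITS_PER_LONG == 64 it
evaluates to 0x8000000000000001, which lies in the x86_64 non-canonical
hole (any stack access through it faults immediately) and is odd, so it
is also misaligned for any push. A minimal userspace sketch, just to
check the arithmetic (not kernel code):

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))	/* 64 on x86_64 */

int main(void)
{
	unsigned long sp0_poison = (1UL << (BITS_PER_LONG - 1)) + 1;

	/* Prints 0x8000000000000001 on a 64-bit build: bit 63 set with
	 * bits 62..47 clear puts it in the non-canonical hole, and the
	 * low bit leaves it misaligned for any real stack use. */
	printf("sp0 poison = %#lx\n", sp0_poison);
	return 0;
}
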
@@ -64,11 +80,8 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
          */
        .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
-#ifdef CONFIG_X86_32
-       .SYSENTER_stack_canary  = STACK_END_MAGIC,
-#endif
 };
-EXPORT_PER_CPU_SYMBOL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
 
 DEFINE_PER_CPU(bool, __tss_limit_invalid);
 EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
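
For readers new to the per-CPU machinery in this hunk:
DEFINE_PER_CPU_PAGE_ALIGNED creates one instance of the variable per
possible CPU, placed on its own page boundary, and EXPORT_PER_CPU_SYMBOL
makes it visible to modules. The page alignment matters here because,
with page-table isolation, the TSS must be mappable into the
user-visible entry area without dragging neighbouring per-CPU data along
(my reading of the surrounding PTI work; the hunk itself does not say
so). A rough userspace analogue of the per_cpu() accessor, with the
per-CPU area modeled as a plain array:

#include <stdio.h>

#define NR_CPUS 4

struct tss_stub { unsigned long sp0; };

/* Stand-in for DEFINE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw):
 * one copy per CPU, selected by CPU number. */
static struct tss_stub cpu_tss_rw[NR_CPUS];

/* Stand-in for the kernel's per_cpu(var, cpu) accessor. */
#define per_cpu(var, cpu) ((var)[cpu])

int main(void)
{
	per_cpu(cpu_tss_rw, 0).sp0 = 0x1000;
	printf("cpu0 sp0 = %#lx\n", per_cpu(cpu_tss_rw, 0).sp0);
	return 0;
}
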
@@ -97,7 +110,7 @@ void exit_thread(struct task_struct *tsk)
        struct fpu *fpu = &t->fpu;
 
        if (bp) {
-               struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
+               struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
 
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
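
The hunk above is purely the cpu_tss -> cpu_tss_rw rename, but the
ordering around it is a pattern worth noting: exit_thread() takes
get_cpu() so the task cannot migrate while it edits this CPU's TSS,
unpublishes the bitmap pointer and the TIF_IO_BITMAP flag, and only then
frees the memory. A compressed sketch of that ordering with the kernel
primitives stubbed out (hypothetical stand-ins; kfree() becomes free(),
the TSS invalidation step is elided):

#include <stdlib.h>

struct thread_stub { unsigned long *io_bitmap_ptr; };

/* In the kernel, get_cpu()/put_cpu() disable and re-enable preemption
 * so "this CPU" cannot change underneath us; here they are no-ops. */
static int get_cpu(void) { return 0; }
static void put_cpu(void) { }

static void drop_io_bitmap(struct thread_stub *t)
{
	unsigned long *bp = t->io_bitmap_ptr;

	if (!bp)
		return;

	get_cpu();			/* pin: we edit this CPU's TSS copy */
	t->io_bitmap_ptr = NULL;	/* unpublish before freeing ...     */
	free(bp);			/* ... so no one sees a stale bp    */
	put_cpu();
}

int main(void)
{
	struct thread_stub t = { .io_bitmap_ptr = malloc(128) };

	drop_io_bitmap(&t);
	return 0;
}
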
@@ -434,11 +447,19 @@ static __cpuidle void mwait_idle(void)
                        mb(); /* quirk */
                }
 
+               if (ibrs_inuse)
+                       native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+
                __monitor((void *)&current_thread_info()->flags, 0, 0);
-               if (!need_resched())
+               if (!need_resched()) {
                        __sti_mwait(0, 0);
-               else
+                       if (ibrs_inuse)
+                               native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
+               } else {
+                       if (ibrs_inuse)
+                               native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
                        local_irq_enable();
+               }
                trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
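
This last hunk is the IBRS idle dance from the early (pre-retpoline)
Spectre v2 mitigation series: when IBRS is in use, SPEC_CTRL is cleared
before arming MONITOR/MWAIT so the hyperthread sibling is not penalized
while this CPU sleeps, and FEATURE_ENABLE_IBRS is restored on both exit
paths before the CPU goes back to running code. A structural sketch of
that pattern with the privileged primitives stubbed out (the MSR write
just prints; ibrs_inuse, MSR_IA32_SPEC_CTRL and FEATURE_ENABLE_IBRS keep
the patch's names):

#include <stdbool.h>
#include <stdio.h>

#define MSR_IA32_SPEC_CTRL	0x48		/* IA32_SPEC_CTRL  */
#define FEATURE_ENABLE_IBRS	(1UL << 0)	/* SPEC_CTRL.IBRS  */

static bool ibrs_inuse = true;

/* Stubs for the privileged kernel primitives used by mwait_idle(). */
static void native_wrmsrl(unsigned int msr, unsigned long val)
{
	printf("wrmsr %#x <- %#lx\n", msr, val);
}
static bool need_resched(void) { return false; }
static void sti_mwait(void) { /* kernel: __sti_mwait(0, 0) */ }
static void local_irq_enable(void) { }

static void mwait_idle_sketch(void)
{
	/* Drop IBRS before sleeping: leaving it set would only slow
	 * the hyperthread sibling while this CPU idles. */
	if (ibrs_inuse)
		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);

	/* __monitor(&flags, 0, 0) would arm the monitor here */

	if (!need_resched()) {
		sti_mwait();		/* sleep until the monitor fires */
		if (ibrs_inuse)		/* restore on wakeup */
			native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
	} else {
		if (ibrs_inuse)		/* restore before running on */
			native_wrmsrl(MSR_IA32_SPEC_CTRL, FEATURE_ENABLE_IBRS);
		local_irq_enable();
	}
}

int main(void)
{
	mwait_idle_sketch();
	return 0;
}
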