x86/entry/64: Make cpu_entry_area.tss read-only
Author:     Andy Lutomirski <luto@kernel.org>
AuthorDate: Mon, 4 Dec 2017 14:07:29 +0000 (15:07 +0100)
Commit:     Kleber Sacilotto de Souza <kleber.souza@canonical.com>
CommitDate: Sat, 6 Jan 2018 12:23:23 +0000 (13:23 +0100)
CVE-2017-5754

The TSS is a fairly juicy target for exploits, and, now that the TSS
is in the cpu_entry_area, it's no longer protected by kASLR.  Make it
read-only on x86_64.
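
A condensed sketch of the resulting layout (illustrative only, distilled from
the fixmap.h and processor.h hunks below; most fields, including the real
x86_hw_tss contents, are elided and the hw_state placeholder is hypothetical):

	#define PAGE_SIZE 4096

	struct SYSENTER_stack {
		unsigned long words[64];
	};

	/* Padded to a full page so it can be mapped separately from the TSS. */
	struct SYSENTER_stack_page {
		struct SYSENTER_stack stack;
	} __attribute__((aligned(PAGE_SIZE)));

	struct tss_struct {
		unsigned long hw_state[128];	/* placeholder for x86_hw_tss + io_bitmap */
	} __attribute__((aligned(PAGE_SIZE)));

	struct cpu_entry_area {
		char gdt[PAGE_SIZE];				/* RO on x86_64 */
		struct SYSENTER_stack_page SYSENTER_stack_page;	/* RW, written on entry */
		struct tss_struct tss;				/* RO on x86_64, RW on x86_32 */
	};

The key move is that the SYSENTER stack leaves tss_struct and gets its own
writable page.  Writes to the TSS (sp0 updates, I/O bitmap changes) keep going
through the ordinary per-cpu variable, renamed cpu_tss_rw in this patch, so the
alias inside cpu_entry_area can be mapped read-only on x86_64.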

On x86_32, it can't be RO because it's written by the CPU during task
switches, and we use a task gate for double faults.  I'd also be
nervous about errata if we tried to make it RO even on configurations
without double fault handling.
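
In setup_cpu_entry_area() this becomes a per-architecture protection choice for
the TSS fixmap pages; a condensed excerpt of the arch/x86/kernel/cpu/common.c
hunk further down (surrounding checks elided):

	#ifdef CONFIG_X86_64
		pgprot_t tss_prot = PAGE_KERNEL_RO;	/* no hardware task switches in 64-bit mode */
	#else
		pgprot_t tss_prot = PAGE_KERNEL;	/* task switches write to the TSS */
	#endif

		set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
					&per_cpu(cpu_tss_rw, cpu),
					sizeof(struct tss_struct) / PAGE_SIZE,
					tss_prot);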

[ tglx: AMD confirmed that there is no problem on 64-bit with TSS RO.  So
   it's probably safe to assume that it's a non-issue, though Intel
   might have been creative in that area. Still waiting for
   confirmation. ]

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bpetkov@suse.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Link: https://lkml.kernel.org/r/20171204150606.733700132@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(backported from commit c482feefe1aeb150156248ba0fd3e029bc886605)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
16 files changed:
arch/x86/entry/entry_32.S
arch/x86/entry/entry_64.S
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/switch_to.h
arch/x86/include/asm/thread_info.h
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/ioport.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/traps.c
arch/x86/lib/delay.c
arch/x86/xen/enlighten_pv.c

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 04abcd3f8e2dac5131be5ec5f5de1cc0160990e8..3ef7800007f8230850c3474b3bdcf44a01e15dcc 100644
@@ -949,7 +949,7 @@ ENTRY(debug)
 
        /* Are we currently on the SYSENTER stack? */
        movl    PER_CPU_VAR(cpu_entry_area), %ecx
-       addl    $CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
+       addl    $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
        subl    %eax, %ecx      /* ecx = (end of SYSENTER_stack) - esp */
        cmpl    $SIZEOF_SYSENTER_stack, %ecx
        jb      .Ldebug_from_sysenter_stack
@@ -993,7 +993,7 @@ ENTRY(nmi)
 
        /* Are we currently on the SYSENTER stack? */
        movl    PER_CPU_VAR(cpu_entry_area), %ecx
-       addl    $CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
+       addl    $CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
        subl    %eax, %ecx      /* ecx = (end of SYSENTER_stack) - esp */
        cmpl    $SIZEOF_SYSENTER_stack, %ecx
        jb      .Lnmi_from_sysenter_stack
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 7a5e9edcdaf46cc2575643c40a196e5d51acb177..157860b3569f452189385f49f338562abdf5ad08 100644
@@ -153,7 +153,7 @@ END(native_usergs_sysret64)
        _entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
 
 /* The top word of the SYSENTER stack is hot and is usable as scratch space. */
-#define RSP_SCRATCH    CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + \
+#define RSP_SCRATCH    CPU_ENTRY_AREA_SYSENTER_stack + \
                        SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA
 
 ENTRY(entry_SYSCALL_64_trampoline)
@@ -389,7 +389,7 @@ syscall_return_via_sysret:
         * Save old stack pointer and switch to trampoline stack.
         */
        movq    %rsp, %rdi
-       movq    PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
+       movq    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
 
        pushq   RSP-RDI(%rdi)   /* RSP */
        pushq   (%rdi)          /* RDI */
@@ -718,7 +718,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
         * Save old stack pointer and switch to trampoline stack.
         */
        movq    %rsp, %rdi
-       movq    PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
+       movq    PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
 
        /* Copy the IRET frame to the trampoline stack. */
        pushq   6*8(%rdi)       /* SS */
@@ -946,7 +946,7 @@ apicinterrupt IRQ_WORK_VECTOR                       irq_work_interrupt              smp_irq_work_interrupt
 /*
  * Exception entry points.
  */
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
+#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
 
 /*
  * Switch to the thread stack.  This is called with the IRET frame and
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 56aaffbbffd6318c34ff5f8c5e29be78f2f7e228..5dc269ff408556a45ed313d202d064544f8fa188 100644
@@ -56,9 +56,14 @@ struct cpu_entry_area {
        char gdt[PAGE_SIZE];
 
        /*
-        * The GDT is just below cpu_tss and thus serves (on x86_64) as a
-        * a read-only guard page for the SYSENTER stack at the bottom
-        * of the TSS region.
+        * The GDT is just below SYSENTER_stack and thus serves (on x86_64) as
+        * a a read-only guard page.
+        */
+       struct SYSENTER_stack_page SYSENTER_stack_page;
+
+       /*
+        * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
+        * we need task switches to work, and task switches write to the TSS.
         */
        struct tss_struct tss;
 
@@ -227,7 +232,7 @@ static inline struct cpu_entry_area *get_cpu_entry_area(int cpu)
 
 static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu)
 {
-       return &get_cpu_entry_area(cpu)->tss.SYSENTER_stack;
+       return &get_cpu_entry_area(cpu)->SYSENTER_stack_page.stack;
 }
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 2d489a414a869de1278e871fe46d4763f8f6a416..bccec7ed1676a15c6a8edd3e2b21d060c2bfe81e 100644
@@ -334,13 +334,11 @@ struct SYSENTER_stack {
        unsigned long           words[64];
 };
 
-struct tss_struct {
-       /*
-        * Space for the temporary SYSENTER stack, used for SYSENTER
-        * and the entry trampoline as well.
-        */
-       struct SYSENTER_stack   SYSENTER_stack;
+struct SYSENTER_stack_page {
+       struct SYSENTER_stack stack;
+} __aligned(PAGE_SIZE);
 
+struct tss_struct {
        /*
         * The fixed hardware portion.  This must not cross a page boundary
         * at risk of violating the SDM's advice and potentially triggering
@@ -357,7 +355,7 @@ struct tss_struct {
        unsigned long           io_bitmap[IO_BITMAP_LONGS + 1];
 } __aligned(PAGE_SIZE);
 
-DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
 
 /*
  * sizeof(unsigned long) coming from an extra "long" at the end
@@ -372,7 +370,8 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
 #else
-#define cpu_current_top_of_stack cpu_tss.x86_tss.sp1
+/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
+#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
 #endif
 
 /*
@@ -532,7 +531,7 @@ static inline void native_set_iopl_mask(unsigned mask)
 static inline void
 native_load_sp0(unsigned long sp0)
 {
-       this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+       this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 static inline void native_swapgs(void)
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index ca2fc84ad278cb9ea162b717a0f04216e88ec4f1..cfb6dfe4c457708cf359cb55ed76a2015a549d4e 100644
@@ -78,10 +78,10 @@ do {                                                                        \
 static inline void refresh_sysenter_cs(struct thread_struct *thread)
 {
        /* Only happens when SEP is enabled, no need to test "SEP"arately: */
-       if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
+       if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
                return;
 
-       this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
+       this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
        wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 }
 #endif
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 760dd8a739272898a01910cae446f4c0ddc3dfb9..6275b391ac617206e789dac92a30cca17f04dbd6 100644
@@ -214,7 +214,7 @@ static inline int arch_within_stack_frames(const void * const stack,
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
-# define cpu_current_top_of_stack (cpu_tss + TSS_sp1)
+# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
 #endif
 
 #endif
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 00ea20bfa857571ab916ac52e00df47e981517e8..40c3fab107accb7fdb70a8ca599d697d9f1f6695 100644
@@ -93,10 +93,9 @@ void common(void) {
        BLANK();
        DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
 
-       OFFSET(TSS_STRUCT_SYSENTER_stack, tss_struct, SYSENTER_stack);
-       DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack));
-
        /* Layout info for cpu_entry_area */
        OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
        OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
+       OFFSET(CPU_ENTRY_AREA_SYSENTER_stack, cpu_entry_area, SYSENTER_stack_page);
+       DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack));
 }
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index d09b161a3bd0a31a5867965cbf4f9e314626b799..c4f23da7a0f0a23ab33dc5c57e756b3d11d684ea 100644
@@ -49,8 +49,8 @@ void foo(void)
        BLANK();
 
        /* Offset from the sysenter stack to tss.sp0 */
-       DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
-              offsetofend(struct tss_struct, SYSENTER_stack));
+       DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
+              offsetofend(struct cpu_entry_area, SYSENTER_stack_page.stack));
 
 #ifdef CONFIG_CC_STACKPROTECTOR
        BLANK();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f9541c48c290c68df35b5d51004029576dc7aca1..7992e5a8076ccdb40c6009eed9ccd1c2e68cc807 100644
@@ -487,6 +487,9 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
        [(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
 #endif
 
+static DEFINE_PER_CPU_PAGE_ALIGNED(struct SYSENTER_stack_page,
+                                  SYSENTER_stack_storage);
+
 static void __init
 set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
 {
@@ -500,23 +503,29 @@ static void __init setup_cpu_entry_area(int cpu)
 #ifdef CONFIG_X86_64
        extern char _entry_trampoline[];
 
-       /* On 64-bit systems, we use a read-only fixmap GDT. */
+       /* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
        pgprot_t gdt_prot = PAGE_KERNEL_RO;
+       pgprot_t tss_prot = PAGE_KERNEL_RO;
 #else
        /*
         * On native 32-bit systems, the GDT cannot be read-only because
         * our double fault handler uses a task gate, and entering through
-        * a task gate needs to change an available TSS to busy.  If the GDT
-        * is read-only, that will triple fault.
+        * a task gate needs to change an available TSS to busy.  If the
+        * GDT is read-only, that will triple fault.  The TSS cannot be
+        * read-only because the CPU writes to it on task switches.
         *
-        * On Xen PV, the GDT must be read-only because the hypervisor requires
-        * it.
+        * On Xen PV, the GDT must be read-only because the hypervisor
+        * requires it.
         */
        pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
                PAGE_KERNEL_RO : PAGE_KERNEL;
+       pgprot_t tss_prot = PAGE_KERNEL;
 #endif
 
        __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
+       set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, SYSENTER_stack_page),
+                               per_cpu_ptr(&SYSENTER_stack_storage, cpu), 1,
+                               PAGE_KERNEL);
 
        /*
         * The Intel SDM says (Volume 3, 7.2.1):
@@ -539,9 +548,9 @@ static void __init setup_cpu_entry_area(int cpu)
                      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
        BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
        set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
-                               &per_cpu(cpu_tss, cpu),
+                               &per_cpu(cpu_tss_rw, cpu),
                                sizeof(struct tss_struct) / PAGE_SIZE,
-                               PAGE_KERNEL);
+                               tss_prot);
 
 #ifdef CONFIG_X86_32
        per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
@@ -1297,7 +1306,7 @@ void enable_sep_cpu(void)
                return;
 
        cpu = get_cpu();
-       tss = &per_cpu(cpu_tss, cpu);
+       tss = &per_cpu(cpu_tss_rw, cpu);
 
        /*
         * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
@@ -1576,7 +1585,7 @@ void cpu_init(void)
        if (cpu)
                load_ucode_ap();
 
-       t = &per_cpu(cpu_tss, cpu);
+       t = &per_cpu(cpu_tss_rw, cpu);
        oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
@@ -1667,7 +1676,7 @@ void cpu_init(void)
 {
        int cpu = smp_processor_id();
        struct task_struct *curr = current;
-       struct tss_struct *t = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
 
        wait_for_master_cpu(cpu);
 
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 4a613fed94b6546da1f09ca3c5fb01079992125c..d13777d49d8baeb72c9494f6612b602044a825ba 100644
@@ -66,7 +66,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
         * because the ->io_bitmap_max value must match the bitmap
         * contents:
         */
-       tss = &per_cpu(cpu_tss, get_cpu());
+       tss = &per_cpu(cpu_tss_rw, get_cpu());
 
        if (turn_on)
                bitmap_clear(t->io_bitmap_ptr, from, num);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ec758390d24eceeb07db99e12cf0d533602b3238..3688a7b9d05542fcb3699110d3fc4df286459d75 100644
@@ -46,7 +46,7 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = {
        .x86_tss = {
                /*
                 * .sp0 is only used when entering ring 0 from a lower
@@ -81,7 +81,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
        .io_bitmap              = { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
 };
-EXPORT_PER_CPU_SYMBOL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
 
 DEFINE_PER_CPU(bool, __tss_limit_invalid);
 EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
@@ -110,7 +110,7 @@ void exit_thread(struct task_struct *tsk)
        struct fpu *fpu = &t->fpu;
 
        if (bp) {
-               struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
+               struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
 
                t->io_bitmap_ptr = NULL;
                clear_thread_flag(TIF_IO_BITMAP);
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index c0d60420466c5983e85958962d50bf202b6782ba..784ff9147172d5c3e0b3f11b0d15e2c29b2162cc 100644
@@ -234,7 +234,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 157f81816915246948e33d56e9c03f34d472a9c2..c754662320163107ca3a254362ce0e404a8d3c11 100644
@@ -399,7 +399,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
-       struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+       struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
        WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
                     this_cpu_read(irq_count) != -1);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 2818c83892b3d98aa121e0158adc1a64fe833e1c..14b462eefa17bcbc6dbc0522ff442c964af7df41 100644
@@ -376,7 +376,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
                regs->cs == __KERNEL_CS &&
                regs->ip == (unsigned long)native_irq_return_iret)
        {
-               struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1;
+               struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
                /*
                 * regs->sp points to the failing IRET frame on the
@@ -661,7 +661,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
         * exception came from the IRET target.
         */
        struct bad_iret_stack *new_stack =
-               (struct bad_iret_stack *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1;
+               (struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
        /* Copy the IRET target to the new stack. */
        memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 29df077cb0899be78cf604a3e21eb7262a729159..cf2ac227c2aca27c4d44e6d49768e94586c13471 100644
@@ -106,10 +106,10 @@ static void delay_mwaitx(unsigned long __loops)
                delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
 
                /*
-                * Use cpu_tss as a cacheline-aligned, seldomly
+                * Use cpu_tss_rw as a cacheline-aligned, seldomly
                 * accessed per-cpu variable as the monitor target.
                 */
-               __monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);
+               __monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
 
                /*
                 * AMD, like Intel, supports the EAX hint and EAX=0xf
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 63c81154083b4fbe28bd978d62cbf2cc697f497b..3b76cf85e3066f36ab4e29e58d10218f07f393c9 100644
@@ -817,7 +817,7 @@ static void xen_load_sp0(unsigned long sp0)
        mcs = xen_mc_entry(0);
        MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
        xen_mc_issue(PARAVIRT_LAZY_CPU);
-       this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+       this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 void xen_set_iopl_mask(unsigned mask)