git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/commitdiff
arm64: kpti: Fix the interaction between ASID switching and software PAN
author: Catalin Marinas <catalin.marinas@arm.com>
Wed, 10 Jan 2018 13:18:30 +0000 (13:18 +0000)
committer: Khalid Elmously <khalid.elmously@canonical.com>
Tue, 27 Feb 2018 16:32:52 +0000 (11:32 -0500)
Commit 6b88a32c7af6 upstream.

With ARM64_SW_TTBR0_PAN enabled, the exception entry code checks the
active ASID to decide whether user access was enabled (non-zero ASID)
when the exception was taken. On return from exception, if user access
was previously disabled, it re-instates TTBR0_EL1 from the per-thread
saved value (updated in switch_mm() or efi_set_pgd()).

Commit 7655abb95386 ("arm64: mm: Move ASID from TTBR0 to TTBR1") makes the
TTBR0_EL1 + ASID switching non-atomic. Subsequently, commit 27a921e75711
("arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN") changes the
__uaccess_ttbr0_disable() function and asm macro to first write the
reserved TTBR0_EL1 followed by the ASID=0 update in TTBR1_EL1. If an
exception occurs between these two, the exception return code will
re-instate a valid TTBR0_EL1. A similar scenario can happen in
cpu_switch_mm() between setting the reserved TTBR0_EL1 and the ASID
update in cpu_do_switch_mm().

This patch reverts the entry.S check for ASID == 0 to TTBR0_EL1 and
disables the interrupts around the TTBR0_EL1 and ASID switching code in
__uaccess_ttbr0_disable(). It also ensures that, when returning from the
EFI runtime services, efi_set_pgd() doesn't leave a non-zero ASID in
TTBR1_EL1 by using uaccess_ttbr0_{enable,disable}.

The accesses to current_thread_info()->ttbr0 are updated to use
READ_ONCE/WRITE_ONCE.

As a safety measure, __uaccess_ttbr0_enable() always masks out any
existing non-zero ASID in TTBR1_EL1 before writing in the new ASID.

Fixes: 27a921e75711 ("arm64: mm: Fix and re-enable ARM64_SW_TTBR0_PAN")
Acked-by: Will Deacon <will.deacon@arm.com>
Reported-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Tested-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: James Morse <james.morse@arm.com>
Tested-by: James Morse <james.morse@arm.com>
Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
(cherry picked from commit fedf5a743cf2e024c5b557abfb1a45f3c06b3c81)

CVE-2017-5753
CVE-2017-5715
CVE-2017-5754

Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
Acked-by: Brad Figg <brad.figg@canonical.com>
Acked-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
12 files changed:
arch/arm64/include/asm/asm-uaccess.h
arch/arm64/include/asm/efi.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/include/asm/uaccess.h
arch/arm64/kernel/entry.S
arch/arm64/lib/clear_user.S
arch/arm64/lib/copy_from_user.S
arch/arm64/lib/copy_in_user.S
arch/arm64/lib/copy_to_user.S
arch/arm64/mm/cache.S
arch/arm64/mm/proc.S
arch/arm64/xen/hypercall.S

index e8327fa7953c7ae9876cc5715e3e17b94ff1891c..1c568fd2863cf2c198df61542131a5bcb03eb680 100644 (file)
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
        .macro  __uaccess_ttbr0_disable, tmp1
        mrs     \tmp1, ttbr1_el1                // swapper_pg_dir
+       bic     \tmp1, \tmp1, #TTBR_ASID_MASK
        add     \tmp1, \tmp1, #SWAPPER_DIR_SIZE // reserved_ttbr0 at the end of swapper_pg_dir
        msr     ttbr0_el1, \tmp1                // set reserved TTBR0_EL1
        isb
        sub     \tmp1, \tmp1, #SWAPPER_DIR_SIZE
-       bic     \tmp1, \tmp1, #TTBR_ASID_MASK
        msr     ttbr1_el1, \tmp1                // set reserved ASID
        isb
        .endm
        isb
        .endm
 
-       .macro  uaccess_ttbr0_disable, tmp1
+       .macro  uaccess_ttbr0_disable, tmp1, tmp2
 alternative_if_not ARM64_HAS_PAN
+       save_and_disable_irq \tmp2              // avoid preemption
        __uaccess_ttbr0_disable \tmp1
+       restore_irq \tmp2
 alternative_else_nop_endif
        .endm
 
@@ -48,7 +50,7 @@ alternative_if_not ARM64_HAS_PAN
 alternative_else_nop_endif
        .endm
 #else
-       .macro  uaccess_ttbr0_disable, tmp1
+       .macro  uaccess_ttbr0_disable, tmp1, tmp2
        .endm
 
        .macro  uaccess_ttbr0_enable, tmp1, tmp2, tmp3
@@ -58,8 +60,8 @@ alternative_else_nop_endif
 /*
  * These macros are no-ops when UAO is present.
  */
-       .macro  uaccess_disable_not_uao, tmp1
-       uaccess_ttbr0_disable \tmp1
+       .macro  uaccess_disable_not_uao, tmp1, tmp2
+       uaccess_ttbr0_disable \tmp1, \tmp2
 alternative_if ARM64_ALT_PAN_NOT_UAO
        SET_PSTATE_PAN(1)
 alternative_else_nop_endif
index dee3d5465816509cb1331062ac9d133b5741a500..e2351d2e92c277fb4781a6a1f398b5fc1793eddf 100644 (file)
@@ -116,19 +116,21 @@ static inline void efi_set_pgd(struct mm_struct *mm)
                if (mm != current->active_mm) {
                        /*
                         * Update the current thread's saved ttbr0 since it is
-                        * restored as part of a return from exception. Set
-                        * the hardware TTBR0_EL1 using cpu_switch_mm()
-                        * directly to enable potential errata workarounds.
+                        * restored as part of a return from exception. Enable
+                        * access to the valid TTBR0_EL1 and invoke the errata
+                        * workaround directly since there is no return from
+                        * exception when invoking the EFI run-time services.
                         */
                        update_saved_ttbr0(current, mm);
-                       cpu_switch_mm(mm->pgd, mm);
+                       uaccess_ttbr0_enable();
+                       post_ttbr_update_workaround();
                } else {
                        /*
                         * Defer the switch to the current thread's TTBR0_EL1
                         * until uaccess_enable(). Restore the current
                         * thread's saved ttbr0 corresponding to its active_mm
                         */
-                       cpu_set_reserved_ttbr0();
+                       uaccess_ttbr0_disable();
                        update_saved_ttbr0(current, current->active_mm);
                }
        }
index da29766a181ca71740686e65fca72b975f377b3e..779d7a2ec5eccb7ad165ddca044952df22eb0623 100644 (file)
@@ -175,7 +175,7 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
        else
                ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
 
-       task_thread_info(tsk)->ttbr0 = ttbr;
+       WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
 }
 #else
 static inline void update_saved_ttbr0(struct task_struct *tsk,
@@ -230,6 +230,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #define activate_mm(prev,next) switch_mm(prev, next, current)
 
 void verify_cpu_asid_bits(void);
+void post_ttbr_update_workaround(void);
 
 #endif /* !__ASSEMBLY__ */
 
index 6eadf55ebaf042fac833638939e45818c25c3cd4..335dfe02a846fece1429556038bfa6ebfe23dd5c 100644 (file)
@@ -105,16 +105,18 @@ static inline void set_fs(mm_segment_t fs)
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 static inline void __uaccess_ttbr0_disable(void)
 {
-       unsigned long ttbr;
+       unsigned long flags, ttbr;
 
+       local_irq_save(flags);
        ttbr = read_sysreg(ttbr1_el1);
+       ttbr &= ~TTBR_ASID_MASK;
        /* reserved_ttbr0 placed at the end of swapper_pg_dir */
        write_sysreg(ttbr + SWAPPER_DIR_SIZE, ttbr0_el1);
        isb();
        /* Set reserved ASID */
-       ttbr &= ~TTBR_ASID_MASK;
        write_sysreg(ttbr, ttbr1_el1);
        isb();
+       local_irq_restore(flags);
 }
 
 static inline void __uaccess_ttbr0_enable(void)
@@ -127,10 +129,11 @@ static inline void __uaccess_ttbr0_enable(void)
         * roll-over and an update of 'ttbr0'.
         */
        local_irq_save(flags);
-       ttbr0 = current_thread_info()->ttbr0;
+       ttbr0 = READ_ONCE(current_thread_info()->ttbr0);
 
        /* Restore active ASID */
        ttbr1 = read_sysreg(ttbr1_el1);
+       ttbr1 &= ~TTBR_ASID_MASK;               /* safety measure */
        ttbr1 |= ttbr0 & TTBR_ASID_MASK;
        write_sysreg(ttbr1, ttbr1_el1);
        isb();
index ee9d85595c399d46ddcf6d490658fb421fe71c96..b2e5b78a5e3c79f307595ec3f3952ec2167d41bc 100644 (file)
@@ -204,7 +204,7 @@ alternative_if ARM64_HAS_PAN
 alternative_else_nop_endif
 
        .if     \el != 0
-       mrs     x21, ttbr1_el1
+       mrs     x21, ttbr0_el1
        tst     x21, #TTBR_ASID_MASK            // Check for the reserved ASID
        orr     x23, x23, #PSR_PAN_BIT          // Set the emulated PAN in the saved SPSR
        b.eq    1f                              // TTBR0 access already disabled
index 8f9c4641e706b26e2956d50556a51d15716f007c..3d69a8d41fa5e59217d5a451cf2ed67bb460a483 100644 (file)
@@ -50,7 +50,7 @@ uao_user_alternative 9f, strh, sttrh, wzr, x0, 2
        b.mi    5f
 uao_user_alternative 9f, strb, sttrb, wzr, x0, 0
 5:     mov     x0, #0
-       uaccess_disable_not_uao x2
+       uaccess_disable_not_uao x2, x3
        ret
 ENDPROC(__clear_user)
 
index 69d86a80f3e2a3e3cae9c90afdfd7aee013e4924..20305d485046754c7c59c48ef28c77dfc54f2ca0 100644 (file)
@@ -67,7 +67,7 @@ ENTRY(__arch_copy_from_user)
        uaccess_enable_not_uao x3, x4, x5
        add     end, x0, x2
 #include "copy_template.S"
-       uaccess_disable_not_uao x3
+       uaccess_disable_not_uao x3, x4
        mov     x0, #0                          // Nothing to copy
        ret
 ENDPROC(__arch_copy_from_user)
index e442b531252ad87704e9a4f829be4902637922cb..fbb090f431a5c0790c9c98ac7b947505cf7c48d8 100644 (file)
@@ -68,7 +68,7 @@ ENTRY(raw_copy_in_user)
        uaccess_enable_not_uao x3, x4, x5
        add     end, x0, x2
 #include "copy_template.S"
-       uaccess_disable_not_uao x3
+       uaccess_disable_not_uao x3, x4
        mov     x0, #0
        ret
 ENDPROC(raw_copy_in_user)
index 318f15d5c336ccc1ae2683f612e799483336fa62..fda6172d6b88819bcec2a69439fa202c57119f40 100644 (file)
@@ -66,7 +66,7 @@ ENTRY(__arch_copy_to_user)
        uaccess_enable_not_uao x3, x4, x5
        add     end, x0, x2
 #include "copy_template.S"
-       uaccess_disable_not_uao x3
+       uaccess_disable_not_uao x3, x4
        mov     x0, #0
        ret
 ENDPROC(__arch_copy_to_user)
index 6cd20a8c0952a119b5b317377645fd17278ce025..91464e7f77cc33ff6c6d972fa38b5e92d4394e40 100644 (file)
@@ -72,7 +72,7 @@ USER(9f, ic   ivau, x4        )               // invalidate I line PoU
        isb
        mov     x0, #0
 1:
-       uaccess_ttbr0_disable x1
+       uaccess_ttbr0_disable x1, x2
        ret
 9:
        mov     x0, #-EFAULT
index 447537c1699dd86a62369fe6a2c549fa12efe525..2d406b4b522f84703544a86cd4096b7211356bb8 100644 (file)
@@ -140,6 +140,9 @@ ENDPROC(cpu_do_resume)
 ENTRY(cpu_do_switch_mm)
        mrs     x2, ttbr1_el1
        mmid    x1, x1                          // get mm->context.id
+#ifdef CONFIG_ARM64_SW_TTBR0_PAN
+       bfi     x0, x1, #48, #16                // set the ASID field in TTBR0
+#endif
        bfi     x2, x1, #48, #16                // set the ASID
        msr     ttbr1_el1, x2                   // in TTBR1 (since TCR.A1 is set)
        isb
index acdbd2c9e899c1f87f684156244b9c1565e35efd..c5f05c4a4d00883422ed6e211135302cff3be14f 100644 (file)
@@ -107,6 +107,6 @@ ENTRY(privcmd_call)
        /*
         * Disable userspace access from kernel once the hyp call completed.
         */
-       uaccess_ttbr0_disable x6
+       uaccess_ttbr0_disable x6, x7
        ret
 ENDPROC(privcmd_call);