git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
Merge branch 'for-next/scs' into for-next/core
author: Will Deacon <will@kernel.org>
Thu, 28 May 2020 17:03:40 +0000 (18:03 +0100)
committer: Will Deacon <will@kernel.org>
Thu, 28 May 2020 17:03:40 +0000 (18:03 +0100)
Support for Clang's Shadow Call Stack in the kernel
(Sami Tolvanen and Will Deacon)
* for-next/scs:
  arm64: entry-ftrace.S: Update comment to indicate that x18 is live
  scs: Move DEFINE_SCS macro into core code
  scs: Remove references to asm/scs.h from core code
  scs: Move scs_overflow_check() out of architecture code
  arm64: scs: Use 'scs_sp' register alias for x18
  scs: Move accounting into alloc/free functions
  arm64: scs: Store absolute SCS stack pointer value in thread_info
  efi/libstub: Disable Shadow Call Stack
  arm64: scs: Add shadow stacks for SDEI
  arm64: Implement Shadow Call Stack
  arm64: Disable SCS for hypervisor code
  arm64: vdso: Disable Shadow Call Stack
  arm64: efi: Restore register x18 if it was corrupted
  arm64: Preserve register x18 when CPU is suspended
  arm64: Reserve register x18 from general allocation with SCS
  scs: Disable when function graph tracing is enabled
  scs: Add support for stack usage debugging
  scs: Add page accounting for shadow call stack allocations
  scs: Add support for Clang's Shadow Call Stack (SCS)

1  2 
arch/arm64/Kconfig
arch/arm64/Makefile
arch/arm64/include/asm/kvm_hyp.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/efi-rt-wrapper.S
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/vdso/Makefile
arch/arm64/mm/proc.S

index 999ddaefef1ee9d21a7e997536fbce0820c11ad3,c380a16533f616be516b86bfbd196a77d7b212a3..77c440db9d46ca80327fababa2b6b93654ba3e64
@@@ -63,11 -61,10 +63,12 @@@ config ARM64
        select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
        select ARCH_KEEP_MEMBLOCK
        select ARCH_USE_CMPXCHG_LOCKREF
 +      select ARCH_USE_GNU_PROPERTY
        select ARCH_USE_QUEUED_RWLOCKS
        select ARCH_USE_QUEUED_SPINLOCKS
 +      select ARCH_USE_SYM_ANNOTATIONS
        select ARCH_SUPPORTS_MEMORY_FAILURE
+       select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK
        select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && (GCC_VERSION >= 50000 || CC_IS_CLANG)
        select ARCH_SUPPORTS_NUMA_BALANCING
Simple merge
index dcb63bf941057af1e4a26cebd7f541635c6a98fd,875b106c5d98805003f1388adf56fec23385b4ab..015883671ec34c841bd2aca5fe8d0d1ceab6d5de
  #include <linux/compiler.h>
  #include <linux/kvm_host.h>
  #include <asm/alternative.h>
 -#include <asm/kvm_mmu.h>
  #include <asm/sysreg.h>
  
- #define __hyp_text __section(.hyp.text) notrace
+ #define __hyp_text __section(.hyp.text) notrace __noscs
  
  #define read_sysreg_elx(r,nvh,vh)                                     \
        ({                                                              \
Simple merge
index 1192c4bb48df03c982dfacdc23fe40602b9a8b70,6ca6c0dc11a1b30e8fff7bf7f42317baa36b6891..75691a2641c1c0f8ec05604ae6b47345fcb93e75
@@@ -34,5 -34,14 +34,14 @@@ SYM_FUNC_START(__efi_rt_asm_wrapper)
        ldp     x29, x30, [sp], #32
        b.ne    0f
        ret
- 0:    b       efi_handle_corrupted_x18        // tail call
+ 0:
+       /*
+        * With CONFIG_SHADOW_CALL_STACK, the kernel uses x18 to store a
+        * shadow stack pointer, which we need to restore before returning to
+        * potentially instrumented code. This is safe because the wrapper is
+        * called with preemption disabled and a separate shadow stack is used
+        * for interrupts.
+        */
+       mov     x18, x2
+       b       efi_handle_corrupted_x18        // tail call
 -ENDPROC(__efi_rt_asm_wrapper)
 +SYM_FUNC_END(__efi_rt_asm_wrapper)
index 84cbc513c1e87261e4a02b2e48bdad5e56f1a0ed,741faf0706f1f8e90cf52faecba9c58a2ef4547b..5304d193c79dd3a67bca8d72ec9afbf1d530df2e
@@@ -178,7 -179,9 +179,9 @@@ alternative_cb_end
  
        apply_ssbd 1, x22, x23
  
 -      ptrauth_keys_install_kernel tsk, 1, x20, x22, x23
 +      ptrauth_keys_install_kernel tsk, x20, x22, x23
+       scs_load tsk, x20
        .else
        add     x21, sp, #S_FRAME_SIZE
        get_current_task tsk
@@@ -901,7 -918,9 +919,9 @@@ SYM_FUNC_START(cpu_switch_to
        ldr     lr, [x8]
        mov     sp, x9
        msr     sp_el0, x1
 -      ptrauth_keys_install_kernel x1, 1, x8, x9, x10
 +      ptrauth_keys_install_kernel x1, x8, x9, x10
+       scs_save x0, x8
+       scs_load x1, x8
        ret
  SYM_FUNC_END(cpu_switch_to)
  NOKPROBE(cpu_switch_to)
index 340d60d2e218ab8de297dd666a893dc260630040,1293baddfd20925f335abdf2c547dffeb62421d4..632702146813a36ecfb3fb6bd7e9e5e122b99014
@@@ -745,13 -742,9 +750,14 @@@ SYM_FUNC_START_LOCAL(__secondary_switched)
        ldr     x2, [x0, #CPU_BOOT_TASK]
        cbz     x2, __secondary_too_slow
        msr     sp_el0, x2
+       scs_load x2, x3
        mov     x29, #0
        mov     x30, #0
 +
 +#ifdef CONFIG_ARM64_PTR_AUTH
 +      ptrauth_keys_init_cpu x2, x3, x4, x5
 +#endif
 +
        b       secondary_start_kernel
  SYM_FUNC_END(__secondary_switched)
  
index fccd67d07c8cd147dfebbc010ba484baa3226e18,a87a4f11724e4e06b68f5358e017ea23bbf8ba2c..1a83a7162a2d61f14faa459abd2e684b44d6fec1
@@@ -29,7 -23,9 +29,7 @@@ ldflags-y := -shared -nostdlib -soname=
  ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
  ccflags-y += -DDISABLE_BRANCH_PROFILING
  
- CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os
 -VDSO_LDFLAGS := -Bsymbolic
 -
+ CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS)
  KBUILD_CFLAGS                 += $(DISABLE_LTO)
  KASAN_SANITIZE                        := n
  UBSAN_SANITIZE                        := n
Simple merge