git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
Merge branch 'for-next/caches' into for-next/core
author Will Deacon <will@kernel.org>
Thu, 24 Jun 2021 12:33:02 +0000 (13:33 +0100)
committer Will Deacon <will@kernel.org>
Thu, 24 Jun 2021 12:33:02 +0000 (13:33 +0100)
Big cleanup of our cache maintenance routines, which were confusingly
named and inconsistent in their implementations.

* for-next/caches:
  arm64: Rename arm64-internal cache maintenance functions
  arm64: Fix cache maintenance function comments
  arm64: sync_icache_aliases to take end parameter instead of size
  arm64: __clean_dcache_area_pou to take end parameter instead of size
  arm64: __clean_dcache_area_pop to take end parameter instead of size
  arm64: __clean_dcache_area_poc to take end parameter instead of size
  arm64: __flush_dcache_area to take end parameter instead of size
  arm64: dcache_by_line_op to take end parameter instead of size
  arm64: __inval_dcache_area to take end parameter instead of size
  arm64: Fix comments to refer to correct function __flush_icache_range
  arm64: Move documentation of dcache_by_line_op
  arm64: assembler: remove user_alt
  arm64: Downgrade flush_icache_range to invalidate
  arm64: Do not enable uaccess for invalidate_icache_range
  arm64: Do not enable uaccess for flush_icache_range
  arm64: Apply errata to swsusp_arch_suspend_exit
  arm64: assembler: add conditional cache fixups
  arm64: assembler: replace `kaddr` with `addr`

19 files changed:
Makefile
arch/arm64/include/asm/assembler.h
arch/arm64/include/asm/processor.h
arch/arm64/include/asm/scs.h
arch/arm64/include/asm/sdei.h
arch/arm64/include/asm/smp.h
arch/arm64/include/asm/stacktrace.h
arch/arm64/kernel/asm-offsets.c
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/perf_callchain.c
arch/arm64/kernel/process.c
arch/arm64/kernel/ptrace.c
arch/arm64/kernel/sdei.c
arch/arm64/kernel/setup.c
arch/arm64/kernel/smp.c
arch/arm64/kernel/stacktrace.c
arch/arm64/mm/proc.S
scripts/tools-support-relr.sh

index e4468353425a65f1c609c93b76007accf5e8e0aa..e38c74d0433c247c5923bbdc3cc0e9877f7ea48d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1031,7 +1031,7 @@ LDFLAGS_vmlinux   += $(call ld-option, -X,)
 endif
 
 ifeq ($(CONFIG_RELR),y)
-LDFLAGS_vmlinux        += --pack-dyn-relocs=relr
+LDFLAGS_vmlinux        += --pack-dyn-relocs=relr --use-android-relr-tags
 endif
 
 # We never want expected sections to be placed heuristically by the
index c4cecf85dccf50954bfaa18523599b1561041e47..89faca0e740d03a306a70b49d19a91c4a3023886 100644 (file)
@@ -244,15 +244,23 @@ lr        .req    x30             // link register
         * @dst: destination register
         */
 #if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
-       .macro  this_cpu_offset, dst
+       .macro  get_this_cpu_offset, dst
        mrs     \dst, tpidr_el2
        .endm
 #else
-       .macro  this_cpu_offset, dst
+       .macro  get_this_cpu_offset, dst
 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
        mrs     \dst, tpidr_el1
 alternative_else
        mrs     \dst, tpidr_el2
+alternative_endif
+       .endm
+
+       .macro  set_this_cpu_offset, src
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+       msr     tpidr_el1, \src
+alternative_else
+       msr     tpidr_el2, \src
 alternative_endif
        .endm
 #endif
@@ -265,7 +273,7 @@ alternative_endif
        .macro adr_this_cpu, dst, sym, tmp
        adrp    \tmp, \sym
        add     \dst, \tmp, #:lo12:\sym
-       this_cpu_offset \tmp
+       get_this_cpu_offset \tmp
        add     \dst, \dst, \tmp
        .endm
 
@@ -276,7 +284,7 @@ alternative_endif
         */
        .macro ldr_this_cpu dst, sym, tmp
        adr_l   \dst, \sym
-       this_cpu_offset \tmp
+       get_this_cpu_offset \tmp
        ldr     \dst, [\dst, \tmp]
        .endm
 
@@ -761,7 +769,7 @@ alternative_endif
        cbz             \tmp, \lbl
 #endif
        adr_l           \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
-       this_cpu_offset \tmp2
+       get_this_cpu_offset     \tmp2
        ldr             w\tmp, [\tmp, \tmp2]
        cbnz            w\tmp, \lbl     // yield on pending softirq in task context
 .Lnoyield_\@:
index 9df3feeee8909b5a0794a8d15c6331d855e072f4..7a094aafec200ed8be4f57f939c958a9e17cf5e1 100644 (file)
@@ -329,13 +329,13 @@ long get_tagged_addr_ctrl(struct task_struct *task);
  * of header definitions for the use of task_stack_page.
  */
 
-#define current_top_of_stack()                                                 \
-({                                                                             \
-       struct stack_info _info;                                                \
-       BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info));   \
-       _info.high;                                                             \
+#define current_top_of_stack()                                                         \
+({                                                                                     \
+       struct stack_info _info;                                                        \
+       BUG_ON(!on_accessible_stack(current, current_stack_pointer, 1, &_info));        \
+       _info.high;                                                                     \
 })
-#define on_thread_stack()      (on_task_stack(current, current_stack_pointer, NULL))
+#define on_thread_stack()      (on_task_stack(current, current_stack_pointer, 1, NULL))
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_PROCESSOR_H */
index eaa2cd92e4c10122f27e731a77f2a1b15e656ae6..8297bccf0784577e92c4df2ebd810efc74360048 100644 (file)
@@ -9,18 +9,18 @@
 #ifdef CONFIG_SHADOW_CALL_STACK
        scs_sp  .req    x18
 
-       .macro scs_load tsk, tmp
+       .macro scs_load tsk
        ldr     scs_sp, [\tsk, #TSK_TI_SCS_SP]
        .endm
 
-       .macro scs_save tsk, tmp
+       .macro scs_save tsk
        str     scs_sp, [\tsk, #TSK_TI_SCS_SP]
        .endm
 #else
-       .macro scs_load tsk, tmp
+       .macro scs_load tsk
        .endm
 
-       .macro scs_save tsk, tmp
+       .macro scs_save tsk
        .endm
 #endif /* CONFIG_SHADOW_CALL_STACK */
 
index 63e0b92a5fbb069bc232a93e32e17a39d6df9bd1..8bc30a5c456939e28cba34e07eb8c4a670473f5c 100644 (file)
@@ -42,8 +42,9 @@ unsigned long sdei_arch_get_entry_point(int conduit);
 
 struct stack_info;
 
-bool _on_sdei_stack(unsigned long sp, struct stack_info *info);
-static inline bool on_sdei_stack(unsigned long sp,
+bool _on_sdei_stack(unsigned long sp, unsigned long size,
+                   struct stack_info *info);
+static inline bool on_sdei_stack(unsigned long sp, unsigned long size,
                                struct stack_info *info)
 {
        if (!IS_ENABLED(CONFIG_VMAP_STACK))
@@ -51,7 +52,7 @@ static inline bool on_sdei_stack(unsigned long sp,
        if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
                return false;
        if (in_nmi())
-               return _on_sdei_stack(sp, info);
+               return _on_sdei_stack(sp, size, info);
 
        return false;
 }
index 0e357757c0ccaa997cd061df3847a3f18c319d0c..fc55f5a57a06ef63e3b0321b426a6ffeeaedb549 100644 (file)
@@ -73,12 +73,10 @@ asmlinkage void secondary_start_kernel(void);
 
 /*
  * Initial data for bringing up a secondary CPU.
- * @stack  - sp for the secondary CPU
  * @status - Result passed back from the secondary CPU to
  *           indicate failure.
  */
 struct secondary_data {
-       void *stack;
        struct task_struct *task;
        long status;
 };
index 4b33ca6206793361fad25b23258c05ccfe06b024..1801399204d79fe011ec07e8653a3b7ca1bacdb3 100644 (file)
@@ -69,14 +69,14 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 
 DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
 
-static inline bool on_stack(unsigned long sp, unsigned long low,
-                               unsigned long high, enum stack_type type,
-                               struct stack_info *info)
+static inline bool on_stack(unsigned long sp, unsigned long size,
+                           unsigned long low, unsigned long high,
+                           enum stack_type type, struct stack_info *info)
 {
        if (!low)
                return false;
 
-       if (sp < low || sp >= high)
+       if (sp < low || sp + size < sp || sp + size > high)
                return false;
 
        if (info) {
@@ -87,38 +87,38 @@ static inline bool on_stack(unsigned long sp, unsigned long low,
        return true;
 }
 
-static inline bool on_irq_stack(unsigned long sp,
+static inline bool on_irq_stack(unsigned long sp, unsigned long size,
                                struct stack_info *info)
 {
        unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
        unsigned long high = low + IRQ_STACK_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_IRQ, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
 }
 
 static inline bool on_task_stack(const struct task_struct *tsk,
-                                unsigned long sp,
+                                unsigned long sp, unsigned long size,
                                 struct stack_info *info)
 {
        unsigned long low = (unsigned long)task_stack_page(tsk);
        unsigned long high = low + THREAD_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_TASK, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
 }
 
 #ifdef CONFIG_VMAP_STACK
 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 
-static inline bool on_overflow_stack(unsigned long sp,
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
                                struct stack_info *info)
 {
        unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
        unsigned long high = low + OVERFLOW_STACK_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_OVERFLOW, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
 }
 #else
-static inline bool on_overflow_stack(unsigned long sp,
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
                        struct stack_info *info) { return false; }
 #endif
 
@@ -128,21 +128,21 @@ static inline bool on_overflow_stack(unsigned long sp,
  * context.
  */
 static inline bool on_accessible_stack(const struct task_struct *tsk,
-                                      unsigned long sp,
+                                      unsigned long sp, unsigned long size,
                                       struct stack_info *info)
 {
        if (info)
                info->type = STACK_TYPE_UNKNOWN;
 
-       if (on_task_stack(tsk, sp, info))
+       if (on_task_stack(tsk, sp, size, info))
                return true;
        if (tsk != current || preemptible())
                return false;
-       if (on_irq_stack(sp, info))
+       if (on_irq_stack(sp, size, info))
                return true;
-       if (on_overflow_stack(sp, info))
+       if (on_overflow_stack(sp, size, info))
                return true;
-       if (on_sdei_stack(sp, info))
+       if (on_sdei_stack(sp, size, info))
                return true;
 
        return false;
index 0cb34ccb6e7330757118d727a230d934ce050d36..bd0fc23d8719cfb3d2f6b026ef31612e18be3d54 100644 (file)
@@ -27,6 +27,7 @@
 int main(void)
 {
   DEFINE(TSK_ACTIVE_MM,                offsetof(struct task_struct, active_mm));
+  DEFINE(TSK_CPU,              offsetof(struct task_struct, cpu));
   BLANK();
   DEFINE(TSK_TI_FLAGS,         offsetof(struct task_struct, thread_info.flags));
   DEFINE(TSK_TI_PREEMPT,       offsetof(struct task_struct, thread_info.preempt_count));
@@ -99,7 +100,6 @@ int main(void)
   DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
   DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
   BLANK();
-  DEFINE(CPU_BOOT_STACK,       offsetof(struct secondary_data, stack));
   DEFINE(CPU_BOOT_TASK,                offsetof(struct secondary_data, task));
   BLANK();
   DEFINE(FTR_OVR_VAL_OFFSET,   offsetof(struct arm64_ftr_override, val));
index 3513984a88bd1b149c9fa0113bcd4ae7e4003625..3153f1448cdb776ed831464645811bebe2c86573 100644 (file)
@@ -275,7 +275,7 @@ alternative_else_nop_endif
 
        mte_set_kernel_gcr x22, x23
 
-       scs_load tsk, x20
+       scs_load tsk
        .else
        add     x21, sp, #PT_REGS_SIZE
        get_current_task tsk
@@ -285,7 +285,7 @@ alternative_else_nop_endif
        stp     lr, x21, [sp, #S_LR]
 
        /*
-        * For exceptions from EL0, create a terminal frame record.
+        * For exceptions from EL0, create a final frame record.
         * For exceptions from EL1, create a synthetic frame record so the
         * interrupted code shows up in the backtrace.
         */
@@ -375,7 +375,7 @@ alternative_if ARM64_WORKAROUND_845719
 alternative_else_nop_endif
 #endif
 3:
-       scs_save tsk, x0
+       scs_save tsk
 
 #ifdef CONFIG_ARM64_PTR_AUTH
 alternative_if ARM64_HAS_ADDRESS_AUTH
@@ -979,8 +979,8 @@ SYM_FUNC_START(cpu_switch_to)
        mov     sp, x9
        msr     sp_el0, x1
        ptrauth_keys_install_kernel x1, x8, x9, x10
-       scs_save x0, x8
-       scs_load x1, x8
+       scs_save x0
+       scs_load x1
        ret
 SYM_FUNC_END(cpu_switch_to)
 NOKPROBE(cpu_switch_to)
index 6928cb67d3a033f5000495f2cc484530e67733de..a6ccd6557d198a12c741dc0df6dbeef4767b0b13 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/asm_pointer_auth.h>
 #include <asm/assembler.h>
 #include <asm/boot.h>
+#include <asm/bug.h>
 #include <asm/ptrace.h>
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
@@ -390,28 +391,48 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
        ret     x28
 SYM_FUNC_END(__create_page_tables)
 
+       /*
+        * Initialize CPU registers with task-specific and cpu-specific context.
+        *
+        * Create a final frame record at task_pt_regs(current)->stackframe, so
+        * that the unwinder can identify the final frame record of any task by
+        * its location in the task stack. We reserve the entire pt_regs space
+        * for consistency with user tasks and kthreads.
+        */
+       .macro  init_cpu_task tsk, tmp1, tmp2
+       msr     sp_el0, \tsk
+
+       ldr     \tmp1, [\tsk, #TSK_STACK]
+       add     sp, \tmp1, #THREAD_SIZE
+       sub     sp, sp, #PT_REGS_SIZE
+
+       stp     xzr, xzr, [sp, #S_STACKFRAME]
+       add     x29, sp, #S_STACKFRAME
+
+       scs_load \tsk
+
+       adr_l   \tmp1, __per_cpu_offset
+       ldr     w\tmp2, [\tsk, #TSK_CPU]
+       ldr     \tmp1, [\tmp1, \tmp2, lsl #3]
+       set_this_cpu_offset \tmp1
+       .endm
+
 /*
  * The following fragment of code is executed with the MMU enabled.
  *
  *   x0 = __PHYS_OFFSET
  */
 SYM_FUNC_START_LOCAL(__primary_switched)
-       adrp    x4, init_thread_union
-       add     sp, x4, #THREAD_SIZE
-       adr_l   x5, init_task
-       msr     sp_el0, x5                      // Save thread_info
+       adr_l   x4, init_task
+       init_cpu_task x4, x5, x6
 
        adr_l   x8, vectors                     // load VBAR_EL1 with virtual
        msr     vbar_el1, x8                    // vector table address
        isb
 
-       stp     xzr, x30, [sp, #-16]!
+       stp     x29, x30, [sp, #-16]!
        mov     x29, sp
 
-#ifdef CONFIG_SHADOW_CALL_STACK
-       adr_l   scs_sp, init_shadow_call_stack  // Set shadow call stack
-#endif
-
        str_l   x21, __fdt_pointer, x5          // Save FDT pointer
 
        ldr_l   x4, kimage_vaddr                // Save the offset between
@@ -443,10 +464,9 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 0:
 #endif
        bl      switch_to_vhe                   // Prefer VHE if possible
-       add     sp, sp, #16
-       mov     x29, #0
-       mov     x30, #0
-       b       start_kernel
+       ldp     x29, x30, [sp], #16
+       bl      start_kernel
+       ASM_BUG()
 SYM_FUNC_END(__primary_switched)
 
        .pushsection ".rodata", "a"
@@ -629,21 +649,17 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
        isb
 
        adr_l   x0, secondary_data
-       ldr     x1, [x0, #CPU_BOOT_STACK]       // get secondary_data.stack
-       cbz     x1, __secondary_too_slow
-       mov     sp, x1
        ldr     x2, [x0, #CPU_BOOT_TASK]
        cbz     x2, __secondary_too_slow
-       msr     sp_el0, x2
-       scs_load x2, x3
-       mov     x29, #0
-       mov     x30, #0
+
+       init_cpu_task x2, x1, x3
 
 #ifdef CONFIG_ARM64_PTR_AUTH
        ptrauth_keys_init_cpu x2, x3, x4, x5
 #endif
 
-       b       secondary_start_kernel
+       bl      secondary_start_kernel
+       ASM_BUG()
 SYM_FUNC_END(__secondary_switched)
 
 SYM_FUNC_START_LOCAL(__secondary_too_slow)
index 88ff471b0bce5f2c49cb49cd38a3603fc952b2aa..4a72c2727309785bb599572da0cf3a511e10e227 100644 (file)
@@ -116,7 +116,7 @@ void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
                tail = (struct frame_tail __user *)regs->regs[29];
 
                while (entry->nr < entry->max_stack &&
-                      tail && !((unsigned long)tail & 0xf))
+                      tail && !((unsigned long)tail & 0x7))
                        tail = user_backtrace(tail, entry);
        } else {
 #ifdef CONFIG_COMPAT
index b4bb67f17a2cab967bd4187d46fd9b982496feca..8928fba54e4bd8ba253225fe63d9359db6078549 100644 (file)
@@ -435,6 +435,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
        }
        p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
        p->thread.cpu_context.sp = (unsigned long)childregs;
+       /*
+        * For the benefit of the unwinder, set up childregs->stackframe
+        * as the final frame for the new task.
+        */
+       p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;
 
        ptrace_hw_copy_thread(p);
 
index eb2f73939b7bbbaf7feb4a5cb98dd127ad3c7a9b..499b6b2f9757f9ad198c655de743227f611c2562 100644 (file)
@@ -122,7 +122,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
 {
        return ((addr & ~(THREAD_SIZE - 1))  ==
                (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
-               on_irq_stack(addr, NULL);
+               on_irq_stack(addr, sizeof(unsigned long), NULL);
 }
 
 /**
index 2c7ca449dd5111206fa8559065fd95ce2858dcf9..c524f96f97c491e2aeaf9d172db350ee5d72b686 100644 (file)
@@ -162,31 +162,33 @@ static int init_sdei_scs(void)
        return err;
 }
 
-static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
+static bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
+                                struct stack_info *info)
 {
        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
        unsigned long high = low + SDEI_STACK_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info);
 }
 
-static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
+static bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
+                                  struct stack_info *info)
 {
        unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
        unsigned long high = low + SDEI_STACK_SIZE;
 
-       return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info);
+       return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info);
 }
 
-bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
+bool _on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info)
 {
        if (!IS_ENABLED(CONFIG_VMAP_STACK))
                return false;
 
-       if (on_sdei_critical_stack(sp, info))
+       if (on_sdei_critical_stack(sp, size, info))
                return true;
 
-       if (on_sdei_normal_stack(sp, info))
+       if (on_sdei_normal_stack(sp, size, info))
                return true;
 
        return false;
index 61845c0821d9dc8533bb0eca1768da7b33d85d65..b7a35a03e9b902b691aca62d8fe9ede5c99b69c6 100644 (file)
@@ -87,12 +87,6 @@ void __init smp_setup_processor_id(void)
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        set_cpu_logical_map(0, mpidr);
 
-       /*
-        * clear __my_cpu_offset on boot CPU to avoid hang caused by
-        * using percpu variable early, for example, lockdep will
-        * access percpu variable inside lock_release
-        */
-       set_my_cpu_offset(0);
        pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
                (unsigned long)mpidr, read_cpuid_id());
 }
index 9b4c1118194dac2d966e6f454098c3b7bd98b48c..2fe8fab886e2bfc38b65da442996f485823f200f 100644 (file)
@@ -120,11 +120,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
         * page tables.
         */
        secondary_data.task = idle;
-       secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
        update_cpu_boot_status(CPU_MMU_OFF);
-       dcache_clean_inval_poc((unsigned long)&secondary_data,
-                           (unsigned long)&secondary_data +
-                                   sizeof(secondary_data));
 
        /* Now bring the CPU into our world */
        ret = boot_secondary(cpu, idle);
@@ -144,10 +140,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 
        pr_crit("CPU%u: failed to come online\n", cpu);
        secondary_data.task = NULL;
-       secondary_data.stack = NULL;
-       dcache_clean_inval_poc((unsigned long)&secondary_data,
-                           (unsigned long)&secondary_data +
-                                   sizeof(secondary_data));
        status = READ_ONCE(secondary_data.status);
        if (status == CPU_MMU_OFF)
                status = READ_ONCE(__early_cpu_boot_status);
@@ -206,10 +198,7 @@ asmlinkage notrace void secondary_start_kernel(void)
        u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        struct mm_struct *mm = &init_mm;
        const struct cpu_operations *ops;
-       unsigned int cpu;
-
-       cpu = task_cpu(current);
-       set_my_cpu_offset(per_cpu_offset(cpu));
+       unsigned int cpu = smp_processor_id();
 
        /*
         * All kernel threads share the same mm context; grab a
@@ -456,6 +445,11 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void __init smp_prepare_boot_cpu(void)
 {
+       /*
+        * The runtime per-cpu areas have been allocated by
+        * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
+        * freed shortly, so we must move over to the runtime per-cpu area.
+        */
        set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
        cpuinfo_store_boot_cpu();
 
index de07147a79260454ccac0880ac9706f8b1e4531d..b189de5ca6cbc3dcdcc5c6020e585f1cc86c6f98 100644 (file)
@@ -68,13 +68,17 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
        unsigned long fp = frame->fp;
        struct stack_info info;
 
-       if (fp & 0xf)
-               return -EINVAL;
-
        if (!tsk)
                tsk = current;
 
-       if (!on_accessible_stack(tsk, fp, &info))
+       /* Final frame; nothing to unwind */
+       if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
+               return -ENOENT;
+
+       if (fp & 0x7)
+               return -EINVAL;
+
+       if (!on_accessible_stack(tsk, fp, 16, &info))
                return -EINVAL;
 
        if (test_bit(info.type, frame->stacks_done))
@@ -128,12 +132,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 
        frame->pc = ptrauth_strip_insn_pac(frame->pc);
 
-       /*
-        * This is a terminal record, so we have finished unwinding.
-        */
-       if (!frame->fp && !frame->pc)
-               return -ENOENT;
-
        return 0;
 }
 NOKPROBE_SYMBOL(unwind_frame);
index 97d7bcd8d4f26c5001c14bc346c5ce88059f2a0a..bc555cd5e6b1e9172d3423193d872d41803b313f 100644 (file)
@@ -83,11 +83,7 @@ SYM_FUNC_START(cpu_do_suspend)
        mrs     x9, mdscr_el1
        mrs     x10, oslsr_el1
        mrs     x11, sctlr_el1
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-       mrs     x12, tpidr_el1
-alternative_else
-       mrs     x12, tpidr_el2
-alternative_endif
+       get_this_cpu_offset x12
        mrs     x13, sp_el0
        stp     x2, x3, [x0]
        stp     x4, x5, [x0, #16]
@@ -145,11 +141,7 @@ SYM_FUNC_START(cpu_do_resume)
        msr     mdscr_el1, x10
 
        msr     sctlr_el1, x12
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
-       msr     tpidr_el1, x13
-alternative_else
-       msr     tpidr_el2, x13
-alternative_endif
+       set_this_cpu_offset x13
        msr     sp_el0, x14
        /*
         * Restore oslsr_el1 by writing oslar_el1
index 45e8aa360b45745e7134a24c32e0c4343a6c3303..cb55878bd5b815a031d883de9a8600feef8ab39d 100755 (executable)
@@ -7,7 +7,8 @@ trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT
 cat << "END" | $CC -c -x c - -o $tmp_file.o >/dev/null 2>&1
 void *p = &p;
 END
-$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
+$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr \
+  --use-android-relr-tags -o $tmp_file
 
 # Despite printing an error message, GNU nm still exits with exit code 0 if it
 # sees a relr section. So we need to check that nothing is printed to stderr.