endif
ifeq ($(CONFIG_RELR),y)
-LDFLAGS_vmlinux += --pack-dyn-relocs=relr
+LDFLAGS_vmlinux += --pack-dyn-relocs=relr --use-android-relr-tags
endif
# We never want expected sections to be placed heuristically by the
# linker. All sections should be explicitly named in the linker script.
* @dst: destination register
*/
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
- .macro this_cpu_offset, dst
+ .macro get_this_cpu_offset, dst
mrs \dst, tpidr_el2
.endm
#else
- .macro this_cpu_offset, dst
+ .macro get_this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs \dst, tpidr_el1
alternative_else
mrs \dst, tpidr_el2
+alternative_endif
+ .endm
+
+ .macro set_this_cpu_offset, src
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
+ msr tpidr_el1, \src
+alternative_else
+ msr tpidr_el2, \src
alternative_endif
.endm
#endif
.macro adr_this_cpu, dst, sym, tmp
adrp \tmp, \sym
add \dst, \tmp, #:lo12:\sym
- this_cpu_offset \tmp
+ get_this_cpu_offset \tmp
add \dst, \dst, \tmp
.endm
*/
.macro ldr_this_cpu dst, sym, tmp
adr_l \dst, \sym
- this_cpu_offset \tmp
+ get_this_cpu_offset \tmp
ldr \dst, [\dst, \tmp]
.endm
cbz \tmp, \lbl
#endif
adr_l \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
- this_cpu_offset \tmp2
+ get_this_cpu_offset \tmp2
ldr w\tmp, [\tmp, \tmp2]
cbnz w\tmp, \lbl // yield on pending softirq in task context
.Lnoyield_\@:
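
For reference, the per-cpu accessors above reduce to a simple address
computation: the link-time address of the symbol plus the per-cpu offset
cached in TPIDR_EL1 (TPIDR_EL2 under VHE and in the KVM hypervisor). A
minimal C model of what adr_this_cpu/ldr_this_cpu compute (read_tpidr()
and the *_model names are invented for illustration, not kernel API):

	#include <stdint.h>

	extern uintptr_t read_tpidr(void);	/* models "mrs \dst, tpidr_elx" */

	static inline void *adr_this_cpu_model(void *sym)
	{
		/* adrp/add materialise &sym; TPIDR supplies this CPU's delta */
		return (char *)sym + read_tpidr();
	}

	static inline unsigned long ldr_this_cpu_model(unsigned long *sym)
	{
		/* ldr_this_cpu additionally loads from the per-cpu slot */
		return *(unsigned long *)adr_this_cpu_model(sym);
	}
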
 * These need to be macros because otherwise we get stuck in a nightmare
 * of header definitions for the use of task_stack_page.
*/
-#define current_top_of_stack() \
-({ \
- struct stack_info _info; \
- BUG_ON(!on_accessible_stack(current, current_stack_pointer, &_info)); \
- _info.high; \
+#define current_top_of_stack() \
+({ \
+ struct stack_info _info; \
+ BUG_ON(!on_accessible_stack(current, current_stack_pointer, 1, &_info)); \
+ _info.high; \
})
-#define on_thread_stack() (on_task_stack(current, current_stack_pointer, NULL))
+#define on_thread_stack() (on_task_stack(current, current_stack_pointer, 1, NULL))
#endif /* __ASSEMBLY__ */
#endif /* __ASM_PROCESSOR_H */
#ifdef CONFIG_SHADOW_CALL_STACK
scs_sp .req x18
- .macro scs_load tsk, tmp
+ .macro scs_load tsk
ldr scs_sp, [\tsk, #TSK_TI_SCS_SP]
.endm
- .macro scs_save tsk, tmp
+ .macro scs_save tsk
str scs_sp, [\tsk, #TSK_TI_SCS_SP]
.endm
#else
- .macro scs_load tsk, tmp
+ .macro scs_load tsk
.endm
- .macro scs_save tsk, tmp
+ .macro scs_save tsk
.endm
#endif /* CONFIG_SHADOW_CALL_STACK */
struct stack_info;
-bool _on_sdei_stack(unsigned long sp, struct stack_info *info);
-static inline bool on_sdei_stack(unsigned long sp,
+bool _on_sdei_stack(unsigned long sp, unsigned long size,
+ struct stack_info *info);
+static inline bool on_sdei_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
if (!IS_ENABLED(CONFIG_VMAP_STACK))
if (!IS_ENABLED(CONFIG_ARM_SDE_INTERFACE))
return false;
if (in_nmi())
- return _on_sdei_stack(sp, info);
+ return _on_sdei_stack(sp, size, info);
return false;
}
/*
* Initial data for bringing up a secondary CPU.
- * @stack - sp for the secondary CPU
* @status - Result passed back from the secondary CPU to
* indicate failure.
*/
struct secondary_data {
- void *stack;
struct task_struct *task;
long status;
};
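
With the stack field gone, the secondary CPU's initial stack pointer is
derived entirely from the task. A sketch in C of the value the new
init_cpu_task macro (see the head.S hunk below) computes, assuming the
kernel's THREAD_SIZE/PT_REGS_SIZE and task_stack_page() semantics:

	static unsigned long initial_sp(struct task_struct *tsk)
	{
		unsigned long stack = (unsigned long)task_stack_page(tsk);

		/* top of the task stack, minus the reserved pt_regs frame */
		return stack + THREAD_SIZE - PT_REGS_SIZE;
	}
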
DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
-static inline bool on_stack(unsigned long sp, unsigned long low,
- unsigned long high, enum stack_type type,
- struct stack_info *info)
+static inline bool on_stack(unsigned long sp, unsigned long size,
+ unsigned long low, unsigned long high,
+ enum stack_type type, struct stack_info *info)
{
if (!low)
return false;
- if (sp < low || sp >= high)
+ if (sp < low || sp + size < sp || sp + size > high)
return false;
if (info) {
return true;
}
-static inline bool on_irq_stack(unsigned long sp,
+static inline bool on_irq_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
unsigned long high = low + IRQ_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_IRQ, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
}
static inline bool on_task_stack(const struct task_struct *tsk,
- unsigned long sp,
+ unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)task_stack_page(tsk);
unsigned long high = low + THREAD_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_TASK, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
}
#ifdef CONFIG_VMAP_STACK
DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
-static inline bool on_overflow_stack(unsigned long sp,
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
unsigned long high = low + OVERFLOW_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_OVERFLOW, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
}
#else
-static inline bool on_overflow_stack(unsigned long sp,
+static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
struct stack_info *info) { return false; }
#endif
 * We can only safely access per-cpu stacks from current in a non-preemptible
 * context.
*/
static inline bool on_accessible_stack(const struct task_struct *tsk,
- unsigned long sp,
+ unsigned long sp, unsigned long size,
struct stack_info *info)
{
if (info)
info->type = STACK_TYPE_UNKNOWN;
- if (on_task_stack(tsk, sp, info))
+ if (on_task_stack(tsk, sp, size, info))
return true;
if (tsk != current || preemptible())
return false;
- if (on_irq_stack(sp, info))
+ if (on_irq_stack(sp, size, info))
return true;
- if (on_overflow_stack(sp, info))
+ if (on_overflow_stack(sp, size, info))
return true;
- if (on_sdei_stack(sp, info))
+ if (on_sdei_stack(sp, size, info))
return true;
return false;
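
The reworked on_stack() takes a size so callers can validate a whole
object rather than a single address; the middle test rejects ranges that
wrap around the top of the address space. A standalone sketch showing why
the "sp + size < sp" check is needed (not kernel code):

	#include <stdbool.h>
	#include <limits.h>

	static bool range_on_stack(unsigned long sp, unsigned long size,
				   unsigned long low, unsigned long high)
	{
		/* "sp + size" wraps for sp near ULONG_MAX; catch it explicitly */
		if (sp < low || sp + size < sp || sp + size > high)
			return false;
		return true;
	}

	/*
	 * e.g. range_on_stack(ULONG_MAX - 8, 32, low, high) must fail: sp + size
	 * wraps to 23, so without the middle test the "> high" check could pass.
	 */
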
int main(void)
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+ DEFINE(TSK_CPU, offsetof(struct task_struct, cpu));
BLANK();
DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
BLANK();
- DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
BLANK();
DEFINE(FTR_OVR_VAL_OFFSET, offsetof(struct arm64_ftr_override, val));
mte_set_kernel_gcr x22, x23
- scs_load tsk, x20
+ scs_load tsk
.else
add x21, sp, #PT_REGS_SIZE
get_current_task tsk
stp lr, x21, [sp, #S_LR]
/*
- * For exceptions from EL0, create a terminal frame record.
+ * For exceptions from EL0, create a final frame record.
* For exceptions from EL1, create a synthetic frame record so the
* interrupted code shows up in the backtrace.
*/
alternative_else_nop_endif
#endif
3:
- scs_save tsk, x0
+ scs_save tsk
#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
mov sp, x9
msr sp_el0, x1
ptrauth_keys_install_kernel x1, x8, x9, x10
- scs_save x0, x8
- scs_load x1, x8
+ scs_save x0
+ scs_load x1
ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
#include <asm/asm_pointer_auth.h>
#include <asm/assembler.h>
#include <asm/boot.h>
+#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
ret x28
SYM_FUNC_END(__create_page_tables)
+ /*
+ * Initialize CPU registers with task-specific and cpu-specific context.
+ *
+ * Create a final frame record at task_pt_regs(current)->stackframe, so
+ * that the unwinder can identify the final frame record of any task by
+ * its location in the task stack. We reserve the entire pt_regs space
+ * for consistency with user tasks and kthreads.
+ */
+ .macro init_cpu_task tsk, tmp1, tmp2
+ msr sp_el0, \tsk
+
+ ldr \tmp1, [\tsk, #TSK_STACK]
+ add sp, \tmp1, #THREAD_SIZE
+ sub sp, sp, #PT_REGS_SIZE
+
+ stp xzr, xzr, [sp, #S_STACKFRAME]
+ add x29, sp, #S_STACKFRAME
+
+ scs_load \tsk
+
+ adr_l \tmp1, __per_cpu_offset
+ ldr w\tmp2, [\tsk, #TSK_CPU]
+ ldr \tmp1, [\tmp1, \tmp2, lsl #3]
+ set_this_cpu_offset \tmp1
+ .endm
+
/*
* The following fragment of code is executed with the MMU enabled.
*
* x0 = __PHYS_OFFSET
*/
SYM_FUNC_START_LOCAL(__primary_switched)
- adrp x4, init_thread_union
- add sp, x4, #THREAD_SIZE
- adr_l x5, init_task
- msr sp_el0, x5 // Save thread_info
+ adr_l x4, init_task
+ init_cpu_task x4, x5, x6
adr_l x8, vectors // load VBAR_EL1 with virtual
msr vbar_el1, x8 // vector table address
isb
- stp xzr, x30, [sp, #-16]!
+ stp x29, x30, [sp, #-16]!
mov x29, sp
-#ifdef CONFIG_SHADOW_CALL_STACK
- adr_l scs_sp, init_shadow_call_stack // Set shadow call stack
-#endif
-
str_l x21, __fdt_pointer, x5 // Save FDT pointer
ldr_l x4, kimage_vaddr // Save the offset between
0:
#endif
bl switch_to_vhe // Prefer VHE if possible
- add sp, sp, #16
- mov x29, #0
- mov x30, #0
- b start_kernel
+ ldp x29, x30, [sp], #16
+ bl start_kernel
+ ASM_BUG()
SYM_FUNC_END(__primary_switched)
.pushsection ".rodata", "a"
isb
adr_l x0, secondary_data
- ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
- cbz x1, __secondary_too_slow
- mov sp, x1
ldr x2, [x0, #CPU_BOOT_TASK]
cbz x2, __secondary_too_slow
- msr sp_el0, x2
- scs_load x2, x3
- mov x29, #0
- mov x30, #0
+
+ init_cpu_task x2, x1, x3
#ifdef CONFIG_ARM64_PTR_AUTH
ptrauth_keys_init_cpu x2, x3, x4, x5
#endif
- b secondary_start_kernel
+ bl secondary_start_kernel
+ ASM_BUG()
SYM_FUNC_END(__secondary_switched)
SYM_FUNC_START_LOCAL(__secondary_too_slow)
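
In C terms, the final frame record that init_cpu_task plants is a zeroed
{fp, lr} pair at task_pt_regs(tsk)->stackframe, with x29 left pointing at
it so the unwinder can recognise the end of the stack by address alone. A
sketch (hypothetical helper, not a kernel function):

	static void make_final_frame(struct task_struct *tsk)
	{
		struct pt_regs *regs = task_pt_regs(tsk);

		regs->stackframe[0] = 0;	/* saved fp: end of chain */
		regs->stackframe[1] = 0;	/* saved lr: end of chain */
		/* the asm then sets x29 = &regs->stackframe[0] */
	}
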
tail = (struct frame_tail __user *)regs->regs[29];
while (entry->nr < entry->max_stack &&
- tail && !((unsigned long)tail & 0xf))
+ tail && !((unsigned long)tail & 0x7))
tail = user_backtrace(tail, entry);
} else {
#ifdef CONFIG_COMPAT
}
p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
p->thread.cpu_context.sp = (unsigned long)childregs;
+ /*
+ * For the benefit of the unwinder, set up childregs->stackframe
+ * as the final frame for the new task.
+ */
+ p->thread.cpu_context.fp = (unsigned long)childregs->stackframe;
ptrace_hw_copy_thread(p);
{
return ((addr & ~(THREAD_SIZE - 1)) ==
(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
- on_irq_stack(addr, NULL);
+ on_irq_stack(addr, sizeof(unsigned long), NULL);
}
/**
return err;
}
-static bool on_sdei_normal_stack(unsigned long sp, struct stack_info *info)
+static bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_SDEI_NORMAL, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info);
}
-static bool on_sdei_critical_stack(unsigned long sp, struct stack_info *info)
+static bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
+ struct stack_info *info)
{
unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
unsigned long high = low + SDEI_STACK_SIZE;
- return on_stack(sp, low, high, STACK_TYPE_SDEI_CRITICAL, info);
+ return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info);
}
-bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
+bool _on_sdei_stack(unsigned long sp, unsigned long size, struct stack_info *info)
{
if (!IS_ENABLED(CONFIG_VMAP_STACK))
return false;
- if (on_sdei_critical_stack(sp, info))
+ if (on_sdei_critical_stack(sp, size, info))
return true;
- if (on_sdei_normal_stack(sp, info))
+ if (on_sdei_normal_stack(sp, size, info))
return true;
return false;
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
set_cpu_logical_map(0, mpidr);
- /*
- * clear __my_cpu_offset on boot CPU to avoid hang caused by
- * using percpu variable early, for example, lockdep will
- * access percpu variable inside lock_release
- */
- set_my_cpu_offset(0);
pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
(unsigned long)mpidr, read_cpuid_id());
}
* page tables.
*/
secondary_data.task = idle;
- secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
update_cpu_boot_status(CPU_MMU_OFF);
- dcache_clean_inval_poc((unsigned long)&secondary_data,
- (unsigned long)&secondary_data +
- sizeof(secondary_data));
/* Now bring the CPU into our world */
ret = boot_secondary(cpu, idle);
pr_crit("CPU%u: failed to come online\n", cpu);
secondary_data.task = NULL;
- secondary_data.stack = NULL;
- dcache_clean_inval_poc((unsigned long)&secondary_data,
- (unsigned long)&secondary_data +
- sizeof(secondary_data));
status = READ_ONCE(secondary_data.status);
if (status == CPU_MMU_OFF)
status = READ_ONCE(__early_cpu_boot_status);
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
struct mm_struct *mm = &init_mm;
const struct cpu_operations *ops;
- unsigned int cpu;
-
- cpu = task_cpu(current);
- set_my_cpu_offset(per_cpu_offset(cpu));
+ unsigned int cpu = smp_processor_id();
/*
* All kernel threads share the same mm context; grab a
void __init smp_prepare_boot_cpu(void)
{
+ /*
+ * The runtime per-cpu areas have been allocated by
+ * setup_per_cpu_areas(), and CPU0's boot time per-cpu area will be
+ * freed shortly, so we must move over to the runtime per-cpu area.
+ */
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
cpuinfo_store_boot_cpu();
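
The relationship the boot code is (re)establishing, assuming the generic
percpu layout: CPU n's copy of a per-cpu variable lives at the variable's
link address plus __per_cpu_offset[n], and arm64 caches that offset in
TPIDR_EL1/EL2 via set_my_cpu_offset(). A sketch with a hypothetical
variable (real code would go through per_cpu_ptr()):

	DEFINE_PER_CPU(int, demo_counter);	/* hypothetical per-cpu variable */

	static int *demo_counter_ptr(int cpu)
	{
		/* what per_cpu_ptr() boils down to on arm64 */
		return (int *)((char *)&demo_counter + __per_cpu_offset[cpu]);
	}
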
unsigned long fp = frame->fp;
struct stack_info info;
- if (fp & 0xf)
- return -EINVAL;
-
if (!tsk)
tsk = current;
- if (!on_accessible_stack(tsk, fp, &info))
+ /* Final frame; nothing to unwind */
+ if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
+ return -ENOENT;
+
+ if (fp & 0x7)
+ return -EINVAL;
+
+ if (!on_accessible_stack(tsk, fp, 16, &info))
return -EINVAL;
if (test_bit(info.type, frame->stacks_done))
frame->pc = ptrauth_strip_insn_pac(frame->pc);
- /*
- * This is a terminal record, so we have finished unwinding.
- */
- if (!frame->fp && !frame->pc)
- return -ENOENT;
-
return 0;
}
NOKPROBE_SYMBOL(unwind_frame);
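
With the terminal-record check moved to the top of unwind_frame(), a
walker's loop contract is: 0 means another frame follows, -ENOENT means a
clean stop at the final record, and -EINVAL means a bad frame. A
hypothetical caller (not kernel code) illustrating that contract:

	static void walk_stack(struct task_struct *tsk, struct stackframe *frame)
	{
		while (unwind_frame(tsk, frame) == 0)
			pr_info("pc: %pS\n", (void *)frame->pc);

		/* -ENOENT: hit the final frame; -EINVAL: corrupt/misaligned fp */
	}
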
mrs x9, mdscr_el1
mrs x10, oslsr_el1
mrs x11, sctlr_el1
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- mrs x12, tpidr_el1
-alternative_else
- mrs x12, tpidr_el2
-alternative_endif
+ get_this_cpu_offset x12
mrs x13, sp_el0
stp x2, x3, [x0]
stp x4, x5, [x0, #16]
msr mdscr_el1, x10
msr sctlr_el1, x12
-alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
- msr tpidr_el1, x13
-alternative_else
- msr tpidr_el2, x13
-alternative_endif
+ set_this_cpu_offset x13
msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1
cat << "END" | $CC -c -x c - -o $tmp_file.o >/dev/null 2>&1
void *p = &p;
END
-$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file
+$LD $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr \
+ --use-android-relr-tags -o $tmp_file
# Despite printing an error message, GNU nm still exits with exit code 0 if it
# sees a relr section. So we need to check that nothing is printed to stderr.
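
For background, --pack-dyn-relocs=relr packs relative relocations into the
compact RELR bitmap encoding, and --use-android-relr-tags has LLD emit
them with the SHT_ANDROID_RELR/DT_ANDROID_RELR* identifiers rather than
the standard values, since tooling support for the standard tags was still
incomplete at the time. A sketch of how a consumer decodes 64-bit RELR
entries (standard encoding; not kernel code): an even entry is an address
needing a relative fixup, an odd entry is a bitmap marking up to 63
further words after the last address.

	#include <stdint.h>
	#include <stddef.h>

	static void apply_relr(const uint64_t *relr, size_t count, uint64_t bias)
	{
		uint64_t *where = NULL;

		for (size_t i = 0; i < count; i++) {
			uint64_t entry = relr[i];

			if (!(entry & 1)) {
				/* even entry: relocate this address directly */
				where = (uint64_t *)(entry + bias);
				*where++ += bias;
			} else {
				/* odd entry: bitmap for the next 63 words */
				for (int bit = 0; (entry >>= 1) != 0; bit++)
					if (entry & 1)
						where[bit] += bias;
				where += 63;
			}
		}
	}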