// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/process.c
 *
 *  Copyright (C) 1996-2000 Russell King - Converted to ARM.
 *  Original Copyright (C) 1995  Linus Torvalds
 */
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/leds.h>

#include <asm/processor.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/mach/time.h>
#include <asm/tls.h>
#include <asm/vdso.h>

#include "signal.h"
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
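
/*
 * Human-readable names for the CPSR mode field (bits [4:0]), indexed
 * by processor_mode(): entries 0-15 cover the legacy 26-bit modes,
 * entries 16-31 the 32-bit modes.
 */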
static const char *processor_modes[] __maybe_unused = {
  "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
  "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
  "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "MON_32" , "ABT_32" ,
  "UK8_32" , "UK9_32" , "HYP_32" , "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};

static const char *isa_modes[] __maybe_unused = {
  "ARM" , "Thumb" , "Jazelle", "ThumbEE"
};
/*
 * This is our default idle handler.
 */

void (*arm_pm_idle)(void);
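
/*
 * A platform may point arm_pm_idle at its own low-power routine;
 * otherwise arch_cpu_idle() falls back to cpu_do_idle().
 */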
/*
 * Called from the core idle loop.
 */

void arch_cpu_idle(void)
{
        if (arm_pm_idle)
                arm_pm_idle();
        else
                cpu_do_idle();
        raw_local_irq_enable();
}
void arch_cpu_idle_prepare(void)
{
        local_fiq_enable();
}
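
/*
 * The enter/exit hooks bracket each idle period: they drive the CPU
 * LED trigger and, on PL310 parts affected by erratum 769419, drain
 * the write buffer before the CPU may enter WFI.
 */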
void arch_cpu_idle_enter(void)
{
        ledtrig_cpu(CPU_LED_IDLE_START);
#ifdef CONFIG_PL310_ERRATA_769419
        wmb();
#endif
}
void arch_cpu_idle_exit(void)
{
        ledtrig_cpu(CPU_LED_IDLE_END);
}
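
/*
 * For each of r0-r12, ask the allocator (via mem_dump_obj()) to
 * describe whatever object the register happens to point at; this
 * helps attribute an oops to a specific slab or vmalloc allocation.
 */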
void __show_regs_alloc_free(struct pt_regs *regs)
{
        int i;

        /* check for r0 - r12 only */
        for (i = 0; i < 13; i++) {
                pr_alert("Register r%d information:", i);
                mem_dump_obj((void *)regs->uregs[i]);
        }
}
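
/*
 * Print the full register file, decoding the CPSR condition flags,
 * IRQ/FIQ masks, processor mode and ISA state into the oops banner.
 */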
void __show_regs(struct pt_regs *regs)
{
        unsigned long flags;
        char buf[64];
#ifndef CONFIG_CPU_V7M
        unsigned int domain;
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
        /*
         * Get the domain register for the parent context. In user
         * mode, we don't save the DACR, so let's use what it should
         * be. For other modes, we place it after the pt_regs struct.
         */
        if (user_mode(regs)) {
                domain = DACR_UACCESS_ENABLE;
        } else {
                domain = to_svc_pt_regs(regs)->dacr;
        }
#else
        domain = get_domain();
#endif
#endif

        show_regs_print_info(KERN_DEFAULT);
128 printk("PC is at %pS\n", (void *)instruction_pointer(regs
));
129 printk("LR is at %pS\n", (void *)regs
->ARM_lr
);
130 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n",
131 regs
->ARM_pc
, regs
->ARM_lr
, regs
->ARM_cpsr
);
132 printk("sp : %08lx ip : %08lx fp : %08lx\n",
133 regs
->ARM_sp
, regs
->ARM_ip
, regs
->ARM_fp
);
134 printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
135 regs
->ARM_r10
, regs
->ARM_r9
,
137 printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
138 regs
->ARM_r7
, regs
->ARM_r6
,
139 regs
->ARM_r5
, regs
->ARM_r4
);
140 printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
141 regs
->ARM_r3
, regs
->ARM_r2
,
142 regs
->ARM_r1
, regs
->ARM_r0
);

        flags = regs->ARM_cpsr;
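        /* Condition flags: an upper-case letter means the flag is set. */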
        buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
        buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
        buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
        buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
        buf[4] = '\0';

#ifndef CONFIG_CPU_V7M
        {
                const char *segment;

                if ((domain & domain_mask(DOMAIN_USER)) ==
                    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
                        segment = "none";
                else
                        segment = "user";

                printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
                        buf, interrupts_enabled(regs) ? "n" : "ff",
                        fast_interrupts_enabled(regs) ? "n" : "ff",
                        processor_modes[processor_mode(regs)],
                        isa_modes[isa_mode(regs)], segment);
        }
#else
168 printk("xPSR: %08lx\n", regs
->ARM_cpsr
);

#ifdef CONFIG_CPU_CP15
        {
                unsigned int ctrl;

                buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
                {
                        unsigned int transbase;
                        asm("mrc p15, 0, %0, c2, c0\n\t"
                            : "=r" (transbase));
                        snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
                                 transbase, domain);
                }
#endif
                asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));

                printk("Control: %08x%s\n", ctrl, buf);
        }
#endif
}
void show_regs(struct pt_regs * regs)
{
        __show_regs(regs);
        dump_stack();
}
ATOMIC_NOTIFIER_HEAD(thread_notify_head);

EXPORT_SYMBOL_GPL(thread_notify_head);
/*
 * Free current thread data structures etc.
 */
void exit_thread(struct task_struct *tsk)
{
        thread_notify(THREAD_NOTIFY_EXIT, task_thread_info(tsk));
}
void flush_thread(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = current;

        flush_ptrace_hw_breakpoint(tsk);

        memset(thread->used_cp, 0, sizeof(thread->used_cp));
        memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
        memset(&thread->fpstate, 0, sizeof(union fp_state));

        flush_tls();

        thread_notify(THREAD_NOTIFY_FLUSH, thread);
}
void release_thread(struct task_struct *dead_task)
{
}

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
                unsigned long stk_sz, struct task_struct *p, unsigned long tls)
{
        struct thread_info *thread = task_thread_info(p);
        struct pt_regs *childregs = task_pt_regs(p);

        memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));

#ifdef CONFIG_CPU_USE_DOMAINS
        /*
         * Copy the initial value of the domain access control register
         * from the current thread: thread->addr_limit will have been
         * copied from the current thread via setup_thread_stack() in
         * kernel/fork.c
         */
        thread->cpu_domain = get_domain();
#endif

        if (likely(!(p->flags & (PF_KTHREAD | PF_IO_WORKER)))) {
                *childregs = *current_pt_regs();
                childregs->ARM_r0 = 0;
                if (stack_start)
                        childregs->ARM_sp = stack_start;
        } else {
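                /*
                 * Kernel or IO-worker thread: ret_from_fork will call
                 * the function passed in stack_start (stashed in r5)
                 * with stk_sz (r4) as its argument, in SVC mode.
                 */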
                memset(childregs, 0, sizeof(struct pt_regs));
                thread->cpu_context.r4 = stk_sz;
                thread->cpu_context.r5 = stack_start;
                childregs->ARM_cpsr = SVC_MODE;
        }
        thread->cpu_context.pc = (unsigned long)ret_from_fork;
        thread->cpu_context.sp = (unsigned long)childregs;

        clear_ptrace_hw_breakpoint(p);

        if (clone_flags & CLONE_SETTLS)
                thread->tp_value[0] = tls;
        thread->tp_value[1] = get_tpuser();

        thread_notify(THREAD_NOTIFY_COPY, thread);

#ifdef CONFIG_STACKPROTECTOR_PER_TASK
        thread->stack_canary = p->stack_canary;
#endif

        return 0;
}
unsigned long get_wchan(struct task_struct *p)
{
        struct stackframe frame;
        unsigned long stack_page;
        int count = 0;
        if (!p || p == current || task_is_running(p))
                return 0;

        frame.fp = thread_saved_fp(p);
        frame.sp = thread_saved_sp(p);
        frame.lr = 0;                   /* recovered from the stack */
        frame.pc = thread_saved_pc(p);
        stack_page = (unsigned long)task_stack_page(p);
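        /*
         * Unwind at most 16 frames, bailing out if the saved sp ever
         * leaves this task's stack; the first PC found outside the
         * scheduler is reported as the wait channel.
         */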
        do {
                if (frame.sp < stack_page ||
                    frame.sp >= stack_page + THREAD_SIZE ||
                    unwind_frame(&frame) < 0)
                        return 0;
                if (!in_sched_functions(frame.pc))
                        return frame.pc;
        } while (count++ < 16);
        return 0;
}
#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
 * The vectors page is always readable from user space for the
 * atomic helpers. Insert it into the gate_vma so that it is visible
 * through ptrace and /proc/<pid>/mem.
 */
static struct vm_area_struct gate_vma;
static int __init gate_vma_init(void)
{
        vma_init(&gate_vma, NULL);
        gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
        gate_vma.vm_start = 0xffff0000;
        gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
        gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
        return 0;
}
arch_initcall(gate_vma_init);
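
/* There is a single, static gate VMA; it is not linked into any mm. */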
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
        return &gate_vma;
}
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
}
int in_gate_area_no_mm(unsigned long addr)
{
        return in_gate_area(NULL, addr);
}
337 #define is_gate_vma(vma) ((vma) == &gate_vma)
339 #define is_gate_vma(vma) 0
const char *arch_vma_name(struct vm_area_struct *vma)
{
        return is_gate_vma(vma) ? "[vectors]" : NULL;
}
/* If possible, provide a placement hint at a random offset from the
 * stack for the sigpage and vdso pages.
 */
static unsigned long sigpage_addr(const struct mm_struct *mm,
                                  unsigned int npages)
{
        unsigned long offset;
        unsigned long first;
        unsigned long last;
        unsigned long addr;
        unsigned int slots;

        first = PAGE_ALIGN(mm->start_stack);

        last = TASK_SIZE - (npages << PAGE_SHIFT);

        /* No room after stack? */
        if (first > last)
                return 0;

        /* Just enough room? */
        if (first == last)
                return first;

        slots = ((last - first) >> PAGE_SHIFT) + 1;
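        /* slots counts every page-aligned base address in [first, last]. */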

        offset = get_random_int() % slots;

        addr = first + (offset << PAGE_SHIFT);

        return addr;
}
static struct page *signal_page;
extern struct page *get_signal_page(void);

static int sigpage_mremap(const struct vm_special_mapping *sm,
                struct vm_area_struct *new_vma)
{
        current->mm->context.sigpage = new_vma->vm_start;
        return 0;
}

static const struct vm_special_mapping sigpage_mapping = {
        .name = "[sigpage]",
        .pages = &signal_page,
        .mremap = sigpage_mremap,
};
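
/*
 * Called at execve time to map the sigpage (signal return
 * trampolines) and the vdso into the new mm.
 */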
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long npages;
        unsigned long addr;
        unsigned long hint;
        int ret = 0;

        if (!signal_page)
                signal_page = get_signal_page();
        if (!signal_page)
                return -ENOMEM;

        npages = 1; /* for sigpage */
        npages += vdso_total_pages;

        if (mmap_write_lock_killable(mm))
                return -EINTR;
        hint = sigpage_addr(mm, npages);
        addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }
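
        /*
         * The sigpage occupies the first page of the reservation; the
         * vdso pages, if any, are mapped immediately after it.
         */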
        vma = _install_special_mapping(mm, addr, PAGE_SIZE,
                VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
                &sigpage_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }

        mm->context.sigpage = addr;

        /* Unlike the sigpage, failure to install the vdso is unlikely
         * to be fatal to the process, so no error check needed
         * here.
         */
        arm_install_vdso(mm, addr + PAGE_SIZE);

 up_fail:
        mmap_write_unlock(mm);
        return ret;
}
#endif