// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <stdarg.h>

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/prctl.h>

#include <asm/alternative.h>
#include <asm/arch_gicv3.h>
#include <asm/compat.h>
#include <asm/cpufeature.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/pointer_auth.h>
#include <asm/stacktrace.h>

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

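/*
 * pm_power_off is normally assigned by a firmware or PMIC driver at probe
 * time; on arm64 the PSCI driver, for example, points it at its SYSTEM_OFF
 * call. A minimal sketch of such a hook (hypothetical device and register
 * names, not a real driver):
 *
 *	static void my_soc_power_off(void)
 *	{
 *		writel(MY_SOC_CMD_OFF, my_soc_pmu_base + MY_SOC_PMU_CTRL);
 *	}
 *
 *	pm_power_off = my_soc_power_off;	// e.g. in the driver's probe()
 */
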
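/*
 * Architectural idle: the DSB completes any outstanding memory accesses
 * before WFI stops the core. WFI returns when a wake event (such as a
 * pending interrupt, even one masked by PSTATE.I) is signalled.
 */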
static void __cpu_do_idle(void)
{
	dsb(sy);
	wfi();
}

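/*
 * With pseudo-NMI (irq priority masking), "interrupts disabled" means the
 * PMR is masking them at the GIC CPU interface, and a PMR-masked interrupt
 * is never signalled to the core, so it cannot act as a WFI wake event.
 * Mask at PSTATE.I instead (which still allows the wake-up), raise the PMR
 * so the GIC will assert the wake signal, then restore both afterwards.
 */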
static void __cpu_do_idle_irqprio(void)
{
	unsigned long pmr;
	unsigned long daif_bits;

	daif_bits = read_sysreg(daif);
	write_sysreg(daif_bits | PSR_I_BIT, daif);

	/*
	 * Unmask PMR before going idle to make sure interrupts can
	 * be raised.
	 */
	pmr = gic_read_pmr();
	gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);

	__cpu_do_idle();

	gic_write_pmr(pmr);
	write_sysreg(daif_bits, daif);
}

/*
 * cpu_do_idle()
 *
 * Idle the processor (wait for interrupt).
 *
 * If the CPU supports priority masking we must do additional work to
 * ensure that interrupts are not masked at the PMR (because the core will
 * not wake up if we block the wake up signal in the interrupt controller).
 */
void cpu_do_idle(void)
{
	if (system_uses_irq_prio_masking())
		__cpu_do_idle_irqprio();
	else
		__cpu_do_idle();
}

/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait-for-interrupt
	 * tricks.
	 */
	cpu_do_idle();
	local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in smp_shutdown_nonboot_cpus() is used to achieve
 * this.
 */
void machine_shutdown(void)
{
	smp_shutdown_nonboot_cpus(reboot_cpu);
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	if (pm_power_off)
		pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	/* Now call the architecture specific reboot code. */
	if (arm_pm_restart)
		arm_pm_restart(reboot_mode, cmd);
	else
		do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

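/*
 * The do_kernel_restart() fallback walks the restart-handler notifier
 * chain, which drivers join via register_restart_handler(). A minimal
 * sketch (hypothetical board code; 128 is conventionally the default
 * priority level):
 *
 *	static int my_board_restart(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		... poke the board-specific reset register here ...
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_board_restart_nb = {
 *		.notifier_call	= my_board_restart,
 *		.priority	= 128,
 *	};
 *
 *	register_restart_handler(&my_board_restart_nb);
 */
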
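/*
 * Decode the PSTATE.BTYPE field (branch type, used by Branch Target
 * Identification) into a two-character string for the pstate line printed
 * in register dumps.
 */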
#define bstr(suffix, str) [PSR_BTYPE_ ## suffix >> PSR_BTYPE_SHIFT] = str
static const char *const btypes[] = {
	bstr(NONE, "--"),
	bstr(  JC, "jc"),
	bstr(   C, "-c"),
	bstr(  J , "j-")
};
#undef bstr

static void print_pstate(struct pt_regs *regs)
{
	u64 pstate = regs->pstate;

	if (compat_user_mode(regs)) {
		printk("pstate: %08llx (%c%c%c%c %c %s %s %c%c%c)\n",
		       pstate,
		       pstate & PSR_AA32_N_BIT ? 'N' : 'n',
		       pstate & PSR_AA32_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_AA32_C_BIT ? 'C' : 'c',
		       pstate & PSR_AA32_V_BIT ? 'V' : 'v',
		       pstate & PSR_AA32_Q_BIT ? 'Q' : 'q',
		       pstate & PSR_AA32_T_BIT ? "T32" : "A32",
		       pstate & PSR_AA32_E_BIT ? "BE" : "LE",
		       pstate & PSR_AA32_A_BIT ? 'A' : 'a',
		       pstate & PSR_AA32_I_BIT ? 'I' : 'i',
		       pstate & PSR_AA32_F_BIT ? 'F' : 'f');
	} else {
		const char *btype_str = btypes[(pstate & PSR_BTYPE_MASK) >>
					       PSR_BTYPE_SHIFT];

		printk("pstate: %08llx (%c%c%c%c %c%c%c%c %cPAN %cUAO BTYPE=%s)\n",
		       pstate,
		       pstate & PSR_N_BIT ? 'N' : 'n',
		       pstate & PSR_Z_BIT ? 'Z' : 'z',
		       pstate & PSR_C_BIT ? 'C' : 'c',
		       pstate & PSR_V_BIT ? 'V' : 'v',
		       pstate & PSR_D_BIT ? 'D' : 'd',
		       pstate & PSR_A_BIT ? 'A' : 'a',
		       pstate & PSR_I_BIT ? 'I' : 'i',
		       pstate & PSR_F_BIT ? 'F' : 'f',
		       pstate & PSR_PAN_BIT ? '+' : '-',
		       pstate & PSR_UAO_BIT ? '+' : '-',
		       btype_str);
	}
}

void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	show_regs_print_info(KERN_DEFAULT);
	print_pstate(regs);

	if (!user_mode(regs)) {
		printk("pc : %pS\n", (void *)regs->pc);
		printk("lr : %pS\n", (void *)ptrauth_strip_insn_pac(lr));
	} else {
		printk("pc : %016llx\n", regs->pc);
		printk("lr : %016llx\n", lr);
	}

	printk("sp : %016llx\n", sp);

	if (system_uses_irq_prio_masking())
		printk("pmr_save: %08llx\n", regs->pmr_save);

	i = top_reg;

	while (i >= 0) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		i--;

		if (i % 2 == 0) {
			pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
			i--;
		}

		pr_cont("\n");
	}
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL, KERN_DEFAULT);
}

static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);

	if (is_compat_task()) {
		current->thread.uw.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}

static void flush_tagged_addr_state(void)
{
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI))
		clear_thread_flag(TIF_TAGGED_ADDR);
}

void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
	flush_tagged_addr_state();
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
	fpsimd_release_task(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;

	/* We rely on the above assignment to initialize dst's thread_flags: */
	BUILD_BUG_ON(!IS_ENABLED(CONFIG_THREAD_INFO_IN_TASK));

	/*
	 * Detach src's sve_state (if any) from dst so that it does not
	 * get erroneously used or freed prematurely. dst's sve_state
	 * will be allocated on demand later on if dst uses SVE.
	 * For consistency, also clear TIF_SVE here: this could be done
	 * later in copy_process(), but to avoid tripping up future
	 * maintainers it is best not to leave TIF_SVE and sve_state in
	 * an inconsistent state, even temporarily.
	 */
	dst->thread.sve_state = NULL;
	clear_tsk_thread_flag(dst, TIF_SVE);

	return 0;
}

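/*
 * Both user and kernel threads first run ret_from_fork (entry.S):
 * cpu_switch_to() restores the callee-saved cpu_context set up in
 * copy_thread() below, so the new task resumes at ret_from_fork with
 * sp pointing at its childregs. For a kernel thread, x19/x20 carry the
 * thread function and its argument; for a user task they are zero and
 * ret_from_fork falls through to ret_to_user, returning to userspace
 * with the register state in childregs (x0 == 0, the child's fork()
 * return value).
 */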
asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p, unsigned long tls)
{
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	ptrauth_thread_init_kernel(p);

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone, use it for the new
		 * thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.uw.tp_value = tls;
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h;
		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
		    cpus_have_const_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;

		spectre_v4_enable_task_mitigation(p);

		if (system_uses_irq_prio_masking())
			childregs->pmr_save = GIC_PRIO_IRQON;

		p->thread.cpu_context.x19 = stack_start;
		p->thread.cpu_context.x20 = stk_sz;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

	ptrace_hw_copy_thread(p);

	return 0;
}

void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
}

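/*
 * TLS register usage: tpidr_el0 holds the TLS pointer for native (64-bit)
 * tasks, while compat (AArch32) tasks use the read-only tpidrro_el0. When
 * the kernel is unmapped at EL0 (KPTI), tpidrro_el0 doubles as a scratch
 * register for the entry trampoline, so it is only cleared for native
 * tasks when KPTI is not in use.
 */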
static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
}

/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
		if (task_thread_info(next)->addr_limit == KERNEL_DS)
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		else
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
	}
}

/*
 * Force SSBS state on context-switch, since it may be lost after migrating
 * from a CPU which treats the bit as RES0 in a heterogeneous system.
 */
static void ssbs_thread_switch(struct task_struct *next)
{
	/*
	 * Nothing to do for kernel threads, but 'regs' may be junk
	 * (e.g. idle task) so check the flags and bail early.
	 */
	if (unlikely(next->flags & PF_KTHREAD))
		return;

	/*
	 * If all CPUs implement the SSBS extension, then we just need to
	 * context-switch the PSTATE field.
	 */
	if (cpus_have_const_cap(ARM64_SSBS))
		return;

	spectre_v4_enable_task_mitigation(next);
}

491 | /* |
492 | * We store our current task in sp_el0, which is clobbered by userspace. Keep a | |
493 | * shadow copy so that we can restore this upon entry from userspace. | |
494 | * | |
495 | * This is *only* for exception entry from EL0, and is not valid until we | |
496 | * __switch_to() a user task. | |
497 | */ | |
498 | DEFINE_PER_CPU(struct task_struct *, __entry_task); | |
499 | ||
500 | static void entry_task_switch(struct task_struct *next) | |
501 | { | |
502 | __this_cpu_write(__entry_task, next); | |
503 | } | |
504 | ||
/*
 * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
 * Assuming the virtual counter is enabled at the beginning of times:
 *
 * - disable access when switching from a 64bit task to a 32bit task
 * - enable access when switching from a 32bit task to a 64bit task
 */
static void erratum_1418040_thread_switch(struct task_struct *prev,
					  struct task_struct *next)
{
	bool prev32, next32;
	u64 val;

	if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
	      cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
		return;

	prev32 = is_compat_thread(task_thread_info(prev));
	next32 = is_compat_thread(task_thread_info(next));

	if (prev32 == next32)
		return;

	val = read_sysreg(cntkctl_el1);

	if (!next32)
		val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
	else
		val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;

	write_sysreg(val, cntkctl_el1);
}

/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	uao_thread_switch(next);
	ssbs_thread_switch(next);
	erratum_1418040_thread_switch(prev, next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 * This full barrier is also required by the membarrier system
	 * call.
	 */
	dsb(ish);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

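/*
 * get_wchan() reports the "wait channel" surfaced in /proc/<pid>/wchan and
 * /proc/<pid>/stat: the first PC outside the scheduler's own functions on
 * a blocked task's stack, i.e. the place the task is sleeping. The unwind
 * is bounded (16 frames) to guard against corrupt or looping stacks.
 */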
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page, ret = 0;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)try_get_task_stack(p);
	if (!stack_page)
		return 0;

	start_backtrace(&frame, thread_saved_fp(p), thread_saved_pc(p));

	do {
		if (unwind_frame(p, &frame))
			goto out;
		if (!in_sched_functions(frame.pc)) {
			ret = frame.pc;
			goto out;
		}
	} while (count++ < 16);

out:
	put_task_stack(p);
	return ret;
}

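/*
 * Randomize the initial stack pointer by up to a page, then round down to
 * keep sp 16-byte aligned as the AAPCS64 ABI requires (~PAGE_MASK selects
 * the in-page bits of the random value).
 */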
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;

	ptrauth_thread_init_user(current);
}

#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI
/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

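/*
 * Userspace opts in per-thread via prctl(2); a minimal sketch of the call
 * (see Documentation/arm64/tagged-address-abi.rst for the full contract):
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0);
 *
 * After this, most syscalls accept pointers whose top byte carries a tag.
 */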
long set_tagged_addr_ctrl(unsigned long arg)
{
	if (is_compat_task())
		return -EINVAL;
	if (arg & ~PR_TAGGED_ADDR_ENABLE)
		return -EINVAL;

	/*
	 * Do not allow the enabling of the tagged address ABI if globally
	 * disabled via sysctl abi.tagged_addr_disabled.
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled)
		return -EINVAL;

	update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE);

	return 0;
}

long get_tagged_addr_ctrl(void)
{
	if (is_compat_task())
		return -EINVAL;

	if (test_thread_flag(TIF_TAGGED_ADDR))
		return PR_TAGGED_ADDR_ENABLE;

	return 0;
}

/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */

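/* Exposed as /proc/sys/abi/tagged_addr_disabled (0 or 1). */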
static struct ctl_table tagged_addr_sysctl_table[] = {
	{
		.procname	= "tagged_addr_disabled",
		.mode		= 0644,
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static int __init tagged_addr_init(void)
{
	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;
	return 0;
}

core_initcall(tagged_addr_init);
#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */

asmlinkage void __sched arm64_preempt_schedule_irq(void)
{
	lockdep_assert_irqs_disabled();

	/*
	 * Preempting a task from an IRQ means we leave copies of PSTATE
	 * on the stack. cpufeature's enable calls may modify PSTATE, but
	 * resuming one of these preempted tasks would undo those changes.
	 *
	 * Only allow a task to be preempted once cpufeatures have been
	 * enabled.
	 */
	if (system_capabilities_finalized())
		preempt_schedule_irq();
}

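/*
 * PROT_BTI marks an executable mapping as BTI-guarded (the GP bit in the
 * stage-1 page tables): indirect branches into such pages must land on a
 * BTI (or compatible) instruction, or the task takes a branch target
 * exception.
 */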
#ifdef CONFIG_BINFMT_ELF
int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
			 bool has_interp, bool is_interp)
{
	/*
	 * For dynamically linked executables the interpreter is
	 * responsible for setting PROT_BTI on everything except
	 * itself.
	 */
	if (is_interp != has_interp)
		return prot;

	if (!(state->flags & ARM64_ELF_BTI))
		return prot;

	if (prot & PROT_EXEC)
		prot |= PROT_BTI;

	return prot;
}
#endif