/*
 * linux/arch/arm/kernel/process.c
 *
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Original Copyright (C) 1995 Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <stdarg.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>

#include <asm/leds.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/thread_notify.h>
#include <asm/uaccess.h>
#include <asm/mach/time.h>

extern const char *processor_modes[];
extern void setup_mm_for_reboot(char mode);

static volatile int hlt_counter;

#include <asm/arch/system.h>

void disable_hlt(void)
{
        hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

static int __init nohlt_setup(char *__unused)
{
        hlt_counter = 1;
        return 1;
}

static int __init hlt_setup(char *__unused)
{
        hlt_counter = 0;
        return 1;
}

__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
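
/*
 * Illustrative sketch, not part of the original file: code that cannot
 * tolerate the CPU entering its low-power wait state (for example around a
 * latency-critical polling window) brackets the window with disable_hlt()/
 * enable_hlt().  The calls nest, so they must balance; poll_once() is a
 * hypothetical callback used only for the example.
 */
static inline void example_poll_without_hlt(void (*poll_once)(void))
{
        disable_hlt();          /* keep default_idle() from calling arch_idle() */
        poll_once();            /* latency-critical work goes here */
        enable_hlt();           /* restore normal idle behaviour */
}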

void arm_machine_restart(char mode)
{
        /*
         * Clean and disable cache, and turn off interrupts
         */
        cpu_proc_fin();

        /*
         * Tell the mm system that we are going to reboot -
         * we may need it to insert some 1:1 mappings so that
         * soft boot works.
         */
        setup_mm_for_reboot(mode);

        /*
         * Now call the architecture specific reboot code.
         */
        arch_reset(mode);

        /*
         * Whoops - the architecture was unable to reboot.
         * Tell the user!
         */
        mdelay(1000);
        printk("Reboot failed -- System halted\n");
        while (1);
}
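
/*
 * Illustrative sketch, not part of the original file: arch_idle() and
 * arch_reset() used above come from the machine-specific
 * <asm/arch/system.h> included earlier.  A minimal version of that header
 * commonly looks like the following; the soft/hard split on 's' is the
 * usual convention, and any watchdog or reset register would of course be
 * board specific:
 *
 *      static inline void arch_idle(void)
 *      {
 *              cpu_do_idle();          // wait for interrupt
 *      }
 *
 *      static inline void arch_reset(char mode)
 *      {
 *              if (mode == 's')
 *                      cpu_reset(0);   // soft reboot: branch back to 0
 *              // otherwise kick the board's watchdog for a hard reset
 *      }
 */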

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void (*arm_pm_restart)(char str) = arm_machine_restart;
EXPORT_SYMBOL_GPL(arm_pm_restart);
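
/*
 * Illustrative sketch, not part of the original file: machine or board
 * support code assigns these hooks during init.  The names below are
 * hypothetical; a real example_power_off() would drive the board's PMIC
 * or power-control line.
 *
 *      static void example_power_off(void)
 *      {
 *              // cut board power; does not return
 *      }
 *
 *      static int __init example_pm_init(void)
 *      {
 *              pm_power_off = example_power_off;
 *              return 0;
 *      }
 *      arch_initcall(example_pm_init);
 */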


/*
 * This is our default idle handler.  We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
static void default_idle(void)
{
        if (hlt_counter)
                cpu_relax();
        else {
                local_irq_disable();
                if (!need_resched()) {
                        timer_dyn_reprogram();
                        arch_idle();
                }
                local_irq_enable();
        }
}

/*
 * The idle thread.  We try to conserve power, while trying to keep
 * overall latency low.  The architecture specific idle is passed
 * a value to indicate the level of "idleness" of the system.
 */
void cpu_idle(void)
{
        local_fiq_enable();

        /* endless idle loop with no priority at all */
        while (1) {
                void (*idle)(void) = pm_idle;

#ifdef CONFIG_HOTPLUG_CPU
                if (cpu_is_offline(smp_processor_id())) {
                        leds_event(led_idle_start);
                        cpu_die();
                }
#endif

                if (!idle)
                        idle = default_idle;
                leds_event(led_idle_start);
                while (!need_resched())
                        idle();
                leds_event(led_idle_end);
                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}

static char reboot_mode = 'h';

int __init reboot_setup(char *str)
{
        reboot_mode = str[0];
        return 1;
}

__setup("reboot=", reboot_setup);
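
/*
 * The single character parsed from "reboot=" above is what gets passed to
 * setup_mm_for_reboot() and arch_reset() at restart time; by convention
 * 's' asks for a soft reboot and the default 'h' for a hard one, though
 * the final interpretation is up to the machine's arch_reset().
 */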

void machine_halt(void)
{
}


void machine_power_off(void)
{
        if (pm_power_off)
                pm_power_off();
}

void machine_restart(char * __unused)
{
        arm_pm_restart(reboot_mode);
}

void __show_regs(struct pt_regs *regs)
{
        unsigned long flags = condition_codes(regs);

        printk("CPU: %d\n", smp_processor_id());
        print_symbol("PC is at %s\n", instruction_pointer(regs));
        print_symbol("LR is at %s\n", regs->ARM_lr);
        printk("pc : [<%08lx>]    lr : [<%08lx>]    %s\n"
               "sp : %08lx  ip : %08lx  fp : %08lx\n",
                instruction_pointer(regs),
                regs->ARM_lr, print_tainted(), regs->ARM_sp,
                regs->ARM_ip, regs->ARM_fp);
        printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
                regs->ARM_r10, regs->ARM_r9,
                regs->ARM_r8);
        printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
                regs->ARM_r7, regs->ARM_r6,
                regs->ARM_r5, regs->ARM_r4);
        printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
                regs->ARM_r3, regs->ARM_r2,
                regs->ARM_r1, regs->ARM_r0);
        printk("Flags: %c%c%c%c",
                flags & PSR_N_BIT ? 'N' : 'n',
                flags & PSR_Z_BIT ? 'Z' : 'z',
                flags & PSR_C_BIT ? 'C' : 'c',
                flags & PSR_V_BIT ? 'V' : 'v');
        printk("  IRQs o%s  FIQs o%s  Mode %s%s  Segment %s\n",
                interrupts_enabled(regs) ? "n" : "ff",
                fast_interrupts_enabled(regs) ? "n" : "ff",
                processor_modes[processor_mode(regs)],
                thumb_mode(regs) ? " (T)" : "",
                get_fs() == get_ds() ? "kernel" : "user");
#ifdef CONFIG_CPU_CP15
        {
                unsigned int ctrl;
                __asm__ (
                "       mrc p15, 0, %0, c1, c0\n"
                : "=r" (ctrl));
                printk("Control: %04X\n", ctrl);
        }
#ifdef CONFIG_CPU_CP15_MMU
        {
                unsigned int transbase, dac;
                __asm__ (
                "       mrc p15, 0, %0, c2, c0\n"
                "       mrc p15, 0, %1, c3, c0\n"
                : "=r" (transbase), "=r" (dac));
                printk("Table: %08X DAC: %08X\n",
                        transbase, dac);
        }
#endif
#endif
}

void show_regs(struct pt_regs * regs)
{
        printk("\n");
        printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
        __show_regs(regs);
        __backtrace();
}

void show_fpregs(struct user_fp *regs)
{
        int i;

        for (i = 0; i < 8; i++) {
                unsigned long *p;
                char type;

                p = (unsigned long *)(regs->fpregs + i);

                switch (regs->ftype[i]) {
                        case 1: type = 'f'; break;
                        case 2: type = 'd'; break;
                        case 3: type = 'e'; break;
                        default: type = '?'; break;
                }
                if (regs->init_flag)
                        type = '?';

                printk(" f%d(%c): %08lx %08lx %08lx%c",
                        i, type, p[0], p[1], p[2], i & 1 ? '\n' : ' ');
        }


        printk("FPSR: %08lx FPCR: %08lx\n",
                (unsigned long)regs->fpsr,
                (unsigned long)regs->fpcr);
}

/*
 * Task structure and kernel stack allocation.
 */
struct thread_info_list {
        unsigned long *head;
        unsigned int nr;
};

static DEFINE_PER_CPU(struct thread_info_list, thread_info_list) = { NULL, 0 };

#define EXTRA_TASK_STRUCT 4
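
/*
 * thread_info blocks are THREAD_SIZE page allocations, so rather than hand
 * every one straight back to the page allocator, up to EXTRA_TASK_STRUCT
 * freed blocks are cached on a per-CPU free list; the first word of each
 * cached block points to the next.  alloc_thread_info() pops from this
 * list before falling back to __get_free_pages().
 */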

struct thread_info *alloc_thread_info(struct task_struct *task)
{
        struct thread_info *thread = NULL;

        if (EXTRA_TASK_STRUCT) {
                struct thread_info_list *th = &get_cpu_var(thread_info_list);
                unsigned long *p = th->head;

                if (p) {
                        th->head = (unsigned long *)p[0];
                        th->nr -= 1;
                }
                put_cpu_var(thread_info_list);

                thread = (struct thread_info *)p;
        }

        if (!thread)
                thread = (struct thread_info *)
                           __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);

#ifdef CONFIG_DEBUG_STACK_USAGE
        /*
         * The stack must be cleared if you want SYSRQ-T to
         * give sensible stack usage information
         */
        if (thread)
                memzero(thread, THREAD_SIZE);
#endif
        return thread;
}

void free_thread_info(struct thread_info *thread)
{
        if (EXTRA_TASK_STRUCT) {
                struct thread_info_list *th = &get_cpu_var(thread_info_list);
                if (th->nr < EXTRA_TASK_STRUCT) {
                        unsigned long *p = (unsigned long *)thread;
                        p[0] = (unsigned long)th->head;
                        th->head = p;
                        th->nr += 1;
                        put_cpu_var(thread_info_list);
                        return;
                }
                put_cpu_var(thread_info_list);
        }
        free_pages((unsigned long)thread, THREAD_SIZE_ORDER);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
}

ATOMIC_NOTIFIER_HEAD(thread_notify_head);

EXPORT_SYMBOL_GPL(thread_notify_head);

void flush_thread(void)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = current;

        memset(thread->used_cp, 0, sizeof(thread->used_cp));
        memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
        memset(&thread->fpstate, 0, sizeof(union fp_state));

        thread_notify(THREAD_NOTIFY_FLUSH, thread);
}

void release_thread(struct task_struct *dead_task)
{
        struct thread_info *thread = task_thread_info(dead_task);

        thread_notify(THREAD_NOTIFY_RELEASE, thread);
}

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

int
copy_thread(int nr, unsigned long clone_flags, unsigned long stack_start,
            unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
{
        struct thread_info *thread = task_thread_info(p);
        struct pt_regs *childregs = task_pt_regs(p);

        *childregs = *regs;
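        /*
         * The child resumes in ret_from_fork with this register frame:
         * r0 is cleared below so the child's fork()/clone() returns zero,
         * and sp is pointed at the stack the caller supplied.
         */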
        childregs->ARM_r0 = 0;
        childregs->ARM_sp = stack_start;

        memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
        thread->cpu_context.sp = (unsigned long)childregs;
        thread->cpu_context.pc = (unsigned long)ret_from_fork;

        if (clone_flags & CLONE_SETTLS)
                thread->tp_value = regs->ARM_r3;

        return 0;
}

/*
 * fill in the fpe structure for a core dump...
 */
int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
{
        struct thread_info *thread = current_thread_info();
        int used_math = thread->used_cp[1] | thread->used_cp[2];

        if (used_math)
                memcpy(fp, &thread->fpstate.soft, sizeof (*fp));

        return used_math != 0;
}
EXPORT_SYMBOL(dump_fpu);

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
        struct task_struct *tsk = current;

        dump->magic = CMAGIC;
        dump->start_code = tsk->mm->start_code;
        dump->start_stack = regs->ARM_sp & ~(PAGE_SIZE - 1);

        dump->u_tsize = (tsk->mm->end_code - tsk->mm->start_code) >> PAGE_SHIFT;
        dump->u_dsize = (tsk->mm->brk - tsk->mm->start_data + PAGE_SIZE - 1) >> PAGE_SHIFT;
        dump->u_ssize = 0;

        dump->u_debugreg[0] = tsk->thread.debug.bp[0].address;
        dump->u_debugreg[1] = tsk->thread.debug.bp[1].address;
        dump->u_debugreg[2] = tsk->thread.debug.bp[0].insn.arm;
        dump->u_debugreg[3] = tsk->thread.debug.bp[1].insn.arm;
        dump->u_debugreg[4] = tsk->thread.debug.nsaved;

        if (dump->start_stack < 0x04000000)
                dump->u_ssize = (0x04000000 - dump->start_stack) >> PAGE_SHIFT;

        dump->regs = *regs;
        dump->u_fpvalid = dump_fpu (regs, &dump->u_fp);
}
EXPORT_SYMBOL(dump_thread);

/*
 * Shuffle the argument into the correct register before calling the
 * thread function.  r1 is the thread argument, r2 is the pointer to
 * the thread function, and r3 points to the exit function.
 */
extern void kernel_thread_helper(void);
asm(    ".section .text\n"
"       .align\n"
"       .type   kernel_thread_helper, #function\n"
"kernel_thread_helper:\n"
"       mov     r0, r1\n"
"       mov     lr, r3\n"
"       mov     pc, r2\n"
"       .size   kernel_thread_helper, . - kernel_thread_helper\n"
"       .previous");

/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
        struct pt_regs regs;

        memset(&regs, 0, sizeof(regs));

        regs.ARM_r1 = (unsigned long)arg;
        regs.ARM_r2 = (unsigned long)fn;
        regs.ARM_r3 = (unsigned long)do_exit;
        regs.ARM_pc = (unsigned long)kernel_thread_helper;
        regs.ARM_cpsr = SVC_MODE;

        return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
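
/*
 * Illustrative sketch, not part of the original file: a caller of this era
 * would typically start a kernel thread along these lines (example_thread()
 * and its body are hypothetical):
 *
 *      static int example_thread(void *arg)
 *      {
 *              daemonize("example");
 *              while (!signal_pending(current))
 *                      schedule_timeout_interruptible(HZ);
 *              return 0;
 *      }
 *
 *      pid = kernel_thread(example_thread, NULL, CLONE_KERNEL);
 *
 * kernel_thread_helper above arranges for example_thread(arg) to be entered
 * with the normal C calling convention and for do_exit() to run when it
 * returns.
 */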

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long fp, lr;
        unsigned long stack_start, stack_end;
        int count = 0;
        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        stack_start = (unsigned long)end_of_stack(p);
        stack_end = (unsigned long)task_stack_page(p) + THREAD_SIZE;

        fp = thread_saved_fp(p);
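        /*
         * Walk the APCS frame chain saved at the last context switch: with
         * the standard {fp, ip, lr, pc} save, fp points at the saved pc, so
         * the caller's lr lives at fp - 4 and the previous frame pointer at
         * fp - 12.
         */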
        do {
                if (fp < stack_start || fp > stack_end)
                        return 0;
                lr = pc_pointer (((unsigned long *)fp)[-1]);
                if (!in_sched_functions(lr))
                        return lr;
                fp = *(unsigned long *) (fp - 12);
        } while (count ++ < 16);
        return 0;
}