]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - arch/sh/kernel/process.c
Merge branch 'ubuntu-updates' of master.kernel.org:/pub/scm/linux/kernel/git/bcollins...
[mirror_ubuntu-zesty-kernel.git] / arch / sh / kernel / process.c
CommitLineData
1da177e4
LT
1/* $Id: process.c,v 1.28 2004/05/05 16:54:23 lethal Exp $
2 *
3 * linux/arch/sh/kernel/process.c
4 *
5 * Copyright (C) 1995 Linus Torvalds
6 *
7 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
8ae91b9a 8 * Copyright (C) 2006 Lineo Solutions Inc. support SH4A UBC
1da177e4
LT
9 */
10
11/*
12 * This file handles the architecture-dependent parts of process handling..
13 */
14
15#include <linux/module.h>
16#include <linux/unistd.h>
17#include <linux/mm.h>
18#include <linux/elfcore.h>
1da177e4 19#include <linux/a.out.h>
a3310bbd
PM
20#include <linux/slab.h>
21#include <linux/pm.h>
1da177e4 22#include <linux/ptrace.h>
1da177e4 23#include <linux/kallsyms.h>
a3310bbd 24#include <linux/kexec.h>
1da177e4
LT
25
26#include <asm/io.h>
27#include <asm/uaccess.h>
28#include <asm/mmu_context.h>
29#include <asm/elf.h>
b5233d07 30#include <asm/ubc.h>
1da177e4
LT
31
/* Non-zero while "sleep"-based idling is disabled (see disable_hlt()/enable_hlt()). */
static int hlt_counter=0;

/* Number of tasks currently using the UBC (user break controller) for tracing. */
int ubc_usercnt = 0;

/* NOTE(review): HARD_IDLE_TIMEOUT appears unused in this file — candidate for removal. */
#define HARD_IDLE_TIMEOUT (HZ / 3)

/* Platform-overridable idle routine; NULL means cpu_idle() uses default_idle(). */
void (*pm_idle)(void);

/* Platform power-off hook invoked from machine_power_off(); may be NULL. */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
42
1da177e4
LT
43void disable_hlt(void)
44{
45 hlt_counter++;
46}
47
48EXPORT_SYMBOL(disable_hlt);
49
50void enable_hlt(void)
51{
52 hlt_counter--;
53}
54
55EXPORT_SYMBOL(enable_hlt);
56
a3310bbd
PM
57void default_idle(void)
58{
59 if (!hlt_counter)
60 cpu_sleep();
61 else
62 cpu_relax();
63}
64
/*
 * Per-CPU idle loop: run the platform idle routine (pm_idle, falling
 * back to default_idle) until a reschedule is needed, then switch
 * away.  Entered with preemption disabled; never returns.
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		/* Sample pm_idle once per pass so a concurrent update is safe */
		void (*idle)(void) = pm_idle;

		if (!idle)
			idle = default_idle;

		while (!need_resched())
			idle();

		/*
		 * need_resched() is set: re-enable preemption (without an
		 * immediate resched), schedule, then disable it again for
		 * the next idle pass.  The ordering here is deliberate.
		 */
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
82
1da177e4
LT
/*
 * Restart by forcing a CPU reset: set SR.BL to block exceptions, then
 * perform a load that raises an address error — with BL=1 this escalates
 * to a manual reset on SH.
 */
void machine_restart(char * __unused)
{
	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
	asm volatile("ldc %0, sr\n\t"
		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
}
89
1da177e4
LT
/*
 * Halt the machine: mask interrupts and put the CPU to sleep forever.
 */
void machine_halt(void)
{
	local_irq_disable();

	for (;;)
		cpu_sleep();
}
97
1da177e4
LT
98void machine_power_off(void)
99{
a3310bbd
PM
100 if (pm_power_off)
101 pm_power_off();
1da177e4
LT
102}
103
1da177e4
LT
/*
 * Dump the register state in 'regs' to the console; when the trap
 * happened in kernel mode, also dump the kernel stack.
 */
void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("Pid : %d, Comm: %20s\n", current->pid, current->comm);
	print_symbol("PC is at %s\n", regs->pc);
	printk("PC : %08lx SP : %08lx SR : %08lx ",
	       regs->pc, regs->regs[15], regs->sr);
#ifdef CONFIG_MMU
	/* Translation Effective Address: the faulting address, if any */
	printk("TEA : %08x ", ctrl_inl(MMU_TEA));
#else
	printk(" ");
#endif
	printk("%s\n", print_tainted());

	printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
	       regs->regs[0],regs->regs[1],
	       regs->regs[2],regs->regs[3]);
	printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
	       regs->regs[4],regs->regs[5],
	       regs->regs[6],regs->regs[7]);
	printk("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n",
	       regs->regs[8],regs->regs[9],
	       regs->regs[10],regs->regs[11]);
	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
	       regs->regs[12],regs->regs[13],
	       regs->regs[14]);
	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n",
	       regs->mach, regs->macl, regs->gbr, regs->pr);

	/*
	 * If we're in kernel mode, dump the stack too..
	 */
	if (!user_mode(regs)) {
		extern void show_task(unsigned long *sp);
		unsigned long sp = regs->regs[15];	/* r15 is the stack pointer */

		show_task((unsigned long *)sp);
	}
}
143
/*
 * Create a kernel thread
 */

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * Entry trampoline for kernel threads: kernel_thread() below arranges
 * for r5 to hold the thread function and r4 its argument.  Note the
 * SH branch delay slots: the instruction after each jsr runs before
 * the jump, so " mov r0, r4" passes the function's return value (r0)
 * to do_exit() as its argument (r4).
 */
extern void kernel_thread_helper(void);
__asm__(".align 5\n"
	"kernel_thread_helper:\n\t"
	"jsr @r5\n\t"
	" nop\n\t"
	"mov.l 1f, r1\n\t"
	"jsr @r1\n\t"
	" mov r0, r4\n\t"
	".align 2\n\t"
	"1:.long do_exit");
162
/*
 * Create a kernel thread: build a synthetic register frame that starts
 * the child in kernel_thread_helper() with fn/arg in r5/r4, then fork.
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{	/* Don't use this in BL=1(cli). Or else, CPU resets! */
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	regs.regs[4] = (unsigned long) arg;	/* argument for fn (helper's r4) */
	regs.regs[5] = (unsigned long) fn;	/* function jumped to by the helper */

	regs.pc = (unsigned long) kernel_thread_helper;
	regs.sr = (1 << 30);	/* SR.MD=1: child runs in privileged mode */

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
177
178/*
179 * Free current thread data structures etc..
180 */
181void exit_thread(void)
182{
183 if (current->thread.ubc_pc) {
184 current->thread.ubc_pc = 0;
185 ubc_usercnt -= 1;
186 }
187}
188
/*
 * Reset per-thread state for exec(): discard any lazily-held FPU
 * context so the new program starts with clean math state.
 */
void flush_thread(void)
{
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;
	/* Forget lazy FPU state */
	clear_fpu(tsk, task_pt_regs(tsk));
	clear_used_math();
#endif
}
198
/*
 * Last-chance per-arch cleanup when a task is reaped; SH keeps no
 * extra per-thread resources, so there is nothing to free here.
 */
void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}
203
/* Fill in the fpu structure for a core dump.. */
/*
 * Returns 1 and copies the hardware FPU registers into *fpu when the
 * task has used math; returns 0 otherwise (or when built without FPU).
 * NOTE(review): operates on 'current', not on the task owning 'regs'.
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid) {
		/* Flush live FPU state into the thread struct before copying */
		unlazy_fpu(tsk, regs);
		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
	}
#endif

	return fpvalid;
}
221
222/*
223 * Capture the user space registers if the task is not running (in user space)
224 */
225int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
226{
227 struct pt_regs ptregs;
228
3cf0f4ec 229 ptregs = *task_pt_regs(tsk);
1da177e4
LT
230 elf_core_copy_regs(regs, &ptregs);
231
232 return 1;
233}
234
/*
 * Copy 'tsk's FPU registers into *fpu for a core dump.  Returns 1 when
 * the task has used math (and the copy was made), 0 otherwise.
 */
int
dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *fpu)
{
	int fpvalid = 0;

#if defined(CONFIG_SH_FPU)
	fpvalid = !!tsk_used_math(tsk);
	if (fpvalid) {
		/* Flush any live FPU state into the thread struct first */
		unlazy_fpu(tsk, task_pt_regs(tsk));
		memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
	}
#endif

	return fpvalid;
}
250
asmlinkage void ret_from_fork(void);

/*
 * Set up the child's thread state for fork/clone: copy the parent's
 * register frame, point the child at ret_from_fork, and install the
 * requested user stack pointer / TLS.  Returns 0 on success.
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs;
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	/* Flush the parent's live FPU state so the struct copy is current */
	unlazy_fpu(tsk, regs);
	p->thread.fpu = tsk->thread.fpu;
	copy_to_stopped_child_used_math(p);
#endif

	/* The child starts from a copy of the parent's register frame */
	childregs = task_pt_regs(p);
	*childregs = *regs;

	if (user_mode(regs)) {
		childregs->regs[15] = usp;
		ti->addr_limit = USER_DS;
	} else {
		/* Kernel thread: stack pointer is the top of the child's stack */
		childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
		ti->addr_limit = KERNEL_DS;
	}
	if (clone_flags & CLONE_SETTLS) {
		/* NOTE(review): TLS value is taken from r0 — confirm against
		 * the userspace clone() wrapper's register convention. */
		childregs->gbr = childregs->regs[0];
	}
	childregs->regs[0] = 0; /* Set return value for child */

	/* Where the scheduler resumes the child: ret_from_fork on childregs */
	p->thread.sp = (unsigned long) childregs;
	p->thread.pc = (unsigned long) ret_from_fork;

	/* Child starts with no UBC breakpoint armed */
	p->thread.ubc_pc = 0;

	return 0;
}
289
1da177e4
LT
/* Tracing by user break controller. */
/*
 * Arm the UBC to break on instruction fetch at 'pc' within address
 * space 'asid' (used to single-step a traced task).
 */
static void
ubc_set_tracing(int asid, unsigned long pc)
{
#if defined(CONFIG_CPU_SH4A)
	unsigned long val;

	/* Channel 0: break on instruction read, compare ASID */
	val = (UBC_CBR_ID_INST | UBC_CBR_RW_READ | UBC_CBR_CE);
	val |= (UBC_CBR_AIE | UBC_CBR_AIV_SET(asid));

	ctrl_outl(val, UBC_CBR0);
	ctrl_outl(pc, UBC_CAR0);
	ctrl_outl(0x0, UBC_CAMR0);	/* no address mask: exact match */
	ctrl_outl(0x0, UBC_CBCR);

	val = (UBC_CRR_RES | UBC_CRR_PCB | UBC_CRR_BIE);
	ctrl_outl(val, UBC_CRR0);

	/* Read back the UBC register we wrote last, to check it took effect */
	val = ctrl_inl(UBC_CRR0);

#else /* CONFIG_CPU_SH4A */
	ctrl_outl(pc, UBC_BARA);

#ifdef CONFIG_MMU
	/* We don't have any ASID settings for the SH-2! */
	if (cpu_data->type != CPU_SH7604)
		ctrl_outb(asid, UBC_BASRA);
#endif

	ctrl_outl(0, UBC_BAMRA);	/* no address mask: exact match */

	/* SH7729/SH7710 need BBR_CPU and a 32-bit BRCR write */
	if (cpu_data->type == CPU_SH7729 || cpu_data->type == CPU_SH7710) {
		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
	} else {
		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
		ctrl_outw(BRCR_PCBA, UBC_BRCR);
	}
#endif /* CONFIG_CPU_SH4A */
}
331
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * Flushes the outgoing task's FPU state, switches the kernel-mode
 * register bank to the incoming task's thread_info, and re-arms or
 * disables the UBC depending on the incoming task.  Returns prev.
 */
struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
{
#if defined(CONFIG_SH_FPU)
	/* Save the outgoing task's live FPU state before it loses the CPU */
	unlazy_fpu(prev, task_pt_regs(prev));
#endif

#ifdef CONFIG_PREEMPT
	{
		unsigned long flags;
		struct pt_regs *regs;

		local_irq_save(flags);
		regs = task_pt_regs(prev);
		/*
		 * NOTE(review): this appears to abort a userspace gUSA-style
		 * atomic sequence marked by an sp in the 0xc0000000+ range:
		 * the real sp is restored from r1 and the pc rewound relative
		 * to the restart point in r0 — confirm against the userspace
		 * atomic-sequence convention in the SH entry code.
		 */
		if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
			int offset = (int)regs->regs[15];

			/* Reset stack pointer: clear critical region mark */
			regs->regs[15] = regs->regs[1];
			if (regs->pc < regs->regs[0])
				/* Go to rewind point */
				regs->pc = regs->regs[0] + offset;
		}
		local_irq_restore(flags);
	}
#endif

#ifdef CONFIG_MMU
	/*
	 * Restore the kernel mode register
	 * k7 (r7_bank1)
	 */
	asm volatile("ldc %0, r7_bank"
		     : /* no output */
		     : "r" (task_thread_info(next)));
#endif

	/* If no tasks are using the UBC, we're done */
	if (ubc_usercnt == 0)
		/* nothing to do */;
	else if (next->thread.ubc_pc && next->mm) {
		/* Incoming task has a breakpoint armed: program the UBC for it */
		int asid = 0;
#ifdef CONFIG_MMU
		asid |= next->mm->context.id & MMU_CONTEXT_ASID_MASK;
#endif
		ubc_set_tracing(asid, next->thread.ubc_pc);
	} else {
		/* Incoming task has none: make sure the UBC is disabled */
#if defined(CONFIG_CPU_SH4A)
		ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
		ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
#else
		ctrl_outw(0, UBC_BBRA);
		ctrl_outw(0, UBC_BBRB);
#endif
	}

	return prev;
}
393
/*
 * fork(2): plain fork, reusing the parent's user stack pointer (r15)
 * for the child.  Unsupported on nommu configurations.
 */
asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
			unsigned long r6, unsigned long r7,
			struct pt_regs regs)
{
#ifdef CONFIG_MMU
	return do_fork(SIGCHLD, regs.regs[15], &regs, 0, NULL, NULL);
#else
	/* fork almost works, enough to trick you into looking elsewhere :-( */
	return -EINVAL;
#endif
}
405
406asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
407 unsigned long parent_tidptr,
408 unsigned long child_tidptr,
409 struct pt_regs regs)
410{
411 if (!newsp)
412 newsp = regs.regs[15];
413 return do_fork(clone_flags, newsp, &regs, 0,
414 (int __user *)parent_tidptr, (int __user *)child_tidptr);
415}
416
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
			 unsigned long r6, unsigned long r7,
			 struct pt_regs regs)
{
	/* Child shares the VM and blocks the parent until exec/exit */
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs,
		       0, NULL, NULL);
}
434
435/*
436 * sys_execve() executes a new program.
437 */
438asmlinkage int sys_execve(char *ufilename, char **uargv,
439 char **uenvp, unsigned long r7,
440 struct pt_regs regs)
441{
442 int error;
443 char *filename;
444
445 filename = getname((char __user *)ufilename);
446 error = PTR_ERR(filename);
447 if (IS_ERR(filename))
448 goto out;
449
450 error = do_execve(filename,
451 (char __user * __user *)uargv,
452 (char __user * __user *)uenvp,
453 &regs);
454 if (error == 0) {
455 task_lock(current);
456 current->ptrace &= ~PT_DTRACE;
457 task_unlock(current);
458 }
459 putname(filename);
460out:
461 return error;
462}
463
464unsigned long get_wchan(struct task_struct *p)
465{
466 unsigned long schedule_frame;
467 unsigned long pc;
468
469 if (!p || p == current || p->state == TASK_RUNNING)
470 return 0;
471
472 /*
473 * The same comment as on the Alpha applies here, too ...
474 */
475 pc = thread_saved_pc(p);
476 if (in_sched_functions(pc)) {
477 schedule_frame = ((unsigned long *)(long)p->thread.sp)[1];
478 return (unsigned long)((unsigned long *)schedule_frame)[1];
479 }
480 return pc;
481}
482
/*
 * UBC break handler: a traced task hit its hardware breakpoint.
 * Disarm the UBC, drop this task's usage bookkeeping, and deliver
 * SIGTRAP so the tracer sees the stop.
 */
asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
				 unsigned long r6, unsigned long r7,
				 struct pt_regs regs)
{
	/* Clear tracing. */
#if defined(CONFIG_CPU_SH4A)
	ctrl_outl(UBC_CBR_INIT, UBC_CBR0);
	ctrl_outl(UBC_CRR_INIT, UBC_CRR0);
#else
	ctrl_outw(0, UBC_BBRA);
	ctrl_outw(0, UBC_BBRB);
#endif
	current->thread.ubc_pc = 0;
	ubc_usercnt -= 1;

	force_sig(SIGTRAP, current);
}
500
/*
 * Software breakpoint handler: rewind the pc over the 2-byte trap
 * instruction so the debugger sees the breakpoint address, then
 * deliver SIGTRAP.
 *
 * NOTE(review): pt_regs arrives by value, yet writing regs.pc is
 * expected to stick — presumably this copy occupies the live exception
 * frame on the kernel stack; confirm against the SH entry code.
 */
asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5,
					  unsigned long r6, unsigned long r7,
					  struct pt_regs regs)
{
	regs.pc -= 2;
	force_sig(SIGTRAP, current);
}