/*
 * arch/sh/kernel/process.c
 * (source imported from the mirror_ubuntu-bionic-kernel.git gitweb view;
 *  commit subject: "sh: Initial vsyscall page support.")
 */
1 /* $Id: process.c,v 1.28 2004/05/05 16:54:23 lethal Exp $
2 *
3 * linux/arch/sh/kernel/process.c
4 *
5 * Copyright (C) 1995 Linus Torvalds
6 *
7 * SuperH version: Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
8 */
9
10 /*
11 * This file handles the architecture-dependent parts of process handling..
12 */
13
14 #include <linux/module.h>
15 #include <linux/unistd.h>
16 #include <linux/mm.h>
17 #include <linux/elfcore.h>
18 #include <linux/a.out.h>
19 #include <linux/slab.h>
20 #include <linux/pm.h>
21 #include <linux/ptrace.h>
22 #include <linux/kallsyms.h>
23 #include <linux/kexec.h>
24
25 #include <asm/io.h>
26 #include <asm/uaccess.h>
27 #include <asm/mmu_context.h>
28 #include <asm/elf.h>
29 #include <asm/ubc.h>
30
/* Non-zero while idle "hlt" (cpu_sleep) is disabled via disable_hlt(). */
static int hlt_counter=0;

/* Number of tasks currently holding a User Break Controller breakpoint. */
int ubc_usercnt = 0;

#define HARD_IDLE_TIMEOUT (HZ / 3)

/* Platform-installed idle routine; cpu_idle() falls back to default_idle
 * when this is NULL. */
void (*pm_idle)(void);

/* Platform-installed power-off hook, invoked by machine_power_off(). */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
41
42 void disable_hlt(void)
43 {
44 hlt_counter++;
45 }
46
47 EXPORT_SYMBOL(disable_hlt);
48
49 void enable_hlt(void)
50 {
51 hlt_counter--;
52 }
53
54 EXPORT_SYMBOL(enable_hlt);
55
56 void default_idle(void)
57 {
58 if (!hlt_counter)
59 cpu_sleep();
60 else
61 cpu_relax();
62 }
63
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		/* Re-sample pm_idle each pass: a platform driver may install
		 * (or remove) its idle hook at runtime. */
		void (*idle)(void) = pm_idle;

		if (!idle)
			idle = default_idle;

		/* Stay in the idle routine until someone needs this CPU. */
		while (!need_resched())
			idle();

		/* Idle thread runs with preemption disabled; enable it
		 * without triggering an implicit reschedule, schedule
		 * explicitly, then disable again for the next idle pass. */
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
81
void machine_restart(char * __unused)
{
	/* SR.BL=1 and invoke address error to let CPU reset (manual reset) */
	/* 0x10000000 is the SR.BL (block exceptions) bit; the longword load
	 * from the misaligned address 0x80000001 then raises an address
	 * error, which with BL set escalates into a manual reset. */
	asm volatile("ldc %0, sr\n\t"
		     "mov.l @%1, %0" : : "r" (0x10000000), "r" (0x80000001));
}
88
/*
 * Halt the machine: mask interrupts and sleep the CPU forever.
 */
void machine_halt(void)
{
	local_irq_disable();

	for (;;)
		cpu_sleep();
}
96
97 void machine_power_off(void)
98 {
99 if (pm_power_off)
100 pm_power_off();
101 }
102
/*
 * Dump the register frame (and, for kernel-mode faults, the stack)
 * to the kernel log — used for oopses and SysRq register dumps.
 */
void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("Pid : %d, Comm: %20s\n", current->pid, current->comm);
	print_symbol("PC is at %s\n", regs->pc);
	printk("PC : %08lx SP : %08lx SR : %08lx ",
	       regs->pc, regs->regs[15], regs->sr);
#ifdef CONFIG_MMU
	/* NOTE(review): %08x with ctrl_inl()'s return — verify against
	 * asm/io.h; if ctrl_inl returns unsigned long this wants %08lx. */
	printk("TEA : %08x ", ctrl_inl(MMU_TEA));
#else
	printk(" ");
#endif
	printk("%s\n", print_tainted());

	printk("R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n",
	       regs->regs[0],regs->regs[1],
	       regs->regs[2],regs->regs[3]);
	printk("R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n",
	       regs->regs[4],regs->regs[5],
	       regs->regs[6],regs->regs[7]);
	printk("R8 : %08lx R9 : %08lx R10 : %08lx R11 : %08lx\n",
	       regs->regs[8],regs->regs[9],
	       regs->regs[10],regs->regs[11]);
	printk("R12 : %08lx R13 : %08lx R14 : %08lx\n",
	       regs->regs[12],regs->regs[13],
	       regs->regs[14]);
	printk("MACH: %08lx MACL: %08lx GBR : %08lx PR : %08lx\n",
	       regs->mach, regs->macl, regs->gbr, regs->pr);

	/*
	 * If we're in kernel mode, dump the stack too..
	 */
	if (!user_mode(regs)) {
		extern void show_task(unsigned long *sp);
		unsigned long sp = regs->regs[15];

		show_task((unsigned long *)sp);
	}
}
142
/*
 * Create a kernel thread
 */

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * Trampoline that the new thread starts in (see kernel_thread() below):
 *   - jsr @r5 calls fn (placed in r5), with arg already in r4 per the
 *     SH calling convention (delay-slot nop);
 *   - fn's return value (r0) is moved into r4 and do_exit() is
 *     tail-called so the thread terminates cleanly.
 */
extern void kernel_thread_helper(void);
__asm__(".align 5\n"
	"kernel_thread_helper:\n\t"
	"jsr @r5\n\t"
	" nop\n\t"
	"mov.l 1f, r1\n\t"
	"jsr @r1\n\t"
	" mov r0, r4\n\t"
	".align 2\n\t"
	"1:.long do_exit");
161
/*
 * Create a kernel thread running fn(arg); returns the new pid (or a
 * negative errno from do_fork).
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{	/* Don't use this in BL=1(cli). Or else, CPU resets! */
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	/* Register protocol for kernel_thread_helper: arg in r4, fn in r5. */
	regs.regs[4] = (unsigned long) arg;
	regs.regs[5] = (unsigned long) fn;

	regs.pc = (unsigned long) kernel_thread_helper;
	regs.sr = (1 << 30);	/* SR.MD: start the thread in privileged mode */

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
176
177 /*
178 * Free current thread data structures etc..
179 */
180 void exit_thread(void)
181 {
182 if (current->thread.ubc_pc) {
183 current->thread.ubc_pc = 0;
184 ubc_usercnt -= 1;
185 }
186 }
187
/*
 * Called on exec: discard per-thread state that must not survive
 * into the new program image.
 */
void flush_thread(void)
{
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;
	/* Forget lazy FPU state */
	clear_fpu(tsk, task_pt_regs(tsk));
	clear_used_math();
#endif
}
197
/*
 * Free any arch-private resources of a dead task.  SH keeps nothing
 * beyond the thread struct itself, so this is a no-op.
 */
void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}
202
203 /* Fill in the fpu structure for a core dump.. */
204 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
205 {
206 int fpvalid = 0;
207
208 #if defined(CONFIG_SH_FPU)
209 struct task_struct *tsk = current;
210
211 fpvalid = !!tsk_used_math(tsk);
212 if (fpvalid) {
213 unlazy_fpu(tsk, regs);
214 memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
215 }
216 #endif
217
218 return fpvalid;
219 }
220
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs ptregs;

	/* Snapshot the saved user-mode frame, then convert to ELF gregset
	 * layout.  The local copy decouples us from the live frame. */
	ptregs = *task_pt_regs(tsk);
	elf_core_copy_regs(regs, &ptregs);

	/* Unconditionally reports success. */
	return 1;
}
233
234 int
235 dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *fpu)
236 {
237 int fpvalid = 0;
238
239 #if defined(CONFIG_SH_FPU)
240 fpvalid = !!tsk_used_math(tsk);
241 if (fpvalid) {
242 unlazy_fpu(tsk, task_pt_regs(tsk));
243 memcpy(fpu, &tsk->thread.fpu.hard, sizeof(*fpu));
244 }
245 #endif
246
247 return fpvalid;
248 }
249
asmlinkage void ret_from_fork(void);

/*
 * Set up the kernel-side state of a freshly forked task: its saved
 * register frame, stack pointer, and resume pc.  Returns 0 on success.
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *ti = task_thread_info(p);
	struct pt_regs *childregs;
#if defined(CONFIG_SH_FPU)
	struct task_struct *tsk = current;

	/* Flush the parent's live FPU state, then duplicate it for the
	 * child so both see the same values at fork time. */
	unlazy_fpu(tsk, regs);
	p->thread.fpu = tsk->thread.fpu;
	copy_to_stopped_child_used_math(p);
#endif

	/* The child's saved frame starts as a copy of the parent's. */
	childregs = task_pt_regs(p);
	*childregs = *regs;

	if (user_mode(regs)) {
		/* User fork/clone: use the caller-supplied user stack. */
		childregs->regs[15] = usp;
		ti->addr_limit = USER_DS;
	} else {
		/* Kernel thread: stack is the top of the child's stack page. */
		childregs->regs[15] = (unsigned long)task_stack_page(p) + THREAD_SIZE;
		ti->addr_limit = KERNEL_DS;
	}
	if (clone_flags & CLONE_SETTLS) {
		/* GBR is the SH TLS register; the new TLS value is taken
		 * from r0 — presumably placed there by the clone caller,
		 * TODO confirm against the SH clone ABI/entry code. */
		childregs->gbr = childregs->regs[0];
	}
	childregs->regs[0] = 0; /* Set return value for child */

	/* Where __switch_to will resume the child. */
	p->thread.sp = (unsigned long) childregs;
	p->thread.pc = (unsigned long) ret_from_fork;

	/* Child starts with no UBC breakpoint armed. */
	p->thread.ubc_pc = 0;

	return 0;
}
288
/* Tracing by user break controller. */
/*
 * Program UBC channel A to break on instruction fetch at @pc for
 * address space @asid (single-step support used by ptrace).
 */
static void
ubc_set_tracing(int asid, unsigned long pc)
{
	/* Break address register A <- target pc. */
	ctrl_outl(pc, UBC_BARA);

#ifdef CONFIG_MMU
	/* We don't have any ASID settings for the SH-2! */
	if (cpu_data->type != CPU_SH7604)
		ctrl_outb(asid, UBC_BASRA);
#endif

	/* No address masking: match every bit of the break address. */
	ctrl_outl(0, UBC_BAMRA);

	if (cpu_data->type == CPU_SH7729 || cpu_data->type == CPU_SH7710) {
		/* NOTE(review): these parts apparently need BBR_CPU and a
		 * 32-bit BRCR write with PCTE — confirm against the part's
		 * hardware manual before touching. */
		ctrl_outw(BBR_INST | BBR_READ | BBR_CPU, UBC_BBRA);
		ctrl_outl(BRCR_PCBA | BRCR_PCTE, UBC_BRCR);
	} else {
		ctrl_outw(BBR_INST | BBR_READ, UBC_BBRA);
		ctrl_outw(BRCR_PCBA, UBC_BRCR);
	}
}
311
312 /*
313 * switch_to(x,y) should switch tasks from x to y.
314 *
315 */
316 struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)
317 {
318 #if defined(CONFIG_SH_FPU)
319 unlazy_fpu(prev, task_pt_regs(prev));
320 #endif
321
322 #ifdef CONFIG_PREEMPT
323 {
324 unsigned long flags;
325 struct pt_regs *regs;
326
327 local_irq_save(flags);
328 regs = task_pt_regs(prev);
329 if (user_mode(regs) && regs->regs[15] >= 0xc0000000) {
330 int offset = (int)regs->regs[15];
331
332 /* Reset stack pointer: clear critical region mark */
333 regs->regs[15] = regs->regs[1];
334 if (regs->pc < regs->regs[0])
335 /* Go to rewind point */
336 regs->pc = regs->regs[0] + offset;
337 }
338 local_irq_restore(flags);
339 }
340 #endif
341
342 #ifdef CONFIG_MMU
343 /*
344 * Restore the kernel mode register
345 * k7 (r7_bank1)
346 */
347 asm volatile("ldc %0, r7_bank"
348 : /* no output */
349 : "r" (task_thread_info(next)));
350 #endif
351
352 /* If no tasks are using the UBC, we're done */
353 if (ubc_usercnt == 0)
354 /* If no tasks are using the UBC, we're done */;
355 else if (next->thread.ubc_pc && next->mm) {
356 int asid = 0;
357 #ifdef CONFIG_MMU
358 asid |= next->mm->context.id & MMU_CONTEXT_ASID_MASK;
359 #endif
360 ubc_set_tracing(asid, next->thread.ubc_pc);
361 } else {
362 ctrl_outw(0, UBC_BBRA);
363 ctrl_outw(0, UBC_BBRB);
364 }
365
366 return prev;
367 }
368
/*
 * fork(2): child reuses the parent's current user stack pointer (r15).
 */
asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
			unsigned long r6, unsigned long r7,
			struct pt_regs regs)
{
#ifdef CONFIG_MMU
	return do_fork(SIGCHLD, regs.regs[15], &regs, 0, NULL, NULL);
#else
	/* fork almost works, enough to trick you into looking elsewhere :-( */
	return -EINVAL;
#endif
}
380
381 asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
382 unsigned long parent_tidptr,
383 unsigned long child_tidptr,
384 struct pt_regs regs)
385 {
386 if (!newsp)
387 newsp = regs.regs[15];
388 return do_fork(clone_flags, newsp, &regs, 0,
389 (int __user *)parent_tidptr, (int __user *)child_tidptr);
390 }
391
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
			 unsigned long r6, unsigned long r7,
			 struct pt_regs regs)
{
	/* CLONE_VFORK: parent blocks until the child execs or exits;
	 * CLONE_VM: child shares the parent's address space meanwhile. */
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.regs[15], &regs,
		       0, NULL, NULL);
}
409
/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char *ufilename, char **uargv,
			  char **uenvp, unsigned long r7,
			  struct pt_regs regs)
{
	int error;
	char *filename;

	/* Copy the pathname in from user space into a kernel buffer;
	 * getname() returns an ERR_PTR on failure. */
	filename = getname((char __user *)ufilename);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;

	error = do_execve(filename,
			  (char __user * __user *)uargv,
			  (char __user * __user *)uenvp,
			  &regs);
	if (error == 0) {
		/* Successful exec clears the ptrace single-step flag. */
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
	}
	putname(filename);
out:
	return error;
}
438
/*
 * Return the pc a sleeping task is blocked at (for /proc wchan),
 * or 0 if the task is running or is the caller itself.
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long schedule_frame;
	unsigned long pc;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * The same comment as on the Alpha applies here, too ...
	 */
	pc = thread_saved_pc(p);
	if (in_sched_functions(pc)) {
		/* Skip over the scheduler's own frames to find the real
		 * blocked pc.  NOTE(review): the [1] offsets depend on the
		 * exact stack layout produced by __switch_to/schedule —
		 * re-verify if that code ever changes. */
		schedule_frame = ((unsigned long *)(long)p->thread.sp)[1];
		return (unsigned long)((unsigned long *)schedule_frame)[1];
	}
	return pc;
}
457
/*
 * UBC breakpoint hit (hardware single-step): disarm the UBC, drop this
 * task's reservation, and deliver SIGTRAP so the debugger regains control.
 */
asmlinkage void break_point_trap(unsigned long r4, unsigned long r5,
				 unsigned long r6, unsigned long r7,
				 struct pt_regs regs)
{
	/* Clear tracing. */
	ctrl_outw(0, UBC_BBRA);
	ctrl_outw(0, UBC_BBRB);
	/* Release our UBC slot; ptrace re-arms it for the next step. */
	current->thread.ubc_pc = 0;
	ubc_usercnt -= 1;

	force_sig(SIGTRAP, current);
}
470
/*
 * Software breakpoint (trapa-based): deliver SIGTRAP with pc pointing
 * back at the breakpoint instruction itself.
 */
asmlinkage void break_point_trap_software(unsigned long r4, unsigned long r5,
					  unsigned long r6, unsigned long r7,
					  struct pt_regs regs)
{
	/* SH instructions are 16 bits; step pc back over the trap insn so
	 * the debugger sees the breakpoint address, not the next one. */
	regs.pc -= 2;
	force_sig(SIGTRAP, current);
}