/*
 * arch/v850/kernel/process.c -- Arch-dependent process handling
 *
 * Copyright (C) 2001,02,03  NEC Electronics Corporation
 * Copyright (C) 2001,02,03  Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License.  See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/reboot.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>

extern void ret_from_fork (void);

/* The idle loop.  */
void default_idle (void)
{
	while (1) {
		/* Halt the processor until the next interrupt arrives,
		   then check whether a reschedule is needed.  */
		while (! need_resched ())
			asm ("halt; nop; nop; nop; nop; nop" ::: "cc");
		schedule ();
	}
}

void (*idle)(void) = default_idle;

/*
 * The idle thread.  There's no useful work to be done, so just try to
 * conserve power and have a low exit latency (ie sit in a loop waiting
 * for somebody to say that they'd like to reschedule).
 */
void cpu_idle (void)
{
	/* endless idle loop with no priority at all */
	(*idle) ();
}

/*
 * This is the mechanism for creating a new kernel thread.
 *
 * NOTE! Only a kernel-only process (ie the swapper or direct descendants who
 * haven't done an "execve()") should use this: it will work within a system
 * call from a "real" process, but the process memory space will not be free'd
 * until both the parent and the child have exited.
 */
int kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
	register mm_segment_t fs = get_fs ();
	register unsigned long syscall asm (SYSCALL_NUM);
	register unsigned long arg0 asm (SYSCALL_ARG0);
	register unsigned long ret asm (SYSCALL_RET);

	set_fs (KERNEL_DS);

	/* Clone this thread.  Note that we don't pass the clone syscall's
	   second argument -- it's ignored for calls from kernel mode (the
	   child's SP is always set to the top of the kernel stack).  */
	arg0 = flags | CLONE_VM;
	syscall = __NR_clone;
	asm volatile ("trap " SYSCALL_SHORT_TRAP
		      : "=r" (ret), "=r" (syscall)
		      : "1" (syscall), "r" (arg0)
		      : SYSCALL_SHORT_CLOBBERS);

	if (ret == 0) {
		/* In child thread, call FN and exit.  */
		arg0 = (*fn) (arg);
		syscall = __NR_exit;
		asm volatile ("trap " SYSCALL_SHORT_TRAP
			      : "=r" (ret), "=r" (syscall)
			      : "1" (syscall), "r" (arg0)
			      : SYSCALL_SHORT_CLOBBERS);
	}

	/* In parent.  */
	set_fs (fs);

	return ret;
}
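
/* An illustrative sketch (disabled) of how a caller might use
   kernel_thread above to spawn a kernel-mode helper.  The names
   `example_thread_fn' and `example_start_helper' are made up for the
   example and do not exist elsewhere in the kernel.  */
#if 0
static int example_thread_fn (void *arg)
{
	/* Runs in the new thread; the value returned here becomes the
	   argument of the __NR_exit trap issued by kernel_thread.  */
	printk (KERN_DEBUG "example helper started, arg = %p\n", arg);
	return 0;
}

static void example_start_helper (void)
{
	/* kernel_thread returns the child's pid on success, or a
	   negative error code on failure.  */
	int pid = kernel_thread (example_thread_fn, NULL,
				 CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0)
		printk (KERN_ERR "example: kernel_thread failed (%d)\n", pid);
}
#endif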

/* Called from the generic exec code when this process starts a new
   program image; make sure the new image begins with a USER_DS address
   limit, even if the execing thread had switched to KERNEL_DS (as
   kernel_thread above does).  */
void flush_thread (void)
{
	set_fs (USER_DS);
}

int copy_thread (int nr, unsigned long clone_flags,
		 unsigned long stack_start, unsigned long stack_size,
		 struct task_struct *p, struct pt_regs *regs)
{
	/* Start pushing stuff from the top of the child's kernel stack.  */
	unsigned long orig_ksp = (unsigned long)p->thread_info + THREAD_SIZE;
	unsigned long ksp = orig_ksp;
	/* We push two `state save' stack frames (see entry.S) on the new
	   kernel stack:
	     1) The innermost one is what switch_thread would have
		pushed, and is used when we context switch to the child
		thread for the first time.  It's set up to return to
		ret_from_fork in entry.S.
	     2) The outermost one (nearest the top) is what a syscall
		trap would have pushed, and is set up to return to the
		same location as the parent thread, but with a return
		value of 0.  */
	struct pt_regs *child_switch_regs, *child_trap_regs;

	/* Trap frame.  */
	ksp -= STATE_SAVE_SIZE;
	child_trap_regs = (struct pt_regs *)(ksp + STATE_SAVE_PT_OFFSET);
	/* Switch frame.  */
	ksp -= STATE_SAVE_SIZE;
	child_switch_regs = (struct pt_regs *)(ksp + STATE_SAVE_PT_OFFSET);

	/* First copy parent's register state to child.  */
	*child_switch_regs = *regs;
	*child_trap_regs = *regs;

	/* switch_thread returns to the restored value of the lp
	   register (r31), so we make that the place where we want to
	   jump when the child thread begins running.  */
	child_switch_regs->gpr[GPR_LP] = (v850_reg_t)ret_from_fork;

	if (regs->kernel_mode)
		/* Since we're returning to kernel-mode, make sure the child's
		   stored kernel stack pointer agrees with what the actual
		   stack pointer will be at that point (the trap return code
		   always restores the SP, even when returning to
		   kernel-mode).  */
		child_trap_regs->gpr[GPR_SP] = orig_ksp;
	else
		/* Set the child's user-mode stack-pointer (the name
		   `stack_start' is a misnomer, it's just the initial SP
		   value).  */
		child_trap_regs->gpr[GPR_SP] = stack_start;

	/* Thread state for the child (everything else is on the stack).  */
	p->thread.ksp = ksp;

	return 0;
}
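
/* For reference, the child kernel-stack layout set up above (offsets
   follow directly from the two STATE_SAVE_SIZE decrements):

	orig_ksp (== p->thread_info + THREAD_SIZE, top of the stack)
	   |   trap frame -- what a syscall trap would have pushed;
	   |   unwound when the child returns to the parent's resume
	   |   point (case 2 in the comment above)
	orig_ksp - STATE_SAVE_SIZE
	   |   switch frame -- what switch_thread would have pushed,
	   |   with lp pointing at ret_from_fork (case 1 above)
	ksp = orig_ksp - 2*STATE_SAVE_SIZE  (recorded in p->thread.ksp)
*/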

/*
 * Fill in the user structure for a core dump.
 */
void dump_thread (struct pt_regs *regs, struct user *dump)
{
#if 0  /* Later.  XXX */
	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = regs->gpr[GPR_SP];
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	dump->u_dsize = ((unsigned long) (current->mm->brk +
					  (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;

	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

	dump->u_ar0 = (struct user_regs_struct *)((int)&dump->regs - (int)dump);
	dump->regs = *regs;
	dump->u_fpvalid = 0;
#endif
}

/*
 * sys_execve() executes a new program.
 */
int sys_execve (char *name, char **argv, char **envp, struct pt_regs *regs)
{
	char *filename = getname (name);
	int error = PTR_ERR (filename);

	if (! IS_ERR (filename)) {
		error = do_execve (filename, argv, envp, regs);
		putname (filename);
	}

	return error;
}


/*
 * These bracket the sleeping functions.
 */
#define first_sched	((unsigned long)__sched_text_start)
#define last_sched	((unsigned long)__sched_text_end)

unsigned long get_wchan (struct task_struct *p)
{
#if 0  /* Barf.  Figure out the stack-layout later.  XXX  */
	unsigned long fp, pc;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	pc = thread_saved_pc (p);

	/* This quite disgusting function walks up the stack, following
	   saved return addresses, until it finds something that's out of
	   bounds (as defined by `first_sched' and `last_sched').  It then
	   returns the last PC that was in-bounds.  */
	do {
		if (fp < stack_page + sizeof (struct task_struct) ||
		    fp >= 8184+stack_page)
			return 0;
		pc = ((unsigned long *)fp)[1];
		if (pc < first_sched || pc >= last_sched)
			return pc;
		fp = *(unsigned long *) fp;
	} while (count++ < 16);
#endif

	return 0;
}