]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/frv/kernel/process.c
[PATCH] frv: task_thread_info(), task_stack_page()
[mirror_ubuntu-artful-kernel.git] / arch / frv / kernel / process.c
CommitLineData
1da177e4
LT
1/* process.c: FRV specific parts of process handling
2 *
3 * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 * - Derived from arch/m68k/kernel/process.c
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/config.h>
14#include <linux/errno.h>
15#include <linux/sched.h>
16#include <linux/kernel.h>
17#include <linux/mm.h>
18#include <linux/smp.h>
19#include <linux/smp_lock.h>
20#include <linux/stddef.h>
21#include <linux/unistd.h>
22#include <linux/ptrace.h>
23#include <linux/slab.h>
24#include <linux/user.h>
25#include <linux/elf.h>
26#include <linux/reboot.h>
27#include <linux/interrupt.h>
28
29#include <asm/uaccess.h>
30#include <asm/system.h>
31#include <asm/setup.h>
32#include <asm/pgtable.h>
33#include <asm/gdb-stub.h>
34#include <asm/mb-regs.h>
35
36#include "local.h"
37
38asmlinkage void ret_from_fork(void);
39
40#include <asm/pgalloc.h>
41
42struct task_struct *alloc_task_struct(void)
43{
44 struct task_struct *p = kmalloc(THREAD_SIZE, GFP_KERNEL);
45 if (p)
46 atomic_set((atomic_t *)(p+1), 1);
47 return p;
48}
49
50void free_task_struct(struct task_struct *p)
51{
52 if (atomic_dec_and_test((atomic_t *)(p+1)))
53 kfree(p);
54}
55
/*
 * Default idle routine: put the CPU core into its sleep state until the
 * next interrupt.  Optionally flashes the board LEDs around the sleep so
 * sleep/wake activity is visible during debugging.
 */
static void core_sleep_idle(void)
{
#ifdef LED_DEBUG_SLEEP
	/* Show that we're sleeping... */
	__set_LEDS(0x55aa);
#endif
	frv_cpu_core_sleep();
#ifdef LED_DEBUG_SLEEP
	/* ... and that we woke up */
	__set_LEDS(0);
#endif
	mb();
}

/* the idle routine called from cpu_idle(); may be repointed elsewhere */
void (*idle)(void) = core_sleep_idle;
71
72/*
73 * The idle thread. There's no useful work to be
74 * done, so just try to conserve power and have a
75 * low exit latency (ie sit in a loop waiting for
76 * somebody to say that they'd like to reschedule)
77 */
78void cpu_idle(void)
79{
5bfb5d69
NP
80 int cpu = smp_processor_id();
81
1da177e4
LT
82 /* endless idle loop with no priority at all */
83 while (1) {
84 while (!need_resched()) {
5bfb5d69 85 irq_stat[cpu].idle_timestamp = jiffies;
1da177e4
LT
86
87 if (!frv_dma_inprogress && idle)
88 idle();
89 }
90
5bfb5d69 91 preempt_enable_no_resched();
1da177e4 92 schedule();
5bfb5d69 93 preempt_disable();
1da177e4
LT
94 }
95}
96
97void machine_restart(char * __unused)
98{
99 unsigned long reset_addr;
100#ifdef CONFIG_GDBSTUB
101 gdbstub_exit(0);
102#endif
103
104 if (PSR_IMPLE(__get_PSR()) == PSR_IMPLE_FR551)
105 reset_addr = 0xfefff500;
106 else
107 reset_addr = 0xfeff0500;
108
109 /* Software reset. */
110 asm volatile(" dcef @(gr0,gr0),1 ! membar !"
111 " sti %1,@(%0,0) !"
112 " nop ! nop ! nop ! nop ! nop ! "
113 " nop ! nop ! nop ! nop ! nop ! "
114 " nop ! nop ! nop ! nop ! nop ! "
115 " nop ! nop ! nop ! nop ! nop ! "
116 : : "r" (reset_addr), "r" (1) );
117
118 for (;;)
119 ;
120}
121
/*
 * Halt the machine: tell an attached GDB stub we are exiting, then spin
 * forever (there is no hardware halt done here).
 */
void machine_halt(void)
{
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(0);
#endif

	for (;;)
		;
}
130
/*
 * Power off: no power-control hardware is driven here; notify the GDB
 * stub if configured and spin forever.
 */
void machine_power_off(void)
{
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(0);
#endif

	for (;;)
		;
}
139
140void flush_thread(void)
141{
142#if 0 //ndef NO_FPU
143 unsigned long zero = 0;
144#endif
145 set_fs(USER_DS);
146}
147
148inline unsigned long user_stack(const struct pt_regs *regs)
149{
150 while (regs->next_frame)
151 regs = regs->next_frame;
152 return user_mode(regs) ? regs->sp : 0;
153}
154
155asmlinkage int sys_fork(void)
156{
157#ifndef CONFIG_MMU
158 /* fork almost works, enough to trick you into looking elsewhere:-( */
159 return -EINVAL;
160#else
161 return do_fork(SIGCHLD, user_stack(__frame), __frame, 0, NULL, NULL);
162#endif
163}
164
165asmlinkage int sys_vfork(void)
166{
167 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, user_stack(__frame), __frame, 0,
168 NULL, NULL);
169}
170
171/*****************************************************************************/
172/*
173 * clone a process
174 * - tlsptr is retrieved by copy_thread()
175 */
176asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
177 int __user *parent_tidptr, int __user *child_tidptr,
178 int __user *tlsptr)
179{
180 if (!newsp)
181 newsp = user_stack(__frame);
182 return do_fork(clone_flags, newsp, __frame, 0, parent_tidptr, child_tidptr);
183} /* end sys_clone() */
184
/*****************************************************************************/
/*
 * This gets called before we allocate a new thread and copy the current
 * task into it.  Nothing is done here on FRV — presumably there is no
 * lazy FPU state to flush (the call is kept, commented, for reference).
 */
void prepare_to_copy(struct task_struct *tsk)
{
	//unlazy_fpu(tsk);
} /* end prepare_to_copy() */
194
195/*****************************************************************************/
196/*
197 * set up the kernel stack and exception frames for a new process
198 */
199int copy_thread(int nr, unsigned long clone_flags,
200 unsigned long usp, unsigned long topstk,
201 struct task_struct *p, struct pt_regs *regs)
202{
203 struct pt_regs *childregs0, *childregs, *regs0;
204
205 regs0 = __kernel_frame0_ptr;
206 childregs0 = (struct pt_regs *)
097cb338 207 (task_stack_page(p) + THREAD_SIZE - USER_CONTEXT_SIZE);
1da177e4
LT
208 childregs = childregs0;
209
210 /* set up the userspace frame (the only place that the USP is stored) */
211 *childregs0 = *regs0;
212
213 childregs0->gr8 = 0;
214 childregs0->sp = usp;
215 childregs0->next_frame = NULL;
216
217 /* set up the return kernel frame if called from kernel_thread() */
218 if (regs != regs0) {
219 childregs--;
220 *childregs = *regs;
221 childregs->sp = (unsigned long) childregs0;
222 childregs->next_frame = childregs0;
097cb338 223 childregs->gr15 = (unsigned long) task_thread_info(p);
1da177e4
LT
224 childregs->gr29 = (unsigned long) p;
225 }
226
227 p->set_child_tid = p->clear_child_tid = NULL;
228
229 p->thread.frame = childregs;
230 p->thread.curr = p;
231 p->thread.sp = (unsigned long) childregs;
232 p->thread.fp = 0;
233 p->thread.lr = 0;
234 p->thread.pc = (unsigned long) ret_from_fork;
235 p->thread.frame0 = childregs0;
236
237 /* the new TLS pointer is passed in as arg #5 to sys_clone() */
238 if (clone_flags & CLONE_SETTLS)
239 childregs->gr29 = childregs->gr12;
240
241 save_user_regs(p->thread.user);
242
243 return 0;
244} /* end copy_thread() */
245
1da177e4
LT
246/*
247 * sys_execve() executes a new program.
248 */
249asmlinkage int sys_execve(char *name, char **argv, char **envp)
250{
251 int error;
252 char * filename;
253
254 lock_kernel();
255 filename = getname(name);
256 error = PTR_ERR(filename);
257 if (IS_ERR(filename))
258 goto out;
259 error = do_execve(filename, argv, envp, __frame);
260 putname(filename);
261 out:
262 unlock_kernel();
263 return error;
264}
265
266unsigned long get_wchan(struct task_struct *p)
267{
268 struct pt_regs *regs0;
269 unsigned long fp, pc;
270 unsigned long stack_limit;
271 int count = 0;
272 if (!p || p == current || p->state == TASK_RUNNING)
273 return 0;
274
275 stack_limit = (unsigned long) (p + 1);
276 fp = p->thread.fp;
277 regs0 = p->thread.frame0;
278
279 do {
280 if (fp < stack_limit || fp >= (unsigned long) regs0 || fp & 3)
281 return 0;
282
283 pc = ((unsigned long *) fp)[2];
284
285 /* FIXME: This depends on the order of these functions. */
286 if (!in_sched_functions(pc))
287 return pc;
288
289 fp = *(unsigned long *) fp;
290 } while (count++ < 16);
291
292 return 0;
293}
294
295unsigned long thread_saved_pc(struct task_struct *tsk)
296{
297 /* Check whether the thread is blocked in resume() */
298 if (in_sched_functions(tsk->thread.pc))
299 return ((unsigned long *)tsk->thread.fp)[2];
300 else
301 return tsk->thread.pc;
302}
303
304int elf_check_arch(const struct elf32_hdr *hdr)
305{
306 unsigned long hsr0 = __get_HSR(0);
307 unsigned long psr = __get_PSR();
308
309 if (hdr->e_machine != EM_FRV)
310 return 0;
311
312 switch (hdr->e_flags & EF_FRV_GPR_MASK) {
313 case EF_FRV_GPR64:
314 if ((hsr0 & HSR0_GRN) == HSR0_GRN_32)
315 return 0;
316 case EF_FRV_GPR32:
317 case 0:
318 break;
319 default:
320 return 0;
321 }
322
323 switch (hdr->e_flags & EF_FRV_FPR_MASK) {
324 case EF_FRV_FPR64:
325 if ((hsr0 & HSR0_FRN) == HSR0_FRN_32)
326 return 0;
327 case EF_FRV_FPR32:
328 case EF_FRV_FPR_NONE:
329 case 0:
330 break;
331 default:
332 return 0;
333 }
334
335 if ((hdr->e_flags & EF_FRV_MULADD) == EF_FRV_MULADD)
336 if (PSR_IMPLE(psr) != PSR_IMPLE_FR405 &&
337 PSR_IMPLE(psr) != PSR_IMPLE_FR451)
338 return 0;
339
340 switch (hdr->e_flags & EF_FRV_CPU_MASK) {
341 case EF_FRV_CPU_GENERIC:
342 break;
343 case EF_FRV_CPU_FR300:
344 case EF_FRV_CPU_SIMPLE:
345 case EF_FRV_CPU_TOMCAT:
346 default:
347 return 0;
348 case EF_FRV_CPU_FR400:
349 if (PSR_IMPLE(psr) != PSR_IMPLE_FR401 &&
350 PSR_IMPLE(psr) != PSR_IMPLE_FR405 &&
351 PSR_IMPLE(psr) != PSR_IMPLE_FR451 &&
352 PSR_IMPLE(psr) != PSR_IMPLE_FR551)
353 return 0;
354 break;
355 case EF_FRV_CPU_FR450:
356 if (PSR_IMPLE(psr) != PSR_IMPLE_FR451)
357 return 0;
358 break;
359 case EF_FRV_CPU_FR500:
360 if (PSR_IMPLE(psr) != PSR_IMPLE_FR501)
361 return 0;
362 break;
363 case EF_FRV_CPU_FR550:
364 if (PSR_IMPLE(psr) != PSR_IMPLE_FR551)
365 return 0;
366 break;
367 }
368
369 return 1;
370}