/* process.c: FRV specific parts of process handling
 *
 * Copyright (C) 2003-5 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * - Derived from arch/m68k/kernel/process.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/gdb-stub.h>
#include <asm/mb-regs.h>

#include "local.h"

asmlinkage void ret_from_fork(void);

#include <asm/pgalloc.h>

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

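/*
 * The task_struct and its usage count are allocated together in a single
 * THREAD_SIZE block: the atomic count lives immediately after the structure
 * itself and is what free_task_struct() drops before freeing the block.
 */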
struct task_struct *alloc_task_struct(void)
{
	struct task_struct *p = kmalloc(THREAD_SIZE, GFP_KERNEL);
	if (p)
		atomic_set((atomic_t *)(p+1), 1);
	return p;
}

void free_task_struct(struct task_struct *p)
{
	if (atomic_dec_and_test((atomic_t *)(p+1)))
		kfree(p);
}

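/*
 * Default idle routine: put the CPU core to sleep via frv_cpu_core_sleep(),
 * optionally signalling the sleep/wake transition on the debug LEDs.
 */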
static void core_sleep_idle(void)
{
#ifdef LED_DEBUG_SLEEP
	/* Show that we're sleeping... */
	__set_LEDS(0x55aa);
#endif
	frv_cpu_core_sleep();
#ifdef LED_DEBUG_SLEEP
	/* ... and that we woke up */
	__set_LEDS(0);
#endif
	mb();
}

void (*idle)(void) = core_sleep_idle;

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			irq_stat[cpu].idle_timestamp = jiffies;

			if (!frv_dma_inprogress && idle)
				idle();
		}

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

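/*
 * Restart the machine by writing 1 to what is presumably the chip's
 * software-reset register; that register sits at a different address on
 * the FR551 than on the other implementations.
 */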
void machine_restart(char * __unused)
{
	unsigned long reset_addr;
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(0);
#endif

	if (PSR_IMPLE(__get_PSR()) == PSR_IMPLE_FR551)
		reset_addr = 0xfefff500;
	else
		reset_addr = 0xfeff0500;

	/* Software reset. */
	asm volatile("	dcef @(gr0,gr0),1 ! membar !"
		     "	sti	%1,@(%0,0)	!"
		     "	nop ! nop ! nop ! nop ! nop ! "
		     "	nop ! nop ! nop ! nop ! nop ! "
		     "	nop ! nop ! nop ! nop ! nop ! "
		     "	nop ! nop ! nop ! nop ! nop ! "
		     : : "r" (reset_addr), "r" (1) );

	for (;;)
		;
}

void machine_halt(void)
{
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(0);
#endif

	for (;;);
}

void machine_power_off(void)
{
#ifdef CONFIG_GDBSTUB
	gdbstub_exit(0);
#endif

	for (;;);
}

void flush_thread(void)
{
#if 0 //ndef NO_FPU
	unsigned long zero = 0;
#endif
	set_fs(USER_DS);
}

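/*
 * Find the userspace stack pointer by walking out to the outermost
 * exception frame; returns 0 if that frame is not a user-mode frame.
 */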
inline unsigned long user_stack(const struct pt_regs *regs)
{
	while (regs->next_frame)
		regs = regs->next_frame;
	return user_mode(regs) ? regs->sp : 0;
}

asmlinkage int sys_fork(void)
{
#ifndef CONFIG_MMU
	/* fork almost works, enough to trick you into looking elsewhere:-( */
	return -EINVAL;
#else
	return do_fork(SIGCHLD, user_stack(__frame), __frame, 0, NULL, NULL);
#endif
}

asmlinkage int sys_vfork(void)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, user_stack(__frame), __frame, 0,
		       NULL, NULL);
}

/*****************************************************************************/
/*
 * clone a process
 * - tlsptr is retrieved by copy_thread()
 */
asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
			 int __user *parent_tidptr, int __user *child_tidptr,
			 int __user *tlsptr)
{
	if (!newsp)
		newsp = user_stack(__frame);
	return do_fork(clone_flags, newsp, __frame, 0, parent_tidptr, child_tidptr);
} /* end sys_clone() */

/*****************************************************************************/
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	//unlazy_fpu(tsk);
} /* end prepare_to_copy() */

/*****************************************************************************/
/*
 * set up the kernel stack and exception frames for a new process
 */
int copy_thread(int nr, unsigned long clone_flags,
		unsigned long usp, unsigned long topstk,
		struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs0, *childregs, *regs0;

	regs0 = __kernel_frame0_ptr;
	childregs0 = (struct pt_regs *)
		(task_stack_page(p) + THREAD_SIZE - USER_CONTEXT_SIZE);
	childregs = childregs0;

	/* set up the userspace frame (the only place that the USP is stored) */
	*childregs0 = *regs0;

	childregs0->gr8 = 0;
	childregs0->sp = usp;
	childregs0->next_frame = NULL;

	/* set up the return kernel frame if called from kernel_thread() */
	if (regs != regs0) {
		childregs--;
		*childregs = *regs;
		childregs->sp = (unsigned long) childregs0;
		childregs->next_frame = childregs0;
		childregs->gr15 = (unsigned long) task_thread_info(p);
		childregs->gr29 = (unsigned long) p;
	}

	p->set_child_tid = p->clear_child_tid = NULL;

	p->thread.frame  = childregs;
	p->thread.curr   = p;
	p->thread.sp     = (unsigned long) childregs;
	p->thread.fp     = 0;
	p->thread.lr     = 0;
	p->thread.pc     = (unsigned long) ret_from_fork;
	p->thread.frame0 = childregs0;

	/* the new TLS pointer is passed in as arg #5 to sys_clone() */
	if (clone_flags & CLONE_SETTLS)
		childregs->gr29 = childregs->gr12;

	save_user_regs(p->thread.user);

	return 0;
} /* end copy_thread() */

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
{
	int error;
	char * filename;

	lock_kernel();
	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, __frame);
	putname(filename);
out:
	unlock_kernel();
	return error;
}

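/*
 * Work out where in the kernel a sleeping task is waiting: walk the task's
 * saved frame pointers (sanity-checked against the stack limits and bounded
 * to 16 frames) until a return address outside the scheduler is found.
 */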
unsigned long get_wchan(struct task_struct *p)
{
	struct pt_regs *regs0;
	unsigned long fp, pc;
	unsigned long stack_limit;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_limit = (unsigned long) (p + 1);
	fp = p->thread.fp;
	regs0 = p->thread.frame0;

	do {
		if (fp < stack_limit || fp >= (unsigned long) regs0 || fp & 3)
			return 0;

		pc = ((unsigned long *) fp)[2];

		/* FIXME: This depends on the order of these functions. */
		if (!in_sched_functions(pc))
			return pc;

		fp = *(unsigned long *) fp;
	} while (count++ < 16);

	return 0;
}

unsigned long thread_saved_pc(struct task_struct *tsk)
{
	/* Check whether the thread is blocked in resume() */
	if (in_sched_functions(tsk->thread.pc))
		return ((unsigned long *)tsk->thread.fp)[2];
	else
		return tsk->thread.pc;
}

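/*
 * Decide whether this CPU can run the binary described by an ELF header:
 * the GPR/FPR file sizes, the multiply-add flag and the CPU family recorded
 * in e_flags must all be supported by the running implementation.  Note that
 * the GPR64/FPR64 cases deliberately fall through to the 32-register cases
 * once the larger register file has been confirmed.
 */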
int elf_check_arch(const struct elf32_hdr *hdr)
{
	unsigned long hsr0 = __get_HSR(0);
	unsigned long psr = __get_PSR();

	if (hdr->e_machine != EM_FRV)
		return 0;

	switch (hdr->e_flags & EF_FRV_GPR_MASK) {
	case EF_FRV_GPR64:
		if ((hsr0 & HSR0_GRN) == HSR0_GRN_32)
			return 0;
	case EF_FRV_GPR32:
	case 0:
		break;
	default:
		return 0;
	}

	switch (hdr->e_flags & EF_FRV_FPR_MASK) {
	case EF_FRV_FPR64:
		if ((hsr0 & HSR0_FRN) == HSR0_FRN_32)
			return 0;
	case EF_FRV_FPR32:
	case EF_FRV_FPR_NONE:
	case 0:
		break;
	default:
		return 0;
	}

	if ((hdr->e_flags & EF_FRV_MULADD) == EF_FRV_MULADD)
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR405 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR451)
			return 0;

	switch (hdr->e_flags & EF_FRV_CPU_MASK) {
	case EF_FRV_CPU_GENERIC:
		break;
	case EF_FRV_CPU_FR300:
	case EF_FRV_CPU_SIMPLE:
	case EF_FRV_CPU_TOMCAT:
	default:
		return 0;
	case EF_FRV_CPU_FR400:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR401 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR405 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR451 &&
		    PSR_IMPLE(psr) != PSR_IMPLE_FR551)
			return 0;
		break;
	case EF_FRV_CPU_FR450:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR451)
			return 0;
		break;
	case EF_FRV_CPU_FR500:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR501)
			return 0;
		break;
	case EF_FRV_CPU_FR550:
		if (PSR_IMPLE(psr) != PSR_IMPLE_FR551)
			return 0;
		break;
	}

	return 1;
}

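/*
 * Fill in the FP/media register set for an ELF coredump from the current
 * thread's saved user context (current->thread.user->f).  Returning 1 tells
 * the core dumper that the registers are valid and should be written out.
 */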
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
{
	memcpy(fpregs,
	       &current->thread.user->f,
	       sizeof(current->thread.user->f));
	return 1;
}