/*
 * arch/avr32/kernel/process.c
 * (architecture-dependent process handling for AVR32)
 */
1 /*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8 #include <linux/sched.h>
9 #include <linux/sched/debug.h>
10 #include <linux/sched/task.h>
11 #include <linux/sched/task_stack.h>
12 #include <linux/module.h>
13 #include <linux/kallsyms.h>
14 #include <linux/fs.h>
15 #include <linux/pm.h>
16 #include <linux/ptrace.h>
17 #include <linux/slab.h>
18 #include <linux/reboot.h>
19 #include <linux/tick.h>
20 #include <linux/uaccess.h>
21 #include <linux/unistd.h>
22
23 #include <asm/sysreg.h>
24 #include <asm/ocd.h>
25 #include <asm/syscalls.h>
26
27 #include <mach/pm.h>
28
/*
 * Hook for board/platform code to install a power-off handler; called
 * from machine_power_off() below. Exported so platform code built as a
 * module can set it.
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
31
32 /*
33 * This file handles the architecture-dependent parts of process handling..
34 */
35
/*
 * Architecture idle hook: enter the platform-specific low-power idle
 * state via the machine support code (see <mach/pm.h>).
 */
void arch_cpu_idle(void)
{
	cpu_enter_idle();
}
40
/*
 * Halt the machine by putting the core into its deepest sleep state.
 * Never returns in the normal case.
 */
void machine_halt(void)
{
	/*
	 * Enter Stop mode. The 32 kHz oscillator will keep running so
	 * the RTC will keep the time properly and the system will
	 * boot quickly.
	 *
	 * NOTE(review): "sleep 3" requests sleep mode 3 (Stop); the
	 * trailing "sub pc, -2" appears to be the instruction executed
	 * on any spurious wake-up — confirm against the AVR32 ISA.
	 */
	asm volatile("sleep 3\n\t"
		     "sub pc, -2");
}
51
52 void machine_power_off(void)
53 {
54 if (pm_power_off)
55 pm_power_off();
56 }
57
/*
 * Reset the chip through the On-Chip Debug unit: first enable the
 * debug unit (DBE), then assert the reset bit (RES), and spin until
 * the reset takes effect. @cmd is ignored.
 */
void machine_restart(char *cmd)
{
	ocd_write(DC, (1 << OCD_DC_DBE_BIT));
	ocd_write(DC, (1 << OCD_DC_RES_BIT));
	while (1) ;
}
64
/*
 * Free current thread data structures etc.
 *
 * The only architecture-specific per-thread state to tear down here
 * is the On-Chip Debug unit association.
 */
void exit_thread(struct task_struct *tsk)
{
	ocd_disable(tsk);
}
72
/*
 * Called on exec to reset per-thread state; AVR32 keeps no extra
 * per-thread state that needs flushing.
 */
void flush_thread(void)
{
	/* nothing to do */
}
77
/*
 * Final per-thread cleanup when a dead task's resources are released;
 * nothing architecture-specific to free on AVR32.
 */
void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}
82
/*
 * Hex-dump the memory range [bottom, top) to the console, eight 32-bit
 * words per line, starting from the enclosing 32-byte boundary.
 *
 * Each word is fetched with __get_user() so a faulting address aborts
 * the dump cleanly instead of recursing into another exception.
 *
 * @str:     caption printed before the address range
 * @log_lvl: printk log-level prefix applied to every line
 * @bottom:  first address of interest (inclusive)
 * @top:     end of range (exclusive)
 */
static void dump_mem(const char *str, const char *log_lvl,
		unsigned long bottom, unsigned long top)
{
	unsigned long p;
	int i;

	printk("%s%s(0x%08lx to 0x%08lx)\n", log_lvl, str, bottom, top);

	/* Round down to a 32-byte boundary so the rows stay aligned. */
	for (p = bottom & ~31; p < top; ) {
		/* Only the low 16 address bits are shown per row. */
		printk("%s%04lx: ", log_lvl, p & 0xffff);

		for (i = 0; i < 8; i++, p += 4) {
			unsigned int val;

			if (p < bottom || p >= top)
				/* Pad words outside the requested range. */
				printk(" ");
			else {
				if (__get_user(val, (unsigned int __user *)p)) {
					/* Fault while reading: stop the dump. */
					printk("\n");
					goto out;
				}
				printk("%08x ", val);
			}
		}
		printk("\n");
	}

out:
	return;
}
113
114 static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p)
115 {
116 return (p > (unsigned long)tinfo)
117 && (p < (unsigned long)tinfo + THREAD_SIZE - 3);
118 }
119
#ifdef CONFIG_FRAME_POINTER
/*
 * Print a call trace for @tsk by walking the frame-pointer (r7) chain.
 *
 * The starting frame pointer is taken from @regs when supplied, read
 * live from r7 when tracing the current task, or loaded from the
 * task's saved cpu context otherwise. @sp is unused in this variant.
 */
static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp,
			       struct pt_regs *regs, const char *log_lvl)
{
	unsigned long lr, fp;
	struct thread_info *tinfo;

	if (regs)
		fp = regs->r7;
	else if (tsk == current)
		asm("mov %0, r7" : "=r"(fp));
	else
		fp = tsk->thread.cpu_context.r7;

	/*
	 * Walk the stack as long as the frame pointer (a) is within
	 * the kernel stack of the task, and (b) it doesn't move
	 * downwards.
	 */
	tinfo = task_thread_info(tsk);
	printk("%sCall trace:\n", log_lvl);
	while (valid_stack_ptr(tinfo, fp)) {
		unsigned long new_fp;

		/* Saved link register at *fp; next frame pointer at fp+4. */
		lr = *(unsigned long *)fp;
#ifdef CONFIG_KALLSYMS
		printk("%s [<%08lx>] ", log_lvl, lr);
#else
		printk(" [<%08lx>] ", lr);
#endif
		print_symbol("%s\n", lr);

		new_fp = *(unsigned long *)(fp + 4);
		if (new_fp <= fp)
			break;
		fp = new_fp;
	}
	printk("\n");
}
#else
/*
 * No frame pointers available: scan raw stack words from @sp upward
 * and print every value that lies in kernel text. May print stale
 * addresses left on the stack (false positives).
 */
static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp,
			       struct pt_regs *regs, const char *log_lvl)
{
	unsigned long addr;

	printk("%sCall trace:\n", log_lvl);

	while (!kstack_end(sp)) {
		addr = *sp++;
		if (kernel_text_address(addr)) {
#ifdef CONFIG_KALLSYMS
			printk("%s [<%08lx>] ", log_lvl, addr);
#else
			printk(" [<%08lx>] ", addr);
#endif
			print_symbol("%s\n", addr);
		}
	}
	printk("\n");
}
#endif
181
/*
 * Dump the raw stack contents followed by a call trace for @tsk.
 *
 * @sp may be 0, in which case it is derived: from the task's saved
 * kernel stack pointer when @tsk is given, otherwise from the address
 * of a local variable (&tinfo), which approximates the current stack
 * pointer. A NULL @tsk means the current task.
 */
void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp,
			struct pt_regs *regs, const char *log_lvl)
{
	struct thread_info *tinfo;

	if (sp == 0) {
		if (tsk)
			sp = tsk->thread.cpu_context.ksp;
		else
			/* A local's address stands in for the current SP. */
			sp = (unsigned long)&tinfo;
	}
	if (!tsk)
		tsk = current;

	tinfo = task_thread_info(tsk);

	if (valid_stack_ptr(tinfo, sp)) {
		/* Dump from sp up to the top of the kernel stack. */
		dump_mem("Stack: ", log_lvl, sp,
				THREAD_SIZE + (unsigned long)tinfo);
		show_trace_log_lvl(tsk, (unsigned long *)sp, regs, log_lvl);
	}
}
204
205 void show_stack(struct task_struct *tsk, unsigned long *stack)
206 {
207 show_stack_log_lvl(tsk, (unsigned long)stack, NULL, "");
208 }
209
/*
 * Human-readable names for the CPU mode field of the status register,
 * indexed by (SR & MODE_MASK) >> MODE_SHIFT (see show_regs_log_lvl).
 */
static const char *cpu_modes[] = {
	"Application", "Supervisor", "Interrupt level 0", "Interrupt level 1",
	"Interrupt level 2", "Interrupt level 3", "Exception", "NMI"
};
214
/*
 * Print a full register dump of @regs, each line prefixed with
 * @log_lvl. For traps taken in kernel mode the stack pointer is
 * recomputed from the exception frame and the PC/LR symbols are
 * resolved via print_symbol().
 */
void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl)
{
	unsigned long sp = regs->sp;
	unsigned long lr = regs->lr;
	/* CPU mode field of SR; indexes cpu_modes[]. */
	unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT;

	show_regs_print_info(log_lvl);

	if (!user_mode(regs)) {
		/* Kernel-mode trap: SP sits just above the saved frame. */
		sp = (unsigned long)regs + FRAME_SIZE_FULL;

		printk("%s", log_lvl);
		print_symbol("PC is at %s\n", instruction_pointer(regs));
		printk("%s", log_lvl);
		print_symbol("LR is at %s\n", lr);
	}

	printk("%spc : [<%08lx>] lr : [<%08lx>] %s\n"
	       "%ssp : %08lx r12: %08lx r11: %08lx\n",
	       log_lvl, instruction_pointer(regs), lr, print_tainted(),
	       log_lvl, sp, regs->r12, regs->r11);
	printk("%sr10: %08lx r9 : %08lx r8 : %08lx\n",
	       log_lvl, regs->r10, regs->r9, regs->r8);
	printk("%sr7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
	       log_lvl, regs->r7, regs->r6, regs->r5, regs->r4);
	printk("%sr3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
	       log_lvl, regs->r3, regs->r2, regs->r1, regs->r0);
	/* Upper-case letter = flag set, lower-case = flag clear. */
	printk("%sFlags: %c%c%c%c%c\n", log_lvl,
	       regs->sr & SR_Q ? 'Q' : 'q',
	       regs->sr & SR_V ? 'V' : 'v',
	       regs->sr & SR_N ? 'N' : 'n',
	       regs->sr & SR_Z ? 'Z' : 'z',
	       regs->sr & SR_C ? 'C' : 'c');
	printk("%sMode bits: %c%c%c%c%c%c%c%c%c%c\n", log_lvl,
	       regs->sr & SR_H ? 'H' : 'h',
	       regs->sr & SR_J ? 'J' : 'j',
	       regs->sr & SR_DM ? 'M' : 'm',
	       regs->sr & SR_D ? 'D' : 'd',
	       regs->sr & SR_EM ? 'E' : 'e',
	       regs->sr & SR_I3M ? '3' : '.',
	       regs->sr & SR_I2M ? '2' : '.',
	       regs->sr & SR_I1M ? '1' : '.',
	       regs->sr & SR_I0M ? '0' : '.',
	       regs->sr & SR_GM ? 'G' : 'g');
	printk("%sCPU Mode: %s\n", log_lvl, cpu_modes[mode]);
}
261
/*
 * Default register dump used by generic kernel code: print the
 * registers and a call trace for the current task with no log-level
 * prefix.
 */
void show_regs(struct pt_regs *regs)
{
	unsigned long sp = regs->sp;

	if (!user_mode(regs))
		/* Kernel-mode trap: SP sits just above the saved frame. */
		sp = (unsigned long)regs + FRAME_SIZE_FULL;

	show_regs_log_lvl(regs, "");
	show_trace_log_lvl(current, (unsigned long *)sp, regs, "");
}
EXPORT_SYMBOL(show_regs);
273
274 /* Fill in the fpu structure for a core dump. This is easy -- we don't have any */
275 int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
276 {
277 /* Not valid */
278 return 0;
279 }
280
281 asmlinkage void ret_from_fork(void);
282 asmlinkage void ret_from_kernel_thread(void);
283 asmlinkage void syscall_return(void);
284
/*
 * Set up the thread state of a newly created task @p.
 *
 * Kernel threads (PF_KTHREAD) get a zeroed register frame; r0/r1 of
 * the saved cpu context carry the argument and the function to run
 * (@arg / @usp), r2 holds syscall_return as the address the function
 * "returns" to, and execution starts at ret_from_kernel_thread in
 * supervisor mode. User forks copy the parent's register frame,
 * optionally install a new user stack pointer, force the child's
 * syscall return value (r12) to 0, and start at ret_from_fork.
 *
 * Returns 0 (this implementation cannot fail).
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long arg,
		struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);

	if (unlikely(p->flags & PF_KTHREAD)) {
		memset(childregs, 0, sizeof(struct pt_regs));
		p->thread.cpu_context.r0 = arg;
		p->thread.cpu_context.r1 = usp; /* fn */
		p->thread.cpu_context.r2 = (unsigned long)syscall_return;
		p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread;
		childregs->sr = MODE_SUPERVISOR;
	} else {
		*childregs = *current_pt_regs();
		if (usp)
			childregs->sp = usp;
		childregs->r12 = 0; /* Set return value for child */
		p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	}

	p->thread.cpu_context.sr = MODE_SUPERVISOR | SR_GM;
	p->thread.cpu_context.ksp = (unsigned long)childregs;

	/* Enable OCD (hardware debug) only for ptraced children of a
	 * debugged parent; otherwise make sure the flag is clear. */
	clear_tsk_thread_flag(p, TIF_DEBUG);
	if ((clone_flags & CLONE_PTRACE) && test_thread_flag(TIF_DEBUG))
		ocd_enable(p);

	return 0;
}
315
/*
 * This function is supposed to answer the question "who called
 * schedule()?"
 *
 * Returns the program counter of the function in which @p went to
 * sleep, or 0 when that cannot be determined (@p is NULL, is the
 * current task, or is runnable).
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long pc;
	unsigned long stack_page;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)task_stack_page(p);
	BUG_ON(!stack_page);

	/*
	 * The stored value of PC is either the address right after
	 * the call to __switch_to() or ret_from_fork.
	 */
	pc = thread_saved_pc(p);
	if (in_sched_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
		/* Follow the frame chain one level up, out of schedule(). */
		unsigned long fp = p->thread.cpu_context.r7;
		BUG_ON(fp < stack_page || fp > (THREAD_SIZE + stack_page));
		pc = *(unsigned long *)fp;
#else
		/*
		 * We depend on the frame size of schedule here, which
		 * is actually quite ugly. It might be possible to
		 * determine the frame size automatically at build
		 * time by doing this:
		 *   - compile sched/core.c
		 *   - disassemble the resulting sched.o
		 *   - look for 'sub sp,??' shortly after '<schedule>:'
		 */
		unsigned long sp = p->thread.cpu_context.ksp + 16;
		BUG_ON(sp < stack_page || sp > (THREAD_SIZE + stack_page));
		pc = *(unsigned long *)sp;
#endif
	}

	return pc;
}