arch/ia64/kernel/process.c
/*
 * Architecture-specific setup.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * 04/11/17 Ashok Raj	<ashok.raj@intel.com> Added CPU Hotplug Support
 *
 * 2005-10-07 Keith Owens <kaos@sgi.com>
 *	      Add notify_die() hooks.
 */
#include <linux/cpu.h>
#include <linux/pm.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/stddef.h>
#include <linux/thread_info.h>
#include <linux/unistd.h>
#include <linux/efi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/kdebug.h>
#include <linux/utsname.h>
#include <linux/tracehook.h>
#include <linux/rcupdate.h>

#include <asm/cpu.h>
#include <asm/delay.h>
#include <asm/elf.h>
#include <asm/irq.h>
#include <asm/kexec.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
#include <asm/switch_to.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/unwind.h>
#include <asm/user.h>

#include "entry.h"

#ifdef CONFIG_PERFMON
# include <asm/perfmon.h>
#endif

#include "sigframe.h"

void (*ia64_mark_idle)(int);

unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
void (*pm_idle) (void);
EXPORT_SYMBOL(pm_idle);
void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);

void
ia64_do_show_stack (struct unw_frame_info *info, void *arg)
{
	unsigned long ip, sp, bsp;
	char buf[128];	/* don't make it so big that it overflows the stack! */

	printk("\nCall Trace:\n");
	do {
		unw_get_ip(info, &ip);
		if (ip == 0)
			break;

		unw_get_sp(info, &sp);
		unw_get_bsp(info, &bsp);
		snprintf(buf, sizeof(buf),
			 " [<%016lx>] %%s\n"
			 " sp=%016lx bsp=%016lx\n",
			 ip, sp, bsp);
		print_symbol(buf, ip);
	} while (unw_unwind(info) >= 0);
}

void
show_stack (struct task_struct *task, unsigned long *sp)
{
	if (!task)
		unw_init_running(ia64_do_show_stack, NULL);
	else {
		struct unw_frame_info info;

		unw_init_from_blocked_task(&info, task);
		ia64_do_show_stack(&info, NULL);
	}
}

void
dump_stack (void)
{
	show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);

void
show_regs (struct pt_regs *regs)
{
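	/* cr.iip holds the bundle address; psr.ri selects the slot within the bundle */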
	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;

	print_modules();
	printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current),
	       smp_processor_id(), current->comm);
	printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n",
	       regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),
	       init_utsname()->release);
	print_symbol("ip is at %s\n", ip);
	printk("unat: %016lx pfs : %016lx rsc : %016lx\n",
	       regs->ar_unat, regs->ar_pfs, regs->ar_rsc);
	printk("rnat: %016lx bsps: %016lx pr : %016lx\n",
	       regs->ar_rnat, regs->ar_bspstore, regs->pr);
	printk("ldrs: %016lx ccv : %016lx fpsr: %016lx\n",
	       regs->loadrs, regs->ar_ccv, regs->ar_fpsr);
	printk("csd : %016lx ssd : %016lx\n", regs->ar_csd, regs->ar_ssd);
	printk("b0 : %016lx b6 : %016lx b7 : %016lx\n", regs->b0, regs->b6, regs->b7);
	printk("f6 : %05lx%016lx f7 : %05lx%016lx\n",
	       regs->f6.u.bits[1], regs->f6.u.bits[0],
	       regs->f7.u.bits[1], regs->f7.u.bits[0]);
	printk("f8 : %05lx%016lx f9 : %05lx%016lx\n",
	       regs->f8.u.bits[1], regs->f8.u.bits[0],
	       regs->f9.u.bits[1], regs->f9.u.bits[0]);
	printk("f10 : %05lx%016lx f11 : %05lx%016lx\n",
	       regs->f10.u.bits[1], regs->f10.u.bits[0],
	       regs->f11.u.bits[1], regs->f11.u.bits[0]);

	printk("r1 : %016lx r2 : %016lx r3 : %016lx\n", regs->r1, regs->r2, regs->r3);
	printk("r8 : %016lx r9 : %016lx r10 : %016lx\n", regs->r8, regs->r9, regs->r10);
	printk("r11 : %016lx r12 : %016lx r13 : %016lx\n", regs->r11, regs->r12, regs->r13);
	printk("r14 : %016lx r15 : %016lx r16 : %016lx\n", regs->r14, regs->r15, regs->r16);
	printk("r17 : %016lx r18 : %016lx r19 : %016lx\n", regs->r17, regs->r18, regs->r19);
	printk("r20 : %016lx r21 : %016lx r22 : %016lx\n", regs->r20, regs->r21, regs->r22);
	printk("r23 : %016lx r24 : %016lx r25 : %016lx\n", regs->r23, regs->r24, regs->r25);
	printk("r26 : %016lx r27 : %016lx r28 : %016lx\n", regs->r26, regs->r27, regs->r28);
	printk("r29 : %016lx r30 : %016lx r31 : %016lx\n", regs->r29, regs->r30, regs->r31);

	if (user_mode(regs)) {
		/* print the stacked registers */
		unsigned long val, *bsp, ndirty;
		int i, sof, is_nat = 0;

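		/*
		 * Dump the stacked registers of the current frame:
		 * ar.bspstore advanced past the dirty partition (derived
		 * from loadrs) gives the user backing-store address of r32,
		 * and the sof field of cr.ifs gives the frame size.
		 */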
		sof = regs->cr_ifs & 0x7f;	/* size of frame */
		ndirty = (regs->loadrs >> 19);
		bsp = ia64_rse_skip_regs((unsigned long *) regs->ar_bspstore, ndirty);
		for (i = 0; i < sof; ++i) {
			get_user(val, (unsigned long __user *) ia64_rse_skip_regs(bsp, i));
			printk("r%-3u:%c%016lx%s", 32 + i, is_nat ? '*' : ' ', val,
			       ((i == sof - 1) || (i % 3) == 2) ? "\n" : " ");
		}
	} else
		show_stack(NULL, NULL);
}

/* local support for deprecated console_print */
void
console_print(const char *s)
{
	printk(KERN_EMERG "%s", s);
}

void
do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
{
	if (fsys_mode(current, &scr->pt)) {
		/*
		 * defer signal-handling etc. until we return to
		 * privilege-level 0.
		 */
		if (!ia64_psr(&scr->pt)->lp)
			ia64_psr(&scr->pt)->lp = 1;
		return;
	}

#ifdef CONFIG_PERFMON
	if (current->thread.pfm_needs_checking)
		/*
		 * Note: pfm_handle_work() may be called with interrupts
		 * disabled; it may enable interrupts within the function.
		 */
		pfm_handle_work();
#endif

	/* deal with pending signal delivery */
	if (test_thread_flag(TIF_SIGPENDING)) {
		local_irq_enable();	/* force interrupt enable */
		ia64_do_signal(scr, in_syscall);
	}

	if (test_thread_flag(TIF_NOTIFY_RESUME)) {
		clear_thread_flag(TIF_NOTIFY_RESUME);
		tracehook_notify_resume(&scr->pt);
	}

	/* copy user rbs to kernel rbs */
	if (unlikely(test_thread_flag(TIF_RESTORE_RSE))) {
		local_irq_enable();	/* force interrupt enable */
		ia64_sync_krbs();
	}

	local_irq_disable();	/* force interrupt disable */
}

static int pal_halt = 1;
static int can_do_pal_halt = 1;

static int __init nohalt_setup(char * str)
{
	pal_halt = can_do_pal_halt = 0;
	return 1;
}
__setup("nohalt", nohalt_setup);

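/*
 * Record whether PAL halt may be used by the idle loop; it remains disabled
 * if the "nohalt" boot option was given.
 */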
void
update_pal_halt_status(int status)
{
	can_do_pal_halt = pal_halt && status;
}

/*
 * We use this if we don't have any better idle routine.
 */
void
default_idle (void)
{
	local_irq_enable();
	while (!need_resched()) {
		if (can_do_pal_halt) {
			local_irq_disable();
			if (!need_resched()) {
				safe_halt();
			}
			local_irq_enable();
		} else
			cpu_relax();
	}
}

#ifdef CONFIG_HOTPLUG_CPU
/* We don't actually take the CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	unsigned int this_cpu = smp_processor_id();

	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	max_xtp();
	local_irq_disable();
	idle_task_exit();
	ia64_jump_to_sal(&sal_boot_rendez_state[this_cpu]);
	/*
	 * The above is a point of no return; the processor is
	 * expected to be in the SAL loop now.
	 */
	BUG();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

void __attribute__((noreturn))
cpu_idle (void)
{
	void (*mark_idle)(int) = ia64_mark_idle;
	int cpu = smp_processor_id();

	/* endless idle loop with no priority at all */
	while (1) {
		rcu_idle_enter();
		if (can_do_pal_halt) {
			current_thread_info()->status &= ~TS_POLLING;
			/*
			 * TS_POLLING-cleared state must be visible before we
			 * test NEED_RESCHED:
			 */
			smp_mb();
		} else {
			current_thread_info()->status |= TS_POLLING;
		}

		if (!need_resched()) {
			void (*idle)(void);
#ifdef CONFIG_SMP
			min_xtp();
#endif
			rmb();
			if (mark_idle)
				(*mark_idle)(1);

			idle = pm_idle;
			if (!idle)
				idle = default_idle;
			(*idle)();
			if (mark_idle)
				(*mark_idle)(0);
#ifdef CONFIG_SMP
			normal_xtp();
#endif
		}
		rcu_idle_exit();
		schedule_preempt_disabled();
		check_pgt_cache();
		if (cpu_is_offline(cpu))
			play_dead();
	}
}

void
ia64_save_extra (struct task_struct *task)
{
#ifdef CONFIG_PERFMON
	unsigned long info;
#endif

	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
		ia64_save_debug_regs(&task->thread.dbr[0]);

#ifdef CONFIG_PERFMON
	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
		pfm_save_regs(task);

	info = __get_cpu_var(pfm_syst_info);
	if (info & PFM_CPUINFO_SYST_WIDE)
		pfm_syst_wide_update_task(task, info, 0);
#endif
}

void
ia64_load_extra (struct task_struct *task)
{
#ifdef CONFIG_PERFMON
	unsigned long info;
#endif

	if ((task->thread.flags & IA64_THREAD_DBG_VALID) != 0)
		ia64_load_debug_regs(&task->thread.dbr[0]);

#ifdef CONFIG_PERFMON
	if ((task->thread.flags & IA64_THREAD_PM_VALID) != 0)
		pfm_load_regs(task);

	info = __get_cpu_var(pfm_syst_info);
	if (info & PFM_CPUINFO_SYST_WIDE)
		pfm_syst_wide_update_task(task, info, 1);
#endif
}

/*
 * Copy the state of an ia-64 thread.
 *
 * We get here through the following call chain:
 *
 *	from user-level:		from kernel:
 *
 *	<clone syscall>			<some kernel call frames>
 *	sys_clone			   :
 *	do_fork				do_fork
 *	copy_thread			copy_thread
 *
 * This means that the stack layout is as follows:
 *
 *	+---------------------+ (highest addr)
 *	|   struct pt_regs    |
 *	+---------------------+
 *	| struct switch_stack |
 *	+---------------------+
 *	|                     |
 *	|    memory stack     |
 *	|                     | <-- sp (lowest addr)
 *	+---------------------+
 *
 * Observe that we copy the unat values that are in pt_regs and switch_stack.  Spilling an
 * integer to address X causes bit N in ar.unat to be set to the NaT bit of the register,
 * with N=(X & 0x1ff)/8.  Thus, copying the unat value preserves the NaT bits ONLY if the
 * pt_regs structure in the parent is congruent to that of the child, modulo 512.  Since
 * the stack is page aligned and the page size is at least 4KB, this is always the case,
 * so there is nothing to worry about.
 */
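/*
 * Worked example (for illustration): the parent's and child's pt_regs start
 * at the same offset within their page-aligned stacks, so a spill address X
 * in the parent and the corresponding address X' in the child satisfy
 * X % 512 == X' % 512.  Hence (X & 0x1ff)/8 == (X' & 0x1ff)/8, the same
 * ar.unat bit describes both spills, and the copied unat word stays correct.
 */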
int
copy_thread(unsigned long clone_flags,
	     unsigned long user_stack_base, unsigned long user_stack_size,
	     struct task_struct *p, struct pt_regs *regs)
{
	extern char ia64_ret_from_clone;
	struct switch_stack *child_stack, *stack;
	unsigned long rbs, child_rbs, rbs_size;
	struct pt_regs *child_ptregs;
	int retval = 0;

#ifdef CONFIG_SMP
	/*
	 * For SMP idle threads, fork_by_hand() calls do_fork with
	 * NULL regs.
	 */
	if (!regs)
		return 0;
#endif

	stack = ((struct switch_stack *) regs) - 1;

	child_ptregs = (struct pt_regs *) ((unsigned long) p + IA64_STK_OFFSET) - 1;
	child_stack = (struct switch_stack *) child_ptregs - 1;

	/* copy parent's switch_stack & pt_regs to child: */
	memcpy(child_stack, stack, sizeof(*child_ptregs) + sizeof(*child_stack));

	rbs = (unsigned long) current + IA64_RBS_OFFSET;
	child_rbs = (unsigned long) p + IA64_RBS_OFFSET;
	rbs_size = stack->ar_bspstore - rbs;

	/* copy the parent's register backing store to the child: */
	memcpy((void *) child_rbs, (void *) rbs, rbs_size);

	if (likely(user_mode(child_ptregs))) {
		if (clone_flags & CLONE_SETTLS)
			child_ptregs->r13 = regs->r16;	/* see sys_clone2() in entry.S */
		if (user_stack_base) {
			child_ptregs->r12 = user_stack_base + user_stack_size - 16;
			child_ptregs->ar_bspstore = user_stack_base;
			child_ptregs->ar_rnat = 0;
			child_ptregs->loadrs = 0;
		}
	} else {
		/*
		 * Note: we simply preserve the relative position of
		 * the stack pointer here.  There is no need to
		 * allocate a scratch area here, since that will have
		 * been taken care of by the caller of sys_clone()
		 * already.
		 */
		child_ptregs->r12 = (unsigned long) child_ptregs - 16;	/* kernel sp */
		child_ptregs->r13 = (unsigned long) p;	/* set `current' pointer */
	}
	child_stack->ar_bspstore = child_rbs + rbs_size;
	child_stack->b0 = (unsigned long) &ia64_ret_from_clone;

	/* copy parts of thread_struct: */
	p->thread.ksp = (unsigned long) child_stack - 16;

	/*
	 * Stop some PSR bits from being inherited.  The psr.up/psr.pp bits
	 * must be cleared on fork but inherited on execve(); therefore we
	 * must specify them explicitly here and not include them in
	 * IA64_PSR_BITS_TO_CLEAR.
	 */
	child_ptregs->cr_ipsr = ((child_ptregs->cr_ipsr | IA64_PSR_BITS_TO_SET)
				 & ~(IA64_PSR_BITS_TO_CLEAR | IA64_PSR_PP | IA64_PSR_UP));

	/*
	 * NOTE: The calling convention considers all floating point
	 * registers in the high partition (fph) to be scratch.  Since
	 * the only way to get to this point is through a system call,
	 * we know that the values in fph are all dead.  Hence, there
	 * is no need to inherit the fph state from the parent to the
	 * child and all we have to do is to make sure that
	 * IA64_THREAD_FPH_VALID is cleared in the child.
	 *
	 * XXX We could push this optimization a bit further by
	 * clearing IA64_THREAD_FPH_VALID on ANY system call.
	 * However, it's not clear this is worth doing.  Also, it
	 * would be a slight deviation from the normal Linux system
	 * call behavior where scratch registers are preserved across
	 * system calls (unless used by the system call itself).
	 */
#	define THREAD_FLAGS_TO_CLEAR	(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID \
					 | IA64_THREAD_PM_VALID)
#	define THREAD_FLAGS_TO_SET	0
	p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
			   | THREAD_FLAGS_TO_SET);
	ia64_drop_fpu(p);	/* don't pick up stale state from a CPU's fph */

#ifdef CONFIG_PERFMON
	if (current->thread.pfm_context)
		pfm_inherit(p, child_ptregs);
#endif
	return retval;
}

static void
do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
	unsigned long mask, sp, nat_bits = 0, ar_rnat, urbs_end, cfm;
	unsigned long uninitialized_var(ip);	/* GCC be quiet */
	elf_greg_t *dst = arg;
	struct pt_regs *pt;
	char nat;
	int i;

	memset(dst, 0, sizeof(elf_gregset_t));	/* don't leak any kernel bits to user-level */

	if (unw_unwind_to_user(info) < 0)
		return;

	unw_get_sp(info, &sp);
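	/* the kernel stack pointer leaves a 16-byte scratch area below pt_regs */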
	pt = (struct pt_regs *) (sp + 16);

	urbs_end = ia64_get_user_rbs_end(task, pt, &cfm);

	if (ia64_sync_user_rbs(task, info->sw, pt->ar_bspstore, urbs_end) < 0)
		return;

	ia64_peek(task, info->sw, urbs_end, (long) ia64_rse_rnat_addr((long *) urbs_end),
		  &ar_rnat);

	/*
	 * coredump format:
	 *	r0-r31
	 *	NaT bits (for r0-r31; bit N == 1 iff rN is a NaT)
	 *	predicate registers (p0-p63)
	 *	b0-b7
	 *	ip cfm user-mask
	 *	ar.rsc ar.bsp ar.bspstore ar.rnat
	 *	ar.ccv ar.unat ar.fpsr ar.pfs ar.lc ar.ec
	 */

	/* r0 is zero */
	for (i = 1, mask = (1UL << i); i < 32; ++i) {
		unw_get_gr(info, i, &dst[i], &nat);
		if (nat)
			nat_bits |= mask;
		mask <<= 1;
	}
	dst[32] = nat_bits;
	unw_get_pr(info, &dst[33]);

	for (i = 0; i < 8; ++i)
		unw_get_br(info, i, &dst[34 + i]);

	unw_get_rp(info, &ip);
	dst[42] = ip + ia64_psr(pt)->ri;
	dst[43] = cfm;
	dst[44] = pt->cr_ipsr & IA64_PSR_UM;

	unw_get_ar(info, UNW_AR_RSC, &dst[45]);
	/*
	 * For bsp and bspstore, unw_get_ar() would return the kernel
	 * addresses, but we need the user-level addresses instead:
	 */
	dst[46] = urbs_end;	/* note: by convention PT_AR_BSP points to the end of the urbs! */
	dst[47] = pt->ar_bspstore;
	dst[48] = ar_rnat;
	unw_get_ar(info, UNW_AR_CCV, &dst[49]);
	unw_get_ar(info, UNW_AR_UNAT, &dst[50]);
	unw_get_ar(info, UNW_AR_FPSR, &dst[51]);
	dst[52] = pt->ar_pfs;	/* UNW_AR_PFS is == to pt->cr_ifs for interrupt frames */
	unw_get_ar(info, UNW_AR_LC, &dst[53]);
	unw_get_ar(info, UNW_AR_EC, &dst[54]);
	unw_get_ar(info, UNW_AR_CSD, &dst[55]);
	unw_get_ar(info, UNW_AR_SSD, &dst[56]);
}

void
do_dump_task_fpu (struct task_struct *task, struct unw_frame_info *info, void *arg)
{
	elf_fpreg_t *dst = arg;
	int i;

	memset(dst, 0, sizeof(elf_fpregset_t));	/* don't leak any "random" bits */

	if (unw_unwind_to_user(info) < 0)
		return;

	/* f0 is 0.0, f1 is 1.0 */

	for (i = 2; i < 32; ++i)
		unw_get_fr(info, i, dst + i);

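	/* f32-f127 come from the thread's fph save area: 96 registers of 16 bytes each */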
	ia64_flush_fph(task);
	if ((task->thread.flags & IA64_THREAD_FPH_VALID) != 0)
		memcpy(dst + 32, task->thread.fph, 96*16);
}

void
do_copy_regs (struct unw_frame_info *info, void *arg)
{
	do_copy_task_regs(current, info, arg);
}

void
do_dump_fpu (struct unw_frame_info *info, void *arg)
{
	do_dump_task_fpu(current, info, arg);
}

void
ia64_elf_core_copy_regs (struct pt_regs *pt, elf_gregset_t dst)
{
	unw_init_running(do_copy_regs, dst);
}

int
dump_fpu (struct pt_regs *pt, elf_fpregset_t dst)
{
	unw_init_running(do_dump_fpu, dst);
	return 1;	/* f0-f31 are always valid so we always return 1 */
}

long
sys_execve (const char __user *filename,
	    const char __user *const __user *argv,
	    const char __user *const __user *envp,
	    struct pt_regs *regs)
{
	char *fname;
	int error;

	fname = getname(filename);
	error = PTR_ERR(fname);
	if (IS_ERR(fname))
		goto out;
	error = do_execve(fname, argv, envp, regs);
	putname(fname);
out:
	return error;
}

pid_t
kernel_thread (int (*fn)(void *), void *arg, unsigned long flags)
{
	extern void start_kernel_thread (void);
	unsigned long *helper_fptr = (unsigned long *) &start_kernel_thread;
	struct {
		struct switch_stack sw;
		struct pt_regs pt;
	} regs;

	memset(&regs, 0, sizeof(regs));
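	/*
	 * On ia64 a function pointer refers to a function descriptor:
	 * word 0 is the code entry point and word 1 is the gp value.
	 * Both are taken from start_kernel_thread's descriptor below.
	 */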
	regs.pt.cr_iip = helper_fptr[0];	/* set entry point (IP) */
	regs.pt.r1 = helper_fptr[1];		/* set GP */
	regs.pt.r9 = (unsigned long) fn;	/* 1st argument */
	regs.pt.r11 = (unsigned long) arg;	/* 2nd argument */
	/* Preserve PSR bits, except for bits 32-34 and 37-45, which we can't read.  */
	regs.pt.cr_ipsr = ia64_getreg(_IA64_REG_PSR) | IA64_PSR_BN;
	regs.pt.cr_ifs = 1UL << 63;		/* mark as valid, empty frame */
	regs.sw.ar_fpsr = regs.pt.ar_fpsr = ia64_getreg(_IA64_REG_AR_FPSR);
	regs.sw.ar_bspstore = (unsigned long) current + IA64_RBS_OFFSET;
	regs.sw.pr = (1 << PRED_KERNEL_STACK);
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs.pt, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

/* This gets called from kernel_thread() via ia64_invoke_thread_helper().  */
int
kernel_thread_helper (int (*fn)(void *), void *arg)
{
	return (*fn)(arg);
}

/*
 * Flush thread state.  This is called when a thread does an execve().
 */
void
flush_thread (void)
{
	/* drop floating-point and debug-register state if it exists: */
	current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
	ia64_drop_fpu(current);
}

/*
 * Clean up state associated with the current thread.  This is called when
 * the thread calls exit().
 */
void
exit_thread (void)
{
	ia64_drop_fpu(current);
#ifdef CONFIG_PERFMON
	/* if needed, stop monitoring and flush state to perfmon context */
	if (current->thread.pfm_context)
		pfm_exit_thread(current);

	/* free debug register resources */
	if (current->thread.flags & IA64_THREAD_DBG_VALID)
		pfm_release_debug_registers(current);
#endif
}

unsigned long
get_wchan (struct task_struct *p)
{
	struct unw_frame_info info;
	unsigned long ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * Note: p may not be a blocked task (it could be current or
	 * another process running on some other CPU).  Rather than
	 * trying to determine if p is really blocked, we just assume
	 * it's blocked and rely on the unwind routines to fail
	 * gracefully if the process wasn't really blocked after all.
	 * --davidm 99/12/15
	 */
	unw_init_from_blocked_task(&info, p);
	do {
		if (p->state == TASK_RUNNING)
			return 0;
		if (unw_unwind(&info) < 0)
			return 0;
		unw_get_ip(&info, &ip);
		if (!in_sched_functions(ip))
			return ip;
	} while (count++ < 16);
	return 0;
}

void
cpu_halt (void)
{
	pal_power_mgmt_info_u_t power_info[8];
	unsigned long min_power;
	int i, min_power_state;

	if (ia64_pal_halt_info(power_info) != 0)
		return;

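	/*
	 * Pick the implemented PAL power state with the lowest power
	 * consumption, then halt in it forever.
	 */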
	min_power_state = 0;
	min_power = power_info[0].pal_power_mgmt_info_s.power_consumption;
	for (i = 1; i < 8; ++i)
		if (power_info[i].pal_power_mgmt_info_s.im
		    && power_info[i].pal_power_mgmt_info_s.power_consumption < min_power) {
			min_power = power_info[i].pal_power_mgmt_info_s.power_consumption;
			min_power_state = i;
		}

	while (1)
		ia64_pal_halt(min_power_state);
}

void machine_shutdown(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu != smp_processor_id())
			cpu_down(cpu);
	}
#endif
#ifdef CONFIG_KEXEC
	kexec_disable_iosapic();
#endif
}

void
machine_restart (char *restart_cmd)
{
	(void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0);
	(*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
}

void
machine_halt (void)
{
	(void) notify_die(DIE_MACHINE_HALT, "", NULL, 0, 0, 0);
	cpu_halt();
}

void
machine_power_off (void)
{
	if (pm_power_off)
		pm_power_off();
	machine_halt();
}