/*
 * arch/x86/kernel/process_32.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>
#include <linux/prctl.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>
#include <asm/kdebug.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
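	/*
	 * (Annotation added for clarity:) thread.sp is the stack pointer
	 * saved by switch_to(); the saved EIP sits a fixed number of
	 * words above it, hence the hard-coded index 3. That offset is
	 * tied to the exact frame switch_to()/schedule() leave behind,
	 * so treat it as fragile rather than architectural.
	 */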
	return ((unsigned long *)tsk->thread.sp)[3];
}

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		if (!need_resched())
			safe_halt();	/* enables interrupts racelessly */
		else
			local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
	} else {
		local_irq_enable();
		/* loop is done by the caller */
		cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take the CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* This must be done before dead CPU ack */
	cpu_exit_clear();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the CPU
	 */
	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {
			void (*idle)(void);

			check_pgt_cache();
			rmb();
			idle = pm_idle;

			if (rcu_pending(cpu))
				rcu_check_callbacks(cpu, 0);

			if (!idle)
				idle = default_idle;

			if (cpu_is_offline(cpu))
				play_dead();

			local_irq_disable();
			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			idle();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

void __show_registers(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		savesegment(gs, gs);
	} else {
		sp = (unsigned long) (&regs->sp);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk("\n");
	printk("Pid: %d, comm: %s %s (%s %.*s)\n",
			task_pid_nr(current), current->comm,
			print_tainted(), init_utsname()->release,
			(int)strcspn(init_utsname()->version, " "),
			init_utsname()->version);

	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
			(u16)regs->cs, regs->ip, regs->flags,
			smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
		(u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR6: %08lx DR7: %08lx\n",
			d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	__show_registers(regs, 1);
	show_trace(NULL, regs, &regs->sp, regs->bp);
}

/*
 * This gets run with %bx containing the
 * function to call, and %dx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.bx = (unsigned long) fn;
	regs.dx = (unsigned long) arg;

	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

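/*
 * Illustrative sketch (not part of the original file): a typical
 * caller hands kernel_thread() a worker function and clone flags.
 * The names below are hypothetical.
 */
#if 0
static int example_worker(void *arg)
{
	/* runs in kernel context with the flags requested below */
	return 0;
}

static void example_spawn(void)
{
	/* share fs info and open files with the spawning thread */
	kernel_thread(example_worker, NULL, CLONE_FS | CLONE_FILES);
}
#endif
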
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	unsigned long unused,
	struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->ax = 0;
	childregs->sp = sp;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);

	p->thread.ip = (unsigned long) ret_from_fork;

	savesegment(gs, p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

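/*
 * Sketch (added): the child stack that copy_thread() sets up, given
 * that task_pt_regs() points at the pt_regs slot at the top of the
 * kernel stack:
 *
 *	thread.sp0 -> +--------------------+  (== childregs + 1)
 *	              | pt_regs copied     |
 *	              | from parent, ax=0  |
 *	thread.sp  -> +--------------------+  (== childregs)
 *	              | free kernel stack  |
 *
 * thread.ip = ret_from_fork, so the first switch into the child
 * unwinds through these regs back to user mode.
 */
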
void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	__asm__("movl %0, %%gs" :: "r"(0));
	regs->fs = 0;
	set_fs(USER_DS);
	regs->ds = __USER_DS;
	regs->es = __USER_DS;
	regs->ss = __USER_DS;
	regs->cs = __USER_CS;
	regs->ip = new_ip;
	regs->sp = new_sp;
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);

static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}

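/*
 * Illustrative user-space view (added): get_tsc_mode()/set_tsc_mode()
 * sit behind the PR_GET_TSC/PR_SET_TSC prctl(2) operations.
 */
#if 0
	#include <sys/prctl.h>

	int mode;

	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);	/* rdtsc now raises SIGSEGV */
	prctl(PR_GET_TSC, &mode);		/* reads back a PR_TSC_* value */
#endif
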
static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		 struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;
	if (next->ds_area_msr != prev->ds_area_msr) {
		/* we clear debugctl to make sure DS
		 * is not in use when we change it */
		debugctl = 0;
		update_debugctlmsr(0);
		wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0);
	}

	if (next->debugctlmsr != debugctl)
		update_debugctlmsr(next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		set_debugreg(next->debugreg0, 0);
		set_debugreg(next->debugreg1, 1);
		set_debugreg(next->debugreg2, 2);
		set_debugreg(next->debugreg3, 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg6, 6);
		set_debugreg(next->debugreg7, 7);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

#ifdef X86_BTS
	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif

	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Disable the bitmap via an invalid offset. We still cache
		 * the previous bitmap owner and the IO bitmap contents:
		 */
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		return;
	}

	if (likely(next == tss->io_bitmap_owner)) {
		/*
		 * The previous owner of the bitmap (hence the bitmap content)
		 * matches the next task, so we don't have to do anything but
		 * set a valid offset in the TSS:
		 */
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		return;
	}
	/*
	 * Lazy TSS I/O bitmap copy. We set an invalid offset here and
	 * let the task get a GPF in case an I/O instruction is
	 * performed. The handler of the GPF will verify that the
	 * faulting task has a valid I/O bitmap and, if true, does the
	 * real copy and restarts the instruction. This saves us
	 * redundant copies when the currently switched task does not
	 * perform any I/O during its timeslice.
	 */
	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}

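/*
 * Summary (added): after __switch_to_xtra(), io_bitmap_base is in one
 * of three states:
 *
 *	INVALID_IO_BITMAP_OFFSET      - next task has no I/O bitmap
 *	IO_BITMAP_OFFSET              - cached bitmap already matches next
 *	INVALID_IO_BITMAP_OFFSET_LAZY - copy deferred to the GPF handler
 */
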
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0.
	 */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs. This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	savesegment(gs, prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/* If the task has used the FPU in the last 5 timeslices, just do a
	 * full restore of the math state immediately to avoid the trap; the
	 * chances of needing the FPU soon are obviously high now
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);

	x86_write_percpu(current_task, next_p);

	return prev_p;
}

asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.bx;
	newsp = regs.cx;
	parent_tidptr = (int __user *)regs.dx;
	child_tidptr = (int __user *)regs.di;
	if (!newsp)
		newsp = regs.sp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

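/*
 * Note (added): the i386 clone() register convention as consumed
 * above and in copy_thread(): %ebx = clone_flags, %ecx = new stack,
 * %edx = parent_tidptr, %esi = TLS (struct user_desc *, read via
 * childregs->si under CLONE_SETTLS), %edi = child_tidptr.
 */
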
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char *filename;

	filename = getname((char __user *) regs.bx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.cx,
			(char __user * __user *) regs.dx,
			&regs);
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}

#define top_esp		(THREAD_SIZE - sizeof(unsigned long))
#define top_ebp		(THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long bp, sp, ip;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.sp;
	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes bp last. */
	bp = *(unsigned long *) sp;
	do {
		if (bp < stack_page || bp > top_ebp+stack_page)
			return 0;
		ip = *(unsigned long *) (bp+4);
		if (!in_sched_functions(ip))
			return ip;
		bp = *(unsigned long *) bp;
	} while (count++ < 16);
	return 0;
}

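/*
 * Sketch (added): get_wchan() walks conventional i386 frames, where
 * each frame is a (saved %ebp, return address) pair:
 *
 *	bp   -> caller's saved %ebp
 *	bp+4 -> return address (the ip tested above)
 *
 * It returns the first return address outside the scheduler, i.e.
 * the place the task is blocked waiting.
 */
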
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

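/*
 * Worked example (added): with randomization enabled, the stack top
 * drops by up to 8191 bytes and is then 16-byte aligned, e.g.
 * 0xbffff000 - 5000 = 0xbfffdc78, masked to 0xbfffdc70.
 */
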
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}