/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 * Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/tick.h>
#include <linux/prctl.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/idle.h>

asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
	atomic_notifier_chain_register(&idle_notifier, n);
}

void enter_idle(void)
{
	write_pda(isidle, 1);
	atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
}

static void __exit_idle(void)
{
	if (test_and_clear_bit_pda(0, isidle) == 0)
		return;
	atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
}

/* Called from interrupts to signify idle end */
void exit_idle(void)
{
	/* idle loop has pid 0 */
	if (current->pid)
		return;
	__exit_idle();
}
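
/*
 * Usage sketch (hypothetical consumer, not part of this file): a driver
 * can hook the idle notifier chain above to react to IDLE_START and
 * IDLE_END events, e.g.:
 *
 *	static int my_idle_notify(struct notifier_block *nb,
 *				  unsigned long action, void *unused)
 *	{
 *		return NOTIFY_OK;
 *	}
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_notify,
 *	};
 *	...
 *	idle_notifier_register(&my_idle_nb);
 *
 * Callbacks run via atomic_notifier_call_chain(), i.e. in atomic
 * context, so they must not sleep.
 */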

#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

#include <asm/nmi.h>
/* We halt the CPU with physical CPU hotplug */
static inline void play_dead(void)
{
	idle_task_exit();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (i.e. sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
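/*
 * Note (explanatory, based on the scheduler contract of this era):
 * TS_POLLING, set below, tells the scheduler that the idle task polls
 * need_resched() itself, so a remote CPU may set TIF_NEED_RESCHED
 * without sending a reschedule IPI. The rmb() in the inner loop orders
 * the subsequent offline/need_resched checks against such remote updates.
 */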
void cpu_idle(void)
{
	current_thread_info()->status |= TS_POLLING;
	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick(1);
		while (!need_resched()) {

			rmb();

			if (cpu_is_offline(smp_processor_id()))
				play_dead();
			/*
			 * Idle routines should keep interrupts disabled
			 * from here on, until they go to idle.
			 * Otherwise, idle callbacks can misfire.
			 */
			local_irq_disable();
			enter_idle();
			/* Don't trace irqs off for idle */
			stop_critical_timings();
			pm_idle();
			start_critical_timings();
			/* In many cases the interrupt that ended idle
			   has already called exit_idle. But some idle
			   loops can be woken up without interrupt. */
			__exit_idle();
		}

		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/* Also prints some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	printk("\n");
	print_modules();
	printk("Pid: %d, comm: %.20s %s %s %.*s\n",
		current->pid, current->comm, print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip);
	printk_address(regs->ip, 1);
	printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp,
		regs->flags);
	printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk("RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk("R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk("R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4();

	printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
	printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	printk("DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	printk("CPU %d:", smp_processor_id());
	__show_regs(regs);
	show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
}

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	struct task_struct *me = current;
	struct thread_struct *t = &me->thread;

	if (me->thread.io_bitmap_ptr) {
		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
		t->io_bitmap_max = 0;
		put_cpu();
	}
#ifdef CONFIG_X86_DS
	/* Free any DS contexts that have not been properly released. */
	if (unlikely(t->ds_ctx)) {
		/* we clear debugctl to make sure DS is not used. */
		update_debugctlmsr(0);
		ds_free(t->ds_ctx);
	}
#endif /* CONFIG_X86_DS */
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
		clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
		if (test_tsk_thread_flag(tsk, TIF_IA32)) {
			clear_tsk_thread_flag(tsk, TIF_IA32);
		} else {
			set_tsk_thread_flag(tsk, TIF_IA32);
			current_thread_info()->status |= TS_COMPAT;
		}
	}
	clear_tsk_thread_flag(tsk, TIF_DEBUG);

	tsk->thread.debugreg0 = 0;
	tsk->thread.debugreg1 = 0;
	tsk->thread.debugreg2 = 0;
	tsk->thread.debugreg3 = 0;
	tsk->thread.debugreg6 = 0;
	tsk->thread.debugreg7 = 0;
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state.
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
		if (dead_task->mm->context.size) {
			printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
					dead_task->comm,
					dead_task->mm->context.ldt,
					dead_task->mm->context.size);
			BUG();
		}
	}
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
	struct user_desc ud = {
		.base_addr = addr,
		.limit = 0xfffff,
		.seg_32bit = 1,
		.limit_in_pages = 1,
		.useable = 1,
	};
	struct desc_struct *desc = t->thread.tls_array;
	desc += tls;
	fill_ldt(desc, &ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
	return get_desc_base(&t->thread.tls_array[tls]);
}
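
/*
 * Illustration (explanatory): for a base of, say, 0x1000, set_32bit_tls()
 * builds a flat, page-granular, 32-bit data descriptor covering 4 GiB
 * with base 0x1000 in the thread's TLS slot; read_32bit_tls() recovers
 * that base. do_arch_prctl() below uses this pair as the fast path for
 * FS/GS bases that fit in 32 bits, since reloading a segment register
 * from the GDT is cheaper than a wrmsr on every context switch.
 */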

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
		unsigned long unused,
		struct task_struct *p, struct pt_regs *regs)
{
	int err;
	struct pt_regs *childregs;
	struct task_struct *me = current;

	childregs = ((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(p))) - 1;
	*childregs = *regs;

	childregs->ax = 0;
	childregs->sp = sp;
	if (sp == ~0UL)
		childregs->sp = (unsigned long)childregs;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->thread.usersp = me->thread.usersp;

	set_tsk_thread_flag(p, TIF_FORK);

	p->thread.fs = me->thread.fs;
	p->thread.gs = me->thread.gs;

	savesegment(gs, p->thread.gsindex);
	savesegment(fs, p->thread.fsindex);
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);

	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
				IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (test_thread_flag(TIF_IA32))
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)childregs->si, 0);
		else
#endif
			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	loadsegment(fs, 0);
	loadsegment(es, 0);
	loadsegment(ds, 0);
	load_gs_index(0);
	regs->ip = new_ip;
	regs->sp = new_sp;
	write_pda(oldrsp, new_sp);
	regs->cs = __USER_CS;
	regs->ss = __USER_DS;
	regs->flags = 0x200;	/* 0x200 is X86_EFLAGS_IF: start with interrupts enabled */
	set_fs(USER_DS);
	/*
	 * Free the old FP and other extended state
	 */
	free_thread_xstate(current);
}
EXPORT_SYMBOL_GPL(start_thread);

static void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

static void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}

static void enable_TSC(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_enable_TSC();
	preempt_enable();
}

int get_tsc_mode(unsigned long adr)
{
	unsigned int val;

	if (test_thread_flag(TIF_NOTSC))
		val = PR_TSC_SIGSEGV;
	else
		val = PR_TSC_ENABLE;

	return put_user(val, (unsigned int __user *)adr);
}

int set_tsc_mode(unsigned int val)
{
	if (val == PR_TSC_SIGSEGV)
		disable_TSC();
	else if (val == PR_TSC_ENABLE)
		enable_TSC();
	else
		return -EINVAL;

	return 0;
}
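
/*
 * Usage sketch (hypothetical, userspace side): get_tsc_mode() and
 * set_tsc_mode() back the PR_GET_TSC/PR_SET_TSC prctl(2) operations, e.g.:
 *
 *	#include <sys/prctl.h>
 *	prctl(PR_SET_TSC, PR_TSC_SIGSEGV);
 *
 * after which executing RDTSC raises SIGSEGV in this task, because
 * hard_disable_TSC() sets CR4.TSD and RDTSC then faults at user
 * privilege level.
 */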

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
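/*
 * For example, loaddebug(next, 7) pastes the register number into the
 * field name and expands to set_debugreg(next->debugreg7, 7).
 */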

static inline void __switch_to_xtra(struct task_struct *prev_p,
				    struct task_struct *next_p,
				    struct tss_struct *tss)
{
	struct thread_struct *prev, *next;
	unsigned long debugctl;

	prev = &prev_p->thread;
	next = &next_p->thread;

	debugctl = prev->debugctlmsr;

#ifdef CONFIG_X86_DS
	{
		unsigned long ds_prev = 0, ds_next = 0;

		if (prev->ds_ctx)
			ds_prev = (unsigned long)prev->ds_ctx->ds;
		if (next->ds_ctx)
			ds_next = (unsigned long)next->ds_ctx->ds;

		if (ds_next != ds_prev) {
			/*
			 * We clear debugctl to make sure DS
			 * is not in use when we change it:
			 */
			debugctl = 0;
			update_debugctlmsr(0);
			wrmsrl(MSR_IA32_DS_AREA, ds_next);
		}
	}
#endif /* CONFIG_X86_DS */

	if (next->debugctlmsr != debugctl)
		update_debugctlmsr(next->debugctlmsr);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		loaddebug(next, 0);
		loaddebug(next, 1);
		loaddebug(next, 2);
		loaddebug(next, 3);
		/* no 4 and 5 */
		loaddebug(next, 6);
		loaddebug(next, 7);
	}

	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}

	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Copy the relevant range of the IO bitmap.
		 * Normally this is 128 bytes or less:
		 */
		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
		       max(prev->io_bitmap_max, next->io_bitmap_max));
	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
		/*
		 * Clear any possible leftover bits:
		 */
		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
	}

#ifdef CONFIG_X86_PTRACE_BTS
	if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS);

	if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS))
		ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES);
#endif /* CONFIG_X86_PTRACE_BTS */
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 */
struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	unsigned fsindex, gsindex;

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0, LDT and the page table pointer:
	 */
	load_sp0(tss, next);

	/*
	 * Switch DS and ES.
	 * This won't pick up thread selector changes, but I guess that is ok.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	savesegment(fs, fsindex);
	savesegment(gs, gsindex);

	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/*
	 * Switch FS and GS.
	 *
	 * Segment register != 0 always requires a reload. Also
	 * reload when it has changed. When the previous process used a
	 * 64-bit base, always reload to avoid an information leak.
	 */
	if (unlikely(fsindex | next->fsindex | prev->fs)) {
		loadsegment(fs, next->fsindex);
		/*
		 * Check if the user used a selector != 0; if yes
		 * clear 64bit base, since overloaded base is always
		 * mapped to the Null selector
		 */
		if (fsindex)
			prev->fs = 0;
	}
	/* when next process has a 64bit base use it */
	if (next->fs)
		wrmsrl(MSR_FS_BASE, next->fs);
	prev->fsindex = fsindex;

	if (unlikely(gsindex | next->gsindex | prev->gs)) {
		load_gs_index(next->gsindex);
		if (gsindex)
			prev->gs = 0;
	}
	if (next->gs)
		wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
	prev->gsindex = gsindex;

	/* Must be after DS reload */
	unlazy_fpu(prev_p);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	prev->usersp = read_pda(oldrsp);
	write_pda(oldrsp, next->usersp);
	write_pda(pcurrent, next_p);

	write_pda(kernelstack,
		  (unsigned long)task_stack_page(next_p) +
		  THREAD_SIZE - PDA_STACKOFFSET);
#ifdef CONFIG_CC_STACKPROTECTOR
	write_pda(stack_canary, next_p->stack_canary);
	/*
	 * Build time only check to make sure the stack_canary is at
	 * offset 40 in the pda; this is a gcc ABI requirement
	 */
	BUILD_BUG_ON(offsetof(struct x8664_pda, stack_canary) != 40);
#endif

	/*
	 * Now maybe reload the debug registers and handle I/O bitmaps
	 */
	if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
		     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
		__switch_to_xtra(prev_p, next_p, tss);

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();
	return prev_p;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
		char __user * __user *envp, struct pt_regs *regs)
{
	long error;
	char *filename;

	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		return error;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
	return error;
}

void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);

	/* TBD: overwrites user setup. Should have two bits.
	   But 64-bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32-bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}

asmlinkage long sys_fork(struct pt_regs *regs)
{
	return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
	  void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
	if (!newsp)
		newsp = regs->sp;
	return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
		       NULL, NULL);
}

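/*
 * Explanatory note: get_wchan() walks the sleeping task's kernel stack
 * by frame pointer. thread.sp points at the switched-out frame, whose
 * first word is the saved %rbp, and each frame stores its return
 * address at fp + 8. The walk reports the first return address outside
 * the scheduler and gives up after 16 frames or when fp leaves the
 * stack page, which relies on the kernel being built with frame
 * pointers.
 */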
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack;
	u64 fp, ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack = (unsigned long)task_stack_page(p);
	if (p->thread.sp < stack || p->thread.sp > stack + THREAD_SIZE)
		return 0;
	fp = *(u64 *)(p->thread.sp);
	do {
		if (fp < (unsigned long)stack ||
		    fp > (unsigned long)stack + THREAD_SIZE)
			return 0;
		ip = *(u64 *)(fp + 8);
		if (!in_sched_functions(ip))
			return ip;
		fp = *(u64 *)fp;
	} while (count++ < 16);
	return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (code) {
	case ARCH_SET_GS:
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, GS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				load_gs_index(GS_TLS_SEL);
			}
			task->thread.gsindex = GS_TLS_SEL;
			task->thread.gs = 0;
		} else {
			task->thread.gsindex = 0;
			task->thread.gs = addr;
			if (doit) {
				load_gs_index(0);
				ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (addr >= TASK_SIZE_OF(task))
			return -EPERM;
		cpu = get_cpu();
		/* handle small bases via the GDT because that's faster to
		   switch. */
		if (addr <= 0xffffffff) {
			set_32bit_tls(task, FS_TLS, addr);
			if (doit) {
				load_TLS(&task->thread, cpu);
				loadsegment(fs, FS_TLS_SEL);
			}
			task->thread.fsindex = FS_TLS_SEL;
			task->thread.fs = 0;
		} else {
			task->thread.fsindex = 0;
			task->thread.fs = addr;
			if (doit) {
				/* set the selector to 0 to not confuse
				   __switch_to */
				loadsegment(fs, 0);
				ret = checking_wrmsrl(MSR_FS_BASE, addr);
			}
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;
		if (task->thread.fsindex == FS_TLS_SEL)
			base = read_32bit_tls(task, FS_TLS);
		else if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;
		unsigned gsindex;
		if (task->thread.gsindex == GS_TLS_SEL)
			base = read_32bit_tls(task, GS_TLS);
		else if (doit) {
			savesegment(gs, gsindex);
			if (gsindex)
				rdmsrl(MSR_KERNEL_GS_BASE, base);
			else
				base = task->thread.gs;
		} else
			base = task->thread.gs;
		ret = put_user(base, (unsigned long __user *)addr);
		break;
	}

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
	return do_arch_prctl(current, code, addr);
}
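
/*
 * Usage sketch (hypothetical, userspace side): a 64-bit thread library
 * can point %fs at its TLS block via the arch_prctl(2) syscall, e.g.:
 *
 *	#include <asm/prctl.h>
 *	#include <sys/syscall.h>
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_SET_FS, (unsigned long)tls_block);
 *	syscall(SYS_arch_prctl, ARCH_GET_FS, (unsigned long)&base);
 *
 * where tls_block is the caller's TLS area. Bases that fit in 32 bits
 * take the GDT fast path (set_32bit_tls() above); larger bases are
 * written to MSR_FS_BASE directly.
 */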

/* Apply up to 8 KiB of randomization to the stack pointer, then round
   down to a 16-byte boundary as the ABI requires. */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}

/* Place the heap at a randomized, page-aligned address up to 32 MiB
   (0x02000000 bytes) above the current brk; fall back to mm->brk if
   randomize_range() fails. */
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}