/*
 * linux/arch/x86-64/kernel/process.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *      Andi Kleen.
 *
 * $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>

asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static atomic_t hlt_counter = ATOMIC_INIT(0);

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Powermanagement idle function, if any..
 */
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
        atomic_inc(&hlt_counter);
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        atomic_dec(&hlt_counter);
}

EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        if (!atomic_read(&hlt_counter)) {
                local_irq_disable();
                if (!need_resched())
                        safe_halt();
                else
                        local_irq_enable();
        }
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle (void)
{
        int oldval;

        local_irq_enable();

        /*
         * Deal with another CPU just having chosen a thread to
         * run here:
         */
        oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);

        if (!oldval) {
                set_thread_flag(TIF_POLLING_NRFLAG);
                asm volatile(
                        "2:"
                        "testl %0,%1;"
                        "rep; nop;"
                        "je 2b;"
                        : :
                        "i" (_TIF_NEED_RESCHED),
                        "m" (current_thread_info()->flags));
        } else {
                set_need_resched();
        }
}

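/*
 * Wait until every CPU has passed through its idle loop at least once, so
 * that a newly selected pm_idle handler is guaranteed to be picked up.
 * Each online CPU's cpu_idle_state flag is set here and cleared by
 * cpu_idle(); we poll (sleeping a second at a time) until every flag has
 * been observed clear.
 */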
void cpu_idle_wait(void)
{
        unsigned int cpu, this_cpu = get_cpu();
        cpumask_t map;

        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
        put_cpu();

        cpus_clear(map);
        for_each_online_cpu(cpu) {
                per_cpu(cpu_idle_state, cpu) = 1;
                cpu_set(cpu, map);
        }

        __get_cpu_var(cpu_idle_state) = 0;

        wmb();
        do {
                ssleep(1);
                for_each_online_cpu(cpu) {
                        if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
                                cpu_clear(cpu, map);
                }
                cpus_and(map, map, cpu_online_map);
        } while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle (void)
{
        /* endless idle loop with no priority at all */
        while (1) {
                while (!need_resched()) {
                        void (*idle)(void);

                        if (__get_cpu_var(cpu_idle_state))
                                __get_cpu_var(cpu_idle_state) = 0;

                        rmb();
                        idle = pm_idle;
                        if (!idle)
                                idle = default_idle;
                        idle();
                }

                schedule();
        }
}

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
{
        local_irq_enable();

        if (!need_resched()) {
                set_thread_flag(TIF_POLLING_NRFLAG);
                do {
                        __monitor((void *)&current_thread_info()->flags, 0, 0);
                        if (need_resched())
                                break;
                        __mwait(0, 0);
                } while (!need_resched());
                clear_thread_flag(TIF_POLLING_NRFLAG);
        }
}

void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
        static int printed;
        if (cpu_has(c, X86_FEATURE_MWAIT)) {
                /*
                 * Skip, if setup has overridden idle.
                 * One CPU supports mwait => All CPUs support mwait
                 */
                if (!pm_idle) {
                        if (!printed) {
                                printk("using mwait in idle threads.\n");
                                printed = 1;
                        }
                        pm_idle = mwait_idle;
                }
        }
}

static int __init idle_setup (char *str)
{
        if (!strncmp(str, "poll", 4)) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
        }

        boot_option_idle_override = 1;
        return 1;
}

__setup("idle=", idle_setup);
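
/*
 * For example, booting with "idle=poll" on the kernel command line takes
 * this path and forces the polling idle loop; any "idle=" option also sets
 * boot_option_idle_override so later code knows the default was overridden.
 */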

/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned int fsindex,gsindex;
        unsigned int ds,cs,es;

        printk("\n");
        print_modules();
        printk("Pid: %d, comm: %.20s %s %s\n",
               current->pid, current->comm, print_tainted(), system_utsname.release);
        printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
        printk_address(regs->rip);
        printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
        printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->rax, regs->rbx, regs->rcx);
        printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->rdx, regs->rsi, regs->rdi);
        printk("RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->rbp, regs->r8, regs->r9);
        printk("R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk("R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        asm("movq %%cr0, %0": "=r" (cr0));
        asm("movq %%cr2, %0": "=r" (cr2));
        asm("movq %%cr3, %0": "=r" (cr3));
        asm("movq %%cr4, %0": "=r" (cr4));

        printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs,fsindex,gs,gsindex,shadowgs);
        printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
        printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
}

void show_regs(struct pt_regs *regs)
{
        __show_regs(regs);
        show_trace(&regs->rsp);
}

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;

        /*
         * Remove function-return probe instances associated with this task
         * and put them back on the free list. Do not insert an exit probe for
         * this function, it will be disabled by kprobe_flush_task if you do.
         */
        kprobe_flush_task(me);

        if (me->thread.io_bitmap_ptr) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
        }
}

void flush_thread(void)
{
        struct task_struct *tsk = current;
        struct thread_info *t = current_thread_info();

        /*
         * Remove function-return probe instances associated with this task
         * and put them back on the free list. Do not insert an exit probe for
         * this function, it will be disabled by kprobe_flush_task if you do.
         */
        kprobe_flush_task(tsk);

        if (t->flags & _TIF_ABI_PENDING)
                t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);

        tsk->thread.debugreg0 = 0;
        tsk->thread.debugreg1 = 0;
        tsk->thread.debugreg2 = 0;
        tsk->thread.debugreg3 = 0;
        tsk->thread.debugreg6 = 0;
        tsk->thread.debugreg7 = 0;
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        clear_fpu(tsk);
        clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
                if (dead_task->mm->context.size) {
                        printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt,
                                dead_task->mm->context.size);
                        BUG();
                }
        }
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
        struct user_desc ud = {
                .base_addr = addr,
                .limit = 0xfffff,
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .useable = 1,
        };
        struct n_desc_struct *desc = (void *)t->thread.tls_array;
        desc += tls;
        desc->a = LDT_entry_a(&ud);
        desc->b = LDT_entry_b(&ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        struct desc_struct *desc = (void *)t->thread.tls_array;
        desc += tls;
        return desc->base0 |
                (((u32)desc->base1) << 16) |
                (((u32)desc->base2) << 24);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
                unsigned long unused,
                struct task_struct * p, struct pt_regs * regs)
{
        int err;
        struct pt_regs * childregs;
        struct task_struct *me = current;

        childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;

        *childregs = *regs;

        childregs->rax = 0;
        childregs->rsp = rsp;
        if (rsp == ~0UL) {
                childregs->rsp = (unsigned long)childregs;
        }

        p->thread.rsp = (unsigned long) childregs;
        p->thread.rsp0 = (unsigned long) (childregs+1);
        p->thread.userrsp = me->thread.userrsp;

        set_ti_thread_flag(p->thread_info, TIF_FORK);

        p->thread.fs = me->thread.fs;
        p->thread.gs = me->thread.gs;

        asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
        asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
        asm("mov %%es,%0" : "=m" (p->thread.es));
        asm("mov %%ds,%0" : "=m" (p->thread.ds));

        if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (test_thread_flag(TIF_IA32))
                        err = ia32_child_tls(p, childregs);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}

/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,r) set_debug(thread->debugreg ## r, r)

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 */
struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                             *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        unlazy_fpu(prev_p);

        /*
         * Reload esp0, LDT and the page table pointer:
         */
        tss->rsp0 = next->rsp0;

        /*
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
        asm volatile("mov %%es,%0" : "=m" (prev->es));
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        load_TLS(next, cpu);

        /*
         * Switch FS and GS.
         */
        {
                unsigned fsindex;
                asm volatile("movl %%fs,%0" : "=r" (fsindex));
                /* segment register != 0 always requires a reload.
                   also reload when it has changed.
                   when prev process used 64bit base always reload
                   to avoid an information leak. */
                if (unlikely(fsindex | next->fsindex | prev->fs)) {
                        loadsegment(fs, next->fsindex);
                        /* check if the user used a selector != 0
                         * if yes clear 64bit base, since overloaded base
                         * is always mapped to the Null selector
                         */
                        if (fsindex)
                                prev->fs = 0;
                }
                /* when next process has a 64bit base use it */
                if (next->fs)
                        wrmsrl(MSR_FS_BASE, next->fs);
                prev->fsindex = fsindex;
        }
        {
                unsigned gsindex;
                asm volatile("movl %%gs,%0" : "=r" (gsindex));
                if (unlikely(gsindex | next->gsindex | prev->gs)) {
                        load_gs_index(next->gsindex);
                        if (gsindex)
                                prev->gs = 0;
                }
                if (next->gs)
                        wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
                prev->gsindex = gsindex;
        }

        /*
         * Switch the PDA context.
         */
        prev->userrsp = read_pda(oldrsp);
        write_pda(oldrsp, next->userrsp);
        write_pda(pcurrent, next_p);
        write_pda(kernelstack, (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);

        /*
         * Now maybe reload the debug registers
         */
        if (unlikely(next->debugreg7)) {
                loaddebug(next, 0);
                loaddebug(next, 1);
                loaddebug(next, 2);
                loaddebug(next, 3);
                /* no 4 and 5 */
                loaddebug(next, 6);
                loaddebug(next, 7);
        }

        /*
         * Handle the IO bitmap
         */
        if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
                if (next->io_bitmap_ptr)
                        /*
                         * Copy the relevant range of the IO bitmap.
                         * Normally this is 128 bytes or less:
                         */
                        memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                               max(prev->io_bitmap_max, next->io_bitmap_max));
                else {
                        /*
                         * Clear any possible leftover bits:
                         */
                        memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
                }
        }

        return prev_p;
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
                char __user * __user *envp, struct pt_regs regs)
{
        long error;
        char * filename;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, &regs);
        if (error == 0) {
                task_lock(current);
                current->ptrace &= ~PT_DTRACE;
                task_unlock(current);
        }
        putname(filename);
        return error;
}

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);

        /* TBD: overwrites user setup. Should have two bits.
           But 64bit processes have always behaved this way,
           so it's not too bad. The main problem is just that
           32bit children are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}

asmlinkage long sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
        if (!newsp)
                newsp = regs->rsp;
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
                       NULL, NULL);
}

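/*
 * get_wchan() reports where a sleeping task is blocked: it walks the saved
 * frame pointers on the task's kernel stack (at most 16 frames, staying
 * within THREAD_SIZE) and returns the first return address that is not
 * inside the scheduler itself.
 */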
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack;
        u64 fp,rip;
        int count = 0;

        if (!p || p == current || p->state==TASK_RUNNING)
                return 0;
        stack = (unsigned long)p->thread_info;
        if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.rsp);
        do {
                if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE)
                        return 0;
                rip = *(u64 *)(fp+8);
                if (!in_sched_functions(rip))
                        return rip;
                fp = *(u64 *)fp;
        } while (count++ < 16);
        return 0;
}

long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, GS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                load_gs_index(GS_TLS_SEL);
                        }
                        task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
                } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                asm volatile("movl %0,%%fs" :: "r" (FS_TLS_SEL));
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
                } else {
                        task->thread.fsindex = 0;
                        task->thread.fs = addr;
                        if (doit) {
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                asm volatile("movl %0,%%fs" :: "r" (0));
                                ret = checking_wrmsrl(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;
                if (task->thread.fsindex == FS_TLS_SEL)
                        base = read_32bit_tls(task, FS_TLS);
                else if (doit) {
                        rdmsrl(MSR_FS_BASE, base);
                } else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit) {
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
                } else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}
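
/*
 * User space reaches this through the arch_prctl(2) system call, e.g.
 * arch_prctl(ARCH_SET_FS, base) to set the FS segment base for TLS, or
 * arch_prctl(ARCH_GET_FS, &base) to read it back. Bases that fit in
 * 32 bits are installed via a GDT/TLS entry, larger ones via the FS/GS
 * base MSRs, as handled by do_arch_prctl() above.
 */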

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        struct pt_regs *pp, ptregs;

        pp = (struct pt_regs *)(tsk->thread.rsp0);
        --pp;

        ptregs = *pp;
        ptregs.cs &= 0xffff;
        ptregs.ss &= 0xffff;

        elf_core_copy_regs(regs, &ptregs);

        return 1;
}

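/*
 * Randomize the initial user stack pointer: when randomize_va_space is
 * enabled, shift it down by up to 8 KB and then round down to a 16-byte
 * boundary.
 */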
unsigned long arch_align_stack(unsigned long sp)
{
        if (randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}