/* arch/x86/kernel/process_64.c */
/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 *        Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * X86-64 port
 *        Andi Kleen.
 *
 * CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/intel_rdt_sched.h>
#include <asm/unistd.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif

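/*
 * A note on the declaration below (a hedged sketch of its role, based
 * on the entry code of kernels of this vintage, not on anything in this
 * file): the 64-bit SYSCALL entry path in arch/x86/entry/entry_64.S
 * stashes the user stack pointer here while it switches to the kernel
 * stack, since SYSCALL itself does not switch stacks.
 */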
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);

/*
 * Also prints some state that isn't saved in pt_regs. The extra
 * segment, control and debug registers are dumped only when @all is
 * set.
 */
void __show_regs(struct pt_regs *regs, int all)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned long d0, d1, d2, d3, d6, d7;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        show_iret_regs(regs);

        if (regs->orig_ax != -1)
                pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
        else
                pr_cont("\n");

        printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->ax, regs->bx, regs->cx);
        printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->dx, regs->si, regs->di);
        printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->bp, regs->r8, regs->r9);
        printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        if (!all)
                return;

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        cr0 = read_cr0();
        cr2 = read_cr2();
        cr3 = __read_cr3();
        cr4 = __read_cr4();

        printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
               es, cr0);
        printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
               cr4);

        get_debugreg(d0, 0);
        get_debugreg(d1, 1);
        get_debugreg(d2, 2);
        get_debugreg(d3, 3);
        get_debugreg(d6, 6);
        get_debugreg(d7, 7);

        /* Only print out debug registers if they are in their non-default state. */
        if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
            (d6 == DR6_RESERVED) && (d7 == 0x400))) {
                printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
                       d0, d1, d2);
                printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
                       d3, d6, d7);
        }

        if (boot_cpu_has(X86_FEATURE_OSPKE))
                printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}

void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
                if (dead_task->mm->context.ldt) {
                        pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
                                dead_task->comm,
                                dead_task->mm->context.ldt->entries,
                                dead_task->mm->context.ldt->nr_entries);
                        BUG();
                }
#endif
        }
}

enum which_selector {
        FS,
        GS
};

/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available. The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
                                             unsigned short selector,
                                             enum which_selector which)
{
        if (likely(selector == 0)) {
                /*
                 * On Intel (without X86_BUG_NULL_SEG), the segment base could
                 * be the pre-existing saved base or it could be zero. On AMD
                 * (with X86_BUG_NULL_SEG), the segment base could be almost
                 * anything.
                 *
                 * This branch is very hot (it's hit twice on almost every
                 * context switch between 64-bit programs), and avoiding
                 * the RDMSR helps a lot, so we just assume that whatever
                 * value is already saved is correct. This matches historical
                 * Linux behavior, so it won't break existing applications.
                 *
                 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
                 * report that the base is zero, it needs to actually be zero:
                 * see the corresponding logic in load_seg_legacy.
                 */
        } else {
                /*
                 * If the selector is 1, 2, or 3, then the base is zero on
                 * !X86_BUG_NULL_SEG CPUs and could be anything on
                 * X86_BUG_NULL_SEG CPUs. In the latter case, Linux
                 * has never attempted to preserve the base across context
                 * switches.
                 *
                 * If selector > 3, then it refers to a real segment, and
                 * saving the base isn't necessary.
                 */
                if (which == FS)
                        prev_p->thread.fsbase = 0;
                else
                        prev_p->thread.gsbase = 0;
        }
}

static __always_inline void save_fsgs(struct task_struct *task)
{
        savesegment(fs, task->thread.fsindex);
        savesegment(gs, task->thread.gsindex);
        save_base_legacy(task, task->thread.fsindex, FS);
        save_base_legacy(task, task->thread.gsindex, GS);
}

static __always_inline void loadseg(enum which_selector which,
                                    unsigned short sel)
{
        if (which == FS)
                loadsegment(fs, sel);
        else
                load_gs_index(sel);
}

static __always_inline void load_seg_legacy(unsigned short prev_index,
                                            unsigned long prev_base,
                                            unsigned short next_index,
                                            unsigned long next_base,
                                            enum which_selector which)
{
        if (likely(next_index <= 3)) {
                /*
                 * The next task is using 64-bit TLS, is not using this
                 * segment at all, or is having fun with arcane CPU features.
                 */
                if (next_base == 0) {
                        /*
                         * Nasty case: on AMD CPUs, we need to forcibly zero
                         * the base.
                         */
                        if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
                                loadseg(which, __USER_DS);
                                loadseg(which, next_index);
                        } else {
                                /*
                                 * We could try to exhaustively detect cases
                                 * under which we can skip the segment load,
                                 * but there's really only one case that matters
                                 * for performance: if both the previous and
                                 * next states are fully zeroed, we can skip
                                 * the load.
                                 *
                                 * (This assumes that prev_base == 0 has no
                                 * false positives. This is the case on
                                 * Intel-style CPUs.)
                                 */
                                if (likely(prev_index | next_index | prev_base))
                                        loadseg(which, next_index);
                        }
                } else {
                        if (prev_index != next_index)
                                loadseg(which, next_index);
                        wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
                               next_base);
                }
        } else {
                /*
                 * The next task is using a real segment. Loading the selector
                 * is sufficient.
                 */
                loadseg(which, next_index);
        }
}
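
/*
 * A condensed view of the legacy FS/GS handling above, derived from the
 * comments in save_base_legacy() and load_seg_legacy(); an illustrative
 * summary only, not an exhaustive case analysis:
 *
 *      selector   saved base          restored base
 *      0          left as-is          next_base via MSR write, or forced
 *                                     to zero on X86_BUG_NULL_SEG CPUs
 *      1-3        forced to 0         as for selector 0
 *      >3         not saved           whatever the GDT/LDT descriptor says
 */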

int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p, unsigned long tls)
{
        int err;
        struct pt_regs *childregs;
        struct fork_frame *fork_frame;
        struct inactive_task_frame *frame;
        struct task_struct *me = current;

        childregs = task_pt_regs(p);
        fork_frame = container_of(childregs, struct fork_frame, regs);
        frame = &fork_frame->frame;
        frame->bp = 0;
        frame->ret_addr = (unsigned long) ret_from_fork;
        p->thread.sp = (unsigned long) fork_frame;
        p->thread.io_bitmap_ptr = NULL;

        savesegment(gs, p->thread.gsindex);
        p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
        savesegment(fs, p->thread.fsindex);
        p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
        savesegment(es, p->thread.es);
        savesegment(ds, p->thread.ds);
        memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

        if (unlikely(p->flags & PF_KTHREAD)) {
                /* kernel thread */
                memset(childregs, 0, sizeof(struct pt_regs));
                frame->bx = sp;         /* function */
                frame->r12 = arg;
                return 0;
        }
        frame->bx = 0;
        *childregs = *current_pt_regs();

        childregs->ax = 0;
        if (sp)
                childregs->sp = sp;

        err = -ENOMEM;
        if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
                p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
                                                  IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                set_tsk_thread_flag(p, TIF_IO_BITMAP);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (in_ia32_syscall())
                        err = do_set_thread_area(p, -1,
                                (struct user_desc __user *)tls, 0);
                else
#endif
                        err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }

        return err;
}
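
/*
 * The layout copy_thread_tls() builds on the child's kernel stack,
 * sketched for reference (a simplified view; struct fork_frame lives in
 * <asm/switch_to.h>):
 *
 *      top of child's kernel stack
 *      +----------------------+
 *      | struct pt_regs       |  <- task_pt_regs(p)
 *      +----------------------+
 *      | inactive_task_frame  |  ret_addr = ret_from_fork
 *      +----------------------+  <- p->thread.sp (the fork_frame)
 *
 * When the child is first scheduled, __switch_to_asm pops this frame
 * and "returns" to ret_from_fork, which either calls the kernel-thread
 * function stashed in frame->bx (with frame->r12 as its argument) or
 * exits to user mode through the copied pt_regs.
 */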

static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
                    unsigned long new_sp,
                    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
        WARN_ON_ONCE(regs != current_pt_regs());

        if (static_cpu_has(X86_BUG_NULL_SEG)) {
                /* Loading zero below won't clear the base. */
                loadsegment(fs, __USER_DS);
                load_gs_index(__USER_DS);
        }

        loadsegment(fs, 0);
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);

        regs->ip = new_ip;
        regs->sp = new_sp;
        regs->cs = _cs;
        regs->ss = _ss;
        regs->flags = X86_EFLAGS_IF;
        force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            __USER_CS, __USER_DS, 0);
}

#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
        start_thread_common(regs, new_ip, new_sp,
                            test_thread_flag(TIF_X32)
                            ? __USER_CS : __USER32_CS,
                            __USER_DS, __USER_DS);
}
#endif
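
/*
 * start_thread() is how binary-format loaders aim the first return to
 * user space at the new image's entry point. As an illustrative sketch
 * (the exact call site is in fs/binfmt_elf.c, roughly):
 *
 *      start_thread(regs, elf_entry, bprm->p);
 */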

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer is not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
        struct fpu *prev_fpu = &prev->fpu;
        struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);

        WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
                     this_cpu_read(irq_count) != -1);

        switch_fpu_prepare(prev_fpu, cpu);

        /*
         * We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
         *
         * (e.g. xen_load_tls())
         */
        save_fsgs(prev_p);

        /*
         * Load TLS before restoring any segments so that segment loads
         * reference the correct GDT entries.
         */
        load_TLS(next, cpu);

        /*
         * Leave lazy mode, flushing any hypercalls made here. This
         * must be done after loading TLS entries in the GDT but before
         * loading segments that might reference them, and it must
         * be done before fpu__restore(), so the TS bit is up to
         * date.
         */
        arch_end_context_switch(next_p);

        /*
         * Switch DS and ES.
         *
         * Reading them only returns the selectors, but writing them (if
         * nonzero) loads the full descriptor from the GDT or LDT. The
         * LDT for next is loaded in switch_mm, and the GDT is loaded
         * above.
         *
         * We therefore need to write new values to the segment
         * registers on every context switch unless both the new and old
         * values are zero.
         *
         * Note that we don't need to do anything for CS and SS, as
         * those are saved and restored as part of pt_regs.
         */
        savesegment(es, prev->es);
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        savesegment(ds, prev->ds);
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        load_seg_legacy(prev->fsindex, prev->fsbase,
                        next->fsindex, next->fsbase, FS);
        load_seg_legacy(prev->gsindex, prev->gsbase,
                        next->gsindex, next->gsbase, GS);

        switch_fpu_finish(next_fpu, cpu);

        /*
         * Switch the PDA and FPU contexts.
         */
        this_cpu_write(current_task, next_p);
        this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

        /* Reload sp0. */
        update_sp0(next_p);

        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
         */
        if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT ||
                     task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV))
                __switch_to_xtra(prev_p, next_p, tss);

#ifdef CONFIG_XEN_PV
        /*
         * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
         * current_pt_regs()->flags may not match the current task's
         * intended IOPL. We need to switch it manually.
         */
        if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
                     prev->iopl != next->iopl))
                xen_set_iopl_mask(next->iopl);
#endif

        if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
                /*
                 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
                 * does not update the cached descriptor. As a result, if we
                 * do SYSRET while SS is NULL, we'll end up in user mode with
                 * SS apparently equal to __USER_DS but actually unusable.
                 *
                 * The straightforward workaround would be to fix it up just
                 * before SYSRET, but that would slow down the system call
                 * fast paths. Instead, we ensure that SS is never NULL in
                 * system call context. We do this by replacing NULL SS
                 * selectors at every context switch. SYSCALL sets up a valid
                 * SS, so the only way to get NULL is to re-enter the kernel
                 * from CPL 3 through an interrupt. Since that can't happen
                 * in the same task as a running syscall, we are guaranteed to
                 * context switch between every interrupt vector entry and a
                 * subsequent SYSRET.
                 *
                 * We read SS first because SS reads are much faster than
                 * writes. Out of caution, we force SS to __KERNEL_DS even if
                 * it previously had a different non-NULL value.
                 */
                unsigned short ss_sel;
                savesegment(ss, ss_sel);
                if (ss_sel != __KERNEL_DS)
                        loadsegment(ss, __KERNEL_DS);
        }

        /* Load the Intel cache allocation PQR MSR. */
        intel_rdt_sched_in();

        return prev_p;
}
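
/*
 * Call path, for orientation (a sketch of the scheduler plumbing in
 * kernels of this vintage): context_switch() in kernel/sched/core.c
 * invokes the switch_to() macro, which jumps to __switch_to_asm() in
 * the entry code to swap stacks and callee-saved registers, and that in
 * turn calls __switch_to() above for everything that can be done in C.
 */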

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);
        clear_thread_flag(TIF_ADDR32);
        clear_thread_flag(TIF_X32);
        /* Pretend that this comes from a 64bit execve */
        task_pt_regs(current)->orig_ax = __NR_execve;
        current_thread_info()->status &= ~TS_COMPAT;

        /* Ensure the corresponding mm is not marked. */
        if (current->mm)
                current->mm->context.ia32_compat = 0;

        /*
         * TBD: overwrites user setup. Should have two bits.
         * But 64-bit processes have always behaved this way,
         * so it's not too bad. The main problem is just that
         * 32-bit children are affected again.
         */
        current->personality &= ~READ_IMPLIES_EXEC;
}

static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
        clear_thread_flag(TIF_IA32);
        set_thread_flag(TIF_X32);
        if (current->mm)
                current->mm->context.ia32_compat = TIF_X32;
        current->personality &= ~READ_IMPLIES_EXEC;
        /*
         * in_compat_syscall() uses the presence of the x32 syscall bit
         * flag to determine compat status. The x86 mmap() code relies on
         * the syscall bitness so set the x32 syscall bit right here to make
         * in_compat_syscall() work during exec().
         *
         * Pretend to come from an x32 execve.
         */
        task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
        current_thread_info()->status &= ~TS_COMPAT;
#endif
}

static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
        set_thread_flag(TIF_IA32);
        clear_thread_flag(TIF_X32);
        if (current->mm)
                current->mm->context.ia32_compat = TIF_IA32;
        current->personality |= force_personality32;
        /* Prepare the first "return" to user space */
        task_pt_regs(current)->orig_ax = __NR_ia32_execve;
        current_thread_info()->status |= TS_COMPAT;
#endif
}

void set_personality_ia32(bool x32)
{
        /* Make sure to be in 32bit mode */
        set_thread_flag(TIF_ADDR32);

        if (x32)
                __set_personality_x32();
        else
                __set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);

#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
        int ret;

        ret = map_vdso_once(image, addr);
        if (ret)
                return ret;

        return (long)image->size;
}
#endif

long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (option) {
        case ARCH_SET_GS:
                if (arg2 >= TASK_SIZE_MAX)
                        return -EPERM;
                cpu = get_cpu();
                task->thread.gsindex = 0;
                task->thread.gsbase = arg2;
                if (doit) {
                        load_gs_index(0);
                        ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, arg2);
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /*
                 * Not strictly needed for %fs, but do it for symmetry
                 * with %gs.
                 */
                if (arg2 >= TASK_SIZE_MAX)
                        return -EPERM;
                cpu = get_cpu();
                task->thread.fsindex = 0;
                task->thread.fsbase = arg2;
                if (doit) {
                        /* set the selector to 0 to not confuse __switch_to */
                        loadsegment(fs, 0);
                        ret = wrmsrl_safe(MSR_FS_BASE, arg2);
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;

                if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fsbase;
                ret = put_user(base, (unsigned long __user *)arg2);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;

                if (doit)
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
                else
                        base = task->thread.gsbase;
                ret = put_user(base, (unsigned long __user *)arg2);
                break;
        }

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
        case ARCH_MAP_VDSO_X32:
                return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
        case ARCH_MAP_VDSO_32:
                return prctl_map_vdso(&vdso_image_32, arg2);
# endif
        case ARCH_MAP_VDSO_64:
                return prctl_map_vdso(&vdso_image_64, arg2);
#endif

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
        long ret;

        ret = do_arch_prctl_64(current, option, arg2);
        if (ret == -EINVAL)
                ret = do_arch_prctl_common(current, option, arg2);

        return ret;
}
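
/*
 * From user space this is reached via the arch_prctl(2) system call.
 * An illustrative snippet (error handling omitted; the ARCH_* constants
 * come from <asm/prctl.h>):
 *
 *      #include <asm/prctl.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      unsigned long base;
 *      syscall(SYS_arch_prctl, ARCH_SET_GS, 0x1000);
 *      syscall(SYS_arch_prctl, ARCH_GET_GS, &base);
 */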

#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
        return do_arch_prctl_common(current, option, arg2);
}
#endif

unsigned long KSTK_ESP(struct task_struct *task)
{
        return task_pt_regs(task)->sp;
}
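
/*
 * KSTK_ESP() reports the task's saved user stack pointer; for example,
 * the proc code uses it for the kstkesp field of /proc/<pid>/stat
 * (see fs/proc/array.c).
 */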