/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *	CPU hotplug support - ashok.raj@intel.com
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/prctl.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/syscalls.h>

#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/fpu/internal.h>
#include <asm/mmu_context.h>
#include <asm/prctl.h>
#include <asm/proto.h>
#include <asm/syscalls.h>
#include <asm/debugreg.h>
#include <asm/switch_to.h>
#include <asm/xen/hypervisor.h>
#include <asm/vdso.h>
#include <asm/intel_rdt_sched.h>
#include <asm/unistd.h>
#ifdef CONFIG_IA32_EMULATION
/* Not included via unistd.h */
#include <asm/unistd_32_ia32.h>
#endif
__visible DEFINE_PER_CPU(unsigned long, rsp_scratch);
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned int fsindex, gsindex;
	unsigned int ds, cs, es;

	show_iret_regs(regs);

	if (regs->orig_ax != -1)
		pr_cont(" ORIG_RAX: %016lx\n", regs->orig_ax);
	else
		pr_cont("\n");

	printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n",
	       regs->ax, regs->bx, regs->cx);
	printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n",
	       regs->dx, regs->si, regs->di);
	printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n",
	       regs->bp, regs->r8, regs->r9);
	printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n",
	       regs->r10, regs->r11, regs->r12);
	printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n",
	       regs->r13, regs->r14, regs->r15);

	if (!all)
		return;

	asm("movl %%ds,%0" : "=r" (ds));
	asm("movl %%cs,%0" : "=r" (cs));
	asm("movl %%es,%0" : "=r" (es));
	asm("movl %%fs,%0" : "=r" (fsindex));
	asm("movl %%gs,%0" : "=r" (gsindex));

	rdmsrl(MSR_FS_BASE, fs);
	rdmsrl(MSR_GS_BASE, gs);
	rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = __read_cr3();
	cr4 = __read_cr4();

	printk(KERN_DEFAULT "FS:  %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
	       fs, fsindex, gs, gsindex, shadowgs);
	printk(KERN_DEFAULT "CS:  %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds,
	       es, cr0);
	printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3,
	       cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	get_debugreg(d6, 6);
	get_debugreg(d7, 7);

	/* Only print out debug registers if they are in their non-default state. */
	if (!((d0 == 0) && (d1 == 0) && (d2 == 0) && (d3 == 0) &&
	      (d6 == DR6_RESERVED) && (d7 == 0x400))) {
		printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n",
		       d0, d1, d2);
		printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n",
		       d3, d6, d7);
	}

	if (boot_cpu_has(X86_FEATURE_OSPKE))
		printk(KERN_DEFAULT "PKRU: %08x\n", read_pkru());
}
void release_thread(struct task_struct *dead_task)
{
	if (dead_task->mm) {
#ifdef CONFIG_MODIFY_LDT_SYSCALL
		if (dead_task->mm->context.ldt) {
			pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
				dead_task->comm,
				dead_task->mm->context.ldt->entries,
				dead_task->mm->context.ldt->nr_entries);
			BUG();
		}
#endif
	}
}
enum which_selector {
	FS,
	GS
};
/*
 * Saves the FS or GS base for an outgoing thread if FSGSBASE extensions are
 * not available.  The goal is to be reasonably fast on non-FSGSBASE systems.
 * It's forcibly inlined because it'll generate better code and this function
 * is hot.
 */
static __always_inline void save_base_legacy(struct task_struct *prev_p,
					     unsigned short selector,
					     enum which_selector which)
{
	if (likely(selector == 0)) {
		/*
		 * On Intel (without X86_BUG_NULL_SEG), the segment base could
		 * be the pre-existing saved base or it could be zero.  On AMD
		 * (with X86_BUG_NULL_SEG), the segment base could be almost
		 * anything.
		 *
		 * This branch is very hot (it's hit twice on almost every
		 * context switch between 64-bit programs), and avoiding
		 * the RDMSR helps a lot, so we just assume that whatever
		 * value is already saved is correct.  This matches historical
		 * Linux behavior, so it won't break existing applications.
		 *
		 * To avoid leaking state, on non-X86_BUG_NULL_SEG CPUs, if we
		 * report that the base is zero, it needs to actually be zero:
		 * see the corresponding logic in load_seg_legacy.
		 */
	} else {
		/*
		 * If the selector is 1, 2, or 3, then the base is zero on
		 * !X86_BUG_NULL_SEG CPUs and could be anything on
		 * X86_BUG_NULL_SEG CPUs.  In the latter case, Linux
		 * has never attempted to preserve the base across context
		 * switches.
		 *
		 * If selector > 3, then it refers to a real segment, and
		 * saving the base isn't necessary.
		 */
		if (which == FS)
			prev_p->thread.fsbase = 0;
		else
			prev_p->thread.gsbase = 0;
	}
}
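
/*
 * Illustrative sketch (not from the original file) of how the cases above
 * play out.  A 64-bit thread that called arch_prctl(ARCH_SET_FS, base)
 * runs with fsindex == 0, so the hot selector == 0 branch keeps the
 * previously saved fsbase and skips the RDMSR.  A thread that loaded a
 * low selector (1-3) instead takes the else branch, and the saved base is
 * zeroed because that is what the hardware guarantees on
 * !X86_BUG_NULL_SEG CPUs.
 */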
static __always_inline void save_fsgs(struct task_struct *task)
{
	savesegment(fs, task->thread.fsindex);
	savesegment(gs, task->thread.gsindex);
	save_base_legacy(task, task->thread.fsindex, FS);
	save_base_legacy(task, task->thread.gsindex, GS);
}
static __always_inline void loadseg(enum which_selector which,
				    unsigned short sel)
{
	if (which == FS)
		loadsegment(fs, sel);
	else
		load_gs_index(sel);
}
static __always_inline void load_seg_legacy(unsigned short prev_index,
					    unsigned long prev_base,
					    unsigned short next_index,
					    unsigned long next_base,
					    enum which_selector which)
{
	if (likely(next_index <= 3)) {
		/*
		 * The next task is using 64-bit TLS, is not using this
		 * segment at all, or is having fun with arcane CPU features.
		 */
		if (next_base == 0) {
			/*
			 * Nasty case: on AMD CPUs, we need to forcibly zero
			 * the base.
			 */
			if (static_cpu_has_bug(X86_BUG_NULL_SEG)) {
				loadseg(which, __USER_DS);
				loadseg(which, next_index);
			} else {
				/*
				 * We could try to exhaustively detect cases
				 * under which we can skip the segment load,
				 * but there's really only one case that matters
				 * for performance: if both the previous and
				 * next states are fully zeroed, we can skip
				 * the load.
				 *
				 * (This assumes that prev_base == 0 has no
				 * false positives.  This is the case on
				 * Intel hardware.)
				 */
				if (likely(prev_index | next_index | prev_base))
					loadseg(which, next_index);
			}
		} else {
			/* Nonzero saved base: reload the selector if it
			 * changed, then write the base with WRMSR. */
			if (prev_index != next_index)
				loadseg(which, next_index);
			wrmsrl(which == FS ? MSR_FS_BASE : MSR_KERNEL_GS_BASE,
			       next_base);
		}
	} else {
		/*
		 * The next task is using a real segment.  Loading the selector
		 * is sufficient.
		 */
		loadseg(which, next_index);
	}
}
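
/*
 * Worked example (a sketch, not from the original file) of the decisions
 * above for FS on a CPU without X86_BUG_NULL_SEG:
 *
 *	prev_index  prev_base  next_index  next_base	action
 *	0           0          0           0		nothing (fast path)
 *	0           0x1000     0           0		loadseg(FS, 0)
 *	0           0          0           0x2000	WRMSR(MSR_FS_BASE)
 *	7           -          7           -		loadseg(FS, 7)
 */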
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p, unsigned long tls)
{
	int err;
	struct pt_regs *childregs;
	struct fork_frame *fork_frame;
	struct inactive_task_frame *frame;
	struct task_struct *me = current;

	childregs = task_pt_regs(p);
	fork_frame = container_of(childregs, struct fork_frame, regs);
	frame = &fork_frame->frame;
	frame->bp = 0;
	frame->ret_addr = (unsigned long) ret_from_fork;
	p->thread.sp = (unsigned long) fork_frame;
	p->thread.io_bitmap_ptr = NULL;

	savesegment(gs, p->thread.gsindex);
	p->thread.gsbase = p->thread.gsindex ? 0 : me->thread.gsbase;
	savesegment(fs, p->thread.fsindex);
	p->thread.fsbase = p->thread.fsindex ? 0 : me->thread.fsbase;
	savesegment(es, p->thread.es);
	savesegment(ds, p->thread.ds);
	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		frame->bx = sp;		/* function */
		frame->r12 = arg;
		return 0;
	}
	frame->bx = 0;
	*childregs = *current_pt_regs();

	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	err = -ENOMEM;
	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
						  IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
		if (in_ia32_syscall())
			err = do_set_thread_area(p, -1,
				(struct user_desc __user *)tls, 0);
		else
#endif
			err = do_arch_prctl_64(p, ARCH_SET_FS, tls);
		if (err)
			goto out;
	}
	err = 0;
out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}

	return err;
}
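
/*
 * Illustrative userspace sketch (not part of this file): the CLONE_SETTLS
 * path above is what services the tls argument of clone(2).  Assuming the
 * glibc clone() wrapper and a hypothetical thread_fn/STACK_SIZE:
 *
 *	char *stack = malloc(STACK_SIZE);
 *	unsigned long tls_base = ...;	// base of the new thread's TLS block
 *	clone(thread_fn, stack + STACK_SIZE,
 *	      CLONE_VM | CLONE_THREAD | CLONE_SIGHAND | CLONE_SETTLS,
 *	      arg, NULL, (void *)tls_base, NULL);
 *
 * For a 64-bit caller this lands in do_arch_prctl_64(p, ARCH_SET_FS, tls),
 * so the child starts with FS base == tls_base and fsindex == 0.
 */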
static void
start_thread_common(struct pt_regs *regs, unsigned long new_ip,
		    unsigned long new_sp,
		    unsigned int _cs, unsigned int _ss, unsigned int _ds)
{
	WARN_ON_ONCE(regs != current_pt_regs());

	if (static_cpu_has(X86_BUG_NULL_SEG)) {
		/* Loading zero below won't clear the base. */
		loadsegment(fs, __USER_DS);
		load_gs_index(__USER_DS);
	}

	loadsegment(fs, 0);
	loadsegment(es, _ds);
	loadsegment(ds, _ds);
	load_gs_index(0);

	regs->ip		= new_ip;
	regs->sp		= new_sp;
	regs->cs		= _cs;
	regs->ss		= _ss;
	regs->flags		= X86_EFLAGS_IF;
	force_iret();
}

void
start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    __USER_CS, __USER_DS, 0);
}
#ifdef CONFIG_COMPAT
void compat_start_thread(struct pt_regs *regs, u32 new_ip, u32 new_sp)
{
	start_thread_common(regs, new_ip, new_sp,
			    test_thread_flag(TIF_X32)
			    ? __USER_CS : __USER32_CS,
			    __USER_DS, __USER_DS);
}
#endif
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 *
 * Kprobes not supported here. Set the probe on schedule instead.
 * Function graph tracer not supported either.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread;
	struct thread_struct *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();

	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
		     this_cpu_read(irq_count) != -1);

	switch_fpu_prepare(prev_fpu, cpu);

	/* We must save %fs and %gs before load_TLS() because
	 * %fs and %gs may be cleared by load_TLS().
	 *
	 * (e.g. xen_load_tls())
	 */
	save_fsgs(prev_p);

	/*
	 * Load TLS before restoring any segments so that segment loads
	 * reference the correct GDT entries.
	 */
	load_TLS(next, cpu);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.  This
	 * must be done after loading TLS entries in the GDT but before
	 * loading segments that might reference them, and it must
	 * be done before fpu__restore(), so the TS bit is up to
	 * date.
	 */
	arch_end_context_switch(next_p);

	/* Switch DS and ES.
	 *
	 * Reading them only returns the selectors, but writing them (if
	 * nonzero) loads the full descriptor from the GDT or LDT.  The
	 * LDT for next is loaded in switch_mm, and the GDT is loaded
	 * above.
	 *
	 * We therefore need to write new values to the segment
	 * registers on every context switch unless both the new and old
	 * values are zero.
	 *
	 * Note that we don't need to do anything for CS and SS, as
	 * those are saved and restored as part of pt_regs.
	 */
	savesegment(es, prev->es);
	if (unlikely(next->es | prev->es))
		loadsegment(es, next->es);

	savesegment(ds, prev->ds);
	if (unlikely(next->ds | prev->ds))
		loadsegment(ds, next->ds);

	load_seg_legacy(prev->fsindex, prev->fsbase,
			next->fsindex, next->fsbase, FS);
	load_seg_legacy(prev->gsindex, prev->gsbase,
			next->gsindex, next->gsbase, GS);

	switch_fpu_finish(next_fpu, cpu);

	/*
	 * Switch the PDA and FPU contexts.
	 */
	this_cpu_write(current_task, next_p);
	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));

	/* Reload sp0. */
	update_task_stack(next_p);

	switch_to_extra(prev_p, next_p);

#ifdef CONFIG_XEN_PV
	/*
	 * On Xen PV, IOPL bits in pt_regs->flags have no effect, and
	 * current_pt_regs()->flags may not match the current task's
	 * intended IOPL.  We need to switch it manually.
	 */
	if (unlikely(static_cpu_has(X86_FEATURE_XENPV) &&
		     prev->iopl != next->iopl))
		xen_set_iopl_mask(next->iopl);
#endif

	if (static_cpu_has_bug(X86_BUG_SYSRET_SS_ATTRS)) {
		/*
		 * AMD CPUs have a misfeature: SYSRET sets the SS selector but
		 * does not update the cached descriptor.  As a result, if we
		 * do SYSRET while SS is NULL, we'll end up in user mode with
		 * SS apparently equal to __USER_DS but actually unusable.
		 *
		 * The straightforward workaround would be to fix it up just
		 * before SYSRET, but that would slow down the system call
		 * fast paths.  Instead, we ensure that SS is never NULL in
		 * system call context.  We do this by replacing NULL SS
		 * selectors at every context switch.  SYSCALL sets up a valid
		 * SS, so the only way to get NULL is to re-enter the kernel
		 * from CPL 3 through an interrupt.  Since that can't happen
		 * in the same task as a running syscall, we are guaranteed to
		 * context switch between every interrupt vector entry and a
		 * subsequent SYSRET.
		 *
		 * We read SS first because SS reads are much faster than
		 * writes.  Out of caution, we force SS to __KERNEL_DS even if
		 * it previously had a different non-NULL value.
		 */
		unsigned short ss_sel;
		savesegment(ss, ss_sel);
		if (ss_sel != __KERNEL_DS)
			loadsegment(ss, __KERNEL_DS);
	}

	/* Load the Intel cache allocation PQR MSR. */
	intel_rdt_sched_in();

	return prev_p;
}
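
/*
 * Illustrative timeline (a sketch, not from the original file) of the
 * X86_BUG_SYSRET_SS_ATTRS workaround above:
 *
 *	task B runs in user mode and takes an interrupt
 *	    -> the CPU loads a NULL SS selector for the kernel
 *	the scheduler switches to task A, which entered via SYSCALL
 *	    -> __switch_to() sees a NULL ss_sel and loads __KERNEL_DS
 *	task A returns with SYSRET
 *	    -> SS is non-NULL, so the stale-descriptor misfeature is avoided
 */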
void set_personality_64bit(void)
{
	/* inherit personality from parent */

	/* Make sure to be in 64bit mode */
	clear_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_ADDR32);
	clear_thread_flag(TIF_X32);
	/* Pretend that this comes from a 64bit execve */
	task_pt_regs(current)->orig_ax = __NR_execve;
	current_thread_info()->status &= ~TS_COMPAT;

	/* Ensure the corresponding mm is not marked. */
	if (current->mm)
		current->mm->context.ia32_compat = 0;

	/* TBD: overwrites user setup. Should have two bits.
	   But 64bit processes have always behaved this way,
	   so it's not too bad. The main problem is just that
	   32bit children are affected again. */
	current->personality &= ~READ_IMPLIES_EXEC;
}
static void __set_personality_x32(void)
{
#ifdef CONFIG_X86_X32
	clear_thread_flag(TIF_IA32);
	set_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_X32;
	current->personality &= ~READ_IMPLIES_EXEC;
	/*
	 * in_compat_syscall() uses the presence of the x32 syscall bit
	 * flag to determine compat status.  The x86 mmap() code relies on
	 * the syscall bitness so set x32 syscall bit right here to make
	 * in_compat_syscall() work during exec().
	 *
	 * Pretend to come from an x32 execve.
	 */
	task_pt_regs(current)->orig_ax = __NR_x32_execve | __X32_SYSCALL_BIT;
	current_thread_info()->status &= ~TS_COMPAT;
#endif
}
static void __set_personality_ia32(void)
{
#ifdef CONFIG_IA32_EMULATION
	set_thread_flag(TIF_IA32);
	clear_thread_flag(TIF_X32);
	if (current->mm)
		current->mm->context.ia32_compat = TIF_IA32;
	current->personality |= force_personality32;
	/* Prepare the first "return" to user space */
	task_pt_regs(current)->orig_ax = __NR_ia32_execve;
	current_thread_info()->status |= TS_COMPAT;
#endif
}
void set_personality_ia32(bool x32)
{
	/* Make sure to be in 32bit mode */
	set_thread_flag(TIF_ADDR32);

	if (x32)
		__set_personality_x32();
	else
		__set_personality_ia32();
}
EXPORT_SYMBOL_GPL(set_personality_ia32);
#ifdef CONFIG_CHECKPOINT_RESTORE
static long prctl_map_vdso(const struct vdso_image *image, unsigned long addr)
{
	int ret;

	ret = map_vdso_once(image, addr);
	if (ret)
		return ret;

	return (long)image->size;
}
#endif
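
/*
 * Illustrative userspace sketch (not part of this file): a
 * checkpoint/restore tool can ask for a fresh vDSO mapping at a chosen
 * address and learn its size from the return value.  ARCH_MAP_VDSO_64 is
 * handled by the switch in do_arch_prctl_64() below:
 *
 *	long size = syscall(SYS_arch_prctl, ARCH_MAP_VDSO_64, addr);
 *	if (size < 0)
 *		... mapping failed (e.g. a vDSO is already mapped) ...
 */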
long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
{
	int ret = 0;
	int doit = task == current;
	int cpu;

	switch (option) {
	case ARCH_SET_GS:
		if (arg2 >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.gsindex = 0;
		task->thread.gsbase = arg2;
		if (doit) {
			load_gs_index(0);
			ret = wrmsrl_safe(MSR_KERNEL_GS_BASE, arg2);
		}
		put_cpu();
		break;
	case ARCH_SET_FS:
		/* Not strictly needed for fs, but do it for symmetry
		   with gs */
		if (arg2 >= TASK_SIZE_MAX)
			return -EPERM;
		cpu = get_cpu();
		task->thread.fsindex = 0;
		task->thread.fsbase = arg2;
		if (doit) {
			/* set the selector to 0 to not confuse __switch_to */
			loadsegment(fs, 0);
			ret = wrmsrl_safe(MSR_FS_BASE, arg2);
		}
		put_cpu();
		break;
	case ARCH_GET_FS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_FS_BASE, base);
		else
			base = task->thread.fsbase;
		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}
	case ARCH_GET_GS: {
		unsigned long base;

		if (doit)
			rdmsrl(MSR_KERNEL_GS_BASE, base);
		else
			base = task->thread.gsbase;
		ret = put_user(base, (unsigned long __user *)arg2);
		break;
	}

#ifdef CONFIG_CHECKPOINT_RESTORE
# ifdef CONFIG_X86_X32_ABI
	case ARCH_MAP_VDSO_X32:
		return prctl_map_vdso(&vdso_image_x32, arg2);
# endif
# if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
	case ARCH_MAP_VDSO_32:
		return prctl_map_vdso(&vdso_image_32, arg2);
# endif
	case ARCH_MAP_VDSO_64:
		return prctl_map_vdso(&vdso_image_64, arg2);
#endif

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
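
/*
 * Illustrative userspace usage of the options above (a sketch, not part
 * of this file):
 *
 *	unsigned long base;
 *	syscall(SYS_arch_prctl, ARCH_SET_GS, 0x7f0000000000UL);
 *	syscall(SYS_arch_prctl, ARCH_GET_GS, &base);  // base == 0x7f0000000000
 *
 * Note that ARCH_SET_GS writes MSR_KERNEL_GS_BASE, not MSR_GS_BASE: while
 * the kernel runs, the user's GS base lives in the shadow slot and is
 * swapped in by SWAPGS on the way back to user mode.
 */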
SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	long ret;

	ret = do_arch_prctl_64(current, option, arg2);
	if (ret == -EINVAL)
		ret = do_arch_prctl_common(current, option, arg2);

	return ret;
}
#ifdef CONFIG_IA32_EMULATION
COMPAT_SYSCALL_DEFINE2(arch_prctl, int, option, unsigned long, arg2)
{
	return do_arch_prctl_common(current, option, arg2);
}
#endif
unsigned long KSTK_ESP(struct task_struct *task)
{
	return task_pt_regs(task)->sp;
}