/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

#include <linux/sched/task_stack.h>

struct task_struct;	/* one of the stranger aspects of C forward declarations */

struct task_struct *__switch_to_asm(struct task_struct *prev,
				    struct task_struct *next);

__visible struct task_struct *__switch_to(struct task_struct *prev,
					   struct task_struct *next);
struct tss_struct;
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);
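
/*
 * Informal summary of how the pieces above fit together (a rough sketch,
 * not a contract): __switch_to_asm() is the low-level assembly half of a
 * context switch; it saves the outgoing task's callee-saved registers and
 * stack pointer, restores the incoming task's, and then hands off to
 * __switch_to(), which switches the remaining per-task state in C.
 * __switch_to_xtra() covers the rarely-needed extras (e.g. debug
 * registers and I/O bitmaps) and is only invoked when one of the two
 * tasks actually uses them.
 */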

/* This runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *prev,
				     struct task_struct *next)
{
#ifdef CONFIG_VMAP_STACK
	/*
	 * If we switch to a stack that has a top-level paging entry
	 * that is not present in the current mm, the resulting #PF
	 * will be promoted to a double-fault and we'll panic. Probe
	 * the new stack now so that vmalloc_fault can fix up the page
	 * tables if needed. This can only happen if we use a stack
	 * in vmap space.
	 *
	 * We assume that the stack is aligned so that it never spans
	 * more than one top-level paging entry.
	 *
	 * To minimize cache pollution, just follow the stack pointer.
	 */
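	/*
	 * For scale (assuming a typical configuration rather than anything
	 * guaranteed here): THREAD_SIZE is 16 KB and the vmapped stack is
	 * THREAD_SIZE-aligned, while one top-level entry covers 512 GiB
	 * with 4-level paging, so an aligned stack can never straddle two
	 * top-level entries and a single one-byte probe is enough.
	 */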
	READ_ONCE(*(unsigned char *)next->thread.sp);
#endif
}

asmlinkage void ret_from_fork(void);

/*
 * This is the structure pointed to by thread.sp for an inactive task. The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
#else
	unsigned long si;
	unsigned long di;
#endif
	unsigned long bx;

	/*
	 * These two fields must be together. They form a stack frame header,
	 * needed by get_frame_pointer().
	 */
	unsigned long bp;
	unsigned long ret_addr;
};
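
/*
 * Rough sketch of how the frame above is consumed, going by the x86-64
 * path (the 32-bit one is analogous): __switch_to_asm() pushes the
 * callee-saved registers onto the outgoing task's stack, saves %rsp into
 * prev->thread.sp, loads next->thread.sp into %rsp, pops the same
 * registers off the incoming stack and jumps to __switch_to(); ret_addr
 * is where execution resumes once __switch_to() returns. thread.sp of a
 * sleeping task therefore points at one of these frames.
 */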

struct fork_frame {
	struct inactive_task_frame frame;
	struct pt_regs regs;
};
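
/*
 * A sketch of how fork uses this (the authoritative code lives in the
 * arch copy_thread path): the child's kernel stack is seeded with a
 * fork_frame whose regs hold the child's starting register state and
 * whose frame.ret_addr points at ret_from_fork(), and thread.sp is set
 * to that frame, so the very first switch into the child "returns" into
 * ret_from_fork().
 */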

#define switch_to(prev, next, last)					\
do {									\
	prepare_switch_to(prev, next);					\
									\
	((last) = __switch_to_asm((prev), (next)));			\
} while (0)
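
/*
 * Illustrative use, roughly what the scheduler's context_switch() does:
 *
 *	switch_to(prev, next, prev);
 *
 * "last" matters because, by the time the switched-out task is scheduled
 * back in, the CPU is usually arriving from some task other than the one
 * it originally switched to; the macro assigns "last" the task we in
 * fact switched away from, so the resuming code knows whose state to
 * finish cleaning up.
 */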

#ifdef CONFIG_X86_32
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
		return;

	this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
	wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
}
#endif

/* This is used when switching tasks or entering/exiting vm86 mode. */
static inline void update_sp0(struct task_struct *task)
{
	/* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
	load_sp0(task->thread.sp0);
#else
	if (static_cpu_has(X86_FEATURE_XENPV))
		load_sp0(task_top_of_stack(task));
#endif
}

#endif /* _ASM_X86_SWITCH_TO_H */