/* x86 context-switch definitions (blame-export table artifacts removed). */
#ifndef _ASM_X86_SWITCH_TO_H
#define _ASM_X86_SWITCH_TO_H

#include <linux/sched/task_stack.h>

struct task_struct; /* one of the stranger aspects of C forward declarations */

/*
 * Assembly half of the context switch; returns a task_struct pointer
 * (presumably the task switched away from — confirm against the asm).
 */
struct task_struct *__switch_to_asm(struct task_struct *prev,
				    struct task_struct *next);

/* C half of the context switch; __visible because it is reached from asm. */
__visible struct task_struct *__switch_to(struct task_struct *prev,
					  struct task_struct *next);

struct tss_struct;
/* Slow-path extra state switching between @prev_p and @next_p. */
void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		      struct tss_struct *tss);
/* This runs on the previous thread's stack. */
static inline void prepare_switch_to(struct task_struct *prev,
				     struct task_struct *next)
{
#ifdef CONFIG_VMAP_STACK
	/*
	 * If we switch to a stack that has a top-level paging entry
	 * that is not present in the current mm, the resulting #PF
	 * will be promoted to a double-fault and we'll panic. Probe
	 * the new stack now so that vmalloc_fault can fix up the page
	 * tables if needed. This can only happen if we use a stack
	 * in vmap space.
	 *
	 * We assume that the stack is aligned so that it never spans
	 * more than one top-level paging entry.
	 *
	 * To minimize cache pollution, just follow the stack pointer.
	 */
	READ_ONCE(*(unsigned char *)next->thread.sp);
#endif
}
38 | ||
/*
 * Assembly entry point a new task first returns through; presumably the
 * ret_addr planted in struct inactive_task_frame — confirm in copy_thread().
 */
asmlinkage void ret_from_fork(void);
/*
 * This is the structure pointed to by thread.sp for an inactive task. The
 * order of the fields must match the code in __switch_to_asm().
 */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
	/* Registers saved/restored by __switch_to_asm() on 64-bit. */
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
#else
	/* Registers saved/restored by __switch_to_asm() on 32-bit. */
	unsigned long si;
	unsigned long di;
#endif
	unsigned long bx;

	/*
	 * These two fields must be together. They form a stack frame header,
	 * needed by get_frame_pointer().
	 */
	unsigned long bp;
	unsigned long ret_addr;
};
/*
 * A switch frame immediately followed by saved register state; looks like
 * the initial stack layout of a forked child (ret_addr then pt_regs) —
 * confirm against copy_thread() and ret_from_fork().
 */
struct fork_frame {
	struct inactive_task_frame frame;
	struct pt_regs regs;
};
/*
 * Switch from @prev to @next; @last receives the value returned by
 * __switch_to_asm() (the task actually switched away from, per its
 * prev/next prototype). prepare_switch_to() probes the new stack first.
 */
#define switch_to(prev, next, last)					\
do {									\
	prepare_switch_to(prev, next);					\
									\
	((last) = __switch_to_asm((prev), (next)));			\
} while (0)
76 | ||
#ifdef CONFIG_X86_32
/*
 * Keep the per-CPU TSS ss1 slot and MSR_IA32_SYSENTER_CS in sync with
 * @thread->sysenter_cs, performing the MSR write only when the cached
 * per-CPU value differs.
 */
static inline void refresh_sysenter_cs(struct thread_struct *thread)
{
	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
	if (likely(this_cpu_read(cpu_tss.x86_tss.ss1) != thread->sysenter_cs)) {
		this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
		wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
	}
}
#endif

/* This is used when switching tasks or entering/exiting vm86 mode. */
static inline void update_sp0(struct task_struct *task)
{
	/* On x86_64, sp0 always points to the entry trampoline stack, which is constant: */
#ifdef CONFIG_X86_32
	load_sp0(task->thread.sp0);
#else
	/*
	 * NOTE(review): only the Xen PV case reloads sp0 here, pointing it at
	 * the top of the task stack — presumably because Xen PV does not use
	 * the entry trampoline; confirm against the Xen entry code.
	 */
	if (static_cpu_has(X86_FEATURE_XENPV))
		load_sp0(task_top_of_stack(task));
#endif
}

#endif /* _ASM_X86_SWITCH_TO_H */