arch/x86/kvm/svm/vmenter.S
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif
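
/*
 * For reference: with __VCPU_REGS_RCX == 1 (per asm/kvm_vcpu_regs.h) and
 * WORD_SIZE == 8 on x86_64, VCPU_RCX resolves to byte offset 8 into @regs,
 * which is how the loads/stores below index the vcpu register array.
 */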

.section .noinstr.text, "ax"

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @vmcb_pa: unsigned long (physical address of the guest VMCB)
 * @regs: unsigned long * (to guest registers)
 */
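/*
 * Rough C-side view (a sketch, not part of this file): the SVM code is
 * expected to declare and invoke this as something like
 *
 *	void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 *	__svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs);
 *
 * so @vmcb_pa arrives in _ASM_ARG1 and @regs in _ASM_ARG2 per the C calling
 * convention.
 */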
SYM_FUNC_START(__svm_vcpu_run)
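	/* Save the host's callee-saved registers; they are clobbered below. */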
	push %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/* Save @regs. */
	push %_ASM_ARG2

	/* Save @vmcb. */
	push %_ASM_ARG1
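
	/*
	 * Both are stashed on the stack because (almost) every GPR holds
	 * guest state after #VMEXIT: @vmcb_pa is popped into RAX for
	 * VMLOAD/VMRUN/VMSAVE, and @regs is popped back afterwards to save
	 * the guest's registers.
	 */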

	/* Move @regs to RAX. */
	mov %_ASM_ARG2, %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX), %r8
	mov VCPU_R9 (%_ASM_AX), %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif

	/* "POP" @vmcb to RAX. */
	pop %_ASM_AX
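
	/*
	 * VMLOAD, VMRUN and VMSAVE all take the VMCB's physical address as an
	 * implicit operand in rAX, hence @vmcb_pa is kept in RAX across the
	 * world switch.
	 */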

	/* Enter guest mode */
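	/*
	 * STI only sets EFLAGS.IF; physical interrupts are presumably still
	 * blocked here because the C caller is expected to bracket the world
	 * switch with CLGI/STGI.
	 *
	 * Each of VMLOAD/VMRUN/VMSAVE below is covered by an _ASM_EXTABLE
	 * fixup: if the instruction faults, control transfers to the matching
	 * "cmpb $0, kvm_rebooting" check.  The fault is tolerated (execution
	 * continues at the next step) if KVM is rebooting, otherwise ud2
	 * raises a BUG.
	 */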
	sti
1:	vmload %_ASM_AX
	jmp 3f
2:	cmpb $0, kvm_rebooting
	jne 3f
	ud2
	_ASM_EXTABLE(1b, 2b)

3:	vmrun %_ASM_AX
	jmp 5f
4:	cmpb $0, kvm_rebooting
	jne 5f
	ud2
	_ASM_EXTABLE(3b, 4b)

5:	vmsave %_ASM_AX
	jmp 7f
6:	cmpb $0, kvm_rebooting
	jne 7f
	ud2
	_ASM_EXTABLE(5b, 6b)
7:
	cli

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
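	/*
	 * Rationale: the guest can fill the RSB with its own return targets,
	 * so the buffer is overwritten here before any host RET can consume
	 * a guest-controlled prediction.
	 */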
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* "POP" @regs to RAX. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack. In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free. RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

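	/* Restore the host's callee-saved registers and return to the C caller. */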
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	ret
SYM_FUNC_END(__svm_vcpu_run)