// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/x86_64/vmx.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

/* Set by tests that use the Hyper-V enlightened VMCS; checked in load_vmcs(). */
bool enable_evmcs;

/* Allocate memory regions for nested VMX tests.
 *
 * Input Args:
 *   vm - The VM to allocate guest-virtual addresses in.
 *
 * Output Args:
 *   p_vmx_gva - The guest virtual address for the struct vmx_pages.
 *
 * Return:
 *   Pointer to structure with the addresses of the VMX areas.
 */
struct vmx_pages *
vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
{
	vm_vaddr_t vmx_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);

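	/*
	 * Each region below is tracked three ways: a guest-virtual pointer
	 * stored in struct vmx_pages (dereferenced by guest code), plus the
	 * host virtual address (for the test to inspect from userspace) and
	 * the guest-physical address (what the VMX instructions consume).
	 */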
	/* Setup of a region of guest memory for the vmxon region. */
	vmx->vmxon = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
	vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);

	/* Setup of a region of guest memory for a vmcs. */
	vmx->vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
	vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);

	/* Setup of a region of guest memory for the MSR bitmap. */
	vmx->msr = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
	vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
	memset(vmx->msr_hva, 0, getpagesize());

	/* Setup of a region of guest memory for the shadow VMCS. */
	vmx->shadow_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
	vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);

	/* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */
	vmx->vmread = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
	vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);
	memset(vmx->vmread_hva, 0, getpagesize());

	vmx->vmwrite = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
	vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
	memset(vmx->vmwrite_hva, 0, getpagesize());

	/* Setup of a region of guest memory for the VP Assist page. */
	vmx->vp_assist = (void *)vm_vaddr_alloc(vm, getpagesize(),
						0x10000, 0, 0);
	vmx->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)vmx->vp_assist);
	vmx->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vp_assist);

	/* Setup of a region of guest memory for the enlightened VMCS. */
	vmx->enlightened_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(),
						       0x10000, 0, 0);
	vmx->enlightened_vmcs_hva =
		addr_gva2hva(vm, (uintptr_t)vmx->enlightened_vmcs);
	vmx->enlightened_vmcs_gpa =
		addr_gva2gpa(vm, (uintptr_t)vmx->enlightened_vmcs);

	*p_vmx_gva = vmx_gva;
	return vmx;
}
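
/*
 * A minimal host-side usage sketch (illustrative only; VCPU_ID and
 * guest_code belong to the caller, not to this file):
 *
 *	vm_vaddr_t vmx_pages_gva;
 *	struct kvm_vm *vm = vm_create_default(VCPU_ID, 0, guest_code);
 *
 *	vcpu_alloc_vmx(vm, &vmx_pages_gva);
 *	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
 */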

bool prepare_for_vmx_operation(struct vmx_pages *vmx)
{
	uint64_t feature_control;
	uint64_t required;
	unsigned long cr0;
	unsigned long cr4;

	/*
	 * Ensure bits in CR0 and CR4 are valid in VMX operation:
	 * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx.
	 * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx.
	 */
	__asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory");
	cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1);
	cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0);
	__asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory");
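	/*
	 * For example, IA32_VMX_CR0_FIXED0 normally reports CR0.PE and
	 * CR0.PG as fixed to 1 (absent "unrestricted guest" support), so
	 * the OR above keeps protected mode and paging enabled.
	 */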

	__asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory");
	cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1);
	cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0);
	/* Enable VMX operation */
	cr4 |= X86_CR4_VMXE;
	__asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory");

	/*
	 * Configure IA32_FEATURE_CONTROL MSR to allow VMXON:
	 *  Bit 0: Lock bit. If clear, VMXON causes a #GP.
	 *  Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON
	 *    outside of SMX causes a #GP.
	 */
	required = FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
	required |= FEATURE_CONTROL_LOCKED;
	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & required) != required)
		wrmsr(MSR_IA32_FEATURE_CONTROL, feature_control | required);
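	/*
	 * Note: if firmware had already set the lock bit with the
	 * outside-SMX enable clear, the wrmsr() above would #GP, since a
	 * locked IA32_FEATURE_CONTROL rejects further writes until reset.
	 */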

	/* Enter VMX root operation. */
	*(uint32_t *)(vmx->vmxon) = vmcs_revision();
	if (vmxon(vmx->vmxon_gpa))
		return false;

	return true;
}

bool load_vmcs(struct vmx_pages *vmx)
{
	if (!enable_evmcs) {
		/* Load a VMCS. */
		*(uint32_t *)(vmx->vmcs) = vmcs_revision();
		if (vmclear(vmx->vmcs_gpa))
			return false;

		if (vmptrld(vmx->vmcs_gpa))
			return false;

		/*
		 * Setup shadow VMCS, do not load it yet: bit 31 of the
		 * revision dword marks the region as a shadow VMCS.
		 */
		*(uint32_t *)(vmx->shadow_vmcs) =
			vmcs_revision() | 0x80000000ul;
		if (vmclear(vmx->shadow_vmcs_gpa))
			return false;
	} else {
		if (evmcs_vmptrld(vmx->enlightened_vmcs_gpa,
				  vmx->enlightened_vmcs))
			return false;
		current_evmcs->revision_id = vmcs_revision();
	}

	return true;
}

/*
 * Initialize the control fields to the most basic settings possible.
 */
static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
{
	vmwrite(VIRTUAL_PROCESSOR_ID, 0);
	vmwrite(POSTED_INTR_NV, 0);

	vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));
	/*
	 * vmwrite() returns 0 on success, so a successful write to
	 * SECONDARY_VM_EXEC_CONTROL means the field is supported and the
	 * secondary controls can be activated in the primary controls.
	 */
	if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, 0))
		vmwrite(CPU_BASED_VM_EXEC_CONTROL,
			rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) |
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
	else
		vmwrite(CPU_BASED_VM_EXEC_CONTROL,
			rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS));
	vmwrite(EXCEPTION_BITMAP, 0);
	vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
	vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
	vmwrite(CR3_TARGET_COUNT, 0);
	vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) |
		VM_EXIT_HOST_ADDR_SPACE_SIZE);	  /* 64-bit host */
	vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
	vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) |
		VM_ENTRY_IA32E_MODE);		  /* 64-bit guest */
	vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
	vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
	vmwrite(TPR_THRESHOLD, 0);

	vmwrite(CR0_GUEST_HOST_MASK, 0);
	vmwrite(CR4_GUEST_HOST_MASK, 0);
	vmwrite(CR0_READ_SHADOW, get_cr0());
	vmwrite(CR4_READ_SHADOW, get_cr4());

	vmwrite(MSR_BITMAP, vmx->msr_gpa);
	vmwrite(VMREAD_BITMAP, vmx->vmread_gpa);
	vmwrite(VMWRITE_BITMAP, vmx->vmwrite_gpa);
}

/*
 * Initialize the host state fields based on the current host state, with
 * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch
 * or vmresume.
 */
static inline void init_vmcs_host_state(void)
{
	uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS);

	vmwrite(HOST_ES_SELECTOR, get_es());
	vmwrite(HOST_CS_SELECTOR, get_cs());
	vmwrite(HOST_SS_SELECTOR, get_ss());
	vmwrite(HOST_DS_SELECTOR, get_ds());
	vmwrite(HOST_FS_SELECTOR, get_fs());
	vmwrite(HOST_GS_SELECTOR, get_gs());
	vmwrite(HOST_TR_SELECTOR, get_tr());

	if (exit_controls & VM_EXIT_LOAD_IA32_PAT)
		vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT));
	if (exit_controls & VM_EXIT_LOAD_IA32_EFER)
		vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER));
	if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		vmwrite(HOST_IA32_PERF_GLOBAL_CTRL,
			rdmsr(MSR_CORE_PERF_GLOBAL_CTRL));

	vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS));

	vmwrite(HOST_CR0, get_cr0());
	vmwrite(HOST_CR3, get_cr3());
	vmwrite(HOST_CR4, get_cr4());
	vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE));
	vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE));
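	/* Index the GDT with the TR selector to recover the TSS base. */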
	vmwrite(HOST_TR_BASE,
		get_desc64_base((struct desc64 *)(get_gdt_base() + get_tr())));
	vmwrite(HOST_GDTR_BASE, get_gdt_base());
	vmwrite(HOST_IDTR_BASE, get_idt_base());
	vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP));
	vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP));
}

/*
 * Initialize the guest state fields essentially as a clone of
 * the host state fields. Some host state fields have fixed
 * values, and we set the corresponding guest state fields accordingly.
 */
static inline void init_vmcs_guest_state(void *rip, void *rsp)
{
	vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR));
	vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR));
	vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR));
	vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR));
	vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR));
	vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR));
	vmwrite(GUEST_LDTR_SELECTOR, 0);
	vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR));
	vmwrite(GUEST_INTR_STATUS, 0);
	vmwrite(GUEST_PML_INDEX, 0);

	vmwrite(VMCS_LINK_POINTER, -1ll);
	vmwrite(GUEST_IA32_DEBUGCTL, 0);
	vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT));
	vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER));
	vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL,
		vmreadz(HOST_IA32_PERF_GLOBAL_CTRL));

	vmwrite(GUEST_ES_LIMIT, -1);
	vmwrite(GUEST_CS_LIMIT, -1);
	vmwrite(GUEST_SS_LIMIT, -1);
	vmwrite(GUEST_DS_LIMIT, -1);
	vmwrite(GUEST_FS_LIMIT, -1);
	vmwrite(GUEST_GS_LIMIT, -1);
	vmwrite(GUEST_LDTR_LIMIT, -1);
	vmwrite(GUEST_TR_LIMIT, 0x67);
	vmwrite(GUEST_GDTR_LIMIT, 0xffff);
	vmwrite(GUEST_IDTR_LIMIT, 0xffff);
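	/*
	 * Access-rights encodings: 0xa09b is a 64-bit code segment, 0xc093
	 * a flat writable data segment, and 0x10000 sets only the
	 * "unusable" bit, for null selectors.
	 */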
	vmwrite(GUEST_ES_AR_BYTES,
		vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
	vmwrite(GUEST_SS_AR_BYTES, 0xc093);
	vmwrite(GUEST_DS_AR_BYTES,
		vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_FS_AR_BYTES,
		vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_GS_AR_BYTES,
		vmreadz(GUEST_GS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_LDTR_AR_BYTES, 0x10000);
	vmwrite(GUEST_TR_AR_BYTES, 0x8b);
	vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
	vmwrite(GUEST_ACTIVITY_STATE, 0);
	vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS));
	vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0);

	vmwrite(GUEST_CR0, vmreadz(HOST_CR0));
	vmwrite(GUEST_CR3, vmreadz(HOST_CR3));
	vmwrite(GUEST_CR4, vmreadz(HOST_CR4));
	vmwrite(GUEST_ES_BASE, 0);
	vmwrite(GUEST_CS_BASE, 0);
	vmwrite(GUEST_SS_BASE, 0);
	vmwrite(GUEST_DS_BASE, 0);
	vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE));
	vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE));
	vmwrite(GUEST_LDTR_BASE, 0);
	vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE));
	vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE));
	vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE));
	vmwrite(GUEST_DR7, 0x400);
	vmwrite(GUEST_RSP, (uint64_t)rsp);
	vmwrite(GUEST_RIP, (uint64_t)rip);
	vmwrite(GUEST_RFLAGS, 2);
	vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0);
	vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP));
	vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP));
}

void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
{
	init_vmcs_control_fields(vmx);
	init_vmcs_host_state();
	init_vmcs_guest_state(guest_rip, guest_rsp);
}
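
/*
 * A minimal guest-side sketch of the expected call sequence, assuming the
 * test passed the vmx_pages GVA as the vCPU's first argument; the L2 entry
 * point and stack names are illustrative:
 *
 *	void l1_guest_code(struct vmx_pages *vmx_pages)
 *	{
 *		unsigned long l2_stack[128];
 *
 *		GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
 *		GUEST_ASSERT(load_vmcs(vmx_pages));
 *		prepare_vmcs(vmx_pages, l2_guest_code, &l2_stack[128]);
 *		GUEST_ASSERT(!vmlaunch());
 *	}
 */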