// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"

/*
 * Unlike "struct cpuinfo_x86.x86_capability", kvm_cpu_caps doesn't need to be
 * aligned to sizeof(unsigned long) because it's not accessed via bitops.
 */
u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
EXPORT_SYMBOL_GPL(kvm_cpu_caps);

static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;
			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}

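/*
 * Editor's note -- worked example with hypothetical numbers, for illustration
 * only: suppose the legacy area plus XSAVE header end at byte 576 and two
 * extended features report (size, offset) of (256, 576) and (64, 960) in
 * CPUID.0xD.  The standard format honours the reported offsets, giving
 * max(576 + 256, 960 + 64) = 1024 bytes, while the compacted format ignores
 * them and packs each enabled area right after the previous one, giving
 * 576 + 256 + 64 = 896 bytes.
 */
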
#define F feature_bit

static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	/*
	 * The existing code assumes virtual address is 48-bit or 57-bit in the
	 * canonical address checks; exit if it is ever changed.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	return 0;
}

void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (best) {
		/* Update OSXSAVE bit */
		if (boot_cpu_has(X86_FEATURE_XSAVE))
			cpuid_entry_change(best, X86_FEATURE_OSXSAVE,
					   kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE));

		cpuid_entry_change(best, X86_FEATURE_APIC,
				   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
	}

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
		cpuid_entry_change(best, X86_FEATURE_OSPKE,
				   kvm_read_cr4_bits(vcpu, X86_CR4_PKE));

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (best)
		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
		     cpuid_entry_has(best, X86_FEATURE_XSAVEC)))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
	    (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

	/*
	 * save the feature bitmap to avoid cpuid lookup for every PV
	 * operation
	 */
	if (best)
		vcpu->arch.pv_cpuid.features = best->eax;

	if (!kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT)) {
		best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
		if (best)
			cpuid_entry_change(best, X86_FEATURE_MWAIT,
					   vcpu->arch.ia32_misc_enable_msr &
					   MSR_IA32_MISC_ENABLE_MWAIT);
	}
}

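/*
 * Editor's note -- illustrative sketch only, not code from this file: the
 * bitmap cached above lets PV paths test a feature without walking the CPUID
 * entries, roughly along the lines of
 *
 *	if (!(vcpu->arch.pv_cpuid.features & (1 << KVM_FEATURE_PV_UNHALT)))
 *		reject the request;
 *
 * KVM wraps such checks in a helper rather than open-coding them as shown.
 */
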
static void kvm_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_lapic *apic = vcpu->arch.apic;
	struct kvm_cpuid_entry2 *best;

	kvm_x86_ops.vcpu_after_set_cpuid(vcpu);

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (best && apic) {
		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;

		kvm_apic_set_version(vcpu);
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best)
		vcpu->arch.guest_supported_xcr0 = 0;
	else
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) & supported_xcr0;

	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	kvm_mmu_reset_context(vcpu);

	kvm_pmu_refresh(vcpu);
	vcpu->arch.cr4_guest_rsvd_bits =
		__cr4_reserved_bits(guest_cpuid_has, vcpu);
	kvm_x86_ops.update_exception_bitmap(vcpu);
}

static int is_efer_nx(void)
{
	return host_efer & EFER_NX;
}

static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && cpuid_entry_has(entry, X86_FEATURE_NX) && !is_efer_nx()) {
		cpuid_entry_clear(entry, X86_FEATURE_NX);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}

/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries = NULL;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	if (cpuid->nent) {
		cpuid_entries = vmemdup_user(entries,
					     array_size(sizeof(struct kvm_cpuid_entry),
							cpuid->nent));
		if (IS_ERR(cpuid_entries)) {
			r = PTR_ERR(cpuid_entries);
			goto out;
		}
	}
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	r = kvm_check_cpuid(vcpu);
	if (r) {
		vcpu->arch.cpuid_nent = 0;
		kvfree(cpuid_entries);
		goto out;
	}

	cpuid_fix_nx_cap(vcpu);
	kvm_update_cpuid_runtime(vcpu);
	kvm_vcpu_after_set_cpuid(vcpu);

	kvfree(cpuid_entries);
out:
	return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	r = kvm_check_cpuid(vcpu);
	if (r) {
		vcpu->arch.cpuid_nent = 0;
		goto out;
	}

	kvm_update_cpuid_runtime(vcpu);
	kvm_vcpu_after_set_cpuid(vcpu);
out:
	return r;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static __always_inline void kvm_cpu_cap_mask(enum cpuid_leafs leaf, u32 mask)
{
	const struct cpuid_reg cpuid = x86_feature_cpuid(leaf * 32);
	struct kvm_cpuid_entry2 entry;

	reverse_cpuid_check(leaf);
	kvm_cpu_caps[leaf] &= mask;

	cpuid_count(cpuid.function, cpuid.index,
		    &entry.eax, &entry.ebx, &entry.ecx, &entry.edx);

	kvm_cpu_caps[leaf] &= *__cpuid_entry_get_reg(&entry, cpuid.reg);
}

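/*
 * Editor's note -- concrete example of the masking above: for
 * leaf == CPUID_7_0_EBX, x86_feature_cpuid(leaf * 32) resolves to CPUID
 * function 0x7, index 0, register EBX, so a capability bit survives in
 * kvm_cpu_caps only if it is set in boot_cpu_data (copied in
 * kvm_set_cpu_caps()), in the caller-supplied mask, and in the raw host
 * CPUID output read here.
 */
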
void kvm_set_cpu_caps(void)
{
	unsigned int f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned int f_gbpages = F(GBPAGES);
	unsigned int f_lm = F(LM);
#else
	unsigned int f_gbpages = 0;
	unsigned int f_lm = 0;
#endif

	BUILD_BUG_ON(sizeof(kvm_cpu_caps) >
		     sizeof(boot_cpu_data.x86_capability));

	memcpy(&kvm_cpu_caps, &boot_cpu_data.x86_capability,
	       sizeof(kvm_cpu_caps));

	kvm_cpu_cap_mask(CPUID_1_ECX,
		/*
		 * NOTE: MONITOR (and MWAIT) are emulated as NOP, but *not*
		 * advertised to guests via CPUID!
		 */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update */ | F(PDCM) |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND)
	);
	/* KVM emulates x2apic in software irrespective of host support. */
	kvm_cpu_cap_set(X86_FEATURE_X2APIC);

	kvm_cpu_cap_mask(CPUID_1_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */
	);

	kvm_cpu_cap_mask(CPUID_7_0_EBX,
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | 0 /*INVPCID*/ | F(RTM) | 0 /*MPX*/ | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
		F(SHA_NI) | F(AVX512BW) | F(AVX512VL) | 0 /*INTEL_PT*/
	);

	kvm_cpu_cap_mask(CPUID_7_ECX,
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ | F(RDPID) |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG) |
		F(CLDEMOTE) | F(MOVDIRI) | F(MOVDIR64B) | 0 /*WAITPKG*/
	);
	/* Set LA57 based on hardware capability. */
	if (cpuid_ecx(7) & F(LA57))
		kvm_cpu_cap_set(X86_FEATURE_LA57);

	/*
	 * PKU not yet implemented for shadow paging and requires OSPKE
	 * to be set on the host. Clear it if that is not the case
	 */
	if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
		kvm_cpu_cap_clear(X86_FEATURE_PKU);

	kvm_cpu_cap_mask(CPUID_7_EDX,
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES) | F(INTEL_STIBP) |
		F(MD_CLEAR) | F(AVX512_VP2INTERSECT) | F(FSRM) |
		F(SERIALIZE) | F(TSXLDTRK)
	);

	/* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
	kvm_cpu_cap_set(X86_FEATURE_TSC_ADJUST);
	kvm_cpu_cap_set(X86_FEATURE_ARCH_CAPABILITIES);

	if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_INTEL_STIBP);
	if (boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_SPEC_CTRL_SSBD);

	kvm_cpu_cap_mask(CPUID_7_1_EAX,
		F(AVX512_BF16)
	);

	kvm_cpu_cap_mask(CPUID_D_1_EAX,
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | F(XSAVES)
	);

	kvm_cpu_cap_mask(CPUID_8000_0001_ECX,
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | F(PERFCTR_CORE)
	);

	kvm_cpu_cap_mask(CPUID_8000_0001_EDX,
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | F(RDTSCP) |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW)
	);

	if (!tdp_enabled && IS_ENABLED(CONFIG_X86_64))
		kvm_cpu_cap_set(X86_FEATURE_GBPAGES);

	kvm_cpu_cap_mask(CPUID_8000_0008_EBX,
		F(CLZERO) | F(XSAVEERPTR) |
		F(WBNOINVD) | F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO) | F(AMD_STIBP) | F(AMD_STIBP_ALWAYS_ON)
	);

	/*
	 * AMD has separate bits for each SPEC_CTRL bit.
	 * arch/x86/kernel/cpu/bugs.c is kind enough to
	 * record that in cpufeatures so use them.
	 */
	if (boot_cpu_has(X86_FEATURE_IBPB))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBPB);
	if (boot_cpu_has(X86_FEATURE_IBRS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_IBRS);
	if (boot_cpu_has(X86_FEATURE_STIBP))
		kvm_cpu_cap_set(X86_FEATURE_AMD_STIBP);
	if (boot_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSBD);
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		kvm_cpu_cap_set(X86_FEATURE_AMD_SSB_NO);
	/*
	 * The preference is to use SPEC CTRL MSR instead of the
	 * VIRT_SPEC MSR.
	 */
	if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
		kvm_cpu_cap_set(X86_FEATURE_VIRT_SSBD);

	/*
	 * Hide all SVM features by default, SVM will set the cap bits for
	 * features it emulates and/or exposes for L1.
	 */
	kvm_cpu_cap_mask(CPUID_8000_000A_EDX, 0);

	kvm_cpu_cap_mask(CPUID_C000_0001_EDX,
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN)
	);
}
EXPORT_SYMBOL_GPL(kvm_set_cpu_caps);

struct kvm_cpuid_array {
	struct kvm_cpuid_entry2 *entries;
	int maxnent;
	int nent;
};

static struct kvm_cpuid_entry2 *do_host_cpuid(struct kvm_cpuid_array *array,
					      u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *entry;

	if (array->nent >= array->maxnent)
		return NULL;

	entry = &array->entries[array->nent++];

	entry->function = function;
	entry->index = index;
	entry->flags = 0;

	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);

	switch (function) {
	case 4:
	case 7:
	case 0xb:
	case 0xd:
	case 0xf:
	case 0x10:
	case 0x12:
	case 0x14:
	case 0x17:
	case 0x18:
	case 0x1f:
	case 0x8000001d:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		break;
	}

	return entry;
}

static int __do_cpuid_func_emulated(struct kvm_cpuid_array *array, u32 func)
{
	struct kvm_cpuid_entry2 *entry;

	if (array->nent >= array->maxnent)
		return -E2BIG;

	entry = &array->entries[array->nent];
	entry->function = func;
	entry->index = 0;
	entry->flags = 0;

	switch (func) {
	case 0:
		entry->eax = 7;
		++array->nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++array->nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		entry->eax = 0;
		entry->ecx = F(RDPID);
		++array->nent;
	default:
		break;
	}

	return 0;
}

static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
{
	struct kvm_cpuid_entry2 *entry;
	int r, i, max_idx;

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	entry = do_host_cpuid(array, function, 0);
	if (!entry)
		goto out;

	switch (function) {
	case 0:
		/* Limited to the highest leaf implemented in KVM. */
		entry->eax = min(entry->eax, 0x1fU);
		break;
	case 1:
		cpuid_entry_override(entry, CPUID_1_EDX);
		cpuid_entry_override(entry, CPUID_1_ECX);
		break;
	case 2:
		/*
		 * On ancient CPUs, function 2 entries are STATEFUL.  That is,
		 * CPUID(function=2, index=0) may return different results each
		 * time, with the least-significant byte in EAX enumerating the
		 * number of times software should do CPUID(2, 0).
		 *
		 * Modern CPUs, i.e. every CPU KVM has *ever* run on are less
		 * idiotic.  Intel's SDM states that EAX & 0xff "will always
		 * return 01H. Software should ignore this value and not
		 * interpret it as an informational descriptor", while AMD's
		 * APM states that CPUID(2) is reserved.
		 *
		 * WARN if a frankenstein CPU that supports virtualization and
		 * a stateful CPUID.0x2 is encountered.
		 */
		WARN_ON_ONCE((entry->eax & 0xff) > 1);
		break;
	/* functions 4 and 0x8000001d have additional index. */
	case 4:
	case 0x8000001d:
		/*
		 * Read entries until the cache type in the previous entry is
		 * zero, i.e. indicates an invalid entry.
		 */
		for (i = 1; entry->eax & 0x1f; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	/* function 7 has additional index. */
	case 7:
		entry->eax = min(entry->eax, 1u);
		cpuid_entry_override(entry, CPUID_7_0_EBX);
		cpuid_entry_override(entry, CPUID_7_ECX);
		cpuid_entry_override(entry, CPUID_7_EDX);

		/* KVM only supports 0x7.0 and 0x7.1, capped above via min(). */
		if (entry->eax == 1) {
			entry = do_host_cpuid(array, function, 1);
			if (!entry)
				goto out;

			cpuid_entry_override(entry, CPUID_7_1_EAX);
			entry->ebx = 0;
			entry->ecx = 0;
			entry->edx = 0;
		}
		break;
	case 9:
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = min(cap.num_counters_fixed, MAX_FIXED_COUNTERS);
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	/*
	 * Per Intel's SDM, the 0x1f is a superset of 0xb,
	 * thus they can be handled by common code.
	 */
	case 0x1f:
	case 0xb:
		/*
		 * Populate entries until the level type (ECX[15:8]) of the
		 * previous entry is zero.  Note, CPUID EAX.{0x1f,0xb}.0 is
		 * the starting entry, filled by the primary do_host_cpuid().
		 */
		for (i = 1; entry->ecx & 0xff00; ++i) {
			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;
		}
		break;
	case 0xd:
		entry->eax &= supported_xcr0;
		entry->ebx = xstate_required_size(supported_xcr0, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported_xcr0 >> 32;
		if (!supported_xcr0)
			break;

		entry = do_host_cpuid(array, function, 1);
		if (!entry)
			goto out;

		cpuid_entry_override(entry, CPUID_D_1_EAX);
		if (entry->eax & (F(XSAVES)|F(XSAVEC)))
			entry->ebx = xstate_required_size(supported_xcr0 | supported_xss,
							  true);
		else {
			WARN_ON_ONCE(supported_xss != 0);
			entry->ebx = 0;
		}
		entry->ecx &= supported_xss;
		entry->edx &= supported_xss >> 32;

		for (i = 2; i < 64; ++i) {
			bool s_state;
			if (supported_xcr0 & BIT_ULL(i))
				s_state = false;
			else if (supported_xss & BIT_ULL(i))
				s_state = true;
			else
				continue;

			entry = do_host_cpuid(array, function, i);
			if (!entry)
				goto out;

			/*
			 * The supported check above should have filtered out
			 * invalid sub-leafs.  Only valid sub-leafs should
			 * reach this point, and they should have a non-zero
			 * save state size.  Furthermore, check whether the
			 * processor agrees with supported_xcr0/supported_xss
			 * on whether this is an XCR0- or IA32_XSS-managed area.
			 */
			if (WARN_ON_ONCE(!entry->eax || (entry->ecx & 0x1) != s_state)) {
				--array->nent;
				continue;
			}
			entry->edx = 0;
		}
		break;
	/* Intel PT */
	case 0x14:
		if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}

		for (i = 1, max_idx = entry->eax; i <= max_idx; ++i) {
			if (!do_host_cpuid(array, function, i))
				goto out;
		}
		break;
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT) |
			     (1 << KVM_FEATURE_PV_SEND_IPI) |
			     (1 << KVM_FEATURE_POLL_CONTROL) |
			     (1 << KVM_FEATURE_PV_SCHED_YIELD) |
			     (1 << KVM_FEATURE_ASYNC_PF_INT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001f);
		break;
	case 0x80000001:
		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
		break;
	case 0x80000006:
		/* L2 cache and TLB: pass through host info. */
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
		break;
	}
	case 0x8000000A:
		if (!kvm_cpu_cap_has(X86_FEATURE_SVM)) {
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
			break;
		}
		entry->eax = 1; /* SVM revision 1 */
		entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
				   ASID emulation to nested SVM */
		entry->ecx = 0; /* Reserved */
		cpuid_entry_override(entry, CPUID_8000_000A_EDX);
		break;
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
	case 0x8000001e:
		break;
	/* Support memory encryption cpuid if host supports it */
	case 0x8000001F:
		if (!boot_cpu_has(X86_FEATURE_SEV))
			entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	/* Add support for Centaur's CPUID instruction */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		cpuid_entry_override(entry, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	r = 0;

out:
	put_cpu();

	return r;
}

static int do_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			 unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_func_emulated(array, func);

	return __do_cpuid_func(array, func);
}

#define CENTAUR_CPUID_SIGNATURE 0xC0000000

static int get_cpuid_func(struct kvm_cpuid_array *array, u32 func,
			  unsigned int type)
{
	u32 limit;
	int r;

	if (func == CENTAUR_CPUID_SIGNATURE &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR)
		return 0;

	r = do_cpuid_func(array, func, type);
	if (r)
		return r;

	limit = array->entries[array->nent - 1].eax;
	for (func = func + 1; func <= limit; ++func) {
		r = do_cpuid_func(array, func, type);
		if (r)
			break;
	}

	return r;
}

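/*
 * Editor's note -- illustrative walk-through of the loop above: for the basic
 * range, get_cpuid_func() first emits function 0, takes that entry's EAX
 * (capped at 0x1f by __do_cpuid_func()) as the limit, then emits one entry
 * per function up to the limit.  The same pattern repeats for the 0x80000000,
 * Centaur and KVM signature bases listed in funcs[] below.
 */
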
static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
	 * have to give ourselves satisfied only with the emulated side. /me
	 * sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	static const u32 funcs[] = {
		0, 0x80000000, CENTAUR_CPUID_SIGNATURE, KVM_CPUID_SIGNATURE,
	};

	struct kvm_cpuid_array array = {
		.nent = 0,
	};
	int r, i;

	if (cpuid->nent < 1)
		return -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	array.entries = vzalloc(array_size(sizeof(struct kvm_cpuid_entry2),
					   cpuid->nent));
	if (!array.entries)
		return -ENOMEM;

	array.maxnent = cpuid->nent;

	for (i = 0; i < ARRAY_SIZE(funcs); i++) {
		r = get_cpuid_func(&array, funcs[i], type);
		if (r)
			goto out_free;
	}
	cpuid->nent = array.nent;

	if (copy_to_user(entries, array.entries,
			 array.nent * sizeof(struct kvm_cpuid_entry2)))
		r = -EFAULT;

out_free:
	vfree(array.entries);
	return r;
}

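/*
 * Editor's note -- rough userspace usage sketch (illustrative only; error
 * handling and allocation details omitted):
 *
 *	struct kvm_cpuid2 *cpuid;	// sized for KVM_MAX_CPUID_ENTRIES entries
 *
 *	cpuid->nent = KVM_MAX_CPUID_ENTRIES;
 *	ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);	// served by the function above
 *	ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);		// handled by kvm_vcpu_ioctl_set_cpuid2()
 *
 * A VMM will typically trim or adjust the returned entries before the second
 * ioctl().
 */
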
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *e;
	int i;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];

		if (e->function == function && (e->index == index ||
		    !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
			return e;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

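/*
 * Editor's note -- example of the matching rule in kvm_find_cpuid_entry():
 * leaf 0x7 is flagged KVM_CPUID_FLAG_SIGNIFCANT_INDEX by do_host_cpuid(), so
 * a lookup for (0x7, 1) matches only an entry whose index is 1, whereas
 * leaf 0x1 carries no such flag and matches whatever index is passed in.
 */
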
/*
 * Intel CPUID semantics treats any query for an out-of-range leaf as if the
 * highest basic leaf (i.e. CPUID.0H:EAX) were requested.  AMD CPUID semantics
 * returns all zeroes for any undefined leaf, whether or not the leaf is in
 * range.  Centaur/VIA follows Intel semantics.
 *
 * A leaf is considered out-of-range if its function is higher than the maximum
 * supported leaf of its associated class or if its associated class does not
 * exist.
 *
 * There are four primary classes to be considered, with their respective
 * ranges described as "<base> - <top>[,<base2> - <top2>]" inclusive.  A primary
 * class exists if a guest CPUID entry for its <base> leaf exists.  For a given
 * class, CPUID.<base>.EAX contains the max supported leaf for the class.
 *
 *  - Basic:      0x00000000 - 0x3fffffff, 0x50000000 - 0x7fffffff
 *  - Hypervisor: 0x40000000 - 0x4fffffff
 *  - Extended:   0x80000000 - 0xbfffffff
 *  - Centaur:    0xc0000000 - 0xcfffffff
 *
 * The Hypervisor class is further subdivided into sub-classes that each act as
 * their own independent class associated with a 0x100 byte range.  E.g. if Qemu
 * is advertising support for both HyperV and KVM, the resulting Hypervisor
 * CPUID sub-classes are:
 *
 *  - HyperV:     0x40000000 - 0x400000ff
 *  - KVM:        0x40000100 - 0x400001ff
 */
static struct kvm_cpuid_entry2 *
get_out_of_range_cpuid_entry(struct kvm_vcpu *vcpu, u32 *fn_ptr, u32 index)
{
	struct kvm_cpuid_entry2 *basic, *class;
	u32 function = *fn_ptr;

	basic = kvm_find_cpuid_entry(vcpu, 0, 0);
	if (!basic)
		return NULL;

	if (is_guest_vendor_amd(basic->ebx, basic->ecx, basic->edx) ||
	    is_guest_vendor_hygon(basic->ebx, basic->ecx, basic->edx))
		return NULL;

	if (function >= 0x40000000 && function <= 0x4fffffff)
		class = kvm_find_cpuid_entry(vcpu, function & 0xffffff00, 0);
	else if (function >= 0xc0000000)
		class = kvm_find_cpuid_entry(vcpu, 0xc0000000, 0);
	else
		class = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);

	if (class && function <= class->eax)
		return NULL;

	/*
	 * Leaf specific adjustments are also applied when redirecting to the
	 * max basic entry, e.g. if the max basic leaf is 0xb but there is no
	 * entry for CPUID.0xb.index (see below), then the output value for EDX
	 * needs to be pulled from CPUID.0xb.1.
	 */
	*fn_ptr = basic->eax;

	/*
	 * The class does not exist or the requested function is out of range;
	 * the effective CPUID entry is the max basic leaf.  Note, the index of
	 * the original requested leaf is observed!
	 */
	return kvm_find_cpuid_entry(vcpu, basic->eax, index);
}

bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool exact_only)
{
	u32 orig_function = *eax, function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *entry;
	bool exact, used_max_basic = false;

	entry = kvm_find_cpuid_entry(vcpu, function, index);
	exact = !!entry;

	if (!entry && !exact_only) {
		entry = get_out_of_range_cpuid_entry(vcpu, &function, index);
		used_max_basic = !!entry;
	}

	if (entry) {
		*eax = entry->eax;
		*ebx = entry->ebx;
		*ecx = entry->ecx;
		*edx = entry->edx;
		if (function == 7 && index == 0) {
			u64 data;
			if (!__kvm_get_msr(vcpu, MSR_IA32_TSX_CTRL, &data, true) &&
			    (data & TSX_CTRL_CPUID_CLEAR))
				*ebx &= ~(F(RTM) | F(HLE));
		}
	} else {
		*eax = *ebx = *ecx = *edx = 0;
		/*
		 * When leaf 0BH or 1FH is defined, CL is pass-through
		 * and EDX is always the x2APIC ID, even for undefined
		 * subleaves.  Index 1 will exist iff the leaf is
		 * implemented, so we pass through CL iff leaf 1
		 * exists.  EDX can be copied from any existing index.
		 */
		if (function == 0xb || function == 0x1f) {
			entry = kvm_find_cpuid_entry(vcpu, function, 1);
			if (entry) {
				*ecx = index & 0xff;
				*edx = entry->edx;
			}
		}
	}
	trace_kvm_cpuid(orig_function, index, *eax, *ebx, *ecx, *edx, exact,
			used_max_basic);
	return exact;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_rax_read(vcpu);
	ecx = kvm_rcx_read(vcpu);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, false);
	kvm_rax_write(vcpu, eax);
	kvm_rbx_write(vcpu, ebx);
	kvm_rcx_write(vcpu, ecx);
	kvm_rdx_write(vcpu, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);