arch/x86/kvm/cpuid.c

/*
 * Kernel-based Virtual Machine driver for Linux
 * cpuid support routines
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 * Copyright IBM Corporation, 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/sched/stat.h>

#include <asm/processor.h>
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
#include "lapic.h"
#include "mmu.h"
#include "trace.h"
#include "pmu.h"

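/*
 * Compute the size of the XSAVE area needed for the state components set in
 * @xstate_bv, by querying CPUID leaf 0xD on the host for every enabled
 * feature bit above the legacy region.  For the compacted format the next
 * component is placed immediately after the previous one; for the standard
 * format each component sits at the fixed offset reported in EBX.
 */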
static u32 xstate_required_size(u64 xstate_bv, bool compacted)
{
	int feature_bit = 0;
	u32 ret = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;

	xstate_bv &= XFEATURE_MASK_EXTEND;
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			u32 eax, ebx, ecx, edx, offset;
			cpuid_count(0xD, feature_bit, &eax, &ebx, &ecx, &edx);
			offset = compacted ? ret : ebx;
			ret = max(ret, offset + eax);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}

	return ret;
}

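/*
 * MPX guest state can only be exposed when the host has the BNDREGS/BNDCSR
 * xsave components enabled in XCR0 and the vendor module reports MPX
 * support; kvm_supported_xcr0() masks those bits out otherwise.
 */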
bool kvm_mpx_supported(void)
{
	return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
		&& kvm_x86_ops->mpx_supported());
}
EXPORT_SYMBOL_GPL(kvm_mpx_supported);

u64 kvm_supported_xcr0(void)
{
	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;

	if (!kvm_mpx_supported())
		xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);

	return xcr0;
}

#define F(x) bit(X86_FEATURE_##x)

/* For scattered features from cpufeatures.h; we currently expose none */
#define KF(x) bit(KVM_CPUID_BIT_##x)

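/*
 * Recompute the dynamic parts of the guest's CPUID after userspace sets or
 * KVM adjusts the entries: mirror CR4.OSXSAVE/CR4.PKE into the OSXSAVE and
 * OSPKE bits, mirror the APIC-enable state into the APIC bit, refresh the
 * supported XCR0 mask and xstate buffer sizes, reject unexpected virtual
 * address widths, and update maxphyaddr, the MMU and the PMU to match.
 */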
int kvm_update_cpuid(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;
	struct kvm_lapic *apic = vcpu->arch.apic;

	best = kvm_find_cpuid_entry(vcpu, 1, 0);
	if (!best)
		return 0;

	/* Update OSXSAVE bit */
	if (boot_cpu_has(X86_FEATURE_XSAVE) && best->function == 0x1) {
		best->ecx &= ~F(OSXSAVE);
		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
			best->ecx |= F(OSXSAVE);
	}

	best->edx &= ~F(APIC);
	if (vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE)
		best->edx |= F(APIC);

	if (apic) {
		if (best->ecx & F(TSC_DEADLINE_TIMER))
			apic->lapic_timer.timer_mode_mask = 3 << 17;
		else
			apic->lapic_timer.timer_mode_mask = 1 << 17;
	}

	best = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (best) {
		/* Update OSPKE bit */
		if (boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7) {
			best->ecx &= ~F(OSPKE);
			if (kvm_read_cr4_bits(vcpu, X86_CR4_PKE))
				best->ecx |= F(OSPKE);
		}
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
	if (!best) {
		vcpu->arch.guest_supported_xcr0 = 0;
		vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
	} else {
		vcpu->arch.guest_supported_xcr0 =
			(best->eax | ((u64)best->edx << 32)) &
			kvm_supported_xcr0();
		vcpu->arch.guest_xstate_size = best->ebx =
			xstate_required_size(vcpu->arch.xcr0, false);
	}

	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
	if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
		best->ebx = xstate_required_size(vcpu->arch.xcr0, true);

	/*
	 * The existing code assumes virtual address is 48-bit or 57-bit in the
	 * canonical address checks; exit if it is ever changed.
	 */
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best) {
		int vaddr_bits = (best->eax & 0xff00) >> 8;

		if (vaddr_bits != 48 && vaddr_bits != 57 && vaddr_bits != 0)
			return -EINVAL;
	}

	best = kvm_find_cpuid_entry(vcpu, KVM_CPUID_FEATURES, 0);
	if (kvm_hlt_in_guest(vcpu->kvm) && best &&
	    (best->eax & (1 << KVM_FEATURE_PV_UNHALT)))
		best->eax &= ~(1 << KVM_FEATURE_PV_UNHALT);

	/* Update physical-address width */
	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
	kvm_mmu_reset_context(vcpu);

	kvm_pmu_refresh(vcpu);
	return 0;
}

static int is_efer_nx(void)
{
	unsigned long long efer = 0;

	rdmsrl_safe(MSR_EFER, &efer);
	return efer & EFER_NX;
}

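/*
 * If EFER.NX is not enabled on the host, the guest cannot use NX either;
 * drop the NX bit from the 0x80000001 entry that userspace supplied.
 */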
static void cpuid_fix_nx_cap(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_cpuid_entry2 *e, *entry;

	entry = NULL;
	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		e = &vcpu->arch.cpuid_entries[i];
		if (e->function == 0x80000001) {
			entry = e;
			break;
		}
	}
	if (entry && (entry->edx & F(NX)) && !is_efer_nx()) {
		entry->edx &= ~F(NX);
		printk(KERN_INFO "kvm: guest NX capability removed\n");
	}
}

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu)
{
	struct kvm_cpuid_entry2 *best;

	best = kvm_find_cpuid_entry(vcpu, 0x80000000, 0);
	if (!best || best->eax < 0x80000008)
		goto not_found;
	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
	if (best)
		return best->eax & 0xff;
not_found:
	return 36;
}
EXPORT_SYMBOL_GPL(cpuid_query_maxphyaddr);

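/*
 * Legacy KVM_SET_CPUID path: userspace passes struct kvm_cpuid_entry, which
 * has no index or flags, so each entry is widened into a kvm_cpuid_entry2
 * with index, flags and padding cleared before the common update path runs.
 */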
/* when an old userspace process fills a new kernel module */
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
			     struct kvm_cpuid *cpuid,
			     struct kvm_cpuid_entry __user *entries)
{
	int r, i;
	struct kvm_cpuid_entry *cpuid_entries = NULL;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -ENOMEM;
	if (cpuid->nent) {
		cpuid_entries = vmalloc(sizeof(struct kvm_cpuid_entry) *
					cpuid->nent);
		if (!cpuid_entries)
			goto out;
		r = -EFAULT;
		if (copy_from_user(cpuid_entries, entries,
				   cpuid->nent * sizeof(struct kvm_cpuid_entry)))
			goto out;
	}
	for (i = 0; i < cpuid->nent; i++) {
		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
		vcpu->arch.cpuid_entries[i].index = 0;
		vcpu->arch.cpuid_entries[i].flags = 0;
		vcpu->arch.cpuid_entries[i].padding[0] = 0;
		vcpu->arch.cpuid_entries[i].padding[1] = 0;
		vcpu->arch.cpuid_entries[i].padding[2] = 0;
	}
	vcpu->arch.cpuid_nent = cpuid->nent;
	cpuid_fix_nx_cap(vcpu);
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);

out:
	vfree(cpuid_entries);
	return r;
}

int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		goto out;
	r = -EFAULT;
	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	vcpu->arch.cpuid_nent = cpuid->nent;
	kvm_apic_set_version(vcpu);
	kvm_x86_ops->cpuid_update(vcpu);
	r = kvm_update_cpuid(vcpu);
out:
	return r;
}

int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
			      struct kvm_cpuid2 *cpuid,
			      struct kvm_cpuid_entry2 __user *entries)
{
	int r;

	r = -E2BIG;
	if (cpuid->nent < vcpu->arch.cpuid_nent)
		goto out;
	r = -EFAULT;
	if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
			 vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
		goto out;
	return 0;

out:
	cpuid->nent = vcpu->arch.cpuid_nent;
	return r;
}

static void cpuid_mask(u32 *word, int wordnum)
{
	*word &= boot_cpu_data.x86_capability[wordnum];
}

static void do_cpuid_1_ent(struct kvm_cpuid_entry2 *entry, u32 function,
			   u32 index)
{
	entry->function = function;
	entry->index = index;
	cpuid_count(entry->function, entry->index,
		    &entry->eax, &entry->ebx, &entry->ecx, &entry->edx);
	entry->flags = 0;
}

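/*
 * Build the KVM_GET_EMULATED_CPUID view: only features that KVM emulates
 * entirely in software (MOVBE in leaf 1, RDPID in leaf 7) are reported
 * here, regardless of what the host CPU supports.
 */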
static int __do_cpuid_ent_emulated(struct kvm_cpuid_entry2 *entry,
				   u32 func, u32 index, int *nent, int maxnent)
{
	switch (func) {
	case 0:
		entry->eax = 7;
		++*nent;
		break;
	case 1:
		entry->ecx = F(MOVBE);
		++*nent;
		break;
	case 7:
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		if (index == 0)
			entry->ecx = F(RDPID);
		++*nent;
	default:
		break;
	}

	entry->function = func;
	entry->index = index;

	return 0;
}

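/*
 * Fill @entry (and, for leaves with multiple subleaves, the entries that
 * follow it) with the CPUID values KVM can expose to a guest: start from
 * the host's CPUID, mask each register with the per-leaf feature lists
 * below and the host capability words, and force-set bits KVM emulates in
 * software (X2APIC, TSC_ADJUST, ARCH_CAPABILITIES, ...).  Returns -E2BIG
 * when more than @maxnent entries would be needed.
 */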
static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
				 u32 index, int *nent, int maxnent)
{
	int r;
	unsigned f_nx = is_efer_nx() ? F(NX) : 0;
#ifdef CONFIG_X86_64
	unsigned f_gbpages = (kvm_x86_ops->get_lpage_level() == PT_PDPE_LEVEL)
				? F(GBPAGES) : 0;
	unsigned f_lm = F(LM);
#else
	unsigned f_gbpages = 0;
	unsigned f_lm = 0;
#endif
	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
	unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
	unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
	unsigned f_umip = kvm_x86_ops->umip_emulated() ? F(UMIP) : 0;

	/* cpuid 1.edx */
	const u32 kvm_cpuid_1_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SEP) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* PSN */ | F(CLFLUSH) |
		0 /* Reserved, DS, ACPI */ | F(MMX) |
		F(FXSR) | F(XMM) | F(XMM2) | F(SELFSNOOP) |
		0 /* HTT, TM, Reserved, PBE */;
	/* cpuid 0x80000001.edx */
	const u32 kvm_cpuid_8000_0001_edx_x86_features =
		F(FPU) | F(VME) | F(DE) | F(PSE) |
		F(TSC) | F(MSR) | F(PAE) | F(MCE) |
		F(CX8) | F(APIC) | 0 /* Reserved */ | F(SYSCALL) |
		F(MTRR) | F(PGE) | F(MCA) | F(CMOV) |
		F(PAT) | F(PSE36) | 0 /* Reserved */ |
		f_nx | 0 /* Reserved */ | F(MMXEXT) | F(MMX) |
		F(FXSR) | F(FXSR_OPT) | f_gbpages | f_rdtscp |
		0 /* Reserved */ | f_lm | F(3DNOWEXT) | F(3DNOW);
	/* cpuid 1.ecx */
	const u32 kvm_cpuid_1_ecx_x86_features =
		/* NOTE: MONITOR (and MWAIT) are emulated as NOP,
		 * but *not* advertised to guests via CPUID ! */
		F(XMM3) | F(PCLMULQDQ) | 0 /* DTES64, MONITOR */ |
		0 /* DS-CPL, VMX, SMX, EST */ |
		0 /* TM2 */ | F(SSSE3) | 0 /* CNXT-ID */ | 0 /* Reserved */ |
		F(FMA) | F(CX16) | 0 /* xTPR Update, PDCM */ |
		F(PCID) | 0 /* Reserved, DCA */ | F(XMM4_1) |
		F(XMM4_2) | F(X2APIC) | F(MOVBE) | F(POPCNT) |
		0 /* Reserved*/ | F(AES) | F(XSAVE) | 0 /* OSXSAVE */ | F(AVX) |
		F(F16C) | F(RDRAND);
	/* cpuid 0x80000001.ecx */
	const u32 kvm_cpuid_8000_0001_ecx_x86_features =
		F(LAHF_LM) | F(CMP_LEGACY) | 0 /*SVM*/ | 0 /* ExtApicSpace */ |
		F(CR8_LEGACY) | F(ABM) | F(SSE4A) | F(MISALIGNSSE) |
		F(3DNOWPREFETCH) | F(OSVW) | 0 /* IBS */ | F(XOP) |
		0 /* SKINIT, WDT, LWP */ | F(FMA4) | F(TBM) |
		F(TOPOEXT) | F(PERFCTR_CORE);

	/* cpuid 0x80000008.ebx */
	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
		F(AMD_IBPB) | F(AMD_IBRS) | F(AMD_SSBD) | F(VIRT_SSBD) |
		F(AMD_SSB_NO);

	/* cpuid 0xC0000001.edx */
	const u32 kvm_cpuid_C000_0001_edx_x86_features =
		F(XSTORE) | F(XSTORE_EN) | F(XCRYPT) | F(XCRYPT_EN) |
		F(ACE2) | F(ACE2_EN) | F(PHE) | F(PHE_EN) |
		F(PMM) | F(PMM_EN);

	/* cpuid 7.0.ebx */
	const u32 kvm_cpuid_7_0_ebx_x86_features =
		F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
		F(BMI2) | F(ERMS) | f_invpcid | F(RTM) | f_mpx | F(RDSEED) |
		F(ADX) | F(SMAP) | F(AVX512IFMA) | F(AVX512F) | F(AVX512PF) |
		F(AVX512ER) | F(AVX512CD) | F(CLFLUSHOPT) | F(CLWB) | F(AVX512DQ) |
		F(SHA_NI) | F(AVX512BW) | F(AVX512VL);

	/* cpuid 0xD.1.eax */
	const u32 kvm_cpuid_D_1_eax_x86_features =
		F(XSAVEOPT) | F(XSAVEC) | F(XGETBV1) | f_xsaves;

	/* cpuid 7.0.ecx */
	const u32 kvm_cpuid_7_0_ecx_x86_features =
		F(AVX512VBMI) | F(LA57) | F(PKU) | 0 /*OSPKE*/ |
		F(AVX512_VPOPCNTDQ) | F(UMIP) | F(AVX512_VBMI2) | F(GFNI) |
		F(VAES) | F(VPCLMULQDQ) | F(AVX512_VNNI) | F(AVX512_BITALG);

	/* cpuid 7.0.edx */
	const u32 kvm_cpuid_7_0_edx_x86_features =
		F(AVX512_4VNNIW) | F(AVX512_4FMAPS) | F(SPEC_CTRL) |
		F(SPEC_CTRL_SSBD) | F(ARCH_CAPABILITIES);

	/* all calls to cpuid_count() should be made on the same cpu */
	get_cpu();

	r = -E2BIG;

	if (*nent >= maxnent)
		goto out;

	do_cpuid_1_ent(entry, function, index);
	++*nent;

	switch (function) {
	case 0:
		entry->eax = min(entry->eax, (u32)0xd);
		break;
	case 1:
		entry->edx &= kvm_cpuid_1_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_1_EDX);
		entry->ecx &= kvm_cpuid_1_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_1_ECX);
		/* we support x2apic emulation even if host does not support
		 * it since we emulate x2apic in software */
		entry->ecx |= F(X2APIC);
		break;
	/* function 2 entries are STATEFUL. That is, repeated cpuid commands
	 * may return different values. This forces us to get_cpu() before
	 * issuing the first command, and also to emulate this annoying behavior
	 * in kvm_emulate_cpuid() using KVM_CPUID_FLAG_STATE_READ_NEXT */
	case 2: {
		int t, times = entry->eax & 0xff;

		entry->flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
		entry->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
		for (t = 1; t < times; ++t) {
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[t], function, 0);
			entry[t].flags |= KVM_CPUID_FLAG_STATEFUL_FUNC;
			++*nent;
		}
		break;
	}
	/* function 4 has additional index. */
	case 4: {
		int i, cache_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until cache_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			cache_type = entry[i - 1].eax & 0x1f;
			if (!cache_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
				KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 6: /* Thermal management */
		entry->eax = 0x4; /* allow ARAT */
		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 7: {
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* Mask ebx against host capability word 9 */
		if (index == 0) {
			entry->ebx &= kvm_cpuid_7_0_ebx_x86_features;
			cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
			// TSC_ADJUST is emulated
			entry->ebx |= F(TSC_ADJUST);
			entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
			cpuid_mask(&entry->ecx, CPUID_7_ECX);
			entry->ecx |= f_umip;
			/* PKU is not yet implemented for shadow paging. */
			if (!tdp_enabled || !boot_cpu_has(X86_FEATURE_OSPKE))
				entry->ecx &= ~F(PKU);
			entry->edx &= kvm_cpuid_7_0_edx_x86_features;
			cpuid_mask(&entry->edx, CPUID_7_EDX);
			/*
			 * We emulate ARCH_CAPABILITIES in software even
			 * if the host doesn't support it.
			 */
			entry->edx |= F(ARCH_CAPABILITIES);
		} else {
			entry->ebx = 0;
			entry->ecx = 0;
			entry->edx = 0;
		}
		entry->eax = 0;
		break;
	}
	case 9:
		break;
	case 0xa: { /* Architectural Performance Monitoring */
		struct x86_pmu_capability cap;
		union cpuid10_eax eax;
		union cpuid10_edx edx;

		perf_get_x86_pmu_capability(&cap);

		/*
		 * Only support guest architectural pmu on a host
		 * with architectural pmu.
		 */
		if (!cap.version)
			memset(&cap, 0, sizeof(cap));

		eax.split.version_id = min(cap.version, 2);
		eax.split.num_counters = cap.num_counters_gp;
		eax.split.bit_width = cap.bit_width_gp;
		eax.split.mask_length = cap.events_mask_len;

		edx.split.num_counters_fixed = cap.num_counters_fixed;
		edx.split.bit_width_fixed = cap.bit_width_fixed;
		edx.split.reserved = 0;

		entry->eax = eax.full;
		entry->ebx = cap.events_mask;
		entry->ecx = 0;
		entry->edx = edx.full;
		break;
	}
	/* function 0xb has additional index. */
	case 0xb: {
		int i, level_type;

		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		/* read more entries until level_type is zero */
		for (i = 1; ; ++i) {
			if (*nent >= maxnent)
				goto out;

			level_type = entry[i - 1].ecx & 0xff00;
			if (!level_type)
				break;
			do_cpuid_1_ent(&entry[i], function, i);
			entry[i].flags |=
				KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
		}
		break;
	}
	case 0xd: {
		int idx, i;
		u64 supported = kvm_supported_xcr0();

		entry->eax &= supported;
		entry->ebx = xstate_required_size(supported, false);
		entry->ecx = entry->ebx;
		entry->edx &= supported >> 32;
		entry->flags |= KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
		if (!supported)
			break;

		for (idx = 1, i = 1; idx < 64; ++idx) {
			u64 mask = ((u64)1 << idx);
			if (*nent >= maxnent)
				goto out;

			do_cpuid_1_ent(&entry[i], function, idx);
			if (idx == 1) {
				entry[i].eax &= kvm_cpuid_D_1_eax_x86_features;
				cpuid_mask(&entry[i].eax, CPUID_D_1_EAX);
				entry[i].ebx = 0;
				if (entry[i].eax & (F(XSAVES)|F(XSAVEC)))
					entry[i].ebx =
						xstate_required_size(supported,
								     true);
			} else {
				if (entry[i].eax == 0 || !(supported & mask))
					continue;
				if (WARN_ON_ONCE(entry[i].ecx & 1))
					continue;
			}
			entry[i].ecx = 0;
			entry[i].edx = 0;
			entry[i].flags |=
				KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
			++*nent;
			++i;
		}
		break;
	}
	case KVM_CPUID_SIGNATURE: {
		static const char signature[12] = "KVMKVMKVM\0\0";
		const u32 *sigptr = (const u32 *)signature;
		entry->eax = KVM_CPUID_FEATURES;
		entry->ebx = sigptr[0];
		entry->ecx = sigptr[1];
		entry->edx = sigptr[2];
		break;
	}
	case KVM_CPUID_FEATURES:
		entry->eax = (1 << KVM_FEATURE_CLOCKSOURCE) |
			     (1 << KVM_FEATURE_NOP_IO_DELAY) |
			     (1 << KVM_FEATURE_CLOCKSOURCE2) |
			     (1 << KVM_FEATURE_ASYNC_PF) |
			     (1 << KVM_FEATURE_PV_EOI) |
			     (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) |
			     (1 << KVM_FEATURE_PV_UNHALT) |
			     (1 << KVM_FEATURE_PV_TLB_FLUSH) |
			     (1 << KVM_FEATURE_ASYNC_PF_VMEXIT);

		if (sched_info_on())
			entry->eax |= (1 << KVM_FEATURE_STEAL_TIME);

		entry->ebx = 0;
		entry->ecx = 0;
		entry->edx = 0;
		break;
	case 0x80000000:
		entry->eax = min(entry->eax, 0x8000001f);
		break;
	case 0x80000001:
		entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_8000_0001_EDX);
		entry->ecx &= kvm_cpuid_8000_0001_ecx_x86_features;
		cpuid_mask(&entry->ecx, CPUID_8000_0001_ECX);
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
		entry->edx &= (1 << 8);
		/* mask against host */
		entry->edx &= boot_cpu_data.x86_power;
		entry->eax = entry->ebx = entry->ecx = 0;
		break;
	case 0x80000008: {
		unsigned g_phys_as = (entry->eax >> 16) & 0xff;
		unsigned virt_as = max((entry->eax >> 8) & 0xff, 48U);
		unsigned phys_as = entry->eax & 0xff;

		if (!g_phys_as)
			g_phys_as = phys_as;
		entry->eax = g_phys_as | (virt_as << 8);
		entry->edx = 0;
		/*
		 * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
		 * hardware cpuid
		 */
		if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
			entry->ebx |= F(AMD_IBPB);
		if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
			entry->ebx |= F(AMD_IBRS);
		if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
			entry->ebx |= F(VIRT_SSBD);
		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
		/*
		 * The preference is to use SPEC CTRL MSR instead of the
		 * VIRT_SPEC MSR.
		 */
		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
		    !boot_cpu_has(X86_FEATURE_AMD_SSBD))
			entry->ebx |= F(VIRT_SSBD);
		break;
	}
	case 0x80000019:
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
		break;
	case 0x8000001d:
		break;
	/* Add support for Centaur's CPUID instruction */
	case 0xC0000000:
		/* Just support up to 0xC0000004 now */
		entry->eax = min(entry->eax, 0xC0000004);
		break;
	case 0xC0000001:
		entry->edx &= kvm_cpuid_C000_0001_edx_x86_features;
		cpuid_mask(&entry->edx, CPUID_C000_0001_EDX);
		break;
	case 3: /* Processor serial number */
	case 5: /* MONITOR/MWAIT */
	case 0xC0000002:
	case 0xC0000003:
	case 0xC0000004:
	default:
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
		break;
	}

	kvm_x86_ops->set_supported_cpuid(function, entry);

	r = 0;

out:
	put_cpu();

	return r;
}

static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 func,
			u32 idx, int *nent, int maxnent, unsigned int type)
{
	if (type == KVM_GET_EMULATED_CPUID)
		return __do_cpuid_ent_emulated(entry, func, idx, nent, maxnent);

	return __do_cpuid_ent(entry, func, idx, nent, maxnent);
}

#undef F

struct kvm_cpuid_param {
	u32 func;
	u32 idx;
	bool has_leaf_count;
	bool (*qualifier)(const struct kvm_cpuid_param *param);
};

static bool is_centaur_cpu(const struct kvm_cpuid_param *param)
{
	return boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR;
}

static bool sanity_check_entries(struct kvm_cpuid_entry2 __user *entries,
				 __u32 num_entries, unsigned int ioctl_type)
{
	int i;
	__u32 pad[3];

	if (ioctl_type != KVM_GET_EMULATED_CPUID)
		return false;

	/*
	 * We want to make sure that ->padding is being passed clean from
	 * userspace in case we want to use it for something in the future.
	 *
	 * Sadly, this wasn't enforced for KVM_GET_SUPPORTED_CPUID and so we
	 * have to give ourselves satisfied only with the emulated side. /me
	 * sheds a tear.
	 */
	for (i = 0; i < num_entries; i++) {
		if (copy_from_user(pad, entries[i].padding, sizeof(pad)))
			return true;

		if (pad[0] || pad[1] || pad[2])
			return true;
	}
	return false;
}

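/*
 * Handler for KVM_GET_SUPPORTED_CPUID and KVM_GET_EMULATED_CPUID: walk the
 * param[] table below, emitting each base leaf and, where has_leaf_count is
 * set, every leaf up to the limit that the base leaf reports in EAX.
 *
 * Illustrative userspace sketch (not part of KVM; error handling omitted,
 * kvm_fd/vcpu_fd and the entry count of 64 are assumptions for the example):
 *
 *	struct kvm_cpuid2 *cpuid = calloc(1, sizeof(*cpuid) +
 *			64 * sizeof(struct kvm_cpuid_entry2));
 *	cpuid->nent = 64;
 *	ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);	// filled in here
 *	ioctl(vcpu_fd, KVM_SET_CPUID2, cpuid);		// handed to a vCPU
 */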
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
			    struct kvm_cpuid_entry2 __user *entries,
			    unsigned int type)
{
	struct kvm_cpuid_entry2 *cpuid_entries;
	int limit, nent = 0, r = -E2BIG, i;
	u32 func;
	static const struct kvm_cpuid_param param[] = {
		{ .func = 0, .has_leaf_count = true },
		{ .func = 0x80000000, .has_leaf_count = true },
		{ .func = 0xC0000000, .qualifier = is_centaur_cpu, .has_leaf_count = true },
		{ .func = KVM_CPUID_SIGNATURE },
		{ .func = KVM_CPUID_FEATURES },
	};

	if (cpuid->nent < 1)
		goto out;
	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
		cpuid->nent = KVM_MAX_CPUID_ENTRIES;

	if (sanity_check_entries(entries, cpuid->nent, type))
		return -EINVAL;

	r = -ENOMEM;
	cpuid_entries = vzalloc(sizeof(struct kvm_cpuid_entry2) * cpuid->nent);
	if (!cpuid_entries)
		goto out;

	r = 0;
	for (i = 0; i < ARRAY_SIZE(param); i++) {
		const struct kvm_cpuid_param *ent = &param[i];

		if (ent->qualifier && !ent->qualifier(ent))
			continue;

		r = do_cpuid_ent(&cpuid_entries[nent], ent->func, ent->idx,
				&nent, cpuid->nent, type);

		if (r)
			goto out_free;

		if (!ent->has_leaf_count)
			continue;

		limit = cpuid_entries[nent - 1].eax;
		for (func = ent->func + 1; func <= limit && nent < cpuid->nent && r == 0; ++func)
			r = do_cpuid_ent(&cpuid_entries[nent], func, ent->idx,
					&nent, cpuid->nent, type);

		if (r)
			goto out_free;
	}

	r = -EFAULT;
	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		goto out_free;
	cpuid->nent = nent;
	r = 0;

out_free:
	vfree(cpuid_entries);
out:
	return r;
}

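/*
 * CPUID leaf 2 is "stateful" on old CPUs: each execution can return a
 * different slice of the descriptor data, so KVM keeps one entry per slice
 * and rotates the STATE_READ_NEXT flag to the next one on every lookup.
 */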
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
	struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
	struct kvm_cpuid_entry2 *ej;
	int j = i;
	int nent = vcpu->arch.cpuid_nent;

	e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
	/* when no next entry is found, the current entry[i] is reselected */
	do {
		j = (j + 1) % nent;
		ej = &vcpu->arch.cpuid_entries[j];
	} while (ej->function != e->function);

	ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;

	return j;
}

/* find an entry with matching function, matching index (if needed), and that
 * should be read next (if it's stateful) */
static int is_matching_cpuid_entry(struct kvm_cpuid_entry2 *e,
				   u32 function, u32 index)
{
	if (e->function != function)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) && e->index != index)
		return 0;
	if ((e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC) &&
	    !(e->flags & KVM_CPUID_FLAG_STATE_READ_NEXT))
		return 0;
	return 1;
}

struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
					      u32 function, u32 index)
{
	int i;
	struct kvm_cpuid_entry2 *best = NULL;

	for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
		struct kvm_cpuid_entry2 *e;

		e = &vcpu->arch.cpuid_entries[i];
		if (is_matching_cpuid_entry(e, function, index)) {
			if (e->flags & KVM_CPUID_FLAG_STATEFUL_FUNC)
				move_to_next_stateful_cpuid_entry(vcpu, i);
			best = e;
			break;
		}
	}
	return best;
}
EXPORT_SYMBOL_GPL(kvm_find_cpuid_entry);

/*
 * If no match is found, check whether we exceed the vCPU's limit
 * and return the content of the highest valid _standard_ leaf instead.
 * This is to satisfy the CPUID specification.
 */
static struct kvm_cpuid_entry2* check_cpuid_limit(struct kvm_vcpu *vcpu,
						  u32 function, u32 index)
{
	struct kvm_cpuid_entry2 *maxlevel;

	maxlevel = kvm_find_cpuid_entry(vcpu, function & 0x80000000, 0);
	if (!maxlevel || maxlevel->eax >= function)
		return NULL;
	if (function & 0x80000000) {
		maxlevel = kvm_find_cpuid_entry(vcpu, 0, 0);
		if (!maxlevel)
			return NULL;
	}
	return kvm_find_cpuid_entry(vcpu, maxlevel->eax, index);
}

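/*
 * Resolve one guest CPUID query: look up the vCPU's entry for
 * (function, index), optionally falling back to the out-of-range behaviour
 * in check_cpuid_limit() when @check_limit is set, write the four registers
 * back through the pointers, and return whether a matching entry was found.
 */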
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
	       u32 *ecx, u32 *edx, bool check_limit)
{
	u32 function = *eax, index = *ecx;
	struct kvm_cpuid_entry2 *best;
	bool entry_found = true;

	best = kvm_find_cpuid_entry(vcpu, function, index);

	if (!best) {
		entry_found = false;
		if (!check_limit)
			goto out;

		best = check_cpuid_limit(vcpu, function, index);
	}

out:
	if (best) {
		*eax = best->eax;
		*ebx = best->ebx;
		*ecx = best->ecx;
		*edx = best->edx;
	} else
		*eax = *ebx = *ecx = *edx = 0;
	trace_kvm_cpuid(function, *eax, *ebx, *ecx, *edx, entry_found);
	return entry_found;
}
EXPORT_SYMBOL_GPL(kvm_cpuid);

int kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
{
	u32 eax, ebx, ecx, edx;

	if (cpuid_fault_enabled(vcpu) && !kvm_require_cpl(vcpu, 0))
		return 1;

	eax = kvm_register_read(vcpu, VCPU_REGS_RAX);
	ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
	kvm_cpuid(vcpu, &eax, &ebx, &ecx, &edx, true);
	kvm_register_write(vcpu, VCPU_REGS_RAX, eax);
	kvm_register_write(vcpu, VCPU_REGS_RBX, ebx);
	kvm_register_write(vcpu, VCPU_REGS_RCX, ecx);
	kvm_register_write(vcpu, VCPU_REGS_RDX, edx);
	return kvm_skip_emulated_instruction(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);