2 * i386 CPUID helper functions
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
27 #include "qemu-option.h"
28 #include "qemu-config.h"
30 #include "qapi/qapi-visit-core.h"
34 /* feature flags taken from "Intel Processor Identification and the CPUID
35 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
36 * between feature naming conventions, aliases may be added.
/* CPUID leaf 0x00000001, EDX register: array index == feature bit number;
 * NULL marks a reserved bit with no user-settable name. */
38 static const char *feature_name
[] = {
39 "fpu", "vme", "de", "pse",
40 "tsc", "msr", "pae", "mce",
41 "cx8", "apic", NULL
, "sep",
42 "mtrr", "pge", "mca", "cmov",
43 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
44 NULL
, "ds" /* Intel dts */, "acpi", "mmx",
45 "fxsr", "sse", "sse2", "ss",
46 "ht" /* Intel htt */, "tm", "ia64", "pbe",
/* CPUID leaf 0x00000001, ECX register.  "a|b" entries are accepted aliases
 * for the same bit (altcmp() tries each '|'-delimited alternative). */
48 static const char *ext_feature_name
[] = {
49 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
50 "ds_cpl", "vmx", "smx", "est",
51 "tm2", "ssse3", "cid", NULL
,
52 "fma", "cx16", "xtpr", "pdcm",
53 NULL
, NULL
, "dca", "sse4.1|sse4_1",
54 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
55 "tsc-deadline", "aes", "xsave", "osxsave",
56 "avx", NULL
, NULL
, "hypervisor",
/* CPUID leaf 0x80000001, EDX register.  The low bits intentionally repeat
 * the leaf-1 EDX names: AMD mirrors those features in this leaf. */
58 static const char *ext2_feature_name
[] = {
59 "fpu", "vme", "de", "pse",
60 "tsc", "msr", "pae", "mce",
61 "cx8" /* AMD CMPXCHG8B */, "apic", NULL
, "syscall",
62 "mtrr", "pge", "mca", "cmov",
63 "pat", "pse36", NULL
, NULL
 /* Linux mp */,
64 "nx|xd", NULL
, "mmxext", "mmx",
65 "fxsr", "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
66 NULL
, "lm|i64", "3dnowext", "3dnow",
/* CPUID leaf 0x80000001, ECX register (AMD extended feature flags). */
68 static const char *ext3_feature_name
[] = {
69 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
70 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
71 "3dnowprefetch", "osvw", "ibs", "xop",
72 "skinit", "wdt", NULL
, NULL
,
73 "fma4", NULL
, "cvt16", "nodeid_msr",
74 NULL
, NULL
, NULL
, NULL
,
75 NULL
, NULL
, NULL
, NULL
,
76 NULL
, NULL
, NULL
, NULL
,
/* KVM paravirtual feature leaf (CPUID 0x40000001, EAX).  Bits 0 and 3 are
 * both deliberately named "kvmclock": they are the old and the new
 * clocksource MSR interfaces, and the name enables/disables both. */
79 static const char *kvm_feature_name
[] = {
80 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock", "kvm_asyncpf", NULL
, NULL
, NULL
,
81 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
82 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
83 NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
, NULL
,
/* SVM (AMD virtualization) feature leaf: CPUID 0x8000000A, EDX register. */
86 static const char *svm_feature_name
[] = {
87 "npt", "lbrv", "svm_lock", "nrip_save",
88 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
89 NULL
, NULL
, "pause_filter", NULL
,
90 "pfthreshold", NULL
, NULL
, NULL
,
91 NULL
, NULL
, NULL
, NULL
,
92 NULL
, NULL
, NULL
, NULL
,
93 NULL
, NULL
, NULL
, NULL
,
94 NULL
, NULL
, NULL
, NULL
,
97 /* collects per-function cpuid data
99 typedef struct model_features_t
{
100 uint32_t *guest_feat
;
103 const char **flag_names
;
108 int enforce_cpuid
= 0;
110 void host_cpuid(uint32_t function
, uint32_t count
,
111 uint32_t *eax
, uint32_t *ebx
, uint32_t *ecx
, uint32_t *edx
)
113 #if defined(CONFIG_KVM)
118 : "=a"(vec
[0]), "=b"(vec
[1]),
119 "=c"(vec
[2]), "=d"(vec
[3])
120 : "0"(function
), "c"(count
) : "cc");
122 asm volatile("pusha \n\t"
124 "mov %%eax, 0(%2) \n\t"
125 "mov %%ebx, 4(%2) \n\t"
126 "mov %%ecx, 8(%2) \n\t"
127 "mov %%edx, 12(%2) \n\t"
129 : : "a"(function
), "c"(count
), "S"(vec
)
/* Character-class test for the feature-string parser: true for any non-NUL
 * byte outside printable ASCII, i.e. (c) <= ' ' (controls and space) or
 * (c) > '~'.  NUL is deliberately excluded so the same test can stop a
 * scan at the end-of-string terminator. */
144 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
146 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
147 * a substring. ex if !NULL points to the first char after a substring,
148 * otherwise the string is assumed to be sized by a terminating nul.
149 * Return lexical ordering of *s1:*s2.
151 static int sstrcmp(const char *s1
, const char *e1
, const char *s2
,
155 if (!*s1
|| !*s2
|| *s1
!= *s2
)
158 if (s1
== e1
&& s2
== e2
)
167 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
168 * '|' delimited (possibly empty) strings in which case search for a match
169 * within the alternatives proceeds left to right. Return 0 for success,
170 * non-zero otherwise.
172 static int altcmp(const char *s
, const char *e
, const char *altstr
)
176 for (q
= p
= altstr
; ; ) {
177 while (*p
&& *p
!= '|')
179 if ((q
== p
&& !*s
) || (q
!= p
&& !sstrcmp(s
, e
, q
, p
)))
188 /* search featureset for flag *[s..e), if found set corresponding bit in
189 * *pval and return true, otherwise return false
191 static bool lookup_feature(uint32_t *pval
, const char *s
, const char *e
,
192 const char **featureset
)
198 for (mask
= 1, ppc
= featureset
; mask
; mask
<<= 1, ++ppc
) {
199 if (*ppc
&& !altcmp(s
, e
, *ppc
)) {
207 static void add_flagname_to_bitmaps(const char *flagname
, uint32_t *features
,
208 uint32_t *ext_features
,
209 uint32_t *ext2_features
,
210 uint32_t *ext3_features
,
211 uint32_t *kvm_features
,
212 uint32_t *svm_features
)
214 if (!lookup_feature(features
, flagname
, NULL
, feature_name
) &&
215 !lookup_feature(ext_features
, flagname
, NULL
, ext_feature_name
) &&
216 !lookup_feature(ext2_features
, flagname
, NULL
, ext2_feature_name
) &&
217 !lookup_feature(ext3_features
, flagname
, NULL
, ext3_feature_name
) &&
218 !lookup_feature(kvm_features
, flagname
, NULL
, kvm_feature_name
) &&
219 !lookup_feature(svm_features
, flagname
, NULL
, svm_feature_name
))
220 fprintf(stderr
, "CPU feature %s not found\n", flagname
);
223 typedef struct x86_def_t
{
224 struct x86_def_t
*next
;
227 uint32_t vendor1
, vendor2
, vendor3
;
232 uint32_t features
, ext_features
, ext2_features
, ext3_features
;
233 uint32_t kvm_features
, svm_features
;
238 /* Store the results of Centaur's CPUID instructions */
239 uint32_t ext4_features
;
/* Canned CPUID.1:EDX feature sets used by the built-in CPU models below. */
243 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
244 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
245 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
246 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
247 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
248 CPUID_PSE36 | CPUID_FXSR)
249 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
250 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
251 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
252 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
253 CPUID_PAE | CPUID_SEP | CPUID_APIC)
/* Mask of leaf-1 EDX bits that are also reported in leaf 0x80000001 EDX
 * (see ext2_feature_name, whose low entries repeat the leaf-1 names). */
254 #define EXT2_FEATURE_MASK 0x0183F3FF
/* Feature bits TCG (software emulation) can actually provide; the cpuid
 * feature words are ANDed with these masks when KVM is not enabled
 * (see cpu_x86_register). */
256 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
257 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
258 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
259 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
260 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
261 /* partly implemented:
262 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64)
263 CPUID_PSE36 (needed for Solaris) */
265 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
266 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | \
267 CPUID_EXT_CX16 | CPUID_EXT_POPCNT | \
268 CPUID_EXT_HYPERVISOR)
270 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_EST,
271 CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_XSAVE */
272 #define TCG_EXT2_FEATURES ((TCG_FEATURES & EXT2_FEATURE_MASK) | \
273 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
274 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT)
276 CPUID_EXT2_PDPE1GB */
277 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
278 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
/* TCG provides no SVM (nested virtualization) features at all. */
279 #define TCG_SVM_FEATURES 0
281 /* maintains list of cpu model definitions
283 static x86_def_t
*x86_defs
= {NULL
};
285 /* built-in cpu model definitions (deprecated)
287 static x86_def_t builtin_x86_defs
[] = {
291 .vendor1
= CPUID_VENDOR_AMD_1
,
292 .vendor2
= CPUID_VENDOR_AMD_2
,
293 .vendor3
= CPUID_VENDOR_AMD_3
,
297 .features
= PPRO_FEATURES
|
298 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
|
300 .ext_features
= CPUID_EXT_SSE3
| CPUID_EXT_CX16
| CPUID_EXT_POPCNT
,
301 .ext2_features
= (PPRO_FEATURES
& EXT2_FEATURE_MASK
) |
302 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
303 .ext3_features
= CPUID_EXT3_LAHF_LM
| CPUID_EXT3_SVM
|
304 CPUID_EXT3_ABM
| CPUID_EXT3_SSE4A
,
305 .xlevel
= 0x8000000A,
306 .model_id
= "QEMU Virtual CPU version " QEMU_VERSION
,
311 .vendor1
= CPUID_VENDOR_AMD_1
,
312 .vendor2
= CPUID_VENDOR_AMD_2
,
313 .vendor3
= CPUID_VENDOR_AMD_3
,
317 .features
= PPRO_FEATURES
|
318 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
|
319 CPUID_PSE36
| CPUID_VME
| CPUID_HT
,
320 .ext_features
= CPUID_EXT_SSE3
| CPUID_EXT_MONITOR
| CPUID_EXT_CX16
|
322 .ext2_features
= (PPRO_FEATURES
& EXT2_FEATURE_MASK
) |
323 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
|
324 CPUID_EXT2_3DNOW
| CPUID_EXT2_3DNOWEXT
| CPUID_EXT2_MMXEXT
|
325 CPUID_EXT2_FFXSR
| CPUID_EXT2_PDPE1GB
| CPUID_EXT2_RDTSCP
,
326 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
328 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
329 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
330 .ext3_features
= CPUID_EXT3_LAHF_LM
| CPUID_EXT3_SVM
|
331 CPUID_EXT3_ABM
| CPUID_EXT3_SSE4A
,
332 .svm_features
= CPUID_SVM_NPT
| CPUID_SVM_LBRV
,
333 .xlevel
= 0x8000001A,
334 .model_id
= "AMD Phenom(tm) 9550 Quad-Core Processor"
342 .features
= PPRO_FEATURES
|
343 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
|
344 CPUID_PSE36
| CPUID_VME
| CPUID_DTS
| CPUID_ACPI
| CPUID_SS
|
345 CPUID_HT
| CPUID_TM
| CPUID_PBE
,
346 .ext_features
= CPUID_EXT_SSE3
| CPUID_EXT_MONITOR
| CPUID_EXT_SSSE3
|
347 CPUID_EXT_DTES64
| CPUID_EXT_DSCPL
| CPUID_EXT_VMX
| CPUID_EXT_EST
|
348 CPUID_EXT_TM2
| CPUID_EXT_CX16
| CPUID_EXT_XTPR
| CPUID_EXT_PDCM
,
349 .ext2_features
= CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
350 .ext3_features
= CPUID_EXT3_LAHF_LM
,
351 .xlevel
= 0x80000008,
352 .model_id
= "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
357 .vendor1
= CPUID_VENDOR_INTEL_1
,
358 .vendor2
= CPUID_VENDOR_INTEL_2
,
359 .vendor3
= CPUID_VENDOR_INTEL_3
,
363 /* Missing: CPUID_VME, CPUID_HT */
364 .features
= PPRO_FEATURES
|
365 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
|
367 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
368 .ext_features
= CPUID_EXT_SSE3
| CPUID_EXT_CX16
,
369 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
370 .ext2_features
= (PPRO_FEATURES
& EXT2_FEATURE_MASK
) |
371 CPUID_EXT2_LM
| CPUID_EXT2_SYSCALL
| CPUID_EXT2_NX
,
372 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
373 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
374 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
375 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
377 .xlevel
= 0x80000008,
378 .model_id
= "Common KVM processor"
386 .features
= PPRO_FEATURES
,
387 .ext_features
= CPUID_EXT_SSE3
| CPUID_EXT_POPCNT
,
388 .xlevel
= 0x80000004,
389 .model_id
= "QEMU Virtual CPU version " QEMU_VERSION
,
397 .features
= PPRO_FEATURES
|
398 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
| CPUID_PSE36
,
399 .ext_features
= CPUID_EXT_SSE3
,
400 .ext2_features
= PPRO_FEATURES
& EXT2_FEATURE_MASK
,
402 .xlevel
= 0x80000008,
403 .model_id
= "Common 32-bit KVM processor"
411 .features
= PPRO_FEATURES
| CPUID_VME
|
412 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
| CPUID_DTS
| CPUID_ACPI
|
413 CPUID_SS
| CPUID_HT
| CPUID_TM
| CPUID_PBE
,
414 .ext_features
= CPUID_EXT_SSE3
| CPUID_EXT_MONITOR
| CPUID_EXT_VMX
|
415 CPUID_EXT_EST
| CPUID_EXT_TM2
| CPUID_EXT_XTPR
| CPUID_EXT_PDCM
,
416 .ext2_features
= CPUID_EXT2_NX
,
417 .xlevel
= 0x80000008,
418 .model_id
= "Genuine Intel(R) CPU T2600 @ 2.16GHz",
426 .features
= I486_FEATURES
,
435 .features
= PENTIUM_FEATURES
,
444 .features
= PENTIUM2_FEATURES
,
453 .features
= PENTIUM3_FEATURES
,
459 .vendor1
= CPUID_VENDOR_AMD_1
,
460 .vendor2
= CPUID_VENDOR_AMD_2
,
461 .vendor3
= CPUID_VENDOR_AMD_3
,
465 .features
= PPRO_FEATURES
| CPUID_PSE36
| CPUID_VME
| CPUID_MTRR
| CPUID_MCA
,
466 .ext2_features
= (PPRO_FEATURES
& EXT2_FEATURE_MASK
) | CPUID_EXT2_MMXEXT
| CPUID_EXT2_3DNOW
| CPUID_EXT2_3DNOWEXT
,
467 .xlevel
= 0x80000008,
468 /* XXX: put another string ? */
469 .model_id
= "QEMU Virtual CPU version " QEMU_VERSION
,
473 /* original is on level 10 */
478 .features
= PPRO_FEATURES
|
479 CPUID_MTRR
| CPUID_CLFLUSH
| CPUID_MCA
| CPUID_VME
| CPUID_DTS
|
480 CPUID_ACPI
| CPUID_SS
| CPUID_HT
| CPUID_TM
| CPUID_PBE
,
481 /* Some CPUs got no CPUID_SEP */
482 .ext_features
= CPUID_EXT_SSE3
| CPUID_EXT_MONITOR
| CPUID_EXT_SSSE3
|
483 CPUID_EXT_DSCPL
| CPUID_EXT_EST
| CPUID_EXT_TM2
| CPUID_EXT_XTPR
,
484 .ext2_features
= (PPRO_FEATURES
& EXT2_FEATURE_MASK
) | CPUID_EXT2_NX
,
485 .ext3_features
= CPUID_EXT3_LAHF_LM
,
486 .xlevel
= 0x8000000A,
487 .model_id
= "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
491 static int cpu_x86_fill_model_id(char *str
)
493 uint32_t eax
= 0, ebx
= 0, ecx
= 0, edx
= 0;
496 for (i
= 0; i
< 3; i
++) {
497 host_cpuid(0x80000002 + i
, 0, &eax
, &ebx
, &ecx
, &edx
);
498 memcpy(str
+ i
* 16 + 0, &eax
, 4);
499 memcpy(str
+ i
* 16 + 4, &ebx
, 4);
500 memcpy(str
+ i
* 16 + 8, &ecx
, 4);
501 memcpy(str
+ i
* 16 + 12, &edx
, 4);
506 static int cpu_x86_fill_host(x86_def_t
*x86_cpu_def
)
508 uint32_t eax
= 0, ebx
= 0, ecx
= 0, edx
= 0;
510 x86_cpu_def
->name
= "host";
511 host_cpuid(0x0, 0, &eax
, &ebx
, &ecx
, &edx
);
512 x86_cpu_def
->level
= eax
;
513 x86_cpu_def
->vendor1
= ebx
;
514 x86_cpu_def
->vendor2
= edx
;
515 x86_cpu_def
->vendor3
= ecx
;
517 host_cpuid(0x1, 0, &eax
, &ebx
, &ecx
, &edx
);
518 x86_cpu_def
->family
= ((eax
>> 8) & 0x0F) + ((eax
>> 20) & 0xFF);
519 x86_cpu_def
->model
= ((eax
>> 4) & 0x0F) | ((eax
& 0xF0000) >> 12);
520 x86_cpu_def
->stepping
= eax
& 0x0F;
521 x86_cpu_def
->ext_features
= ecx
;
522 x86_cpu_def
->features
= edx
;
524 host_cpuid(0x80000000, 0, &eax
, &ebx
, &ecx
, &edx
);
525 x86_cpu_def
->xlevel
= eax
;
527 host_cpuid(0x80000001, 0, &eax
, &ebx
, &ecx
, &edx
);
528 x86_cpu_def
->ext2_features
= edx
;
529 x86_cpu_def
->ext3_features
= ecx
;
530 cpu_x86_fill_model_id(x86_cpu_def
->model_id
);
531 x86_cpu_def
->vendor_override
= 0;
533 /* Call Centaur's CPUID instruction. */
534 if (x86_cpu_def
->vendor1
== CPUID_VENDOR_VIA_1
&&
535 x86_cpu_def
->vendor2
== CPUID_VENDOR_VIA_2
&&
536 x86_cpu_def
->vendor3
== CPUID_VENDOR_VIA_3
) {
537 host_cpuid(0xC0000000, 0, &eax
, &ebx
, &ecx
, &edx
);
538 if (eax
>= 0xC0000001) {
539 /* Support VIA max extended level */
540 x86_cpu_def
->xlevel2
= eax
;
541 host_cpuid(0xC0000001, 0, &eax
, &ebx
, &ecx
, &edx
);
542 x86_cpu_def
->ext4_features
= edx
;
547 * Every SVM feature requires emulation support in KVM - so we can't just
548 * read the host features here. KVM might even support SVM features not
549 * available on the host hardware. Just set all bits and mask out the
550 * unsupported ones later.
552 x86_cpu_def
->svm_features
= -1;
557 static int unavailable_host_feature(struct model_features_t
*f
, uint32_t mask
)
561 for (i
= 0; i
< 32; ++i
)
563 fprintf(stderr
, "warning: host cpuid %04x_%04x lacks requested"
564 " flag '%s' [0x%08x]\n",
565 f
->cpuid
>> 16, f
->cpuid
& 0xffff,
566 f
->flag_names
[i
] ? f
->flag_names
[i
] : "[reserved]", mask
);
572 /* best effort attempt to inform user requested cpu flags aren't making
573 * their way to the guest. Note: ft[].check_feat ideally should be
574 * specified via a guest_def field to suppress report of extraneous flags.
576 static int check_features_against_host(x86_def_t
*guest_def
)
581 struct model_features_t ft
[] = {
582 {&guest_def
->features
, &host_def
.features
,
583 ~0, feature_name
, 0x00000000},
584 {&guest_def
->ext_features
, &host_def
.ext_features
,
585 ~CPUID_EXT_HYPERVISOR
, ext_feature_name
, 0x00000001},
586 {&guest_def
->ext2_features
, &host_def
.ext2_features
,
587 ~PPRO_FEATURES
, ext2_feature_name
, 0x80000000},
588 {&guest_def
->ext3_features
, &host_def
.ext3_features
,
589 ~CPUID_EXT3_SVM
, ext3_feature_name
, 0x80000001}};
591 cpu_x86_fill_host(&host_def
);
592 for (rv
= 0, i
= 0; i
< ARRAY_SIZE(ft
); ++i
)
593 for (mask
= 1; mask
; mask
<<= 1)
594 if (ft
[i
].check_feat
& mask
&& *ft
[i
].guest_feat
& mask
&&
595 !(*ft
[i
].host_feat
& mask
)) {
596 unavailable_host_feature(&ft
[i
], mask
);
602 static void x86_cpuid_version_set_family(Object
*obj
, Visitor
*v
, void *opaque
,
603 const char *name
, Error
**errp
)
605 X86CPU
*cpu
= X86_CPU(obj
);
606 CPUX86State
*env
= &cpu
->env
;
607 const int64_t min
= 0;
608 const int64_t max
= 0xff + 0xf;
611 visit_type_int(v
, &value
, name
, errp
);
612 if (error_is_set(errp
)) {
615 if (value
< min
|| value
> max
) {
616 error_set(errp
, QERR_PROPERTY_VALUE_OUT_OF_RANGE
, "",
617 name
? name
: "null", value
, min
, max
);
621 env
->cpuid_version
&= ~0xff00f00;
623 env
->cpuid_version
|= 0xf00 | ((value
- 0x0f) << 20);
625 env
->cpuid_version
|= value
<< 8;
629 static void x86_cpuid_version_set_model(Object
*obj
, Visitor
*v
, void *opaque
,
630 const char *name
, Error
**errp
)
632 X86CPU
*cpu
= X86_CPU(obj
);
633 CPUX86State
*env
= &cpu
->env
;
634 const int64_t min
= 0;
635 const int64_t max
= 0xff;
638 visit_type_int(v
, &value
, name
, errp
);
639 if (error_is_set(errp
)) {
642 if (value
< min
|| value
> max
) {
643 error_set(errp
, QERR_PROPERTY_VALUE_OUT_OF_RANGE
, "",
644 name
? name
: "null", value
, min
, max
);
648 env
->cpuid_version
&= ~0xf00f0;
649 env
->cpuid_version
|= ((value
& 0xf) << 4) | ((value
>> 4) << 16);
652 static void x86_cpuid_version_set_stepping(Object
*obj
, Visitor
*v
,
653 void *opaque
, const char *name
,
656 X86CPU
*cpu
= X86_CPU(obj
);
657 CPUX86State
*env
= &cpu
->env
;
658 const int64_t min
= 0;
659 const int64_t max
= 0xf;
662 visit_type_int(v
, &value
, name
, errp
);
663 if (error_is_set(errp
)) {
666 if (value
< min
|| value
> max
) {
667 error_set(errp
, QERR_PROPERTY_VALUE_OUT_OF_RANGE
, "",
668 name
? name
: "null", value
, min
, max
);
672 env
->cpuid_version
&= ~0xf;
673 env
->cpuid_version
|= value
& 0xf;
676 static void x86_cpuid_set_model_id(Object
*obj
, const char *model_id
,
679 X86CPU
*cpu
= X86_CPU(obj
);
680 CPUX86State
*env
= &cpu
->env
;
683 if (model_id
== NULL
) {
686 len
= strlen(model_id
);
687 memset(env
->cpuid_model
, 0, 48);
688 for (i
= 0; i
< 48; i
++) {
692 c
= (uint8_t)model_id
[i
];
694 env
->cpuid_model
[i
>> 2] |= c
<< (8 * (i
& 3));
698 static int cpu_x86_find_by_name(x86_def_t
*x86_cpu_def
, const char *cpu_model
)
703 char *s
= g_strdup(cpu_model
);
704 char *featurestr
, *name
= strtok(s
, ",");
705 /* Features to be added*/
706 uint32_t plus_features
= 0, plus_ext_features
= 0;
707 uint32_t plus_ext2_features
= 0, plus_ext3_features
= 0;
708 uint32_t plus_kvm_features
= 0, plus_svm_features
= 0;
709 /* Features to be removed */
710 uint32_t minus_features
= 0, minus_ext_features
= 0;
711 uint32_t minus_ext2_features
= 0, minus_ext3_features
= 0;
712 uint32_t minus_kvm_features
= 0, minus_svm_features
= 0;
715 for (def
= x86_defs
; def
; def
= def
->next
)
716 if (name
&& !strcmp(name
, def
->name
))
718 if (kvm_enabled() && name
&& strcmp(name
, "host") == 0) {
719 cpu_x86_fill_host(x86_cpu_def
);
723 memcpy(x86_cpu_def
, def
, sizeof(*def
));
726 plus_kvm_features
= ~0; /* not supported bits will be filtered out later */
728 add_flagname_to_bitmaps("hypervisor", &plus_features
,
729 &plus_ext_features
, &plus_ext2_features
, &plus_ext3_features
,
730 &plus_kvm_features
, &plus_svm_features
);
732 featurestr
= strtok(NULL
, ",");
736 if (featurestr
[0] == '+') {
737 add_flagname_to_bitmaps(featurestr
+ 1, &plus_features
,
738 &plus_ext_features
, &plus_ext2_features
,
739 &plus_ext3_features
, &plus_kvm_features
,
741 } else if (featurestr
[0] == '-') {
742 add_flagname_to_bitmaps(featurestr
+ 1, &minus_features
,
743 &minus_ext_features
, &minus_ext2_features
,
744 &minus_ext3_features
, &minus_kvm_features
,
745 &minus_svm_features
);
746 } else if ((val
= strchr(featurestr
, '='))) {
748 if (!strcmp(featurestr
, "family")) {
750 numvalue
= strtoul(val
, &err
, 0);
751 if (!*val
|| *err
|| numvalue
> 0xff + 0xf) {
752 fprintf(stderr
, "bad numerical value %s\n", val
);
755 x86_cpu_def
->family
= numvalue
;
756 } else if (!strcmp(featurestr
, "model")) {
758 numvalue
= strtoul(val
, &err
, 0);
759 if (!*val
|| *err
|| numvalue
> 0xff) {
760 fprintf(stderr
, "bad numerical value %s\n", val
);
763 x86_cpu_def
->model
= numvalue
;
764 } else if (!strcmp(featurestr
, "stepping")) {
766 numvalue
= strtoul(val
, &err
, 0);
767 if (!*val
|| *err
|| numvalue
> 0xf) {
768 fprintf(stderr
, "bad numerical value %s\n", val
);
771 x86_cpu_def
->stepping
= numvalue
;
772 } else if (!strcmp(featurestr
, "level")) {
774 numvalue
= strtoul(val
, &err
, 0);
776 fprintf(stderr
, "bad numerical value %s\n", val
);
779 x86_cpu_def
->level
= numvalue
;
780 } else if (!strcmp(featurestr
, "xlevel")) {
782 numvalue
= strtoul(val
, &err
, 0);
784 fprintf(stderr
, "bad numerical value %s\n", val
);
787 if (numvalue
< 0x80000000) {
788 numvalue
+= 0x80000000;
790 x86_cpu_def
->xlevel
= numvalue
;
791 } else if (!strcmp(featurestr
, "vendor")) {
792 if (strlen(val
) != 12) {
793 fprintf(stderr
, "vendor string must be 12 chars long\n");
796 x86_cpu_def
->vendor1
= 0;
797 x86_cpu_def
->vendor2
= 0;
798 x86_cpu_def
->vendor3
= 0;
799 for(i
= 0; i
< 4; i
++) {
800 x86_cpu_def
->vendor1
|= ((uint8_t)val
[i
]) << (8 * i
);
801 x86_cpu_def
->vendor2
|= ((uint8_t)val
[i
+ 4]) << (8 * i
);
802 x86_cpu_def
->vendor3
|= ((uint8_t)val
[i
+ 8]) << (8 * i
);
804 x86_cpu_def
->vendor_override
= 1;
805 } else if (!strcmp(featurestr
, "model_id")) {
806 pstrcpy(x86_cpu_def
->model_id
, sizeof(x86_cpu_def
->model_id
),
808 } else if (!strcmp(featurestr
, "tsc_freq")) {
812 tsc_freq
= strtosz_suffix_unit(val
, &err
,
813 STRTOSZ_DEFSUFFIX_B
, 1000);
814 if (tsc_freq
< 0 || *err
) {
815 fprintf(stderr
, "bad numerical value %s\n", val
);
818 x86_cpu_def
->tsc_khz
= tsc_freq
/ 1000;
819 } else if (!strcmp(featurestr
, "hv_spinlocks")) {
821 numvalue
= strtoul(val
, &err
, 0);
823 fprintf(stderr
, "bad numerical value %s\n", val
);
826 hyperv_set_spinlock_retries(numvalue
);
828 fprintf(stderr
, "unrecognized feature %s\n", featurestr
);
831 } else if (!strcmp(featurestr
, "check")) {
833 } else if (!strcmp(featurestr
, "enforce")) {
834 check_cpuid
= enforce_cpuid
= 1;
835 } else if (!strcmp(featurestr
, "hv_relaxed")) {
836 hyperv_enable_relaxed_timing(true);
837 } else if (!strcmp(featurestr
, "hv_vapic")) {
838 hyperv_enable_vapic_recommended(true);
840 fprintf(stderr
, "feature string `%s' not in format (+feature|-feature|feature=xyz)\n", featurestr
);
843 featurestr
= strtok(NULL
, ",");
845 x86_cpu_def
->features
|= plus_features
;
846 x86_cpu_def
->ext_features
|= plus_ext_features
;
847 x86_cpu_def
->ext2_features
|= plus_ext2_features
;
848 x86_cpu_def
->ext3_features
|= plus_ext3_features
;
849 x86_cpu_def
->kvm_features
|= plus_kvm_features
;
850 x86_cpu_def
->svm_features
|= plus_svm_features
;
851 x86_cpu_def
->features
&= ~minus_features
;
852 x86_cpu_def
->ext_features
&= ~minus_ext_features
;
853 x86_cpu_def
->ext2_features
&= ~minus_ext2_features
;
854 x86_cpu_def
->ext3_features
&= ~minus_ext3_features
;
855 x86_cpu_def
->kvm_features
&= ~minus_kvm_features
;
856 x86_cpu_def
->svm_features
&= ~minus_svm_features
;
858 if (check_features_against_host(x86_cpu_def
) && enforce_cpuid
)
869 /* generate a composite string into buf of all cpuid names in featureset
870 * selected by fbits. indicate truncation at bufsize in the event of overflow.
871 * if flags, suppress names undefined in featureset.
873 static void listflags(char *buf
, int bufsize
, uint32_t fbits
,
874 const char **featureset
, uint32_t flags
)
876 const char **p
= &featureset
[31];
880 b
= 4 <= bufsize
? buf
+ (bufsize
-= 3) - 1 : NULL
;
882 for (q
= buf
, bit
= 31; fbits
&& bufsize
; --p
, fbits
&= ~(1 << bit
), --bit
)
883 if (fbits
& 1 << bit
&& (*p
|| !flags
)) {
885 nc
= snprintf(q
, bufsize
, "%s%s", q
== buf
? "" : " ", *p
);
887 nc
= snprintf(q
, bufsize
, "%s[%d]", q
== buf
? "" : " ", bit
);
890 memcpy(b
, "...", sizeof("..."));
899 /* generate CPU information:
900 * -? list model names
901 * -?model list model names/IDs
902 * -?dump output all model (x86_def_t) data
903 * -?cpuid list all recognized cpuid flag names
905 void x86_cpu_list(FILE *f
, fprintf_function cpu_fprintf
, const char *optarg
)
907 unsigned char model
= !strcmp("?model", optarg
);
908 unsigned char dump
= !strcmp("?dump", optarg
);
909 unsigned char cpuid
= !strcmp("?cpuid", optarg
);
914 (*cpu_fprintf
)(f
, "Recognized CPUID flags:\n");
915 listflags(buf
, sizeof (buf
), (uint32_t)~0, feature_name
, 1);
916 (*cpu_fprintf
)(f
, " f_edx: %s\n", buf
);
917 listflags(buf
, sizeof (buf
), (uint32_t)~0, ext_feature_name
, 1);
918 (*cpu_fprintf
)(f
, " f_ecx: %s\n", buf
);
919 listflags(buf
, sizeof (buf
), (uint32_t)~0, ext2_feature_name
, 1);
920 (*cpu_fprintf
)(f
, " extf_edx: %s\n", buf
);
921 listflags(buf
, sizeof (buf
), (uint32_t)~0, ext3_feature_name
, 1);
922 (*cpu_fprintf
)(f
, " extf_ecx: %s\n", buf
);
925 for (def
= x86_defs
; def
; def
= def
->next
) {
926 snprintf(buf
, sizeof (buf
), def
->flags
? "[%s]": "%s", def
->name
);
928 (*cpu_fprintf
)(f
, "x86 %16s %-48s\n", buf
, def
->model_id
);
930 (*cpu_fprintf
)(f
, "x86 %16s\n", buf
);
933 memcpy(buf
, &def
->vendor1
, sizeof (def
->vendor1
));
934 memcpy(buf
+ 4, &def
->vendor2
, sizeof (def
->vendor2
));
935 memcpy(buf
+ 8, &def
->vendor3
, sizeof (def
->vendor3
));
938 " family %d model %d stepping %d level %d xlevel 0x%x"
940 def
->family
, def
->model
, def
->stepping
, def
->level
,
942 listflags(buf
, sizeof (buf
), def
->features
, feature_name
, 0);
943 (*cpu_fprintf
)(f
, " feature_edx %08x (%s)\n", def
->features
,
945 listflags(buf
, sizeof (buf
), def
->ext_features
, ext_feature_name
,
947 (*cpu_fprintf
)(f
, " feature_ecx %08x (%s)\n", def
->ext_features
,
949 listflags(buf
, sizeof (buf
), def
->ext2_features
, ext2_feature_name
,
951 (*cpu_fprintf
)(f
, " extfeature_edx %08x (%s)\n",
952 def
->ext2_features
, buf
);
953 listflags(buf
, sizeof (buf
), def
->ext3_features
, ext3_feature_name
,
955 (*cpu_fprintf
)(f
, " extfeature_ecx %08x (%s)\n",
956 def
->ext3_features
, buf
);
957 (*cpu_fprintf
)(f
, "\n");
961 (*cpu_fprintf
)(f
, "x86 %16s\n", "[host]");
965 int cpu_x86_register(X86CPU
*cpu
, const char *cpu_model
)
967 CPUX86State
*env
= &cpu
->env
;
968 x86_def_t def1
, *def
= &def1
;
971 memset(def
, 0, sizeof(*def
));
973 if (cpu_x86_find_by_name(def
, cpu_model
) < 0)
976 env
->cpuid_vendor1
= def
->vendor1
;
977 env
->cpuid_vendor2
= def
->vendor2
;
978 env
->cpuid_vendor3
= def
->vendor3
;
980 env
->cpuid_vendor1
= CPUID_VENDOR_INTEL_1
;
981 env
->cpuid_vendor2
= CPUID_VENDOR_INTEL_2
;
982 env
->cpuid_vendor3
= CPUID_VENDOR_INTEL_3
;
984 env
->cpuid_vendor_override
= def
->vendor_override
;
985 env
->cpuid_level
= def
->level
;
986 object_property_set_int(OBJECT(cpu
), def
->family
, "family", &error
);
987 object_property_set_int(OBJECT(cpu
), def
->model
, "model", &error
);
988 object_property_set_int(OBJECT(cpu
), def
->stepping
, "stepping", &error
);
989 env
->cpuid_features
= def
->features
;
990 env
->cpuid_ext_features
= def
->ext_features
;
991 env
->cpuid_ext2_features
= def
->ext2_features
;
992 env
->cpuid_ext3_features
= def
->ext3_features
;
993 env
->cpuid_xlevel
= def
->xlevel
;
994 env
->cpuid_kvm_features
= def
->kvm_features
;
995 env
->cpuid_svm_features
= def
->svm_features
;
996 env
->cpuid_ext4_features
= def
->ext4_features
;
997 env
->cpuid_xlevel2
= def
->xlevel2
;
998 env
->tsc_khz
= def
->tsc_khz
;
999 if (!kvm_enabled()) {
1000 env
->cpuid_features
&= TCG_FEATURES
;
1001 env
->cpuid_ext_features
&= TCG_EXT_FEATURES
;
1002 env
->cpuid_ext2_features
&= (TCG_EXT2_FEATURES
1003 #ifdef TARGET_X86_64
1004 | CPUID_EXT2_SYSCALL
| CPUID_EXT2_LM
1007 env
->cpuid_ext3_features
&= TCG_EXT3_FEATURES
;
1008 env
->cpuid_svm_features
&= TCG_SVM_FEATURES
;
1010 object_property_set_str(OBJECT(cpu
), def
->model_id
, "model-id", &error
);
1011 if (error_is_set(&error
)) {
1018 #if !defined(CONFIG_USER_ONLY)
1019 /* copy vendor id string to 32 bit register, nul pad as needed
1021 static void cpyid(const char *s
, uint32_t *id
)
1023 char *d
= (char *)id
;
1026 for (i
= sizeof (*id
); i
--; )
1027 *d
++ = *s
? *s
++ : '\0';
1030 /* interpret radix and convert from string to arbitrary scalar,
1031 * otherwise flag failure
1033 #define setscalar(pval, str, perr) \
1038 ul = strtoul(str, &pend, 0); \
1039 *str && !*pend ? (*pval = ul) : (*perr = 1); \
1042 /* map cpuid options to feature bits, otherwise return failure
1043 * (option tags in *str are delimited by whitespace)
1045 static void setfeatures(uint32_t *pval
, const char *str
,
1046 const char **featureset
, int *perr
)
1050 for (q
= p
= str
; *p
|| *q
; q
= p
) {
1053 while (*p
&& !iswhite(*p
))
1057 if (!lookup_feature(pval
, q
, p
, featureset
)) {
1058 fprintf(stderr
, "error: feature \"%.*s\" not available in set\n",
1066 /* map config file options to x86_def_t form
1068 static int cpudef_setfield(const char *name
, const char *str
, void *opaque
)
1070 x86_def_t
*def
= opaque
;
1073 if (!strcmp(name
, "name")) {
1074 g_free((void *)def
->name
);
1075 def
->name
= g_strdup(str
);
1076 } else if (!strcmp(name
, "model_id")) {
1077 strncpy(def
->model_id
, str
, sizeof (def
->model_id
));
1078 } else if (!strcmp(name
, "level")) {
1079 setscalar(&def
->level
, str
, &err
)
1080 } else if (!strcmp(name
, "vendor")) {
1081 cpyid(&str
[0], &def
->vendor1
);
1082 cpyid(&str
[4], &def
->vendor2
);
1083 cpyid(&str
[8], &def
->vendor3
);
1084 } else if (!strcmp(name
, "family")) {
1085 setscalar(&def
->family
, str
, &err
)
1086 } else if (!strcmp(name
, "model")) {
1087 setscalar(&def
->model
, str
, &err
)
1088 } else if (!strcmp(name
, "stepping")) {
1089 setscalar(&def
->stepping
, str
, &err
)
1090 } else if (!strcmp(name
, "feature_edx")) {
1091 setfeatures(&def
->features
, str
, feature_name
, &err
);
1092 } else if (!strcmp(name
, "feature_ecx")) {
1093 setfeatures(&def
->ext_features
, str
, ext_feature_name
, &err
);
1094 } else if (!strcmp(name
, "extfeature_edx")) {
1095 setfeatures(&def
->ext2_features
, str
, ext2_feature_name
, &err
);
1096 } else if (!strcmp(name
, "extfeature_ecx")) {
1097 setfeatures(&def
->ext3_features
, str
, ext3_feature_name
, &err
);
1098 } else if (!strcmp(name
, "xlevel")) {
1099 setscalar(&def
->xlevel
, str
, &err
)
1101 fprintf(stderr
, "error: unknown option [%s = %s]\n", name
, str
);
1105 fprintf(stderr
, "error: bad option value [%s = %s]\n", name
, str
);
1111 /* register config file entry as x86_def_t
1113 static int cpudef_register(QemuOpts
*opts
, void *opaque
)
1115 x86_def_t
*def
= g_malloc0(sizeof (x86_def_t
));
1117 qemu_opt_foreach(opts
, cpudef_setfield
, def
, 1);
1118 def
->next
= x86_defs
;
/* Drop the APIC bit from the feature word reported in CPUID.1:EDX, so the
 * guest does not see a local APIC advertised. */
1123 void cpu_clear_apic_feature(CPUX86State
*env
)
1125 env
->cpuid_features
 &= ~CPUID_APIC
;
1128 #endif /* !CONFIG_USER_ONLY */
1130 /* register "cpudef" models defined in configuration file. Here we first
1131 * preload any built-in definitions
1133 void x86_cpudef_setup(void)
1137 for (i
= 0; i
< ARRAY_SIZE(builtin_x86_defs
); ++i
) {
1138 builtin_x86_defs
[i
].next
= x86_defs
;
1139 builtin_x86_defs
[i
].flags
= 1;
1140 x86_defs
= &builtin_x86_defs
[i
];
1142 #if !defined(CONFIG_USER_ONLY)
1143 qemu_opts_foreach(qemu_find_opts("cpudef"), cpudef_register
, NULL
, 0);
/*
 * get_cpuid_vendor(): fill *ebx/*edx/*ecx with the 12-byte CPUID vendor
 * string in leaf-0 register order (EBX, EDX, ECX).  Under KVM, unless
 * the user set cpuid_vendor_override, the registers are overwritten
 * with the host CPU's vendor -- see the original comment below.
 */
1147 static void get_cpuid_vendor(CPUX86State
*env
, uint32_t *ebx
,
1148 uint32_t *ecx
, uint32_t *edx
)
1150 *ebx
= env
->cpuid_vendor1
;
1151 *edx
= env
->cpuid_vendor2
;
1152 *ecx
= env
->cpuid_vendor3
;
1154 /* sysenter isn't supported on compatibility mode on AMD, syscall
1155 * isn't supported in compatibility mode on Intel.
1156 * Normally we advertise the actual cpu vendor, but you can override
1157 * this if you want to use KVM's sysenter/syscall emulation
1158 * in compatibility mode and when doing cross vendor migration
1160 if (kvm_enabled() && ! env
->cpuid_vendor_override
) {
/* Query the host's leaf 0 directly (EAX result discarded). */
1161 host_cpuid(0, 0, NULL
, ebx
, ecx
, edx
);
/*
 * cpu_x86_cpuid(): compute the guest-visible CPUID leaf 'index'
 * (subleaf 'count') into *eax..*edx.  Out-of-range indices are first
 * clamped to the configured basic / extended / Centaur maximum level.
 * NOTE(review): this extraction has lost the switch (index) statement,
 * the case labels, the break statements and most brace-only lines; the
 * per-leaf groups annotated below are inferred from the surviving
 * constants and comments -- confirm against the full file.
 */
1165 void cpu_x86_cpuid(CPUX86State
*env
, uint32_t index
, uint32_t count
,
1166 uint32_t *eax
, uint32_t *ebx
,
1167 uint32_t *ecx
, uint32_t *edx
)
1169 /* test if maximum index reached */
1170 if (index
& 0x80000000) {
1171 if (index
> env
->cpuid_xlevel
) {
1172 if (env
->cpuid_xlevel2
> 0) {
1173 /* Handle the Centaur's CPUID instruction. */
1174 if (index
> env
->cpuid_xlevel2
) {
1175 index
= env
->cpuid_xlevel2
;
1176 } else if (index
< 0xC0000000) {
1177 index
= env
->cpuid_xlevel
;
/* NOTE(review): else-branch structure around here is elided. */
1180 index
= env
->cpuid_xlevel
;
/* Basic leaves: clamp to cpuid_level. */
1184 if (index
> env
->cpuid_level
)
1185 index
= env
->cpuid_level
;
/* Leaf 0: maximum basic level + vendor string. */
1190 *eax
= env
->cpuid_level
;
1191 get_cpuid_vendor(env
, ebx
, ecx
, edx
);
/* Leaf 1: version/feature information. */
1194 *eax
= env
->cpuid_version
;
1195 *ebx
= (env
->cpuid_apic_id
<< 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1196 *ecx
= env
->cpuid_ext_features
;
1197 *edx
= env
->cpuid_features
;
/* Advertise logical processor count and the HTT bit on SMP/SMT. */
1198 if (env
->nr_cores
* env
->nr_threads
> 1) {
1199 *ebx
|= (env
->nr_cores
* env
->nr_threads
) << 16;
1200 *edx
|= 1 << 28; /* HTT bit */
1204 /* cache info: needed for Pentium Pro compatibility */
1211 /* cache info: needed for Core compatibility */
1212 if (env
->nr_cores
> 1) {
1213 *eax
= (env
->nr_cores
- 1) << 26;
/* Subleaves of the deterministic cache leaf (count selects cache level);
 * NOTE(review): the per-case register values are elided here. */
1218 case 0: /* L1 dcache info */
1224 case 1: /* L1 icache info */
1230 case 2: /* L2 cache info */
1232 if (env
->nr_threads
> 1) {
1233 *eax
|= (env
->nr_threads
- 1) << 14;
1239 default: /* end of info */
1248 /* mwait info: needed for Core compatibility */
1249 *eax
= 0; /* Smallest monitor-line size in bytes */
1250 *ebx
= 0; /* Largest monitor-line size in bytes */
1251 *ecx
= CPUID_MWAIT_EMX
| CPUID_MWAIT_IBE
;
1255 /* Thermal and Power Leaf */
/* Leaf 7 (structured extended features): under KVM, forward whatever
 * the kernel reports as supported; otherwise presumably zeroed
 * (elided). */
1262 if (kvm_enabled()) {
1263 KVMState
*s
= env
->kvm_state
;
1265 *eax
= kvm_arch_get_supported_cpuid(s
, 0x7, count
, R_EAX
);
1266 *ebx
= kvm_arch_get_supported_cpuid(s
, 0x7, count
, R_EBX
);
1267 *ecx
= kvm_arch_get_supported_cpuid(s
, 0x7, count
, R_ECX
);
1268 *edx
= kvm_arch_get_supported_cpuid(s
, 0x7, count
, R_EDX
);
1277 /* Direct Cache Access Information Leaf */
1278 *eax
= 0; /* Bits 0-31 in DCA_CAP MSR */
1284 /* Architectural Performance Monitoring Leaf */
1285 if (kvm_enabled()) {
1286 KVMState
*s
= env
->kvm_state
;
1288 *eax
= kvm_arch_get_supported_cpuid(s
, 0xA, count
, R_EAX
);
1289 *ebx
= kvm_arch_get_supported_cpuid(s
, 0xA, count
, R_EBX
);
1290 *ecx
= kvm_arch_get_supported_cpuid(s
, 0xA, count
, R_ECX
);
1291 *edx
= kvm_arch_get_supported_cpuid(s
, 0xA, count
, R_EDX
);
1300 /* Processor Extended State */
/* Leaf 0xD is only meaningful when XSAVE is advertised. */
1301 if (!(env
->cpuid_ext_features
& CPUID_EXT_XSAVE
)) {
1308 if (kvm_enabled()) {
1309 KVMState
*s
= env
->kvm_state
;
1311 *eax
= kvm_arch_get_supported_cpuid(s
, 0xd, count
, R_EAX
);
1312 *ebx
= kvm_arch_get_supported_cpuid(s
, 0xd, count
, R_EBX
);
1313 *ecx
= kvm_arch_get_supported_cpuid(s
, 0xd, count
, R_ECX
);
1314 *edx
= kvm_arch_get_supported_cpuid(s
, 0xd, count
, R_EDX
);
/* Leaf 0x80000000: maximum extended level + vendor string. */
1323 *eax
= env
->cpuid_xlevel
;
1324 *ebx
= env
->cpuid_vendor1
;
1325 *edx
= env
->cpuid_vendor2
;
1326 *ecx
= env
->cpuid_vendor3
;
/* Leaf 0x80000001: extended version and AMD feature flags. */
1329 *eax
= env
->cpuid_version
;
1331 *ecx
= env
->cpuid_ext3_features
;
1332 *edx
= env
->cpuid_ext2_features
;
1334 /* The Linux kernel checks for the CMPLegacy bit and
1335 * discards multiple thread information if it is set.
1336 * So dont set it here for Intel to make Linux guests happy.
1338 if (env
->nr_cores
* env
->nr_threads
> 1) {
1339 uint32_t tebx
, tecx
, tedx
;
1340 get_cpuid_vendor(env
, &tebx
, &tecx
, &tedx
);
1341 if (tebx
!= CPUID_VENDOR_INTEL_1
||
1342 tedx
!= CPUID_VENDOR_INTEL_2
||
1343 tecx
!= CPUID_VENDOR_INTEL_3
) {
1344 *ecx
|= 1 << 1; /* CmpLegacy bit */
/* Leaves 0x80000002..0x80000004: processor brand string, 16 bytes
 * (4 registers) per leaf, taken from cpuid_model[]. */
1351 *eax
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 0];
1352 *ebx
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 1];
1353 *ecx
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 2];
1354 *edx
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 3];
1357 /* cache info (L1 cache) */
1364 /* cache info (L2 cache) */
1371 /* virtual & phys address size in low 2 bytes. */
1372 /* XXX: This value must match the one used in the MMU code. */
1373 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
) {
1374 /* 64 bit processor */
1375 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1376 *eax
= 0x00003028; /* 48 bits virtual, 40 bits physical */
1378 if (env
->cpuid_features
& CPUID_PSE36
)
1379 *eax
= 0x00000024; /* 36 bits physical */
1381 *eax
= 0x00000020; /* 32 bits physical */
/* Core/thread count in ECX[7:0] (value is count - 1). */
1386 if (env
->nr_cores
* env
->nr_threads
> 1) {
1387 *ecx
|= (env
->nr_cores
* env
->nr_threads
) - 1;
/* SVM leaf 0x8000000A: only populated when SVM is advertised. */
1391 if (env
->cpuid_ext3_features
& CPUID_EXT3_SVM
) {
1392 *eax
= 0x00000001; /* SVM Revision */
1393 *ebx
= 0x00000010; /* nr of ASIDs */
1395 *edx
= env
->cpuid_svm_features
; /* optional features */
/* Leaf 0xC0000000: maximum Centaur extended level. */
1404 *eax
= env
->cpuid_xlevel2
;
1410 /* Support for VIA CPU's CPUID instruction */
1411 *eax
= env
->cpuid_version
;
1414 *edx
= env
->cpuid_ext4_features
;
1419 /* Reserved for the future, and now filled with zero */
1426 /* reserved values: zero */
1435 /* CPUClass::reset() */
/*
 * x86_cpu_reset(): chain to the parent class's reset, wipe all run-state
 * fields up to (but excluding) 'breakpoints', then rebuild the x86
 * power-on state: CR0, A20, SMBASE, descriptor tables, real-mode segment
 * caches, EDX = CPUID version, FPU/MXCSR/PAT/MISC_ENABLE state, and
 * debug registers.
 * NOTE(review): this extraction drops many lines (e.g. the body of the
 * "for (i = 0; i < 8; i++)" loop and the trailing DESC_A_MASK argument
 * lines of most cpu_x86_load_seg_cache() calls) -- treat gaps below as
 * elided, not absent from the real file.
 */
1436 static void x86_cpu_reset(CPUState
*s
)
1438 X86CPU
*cpu
= X86_CPU(s
);
1439 X86CPUClass
*xcc
= X86_CPU_GET_CLASS(cpu
);
1440 CPUX86State
*env
= &cpu
->env
;
1443 if (qemu_loglevel_mask(CPU_LOG_RESET
)) {
1444 qemu_log("CPU Reset (CPU %d)\n", env
->cpu_index
);
1445 log_cpu_state(env
, X86_DUMP_FPU
| X86_DUMP_CCOP
);
/* Parent reset runs first (saved in class_init). */
1448 xcc
->parent_reset(s
);
/* Zero everything up to 'breakpoints'; fields after it survive reset. */
1451 memset(env
, 0, offsetof(CPUX86State
, breakpoints
));
1455 env
->old_exception
= -1;
1457 /* init to reset state */
1459 #ifdef CONFIG_SOFTMMU
1460 env
->hflags
|= HF_SOFTMMU_MASK
;
1462 env
->hflags2
|= HF2_GIF_MASK
;
/* 0x60000010 is the architectural CR0 reset value (real mode). */
1464 cpu_x86_update_cr0(env
, 0x60000010);
1465 env
->a20_mask
= ~0x0;
1466 env
->smbase
= 0x30000;
1468 env
->idt
.limit
= 0xffff;
1469 env
->gdt
.limit
= 0xffff;
1470 env
->ldt
.limit
= 0xffff;
1471 env
->ldt
.flags
= DESC_P_MASK
| (2 << DESC_TYPE_SHIFT
);
1472 env
->tr
.limit
= 0xffff;
1473 env
->tr
.flags
= DESC_P_MASK
| (11 << DESC_TYPE_SHIFT
);
/* Real-mode segment caches; CS selector 0xf000 with base 0xffff0000
 * matches the standard x86 reset vector layout. */
1475 cpu_x86_load_seg_cache(env
, R_CS
, 0xf000, 0xffff0000, 0xffff,
1476 DESC_P_MASK
| DESC_S_MASK
| DESC_CS_MASK
|
1477 DESC_R_MASK
| DESC_A_MASK
);
1478 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffff,
1479 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
1481 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffff,
1482 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
1484 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffff,
1485 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
1487 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffff,
1488 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
1490 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffff,
1491 DESC_P_MASK
| DESC_S_MASK
| DESC_W_MASK
|
/* Convention: EDX holds the CPUID version after reset. */
1495 env
->regs
[R_EDX
] = env
->cpuid_version
;
/* Per-FPU-register init loop; body elided in this extraction. */
1500 for (i
= 0; i
< 8; i
++) {
1505 env
->mxcsr
= 0x1f80;
1507 env
->pat
= 0x0007040600070406ULL
;
1508 env
->msr_ia32_misc_enable
= MSR_IA32_MISC_ENABLE_DEFAULT
;
/* Debug registers: clear, then restore the architecturally-fixed bits. */
1510 memset(env
->dr
, 0, sizeof(env
->dr
));
1511 env
->dr
[6] = DR6_FIXED_1
;
1512 env
->dr
[7] = DR7_FIXED_1
;
1513 cpu_breakpoint_remove_all(env
, BP_CPU
);
1514 cpu_watchpoint_remove_all(env
, BP_CPU
);
/*
 * mce_init(): enable machine-check state for family >= 6 CPUs that
 * advertise both MCE and MCA -- set the MCG capability/control values
 * and enable every error source in each default bank's control word.
 */
1517 static void mce_init(X86CPU
*cpu
)
1519 CPUX86State
*cenv
= &cpu
->env
;
/* Family lives in bits 11:8 of cpuid_version; require >= 6 and both
 * CPUID_MCE and CPUID_MCA feature bits set. */
1522 if (((cenv
->cpuid_version
>> 8) & 0xf) >= 6
1523 && (cenv
->cpuid_features
& (CPUID_MCE
| CPUID_MCA
)) ==
1524 (CPUID_MCE
| CPUID_MCA
)) {
1525 cenv
->mcg_cap
= MCE_CAP_DEF
| MCE_BANKS_DEF
;
1526 cenv
->mcg_ctl
= ~(uint64_t)0;
/* mce_banks appears laid out as 4 words per bank, word 0 = CTL --
 * NOTE(review): confirm the layout against the struct definition. */
1527 for (bank
= 0; bank
< MCE_BANKS_DEF
; bank
++) {
1528 cenv
->mce_banks
[bank
* 4] = ~(uint64_t)0;
/*
 * x86_cpu_initfn(): QOM instance_init for TYPE_X86_CPU.  Registers the
 * "family", "model", "stepping" (int) and "model-id" (string)
 * properties, then seeds the APIC ID from the linear CPU index.
 * NOTE(review): the getter argument of each object_property_add() call
 * is missing from this extraction (only the setters are visible) --
 * confirm against the full file.
 */
1533 static void x86_cpu_initfn(Object
*obj
)
1535 X86CPU
*cpu
= X86_CPU(obj
);
1536 CPUX86State
*env
= &cpu
->env
;
1540 object_property_add(obj
, "family", "int",
1542 x86_cpuid_version_set_family
, NULL
, NULL
, NULL
);
1543 object_property_add(obj
, "model", "int",
1545 x86_cpuid_version_set_model
, NULL
, NULL
, NULL
);
1546 object_property_add(obj
, "stepping", "int",
1548 x86_cpuid_version_set_stepping
, NULL
, NULL
, NULL
);
1549 object_property_add_str(obj
, "model-id",
1551 x86_cpuid_set_model_id
, NULL
);
/* Default APIC ID: simply the CPU's index. */
1553 env
->cpuid_apic_id
= env
->cpu_index
;
/*
 * x86_cpu_common_class_init(): QOM class_init -- save the parent
 * class's reset handler in X86CPUClass so x86_cpu_reset() can chain to
 * it, then install x86_cpu_reset() as the class reset hook.
 */
1557 static void x86_cpu_common_class_init(ObjectClass
*oc
, void *data
)
1559 X86CPUClass
*xcc
= X86_CPU_CLASS(oc
);
1560 CPUClass
*cc
= CPU_CLASS(oc
);
1562 xcc
->parent_reset
= cc
->reset
;
1563 cc
->reset
= x86_cpu_reset
;
/*
 * QOM type registration record for the x86 CPU class.
 * NOTE(review): some designated initializers (at least .parent, judging
 * by the gaps in the original line numbering) are missing from this
 * extraction -- confirm against the full file.
 */
1566 static const TypeInfo x86_cpu_type_info
= {
1567 .name
= TYPE_X86_CPU
,
1569 .instance_size
= sizeof(X86CPU
),
1570 .instance_init
= x86_cpu_initfn
,
1572 .class_size
= sizeof(X86CPUClass
),
1573 .class_init
= x86_cpu_common_class_init
,
/* Register the x86 CPU QOM type; hooked into startup via type_init(). */
1576 static void x86_cpu_register_types(void)
1578 type_register_static(&x86_cpu_type_info
);
1581 type_init(x86_cpu_register_types
)