target-i386/cpu.c
1/*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19#include "qemu/osdep.h"
20#include "qemu/cutils.h"
21
22#include "cpu.h"
23#include "sysemu/kvm.h"
24#include "sysemu/cpus.h"
25#include "kvm_i386.h"
26
27#include "qemu/error-report.h"
28#include "qemu/option.h"
29#include "qemu/config-file.h"
30#include "qapi/qmp/qerror.h"
31
32#include "qapi-types.h"
33#include "qapi-visit.h"
34#include "qapi/visitor.h"
35#include "sysemu/arch_init.h"
36
37#if defined(CONFIG_KVM)
38#include <linux/kvm_para.h>
39#endif
40
41#include "sysemu/sysemu.h"
42#include "hw/qdev-properties.h"
43#ifndef CONFIG_USER_ONLY
44#include "exec/address-spaces.h"
45#include "hw/hw.h"
46#include "hw/xen/xen.h"
47#include "hw/i386/apic_internal.h"
48#endif
49
50
51/* Cache topology CPUID constants: */
52
53/* CPUID Leaf 2 Descriptors */
54
55#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
56#define CPUID_2_L1I_32KB_8WAY_64B 0x30
57#define CPUID_2_L2_2MB_8WAY_64B 0x7d
58
59
60/* CPUID Leaf 4 constants: */
61
62/* EAX: */
63#define CPUID_4_TYPE_DCACHE 1
64#define CPUID_4_TYPE_ICACHE 2
65#define CPUID_4_TYPE_UNIFIED 3
66
67#define CPUID_4_LEVEL(l) ((l) << 5)
68
69#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
70#define CPUID_4_FULLY_ASSOC (1 << 9)
71
72/* EDX: */
73#define CPUID_4_NO_INVD_SHARING (1 << 0)
74#define CPUID_4_INCLUSIVE (1 << 1)
75#define CPUID_4_COMPLEX_IDX (1 << 2)
76
77#define ASSOC_FULL 0xFF
78
79/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
80#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
81 a == 2 ? 0x2 : \
82 a == 4 ? 0x4 : \
83 a == 8 ? 0x6 : \
84 a == 16 ? 0x8 : \
85 a == 32 ? 0xA : \
86 a == 48 ? 0xB : \
87 a == 64 ? 0xC : \
88 a == 96 ? 0xD : \
89 a == 128 ? 0xE : \
90 a == ASSOC_FULL ? 0xF : \
91 0 /* invalid value */)
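/* Worked example (derived from the macro above): AMD_ENC_ASSOC(16) yields 0x8
 * and AMD_ENC_ASSOC(ASSOC_FULL) yields 0xF; any associativity not listed maps
 * to 0, the invalid encoding.
 */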
92
93
94/* Definitions of the hardcoded cache entries we expose: */
95
96/* L1 data cache: */
97#define L1D_LINE_SIZE 64
98#define L1D_ASSOCIATIVITY 8
99#define L1D_SETS 64
100#define L1D_PARTITIONS 1
101/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
102#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
103/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
104#define L1D_LINES_PER_TAG 1
105#define L1D_SIZE_KB_AMD 64
106#define L1D_ASSOCIATIVITY_AMD 2
107
108/* L1 instruction cache: */
109#define L1I_LINE_SIZE 64
110#define L1I_ASSOCIATIVITY 8
111#define L1I_SETS 64
112#define L1I_PARTITIONS 1
113/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
114#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
115/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
116#define L1I_LINES_PER_TAG 1
117#define L1I_SIZE_KB_AMD 64
118#define L1I_ASSOCIATIVITY_AMD 2
119
120/* Level 2 unified cache: */
121#define L2_LINE_SIZE 64
122#define L2_ASSOCIATIVITY 16
123#define L2_SETS 4096
124#define L2_PARTITIONS 1
125/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
126/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
127#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
128/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
129#define L2_LINES_PER_TAG 1
130#define L2_SIZE_KB_AMD 512
131
132/* No L3 cache: */
133#define L3_SIZE_KB 0 /* disabled */
134#define L3_ASSOCIATIVITY 0 /* disabled */
135#define L3_LINES_PER_TAG 0 /* disabled */
136#define L3_LINE_SIZE 0 /* disabled */
137
138/* TLB definitions: */
139
140#define L1_DTLB_2M_ASSOC 1
141#define L1_DTLB_2M_ENTRIES 255
142#define L1_DTLB_4K_ASSOC 1
143#define L1_DTLB_4K_ENTRIES 255
144
145#define L1_ITLB_2M_ASSOC 1
146#define L1_ITLB_2M_ENTRIES 255
147#define L1_ITLB_4K_ASSOC 1
148#define L1_ITLB_4K_ENTRIES 255
149
150#define L2_DTLB_2M_ASSOC 0 /* disabled */
151#define L2_DTLB_2M_ENTRIES 0 /* disabled */
152#define L2_DTLB_4K_ASSOC 4
153#define L2_DTLB_4K_ENTRIES 512
154
155#define L2_ITLB_2M_ASSOC 0 /* disabled */
156#define L2_ITLB_2M_ENTRIES 0 /* disabled */
157#define L2_ITLB_4K_ASSOC 4
158#define L2_ITLB_4K_ENTRIES 512
159
160
161
162static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
163 uint32_t vendor2, uint32_t vendor3)
164{
165 int i;
166 for (i = 0; i < 4; i++) {
167 dst[i] = vendor1 >> (8 * i);
168 dst[i + 4] = vendor2 >> (8 * i);
169 dst[i + 8] = vendor3 >> (8 * i);
170 }
171 dst[CPUID_VENDOR_SZ] = '\0';
172}
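/* Illustration: CPUID.0 on an Intel part returns EBX/EDX/ECX holding "Genu",
 * "ineI" and "ntel" as little-endian 32-bit words; passing them here as
 * vendor1/vendor2/vendor3 reassembles the 12-byte "GenuineIntel" string.
 */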
173
174/* feature flags taken from "Intel Processor Identification and the CPUID
175 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
176 * between feature naming conventions, aliases may be added.
177 */
178static const char *feature_name[] = {
179 "fpu", "vme", "de", "pse",
180 "tsc", "msr", "pae", "mce",
181 "cx8", "apic", NULL, "sep",
182 "mtrr", "pge", "mca", "cmov",
183 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
184 NULL, "ds" /* Intel dts */, "acpi", "mmx",
185 "fxsr", "sse", "sse2", "ss",
186 "ht" /* Intel htt */, "tm", "ia64", "pbe",
187};
188static const char *ext_feature_name[] = {
189 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
190 "ds_cpl", "vmx", "smx", "est",
191 "tm2", "ssse3", "cid", NULL,
192 "fma", "cx16", "xtpr", "pdcm",
193 NULL, "pcid", "dca", "sse4.1|sse4_1",
194 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
195 "tsc-deadline", "aes", "xsave", "osxsave",
196 "avx", "f16c", "rdrand", "hypervisor",
197};
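/* Entries of the form "a|b" (e.g. "sse4.1|sse4_1") are alternative spellings
 * for the same bit; altcmp() below accepts either name when a feature string
 * is parsed.
 */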
198/* Feature names that are already defined in feature_name[] but are also set
199 * in CPUID[8000_0001].EDX on AMD CPUs don't have entries in
200 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
201 * if and only if the CPU vendor is AMD.
202 */
203static const char *ext2_feature_name[] = {
204 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
205 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
206 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
207 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
208 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
209 "nx|xd", NULL, "mmxext", NULL /* mmx */,
210 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
211 NULL, "lm|i64", "3dnowext", "3dnow",
212};
213static const char *ext3_feature_name[] = {
214 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
215 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
216 "3dnowprefetch", "osvw", "ibs", "xop",
217 "skinit", "wdt", NULL, "lwp",
218 "fma4", "tce", NULL, "nodeid_msr",
219 NULL, "tbm", "topoext", "perfctr_core",
220 "perfctr_nb", NULL, NULL, NULL,
221 NULL, NULL, NULL, NULL,
222};
223
224static const char *ext4_feature_name[] = {
225 NULL, NULL, "xstore", "xstore-en",
226 NULL, NULL, "xcrypt", "xcrypt-en",
227 "ace2", "ace2-en", "phe", "phe-en",
228 "pmm", "pmm-en", NULL, NULL,
229 NULL, NULL, NULL, NULL,
230 NULL, NULL, NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233};
234
235static const char *kvm_feature_name[] = {
236 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
237 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
238 NULL, NULL, NULL, NULL,
239 NULL, NULL, NULL, NULL,
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 "kvmclock-stable-bit", NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244};
245
246static const char *svm_feature_name[] = {
247 "npt", "lbrv", "svm_lock", "nrip_save",
248 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
249 NULL, NULL, "pause_filter", NULL,
250 "pfthreshold", NULL, NULL, NULL,
251 NULL, NULL, NULL, NULL,
252 NULL, NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255};
256
257static const char *cpuid_7_0_ebx_feature_name[] = {
258 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
259 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
260 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
261 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
262};
263
264static const char *cpuid_7_0_ecx_feature_name[] = {
265 NULL, NULL, NULL, "pku",
266 "ospke", NULL, NULL, NULL,
267 NULL, NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273};
274
275static const char *cpuid_apm_edx_feature_name[] = {
276 NULL, NULL, NULL, NULL,
277 NULL, NULL, NULL, NULL,
278 "invtsc", NULL, NULL, NULL,
279 NULL, NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284};
285
286static const char *cpuid_xsave_feature_name[] = {
287 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
288 NULL, NULL, NULL, NULL,
289 NULL, NULL, NULL, NULL,
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295};
296
297static const char *cpuid_6_feature_name[] = {
298 NULL, NULL, "arat", NULL,
299 NULL, NULL, NULL, NULL,
300 NULL, NULL, NULL, NULL,
301 NULL, NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
305 NULL, NULL, NULL, NULL,
306};
307
308#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
309#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
310 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
311#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
312 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
313 CPUID_PSE36 | CPUID_FXSR)
314#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
315#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
316 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
317 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
318 CPUID_PAE | CPUID_SEP | CPUID_APIC)
319
320#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
321 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
322 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
323 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
324 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
325 /* partly implemented:
326 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
327 /* missing:
328 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
329#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
330 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
331 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
332 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
333 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
334 /* missing:
335 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
336 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
337 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
338 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
339 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
340
341#ifdef TARGET_X86_64
342#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
343#else
344#define TCG_EXT2_X86_64_FEATURES 0
345#endif
346
347#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
348 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
349 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
350 TCG_EXT2_X86_64_FEATURES)
351#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
352 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
353#define TCG_EXT4_FEATURES 0
354#define TCG_SVM_FEATURES 0
355#define TCG_KVM_FEATURES 0
356#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
357 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
358 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
359 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
360 /* missing:
361 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
362 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
363 CPUID_7_0_EBX_RDSEED */
364#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
365#define TCG_APM_FEATURES 0
366#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
367#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
368 /* missing:
369 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
370
371typedef struct FeatureWordInfo {
372 const char **feat_names;
373 uint32_t cpuid_eax; /* Input EAX for CPUID */
374 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
375 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
376 int cpuid_reg; /* output register (R_* constant) */
377 uint32_t tcg_features; /* Feature flags supported by TCG */
378 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
379} FeatureWordInfo;
380
381static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
382 [FEAT_1_EDX] = {
383 .feat_names = feature_name,
384 .cpuid_eax = 1, .cpuid_reg = R_EDX,
385 .tcg_features = TCG_FEATURES,
386 },
387 [FEAT_1_ECX] = {
388 .feat_names = ext_feature_name,
389 .cpuid_eax = 1, .cpuid_reg = R_ECX,
390 .tcg_features = TCG_EXT_FEATURES,
391 },
392 [FEAT_8000_0001_EDX] = {
393 .feat_names = ext2_feature_name,
394 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
395 .tcg_features = TCG_EXT2_FEATURES,
396 },
397 [FEAT_8000_0001_ECX] = {
398 .feat_names = ext3_feature_name,
399 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
400 .tcg_features = TCG_EXT3_FEATURES,
401 },
402 [FEAT_C000_0001_EDX] = {
403 .feat_names = ext4_feature_name,
404 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
405 .tcg_features = TCG_EXT4_FEATURES,
406 },
407 [FEAT_KVM] = {
408 .feat_names = kvm_feature_name,
409 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
410 .tcg_features = TCG_KVM_FEATURES,
411 },
412 [FEAT_SVM] = {
413 .feat_names = svm_feature_name,
414 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
415 .tcg_features = TCG_SVM_FEATURES,
416 },
417 [FEAT_7_0_EBX] = {
418 .feat_names = cpuid_7_0_ebx_feature_name,
419 .cpuid_eax = 7,
420 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
421 .cpuid_reg = R_EBX,
422 .tcg_features = TCG_7_0_EBX_FEATURES,
423 },
424 [FEAT_7_0_ECX] = {
425 .feat_names = cpuid_7_0_ecx_feature_name,
426 .cpuid_eax = 7,
427 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
428 .cpuid_reg = R_ECX,
429 .tcg_features = TCG_7_0_ECX_FEATURES,
430 },
431 [FEAT_8000_0007_EDX] = {
432 .feat_names = cpuid_apm_edx_feature_name,
433 .cpuid_eax = 0x80000007,
434 .cpuid_reg = R_EDX,
435 .tcg_features = TCG_APM_FEATURES,
436 .unmigratable_flags = CPUID_APM_INVTSC,
437 },
438 [FEAT_XSAVE] = {
439 .feat_names = cpuid_xsave_feature_name,
440 .cpuid_eax = 0xd,
441 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
442 .cpuid_reg = R_EAX,
443 .tcg_features = TCG_XSAVE_FEATURES,
444 },
445 [FEAT_6_EAX] = {
446 .feat_names = cpuid_6_feature_name,
447 .cpuid_eax = 6, .cpuid_reg = R_EAX,
448 .tcg_features = TCG_6_EAX_FEATURES,
449 },
450};
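/* Reading the table: the FEAT_7_0_EBX entry, for instance, describes the
 * flags returned in CPUID.(EAX=07H,ECX=0):EBX, and TCG can only offer the
 * subset listed in that entry's tcg_features mask.
 */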
451
452typedef struct X86RegisterInfo32 {
453 /* Name of register */
454 const char *name;
455 /* QAPI enum value register */
456 X86CPURegister32 qapi_enum;
457} X86RegisterInfo32;
458
459#define REGISTER(reg) \
460 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
461static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
462 REGISTER(EAX),
463 REGISTER(ECX),
464 REGISTER(EDX),
465 REGISTER(EBX),
466 REGISTER(ESP),
467 REGISTER(EBP),
468 REGISTER(ESI),
469 REGISTER(EDI),
470};
471#undef REGISTER
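/* REGISTER(EAX), for example, expands to
 *   [R_EAX] = { .name = "EAX", .qapi_enum = X86_CPU_REGISTER32_EAX },
 * so array indices, printable names and QAPI enum values stay in sync.
 */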
472
473const ExtSaveArea x86_ext_save_areas[] = {
474 [XSTATE_YMM_BIT] =
475 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
476 .offset = 0x240, .size = 0x100 },
477 [XSTATE_BNDREGS_BIT] =
478 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
479 .offset = 0x3c0, .size = 0x40 },
480 [XSTATE_BNDCSR_BIT] =
481 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
482 .offset = 0x400, .size = 0x40 },
483 [XSTATE_OPMASK_BIT] =
484 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
485 .offset = 0x440, .size = 0x40 },
486 [XSTATE_ZMM_Hi256_BIT] =
487 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
488 .offset = 0x480, .size = 0x200 },
489 [XSTATE_Hi16_ZMM_BIT] =
490 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
491 .offset = 0x680, .size = 0x400 },
492 [XSTATE_PKRU_BIT] =
493 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
494 .offset = 0xA80, .size = 0x8 },
495};
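/* These offsets and sizes mirror the standard (non-compacted) XSAVE layout:
 * the 0x200-byte legacy FXSAVE region plus the 0x40-byte XSAVE header place
 * the first extended component (YMM) at offset 0x240.
 */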
496
497const char *get_register_name_32(unsigned int reg)
498{
499 if (reg >= CPU_NB_REGS32) {
500 return NULL;
501 }
502 return x86_reg_info_32[reg].name;
503}
504
505/*
506 * Returns the set of feature flags that are supported and migratable by
507 * QEMU, for a given FeatureWord.
508 */
509static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
510{
511 FeatureWordInfo *wi = &feature_word_info[w];
512 uint32_t r = 0;
513 int i;
514
515 for (i = 0; i < 32; i++) {
516 uint32_t f = 1U << i;
517 /* If the feature name is unknown, it is not supported by QEMU yet */
518 if (!wi->feat_names[i]) {
519 continue;
520 }
521 /* Skip features known to QEMU, but explicitly marked as unmigratable */
522 if (wi->unmigratable_flags & f) {
523 continue;
524 }
525 r |= f;
526 }
527 return r;
528}
529
530void host_cpuid(uint32_t function, uint32_t count,
531 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
532{
533 uint32_t vec[4];
534
535#ifdef __x86_64__
536 asm volatile("cpuid"
537 : "=a"(vec[0]), "=b"(vec[1]),
538 "=c"(vec[2]), "=d"(vec[3])
539 : "0"(function), "c"(count) : "cc");
540#elif defined(__i386__)
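    /* On 32-bit hosts EBX may be reserved as the PIC register, so the results
     * are stored through memory and all registers are saved/restored with
     * pusha/popa rather than naming EBX as an asm output.
     */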
541 asm volatile("pusha \n\t"
542 "cpuid \n\t"
543 "mov %%eax, 0(%2) \n\t"
544 "mov %%ebx, 4(%2) \n\t"
545 "mov %%ecx, 8(%2) \n\t"
546 "mov %%edx, 12(%2) \n\t"
547 "popa"
548 : : "a"(function), "c"(count), "S"(vec)
549 : "memory", "cc");
550#else
551 abort();
552#endif
553
554 if (eax)
555 *eax = vec[0];
556 if (ebx)
557 *ebx = vec[1];
558 if (ecx)
559 *ecx = vec[2];
560 if (edx)
561 *edx = vec[3];
562}
563
564#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
565
566/* General substring compare of *[s1..e1) and *[s2..e2). sx is the start of
567 * a substring; ex, if not NULL, points to the first char after the substring,
568 * otherwise the string is assumed to be sized by a terminating nul.
569 * Return the lexical ordering of *s1:*s2.
570 */
571static int sstrcmp(const char *s1, const char *e1,
572 const char *s2, const char *e2)
573{
574 for (;;) {
575 if (!*s1 || !*s2 || *s1 != *s2)
576 return (*s1 - *s2);
577 ++s1, ++s2;
578 if (s1 == e1 && s2 == e2)
579 return (0);
580 else if (s1 == e1)
581 return (*s2);
582 else if (s2 == e2)
583 return (*s1);
584 }
585}
586
587/* Compare *[s..e) to *altstr. *altstr may be a simple string or multiple
588 * '|'-delimited (possibly empty) strings, in which case the search for a
589 * match proceeds left to right through the alternatives. Return 0 for
590 * success, non-zero otherwise.
591 */
592static int altcmp(const char *s, const char *e, const char *altstr)
593{
594 const char *p, *q;
595
596 for (q = p = altstr; ; ) {
597 while (*p && *p != '|')
598 ++p;
599 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
600 return (0);
601 if (!*p)
602 return (1);
603 else
604 q = ++p;
605 }
606}
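/* Example: altcmp(s, e, "sse4.1|sse4_1") returns 0 when *[s..e) matches either
 * spelling, so "+sse4.1" and "+sse4_1" select the same CPUID bit when a
 * feature string is parsed.
 */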
607
608/* Search featureset for the flag *[s..e); if found, set the corresponding
609 * bit in *pval and return true, otherwise return false.
610 */
611static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
612 const char **featureset)
613{
614 uint32_t mask;
615 const char **ppc;
616 bool found = false;
617
618 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
619 if (*ppc && !altcmp(s, e, *ppc)) {
620 *pval |= mask;
621 found = true;
622 }
623 }
624 return found;
625}
626
627static void add_flagname_to_bitmaps(const char *flagname,
628 FeatureWordArray words,
629 Error **errp)
630{
631 FeatureWord w;
632 for (w = 0; w < FEATURE_WORDS; w++) {
633 FeatureWordInfo *wi = &feature_word_info[w];
634 if (wi->feat_names &&
635 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
636 break;
637 }
638 }
639 if (w == FEATURE_WORDS) {
640 error_setg(errp, "CPU feature %s not found", flagname);
641 }
642}
643
644/* CPU class name definitions: */
645
646#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
647#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
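/* Example: X86_CPU_TYPE_NAME("qemu64") produces "qemu64-" followed by
 * TYPE_X86_CPU, so every CPU model name maps one-to-one onto a QOM class name.
 */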
648
649/* Return the type name for a given CPU model name.
650 * The caller is responsible for freeing the returned string.
651 */
652static char *x86_cpu_type_name(const char *model_name)
653{
654 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
655}
656
657static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
658{
659 ObjectClass *oc;
660 char *typename;
661
662 if (cpu_model == NULL) {
663 return NULL;
664 }
665
666 typename = x86_cpu_type_name(cpu_model);
667 oc = object_class_by_name(typename);
668 g_free(typename);
669 return oc;
670}
671
672struct X86CPUDefinition {
673 const char *name;
674 uint32_t level;
675 uint32_t xlevel;
676 uint32_t xlevel2;
677 /* vendor is a zero-terminated, 12-character ASCII string */
678 char vendor[CPUID_VENDOR_SZ + 1];
679 int family;
680 int model;
681 int stepping;
682 FeatureWordArray features;
683 char model_id[48];
684};
685
686static X86CPUDefinition builtin_x86_defs[] = {
687 {
688 .name = "qemu64",
689 .level = 0xd,
690 .vendor = CPUID_VENDOR_AMD,
691 .family = 6,
692 .model = 6,
693 .stepping = 3,
694 .features[FEAT_1_EDX] =
695 PPRO_FEATURES |
696 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
697 CPUID_PSE36,
698 .features[FEAT_1_ECX] =
699 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
700 .features[FEAT_8000_0001_EDX] =
701 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
702 .features[FEAT_8000_0001_ECX] =
703 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
704 .xlevel = 0x8000000A,
705 },
706 {
707 .name = "phenom",
708 .level = 5,
709 .vendor = CPUID_VENDOR_AMD,
710 .family = 16,
711 .model = 2,
712 .stepping = 3,
713 /* Missing: CPUID_HT */
714 .features[FEAT_1_EDX] =
715 PPRO_FEATURES |
716 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
717 CPUID_PSE36 | CPUID_VME,
718 .features[FEAT_1_ECX] =
719 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
720 CPUID_EXT_POPCNT,
721 .features[FEAT_8000_0001_EDX] =
722 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
723 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
724 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
725 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
726 CPUID_EXT3_CR8LEG,
727 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
728 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
729 .features[FEAT_8000_0001_ECX] =
730 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
731 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
732 /* Missing: CPUID_SVM_LBRV */
733 .features[FEAT_SVM] =
734 CPUID_SVM_NPT,
735 .xlevel = 0x8000001A,
736 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
737 },
738 {
739 .name = "core2duo",
740 .level = 10,
741 .vendor = CPUID_VENDOR_INTEL,
742 .family = 6,
743 .model = 15,
744 .stepping = 11,
745 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
746 .features[FEAT_1_EDX] =
747 PPRO_FEATURES |
748 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
749 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
750 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
751 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
752 .features[FEAT_1_ECX] =
753 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
754 CPUID_EXT_CX16,
755 .features[FEAT_8000_0001_EDX] =
756 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
757 .features[FEAT_8000_0001_ECX] =
758 CPUID_EXT3_LAHF_LM,
759 .xlevel = 0x80000008,
760 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
761 },
762 {
763 .name = "kvm64",
764 .level = 0xd,
765 .vendor = CPUID_VENDOR_INTEL,
766 .family = 15,
767 .model = 6,
768 .stepping = 1,
769 /* Missing: CPUID_HT */
770 .features[FEAT_1_EDX] =
771 PPRO_FEATURES | CPUID_VME |
772 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
773 CPUID_PSE36,
774 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
775 .features[FEAT_1_ECX] =
776 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
777 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
778 .features[FEAT_8000_0001_EDX] =
779 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
780 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
781 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
782 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
783 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
784 .features[FEAT_8000_0001_ECX] =
785 0,
786 .xlevel = 0x80000008,
787 .model_id = "Common KVM processor"
788 },
789 {
790 .name = "qemu32",
791 .level = 4,
792 .vendor = CPUID_VENDOR_INTEL,
793 .family = 6,
794 .model = 6,
795 .stepping = 3,
796 .features[FEAT_1_EDX] =
797 PPRO_FEATURES,
798 .features[FEAT_1_ECX] =
799 CPUID_EXT_SSE3,
800 .xlevel = 0x80000004,
801 },
802 {
803 .name = "kvm32",
804 .level = 5,
805 .vendor = CPUID_VENDOR_INTEL,
806 .family = 15,
807 .model = 6,
808 .stepping = 1,
809 .features[FEAT_1_EDX] =
810 PPRO_FEATURES | CPUID_VME |
811 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
812 .features[FEAT_1_ECX] =
813 CPUID_EXT_SSE3,
814 .features[FEAT_8000_0001_ECX] =
815 0,
816 .xlevel = 0x80000008,
817 .model_id = "Common 32-bit KVM processor"
818 },
819 {
820 .name = "coreduo",
821 .level = 10,
822 .vendor = CPUID_VENDOR_INTEL,
823 .family = 6,
824 .model = 14,
825 .stepping = 8,
826 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
827 .features[FEAT_1_EDX] =
828 PPRO_FEATURES | CPUID_VME |
829 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
830 CPUID_SS,
831 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
832 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
833 .features[FEAT_1_ECX] =
834 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
835 .features[FEAT_8000_0001_EDX] =
836 CPUID_EXT2_NX,
837 .xlevel = 0x80000008,
838 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
839 },
840 {
841 .name = "486",
842 .level = 1,
843 .vendor = CPUID_VENDOR_INTEL,
844 .family = 4,
845 .model = 8,
846 .stepping = 0,
847 .features[FEAT_1_EDX] =
848 I486_FEATURES,
849 .xlevel = 0,
850 },
851 {
852 .name = "pentium",
853 .level = 1,
854 .vendor = CPUID_VENDOR_INTEL,
855 .family = 5,
856 .model = 4,
857 .stepping = 3,
858 .features[FEAT_1_EDX] =
859 PENTIUM_FEATURES,
860 .xlevel = 0,
861 },
862 {
863 .name = "pentium2",
864 .level = 2,
865 .vendor = CPUID_VENDOR_INTEL,
866 .family = 6,
867 .model = 5,
868 .stepping = 2,
869 .features[FEAT_1_EDX] =
870 PENTIUM2_FEATURES,
871 .xlevel = 0,
872 },
873 {
874 .name = "pentium3",
875 .level = 3,
876 .vendor = CPUID_VENDOR_INTEL,
877 .family = 6,
878 .model = 7,
879 .stepping = 3,
880 .features[FEAT_1_EDX] =
881 PENTIUM3_FEATURES,
882 .xlevel = 0,
883 },
884 {
885 .name = "athlon",
886 .level = 2,
887 .vendor = CPUID_VENDOR_AMD,
888 .family = 6,
889 .model = 2,
890 .stepping = 3,
891 .features[FEAT_1_EDX] =
892 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
893 CPUID_MCA,
894 .features[FEAT_8000_0001_EDX] =
895 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
896 .xlevel = 0x80000008,
897 },
898 {
899 .name = "n270",
900 .level = 10,
901 .vendor = CPUID_VENDOR_INTEL,
902 .family = 6,
903 .model = 28,
904 .stepping = 2,
905 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
906 .features[FEAT_1_EDX] =
907 PPRO_FEATURES |
908 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
909 CPUID_ACPI | CPUID_SS,
910 /* Some CPUs lack CPUID_SEP */
911 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
912 * CPUID_EXT_XTPR */
913 .features[FEAT_1_ECX] =
914 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
915 CPUID_EXT_MOVBE,
916 .features[FEAT_8000_0001_EDX] =
917 CPUID_EXT2_NX,
918 .features[FEAT_8000_0001_ECX] =
919 CPUID_EXT3_LAHF_LM,
920 .xlevel = 0x80000008,
921 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
922 },
923 {
924 .name = "Conroe",
925 .level = 10,
926 .vendor = CPUID_VENDOR_INTEL,
927 .family = 6,
928 .model = 15,
929 .stepping = 3,
930 .features[FEAT_1_EDX] =
931 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
932 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
933 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
934 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
935 CPUID_DE | CPUID_FP87,
936 .features[FEAT_1_ECX] =
937 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
938 .features[FEAT_8000_0001_EDX] =
939 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
940 .features[FEAT_8000_0001_ECX] =
941 CPUID_EXT3_LAHF_LM,
942 .xlevel = 0x80000008,
943 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
944 },
945 {
946 .name = "Penryn",
947 .level = 10,
948 .vendor = CPUID_VENDOR_INTEL,
949 .family = 6,
950 .model = 23,
951 .stepping = 3,
952 .features[FEAT_1_EDX] =
953 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
954 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
955 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
956 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
957 CPUID_DE | CPUID_FP87,
958 .features[FEAT_1_ECX] =
959 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
960 CPUID_EXT_SSE3,
961 .features[FEAT_8000_0001_EDX] =
962 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
963 .features[FEAT_8000_0001_ECX] =
964 CPUID_EXT3_LAHF_LM,
965 .xlevel = 0x80000008,
966 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
967 },
968 {
969 .name = "Nehalem",
970 .level = 11,
971 .vendor = CPUID_VENDOR_INTEL,
972 .family = 6,
973 .model = 26,
974 .stepping = 3,
975 .features[FEAT_1_EDX] =
976 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
977 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
978 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
979 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
980 CPUID_DE | CPUID_FP87,
981 .features[FEAT_1_ECX] =
982 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
983 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
984 .features[FEAT_8000_0001_EDX] =
985 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
986 .features[FEAT_8000_0001_ECX] =
987 CPUID_EXT3_LAHF_LM,
988 .xlevel = 0x80000008,
989 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
990 },
991 {
992 .name = "Westmere",
993 .level = 11,
994 .vendor = CPUID_VENDOR_INTEL,
995 .family = 6,
996 .model = 44,
997 .stepping = 1,
998 .features[FEAT_1_EDX] =
999 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1000 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1001 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1002 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1003 CPUID_DE | CPUID_FP87,
1004 .features[FEAT_1_ECX] =
1005 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1006 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1007 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1008 .features[FEAT_8000_0001_EDX] =
1009 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1010 .features[FEAT_8000_0001_ECX] =
1011 CPUID_EXT3_LAHF_LM,
1012 .features[FEAT_6_EAX] =
1013 CPUID_6_EAX_ARAT,
1014 .xlevel = 0x80000008,
1015 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1016 },
1017 {
1018 .name = "SandyBridge",
1019 .level = 0xd,
1020 .vendor = CPUID_VENDOR_INTEL,
1021 .family = 6,
1022 .model = 42,
1023 .stepping = 1,
1024 .features[FEAT_1_EDX] =
1025 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1026 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1027 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1028 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1029 CPUID_DE | CPUID_FP87,
1030 .features[FEAT_1_ECX] =
1031 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1032 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1033 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1034 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1035 CPUID_EXT_SSE3,
1036 .features[FEAT_8000_0001_EDX] =
1037 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1038 CPUID_EXT2_SYSCALL,
1039 .features[FEAT_8000_0001_ECX] =
1040 CPUID_EXT3_LAHF_LM,
1041 .features[FEAT_XSAVE] =
1042 CPUID_XSAVE_XSAVEOPT,
1043 .features[FEAT_6_EAX] =
1044 CPUID_6_EAX_ARAT,
1045 .xlevel = 0x80000008,
1046 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1047 },
1048 {
1049 .name = "IvyBridge",
1050 .level = 0xd,
1051 .vendor = CPUID_VENDOR_INTEL,
1052 .family = 6,
1053 .model = 58,
1054 .stepping = 9,
1055 .features[FEAT_1_EDX] =
1056 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1057 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1058 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1059 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1060 CPUID_DE | CPUID_FP87,
1061 .features[FEAT_1_ECX] =
1062 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1063 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1064 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1065 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1066 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1067 .features[FEAT_7_0_EBX] =
1068 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1069 CPUID_7_0_EBX_ERMS,
1070 .features[FEAT_8000_0001_EDX] =
1071 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1072 CPUID_EXT2_SYSCALL,
1073 .features[FEAT_8000_0001_ECX] =
1074 CPUID_EXT3_LAHF_LM,
1075 .features[FEAT_XSAVE] =
1076 CPUID_XSAVE_XSAVEOPT,
1077 .features[FEAT_6_EAX] =
1078 CPUID_6_EAX_ARAT,
1079 .xlevel = 0x80000008,
1080 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1081 },
1082 {
1083 .name = "Haswell-noTSX",
1084 .level = 0xd,
1085 .vendor = CPUID_VENDOR_INTEL,
1086 .family = 6,
1087 .model = 60,
1088 .stepping = 1,
1089 .features[FEAT_1_EDX] =
1090 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1091 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1092 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1093 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1094 CPUID_DE | CPUID_FP87,
1095 .features[FEAT_1_ECX] =
1096 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1097 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1098 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1099 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1100 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1101 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1102 .features[FEAT_8000_0001_EDX] =
1103 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1104 CPUID_EXT2_SYSCALL,
1105 .features[FEAT_8000_0001_ECX] =
1106 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1107 .features[FEAT_7_0_EBX] =
1108 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1109 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1110 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1111 .features[FEAT_XSAVE] =
1112 CPUID_XSAVE_XSAVEOPT,
1113 .features[FEAT_6_EAX] =
1114 CPUID_6_EAX_ARAT,
1115 .xlevel = 0x80000008,
1116 .model_id = "Intel Core Processor (Haswell, no TSX)",
1117 }, {
1118 .name = "Haswell",
1119 .level = 0xd,
1120 .vendor = CPUID_VENDOR_INTEL,
1121 .family = 6,
1122 .model = 60,
1123 .stepping = 1,
1124 .features[FEAT_1_EDX] =
1125 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1126 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1127 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1128 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1129 CPUID_DE | CPUID_FP87,
1130 .features[FEAT_1_ECX] =
1131 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1132 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1133 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1134 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1135 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1136 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1137 .features[FEAT_8000_0001_EDX] =
1138 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1139 CPUID_EXT2_SYSCALL,
1140 .features[FEAT_8000_0001_ECX] =
1141 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1142 .features[FEAT_7_0_EBX] =
1143 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1144 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1145 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1146 CPUID_7_0_EBX_RTM,
1147 .features[FEAT_XSAVE] =
1148 CPUID_XSAVE_XSAVEOPT,
1149 .features[FEAT_6_EAX] =
1150 CPUID_6_EAX_ARAT,
1151 .xlevel = 0x80000008,
1152 .model_id = "Intel Core Processor (Haswell)",
1153 },
1154 {
1155 .name = "Broadwell-noTSX",
1156 .level = 0xd,
1157 .vendor = CPUID_VENDOR_INTEL,
1158 .family = 6,
1159 .model = 61,
1160 .stepping = 2,
1161 .features[FEAT_1_EDX] =
1162 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1163 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1164 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1165 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1166 CPUID_DE | CPUID_FP87,
1167 .features[FEAT_1_ECX] =
1168 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1169 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1170 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1171 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1172 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1173 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1174 .features[FEAT_8000_0001_EDX] =
1175 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1176 CPUID_EXT2_SYSCALL,
1177 .features[FEAT_8000_0001_ECX] =
1178 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1179 .features[FEAT_7_0_EBX] =
1180 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1181 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1182 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1183 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1184 CPUID_7_0_EBX_SMAP,
1185 .features[FEAT_XSAVE] =
1186 CPUID_XSAVE_XSAVEOPT,
1187 .features[FEAT_6_EAX] =
1188 CPUID_6_EAX_ARAT,
1189 .xlevel = 0x80000008,
1190 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1191 },
1192 {
1193 .name = "Broadwell",
1194 .level = 0xd,
1195 .vendor = CPUID_VENDOR_INTEL,
1196 .family = 6,
1197 .model = 61,
1198 .stepping = 2,
1199 .features[FEAT_1_EDX] =
1200 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1201 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1202 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1203 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1204 CPUID_DE | CPUID_FP87,
1205 .features[FEAT_1_ECX] =
1206 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1207 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1208 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1209 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1210 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1211 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1212 .features[FEAT_8000_0001_EDX] =
1213 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1214 CPUID_EXT2_SYSCALL,
1215 .features[FEAT_8000_0001_ECX] =
1216 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1217 .features[FEAT_7_0_EBX] =
1218 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1219 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1220 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1221 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1222 CPUID_7_0_EBX_SMAP,
1223 .features[FEAT_XSAVE] =
1224 CPUID_XSAVE_XSAVEOPT,
1225 .features[FEAT_6_EAX] =
1226 CPUID_6_EAX_ARAT,
1227 .xlevel = 0x80000008,
1228 .model_id = "Intel Core Processor (Broadwell)",
1229 },
1230 {
1231 .name = "Opteron_G1",
1232 .level = 5,
1233 .vendor = CPUID_VENDOR_AMD,
1234 .family = 15,
1235 .model = 6,
1236 .stepping = 1,
1237 .features[FEAT_1_EDX] =
1238 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1239 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1240 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1241 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1242 CPUID_DE | CPUID_FP87,
1243 .features[FEAT_1_ECX] =
1244 CPUID_EXT_SSE3,
1245 .features[FEAT_8000_0001_EDX] =
1246 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1247 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1248 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1249 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1250 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1251 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1252 .xlevel = 0x80000008,
1253 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1254 },
1255 {
1256 .name = "Opteron_G2",
1257 .level = 5,
1258 .vendor = CPUID_VENDOR_AMD,
1259 .family = 15,
1260 .model = 6,
1261 .stepping = 1,
1262 .features[FEAT_1_EDX] =
1263 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1264 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1265 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1266 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1267 CPUID_DE | CPUID_FP87,
1268 .features[FEAT_1_ECX] =
1269 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1270 /* Missing: CPUID_EXT2_RDTSCP */
1271 .features[FEAT_8000_0001_EDX] =
1272 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1273 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1274 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1275 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1276 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1277 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1278 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1279 .features[FEAT_8000_0001_ECX] =
1280 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1281 .xlevel = 0x80000008,
1282 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1283 },
1284 {
1285 .name = "Opteron_G3",
1286 .level = 5,
1287 .vendor = CPUID_VENDOR_AMD,
1288 .family = 15,
1289 .model = 6,
1290 .stepping = 1,
1291 .features[FEAT_1_EDX] =
1292 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1293 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1294 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1295 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1296 CPUID_DE | CPUID_FP87,
1297 .features[FEAT_1_ECX] =
1298 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1299 CPUID_EXT_SSE3,
1300 /* Missing: CPUID_EXT2_RDTSCP */
1301 .features[FEAT_8000_0001_EDX] =
1302 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1303 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1304 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1305 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1306 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1307 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1308 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1309 .features[FEAT_8000_0001_ECX] =
1310 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1311 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1312 .xlevel = 0x80000008,
1313 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1314 },
1315 {
1316 .name = "Opteron_G4",
1317 .level = 0xd,
1318 .vendor = CPUID_VENDOR_AMD,
1319 .family = 21,
1320 .model = 1,
1321 .stepping = 2,
1322 .features[FEAT_1_EDX] =
1323 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1324 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1325 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1326 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1327 CPUID_DE | CPUID_FP87,
1328 .features[FEAT_1_ECX] =
1329 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1330 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1331 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1332 CPUID_EXT_SSE3,
1333 /* Missing: CPUID_EXT2_RDTSCP */
1334 .features[FEAT_8000_0001_EDX] =
1335 CPUID_EXT2_LM |
1336 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1337 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1338 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1339 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1340 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1341 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1342 .features[FEAT_8000_0001_ECX] =
1343 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1344 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1345 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1346 CPUID_EXT3_LAHF_LM,
1347 /* no xsaveopt! */
1348 .xlevel = 0x8000001A,
1349 .model_id = "AMD Opteron 62xx class CPU",
1350 },
1351 {
1352 .name = "Opteron_G5",
1353 .level = 0xd,
1354 .vendor = CPUID_VENDOR_AMD,
1355 .family = 21,
1356 .model = 2,
1357 .stepping = 0,
1358 .features[FEAT_1_EDX] =
1359 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1360 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1361 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1362 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1363 CPUID_DE | CPUID_FP87,
1364 .features[FEAT_1_ECX] =
1365 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1366 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1367 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1368 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1369 /* Missing: CPUID_EXT2_RDTSCP */
1370 .features[FEAT_8000_0001_EDX] =
1371 CPUID_EXT2_LM |
1372 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1373 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1374 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1375 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1376 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1377 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1378 .features[FEAT_8000_0001_ECX] =
1379 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1380 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1381 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1382 CPUID_EXT3_LAHF_LM,
1383 /* no xsaveopt! */
1384 .xlevel = 0x8000001A,
1385 .model_id = "AMD Opteron 63xx class CPU",
1386 },
1387};
1388
1389typedef struct PropValue {
1390 const char *prop, *value;
1391} PropValue;
1392
1393/* KVM-specific features that are automatically added/removed
1394 * from all CPU models when KVM is enabled.
1395 */
1396static PropValue kvm_default_props[] = {
1397 { "kvmclock", "on" },
1398 { "kvm-nopiodelay", "on" },
1399 { "kvm-asyncpf", "on" },
1400 { "kvm-steal-time", "on" },
1401 { "kvm-pv-eoi", "on" },
1402 { "kvmclock-stable-bit", "on" },
1403 { "x2apic", "on" },
1404 { "acpi", "off" },
1405 { "monitor", "off" },
1406 { "svm", "off" },
1407 { NULL, NULL },
1408};
1409
1410void x86_cpu_change_kvm_default(const char *prop, const char *value)
1411{
1412 PropValue *pv;
1413 for (pv = kvm_default_props; pv->prop; pv++) {
1414 if (!strcmp(pv->prop, prop)) {
1415 pv->value = value;
1416 break;
1417 }
1418 }
1419
1420 /* It is valid to call this function only for properties that
1421 * are already present in the kvm_default_props table.
1422 */
1423 assert(pv->prop);
1424}
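/* Typical use: a caller (machine-type compat code, for example) can invoke
 * x86_cpu_change_kvm_default("x2apic", "off") to override one of the defaults
 * above; the assert() enforces that only properties already listed in
 * kvm_default_props may be changed.
 */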
1425
1426static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1427 bool migratable_only);
1428
1429#ifdef CONFIG_KVM
1430
1431static int cpu_x86_fill_model_id(char *str)
1432{
1433 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1434 int i;
1435
1436 for (i = 0; i < 3; i++) {
1437 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1438 memcpy(str + i * 16 + 0, &eax, 4);
1439 memcpy(str + i * 16 + 4, &ebx, 4);
1440 memcpy(str + i * 16 + 8, &ecx, 4);
1441 memcpy(str + i * 16 + 12, &edx, 4);
1442 }
1443 return 0;
1444}
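/* CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the processor
 * brand string, so the caller must provide a buffer of at least 48 bytes
 * (model_id[] in X86CPUDefinition is exactly that size).
 */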
1445
1446static X86CPUDefinition host_cpudef;
1447
1448static Property host_x86_cpu_properties[] = {
1449 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1450 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1451 DEFINE_PROP_END_OF_LIST()
1452};
1453
1454/* class_init for the "host" CPU model
1455 *
1456 * This function may be called before KVM is initialized.
1457 */
1458static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1459{
1460 DeviceClass *dc = DEVICE_CLASS(oc);
1461 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1462 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1463
1464 xcc->kvm_required = true;
1465
1466 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1467 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1468
1469 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
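    /* CPUID.01H:EAX encodes stepping in bits 3:0, model in 7:4, family in
     * 11:8, extended model in 19:16 and extended family in 27:20; the lines
     * below combine the base and extended fields into the displayed values.
     */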
1470 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1471 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1472 host_cpudef.stepping = eax & 0x0F;
1473
1474 cpu_x86_fill_model_id(host_cpudef.model_id);
1475
1476 xcc->cpu_def = &host_cpudef;
1477
1478 /* level, xlevel, xlevel2, and the feature words are initialized on
1479 * instance_init, because they require KVM to be initialized.
1480 */
1481
1482 dc->props = host_x86_cpu_properties;
1483 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1484 dc->cannot_destroy_with_object_finalize_yet = true;
1485}
1486
1487static void host_x86_cpu_initfn(Object *obj)
1488{
1489 X86CPU *cpu = X86_CPU(obj);
1490 CPUX86State *env = &cpu->env;
1491 KVMState *s = kvm_state;
1492
1493 assert(kvm_enabled());
1494
1495 /* We can't fill the features array here because we don't know yet if
1496 * "migratable" is true or false.
1497 */
1498 cpu->host_features = true;
1499
1500 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1501 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1502 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1503
1504 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1505}
1506
1507static const TypeInfo host_x86_cpu_type_info = {
1508 .name = X86_CPU_TYPE_NAME("host"),
1509 .parent = TYPE_X86_CPU,
1510 .instance_init = host_x86_cpu_initfn,
1511 .class_init = host_x86_cpu_class_init,
1512};
1513
1514#endif
1515
1516static void report_unavailable_features(FeatureWord w, uint32_t mask)
1517{
1518 FeatureWordInfo *f = &feature_word_info[w];
1519 int i;
1520
1521 for (i = 0; i < 32; ++i) {
1522 if ((1UL << i) & mask) {
1523 const char *reg = get_register_name_32(f->cpuid_reg);
1524 assert(reg);
1525 fprintf(stderr, "warning: %s doesn't support requested feature: "
1526 "CPUID.%02XH:%s%s%s [bit %d]\n",
1527 kvm_enabled() ? "host" : "TCG",
1528 f->cpuid_eax, reg,
1529 f->feat_names[i] ? "." : "",
1530 f->feat_names[i] ? f->feat_names[i] : "", i);
1531 }
1532 }
1533}
1534
1535static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1536 const char *name, void *opaque,
1537 Error **errp)
1538{
1539 X86CPU *cpu = X86_CPU(obj);
1540 CPUX86State *env = &cpu->env;
1541 int64_t value;
1542
1543 value = (env->cpuid_version >> 8) & 0xf;
1544 if (value == 0xf) {
1545 value += (env->cpuid_version >> 20) & 0xff;
1546 }
1547 visit_type_int(v, name, &value, errp);
1548}
1549
1550static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1551 const char *name, void *opaque,
1552 Error **errp)
1553{
1554 X86CPU *cpu = X86_CPU(obj);
1555 CPUX86State *env = &cpu->env;
1556 const int64_t min = 0;
1557 const int64_t max = 0xff + 0xf;
1558 Error *local_err = NULL;
1559 int64_t value;
1560
1561 visit_type_int(v, name, &value, &local_err);
1562 if (local_err) {
1563 error_propagate(errp, local_err);
1564 return;
1565 }
1566 if (value < min || value > max) {
1567 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1568 name ? name : "null", value, min, max);
1569 return;
1570 }
1571
1572 env->cpuid_version &= ~0xff00f00;
1573 if (value > 0x0f) {
1574 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1575 } else {
1576 env->cpuid_version |= value << 8;
1577 }
1578}
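/* Worked example: setting family=21 (0x15) stores base family 0xF and
 * extended family 6 (21 - 15), which the getter above recombines into 21.
 */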
1579
1580static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1581 const char *name, void *opaque,
1582 Error **errp)
1583{
1584 X86CPU *cpu = X86_CPU(obj);
1585 CPUX86State *env = &cpu->env;
1586 int64_t value;
1587
1588 value = (env->cpuid_version >> 4) & 0xf;
1589 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1590 visit_type_int(v, name, &value, errp);
1591}
1592
1593static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1594 const char *name, void *opaque,
1595 Error **errp)
1596{
1597 X86CPU *cpu = X86_CPU(obj);
1598 CPUX86State *env = &cpu->env;
1599 const int64_t min = 0;
1600 const int64_t max = 0xff;
1601 Error *local_err = NULL;
1602 int64_t value;
1603
1604 visit_type_int(v, name, &value, &local_err);
1605 if (local_err) {
1606 error_propagate(errp, local_err);
1607 return;
1608 }
1609 if (value < min || value > max) {
1610 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1611 name ? name : "null", value, min, max);
1612 return;
1613 }
1614
1615 env->cpuid_version &= ~0xf00f0;
1616 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1617}
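/* Worked example: setting model=58 (0x3A) stores base model 0xA in bits 7:4
 * and extended model 0x3 in bits 19:16, recombined to 58 by the getter above.
 */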
1618
1619static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1620 const char *name, void *opaque,
1621 Error **errp)
1622{
1623 X86CPU *cpu = X86_CPU(obj);
1624 CPUX86State *env = &cpu->env;
1625 int64_t value;
1626
1627 value = env->cpuid_version & 0xf;
1628 visit_type_int(v, name, &value, errp);
1629}
1630
1631static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1632 const char *name, void *opaque,
1633 Error **errp)
1634{
1635 X86CPU *cpu = X86_CPU(obj);
1636 CPUX86State *env = &cpu->env;
1637 const int64_t min = 0;
1638 const int64_t max = 0xf;
1639 Error *local_err = NULL;
1640 int64_t value;
1641
1642 visit_type_int(v, name, &value, &local_err);
1643 if (local_err) {
1644 error_propagate(errp, local_err);
1645 return;
1646 }
1647 if (value < min || value > max) {
1648 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1649 name ? name : "null", value, min, max);
1650 return;
1651 }
1652
1653 env->cpuid_version &= ~0xf;
1654 env->cpuid_version |= value & 0xf;
1655}
1656
1657static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1658{
1659 X86CPU *cpu = X86_CPU(obj);
1660 CPUX86State *env = &cpu->env;
1661 char *value;
1662
1663 value = g_malloc(CPUID_VENDOR_SZ + 1);
1664 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1665 env->cpuid_vendor3);
1666 return value;
1667}
1668
1669static void x86_cpuid_set_vendor(Object *obj, const char *value,
1670 Error **errp)
1671{
1672 X86CPU *cpu = X86_CPU(obj);
1673 CPUX86State *env = &cpu->env;
1674 int i;
1675
1676 if (strlen(value) != CPUID_VENDOR_SZ) {
1677 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1678 return;
1679 }
1680
1681 env->cpuid_vendor1 = 0;
1682 env->cpuid_vendor2 = 0;
1683 env->cpuid_vendor3 = 0;
1684 for (i = 0; i < 4; i++) {
1685 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1686 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1687 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1688 }
1689}
1690
1691static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1692{
1693 X86CPU *cpu = X86_CPU(obj);
1694 CPUX86State *env = &cpu->env;
1695 char *value;
1696 int i;
1697
1698 value = g_malloc(48 + 1);
1699 for (i = 0; i < 48; i++) {
1700 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1701 }
1702 value[48] = '\0';
1703 return value;
1704}
1705
1706static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1707 Error **errp)
1708{
1709 X86CPU *cpu = X86_CPU(obj);
1710 CPUX86State *env = &cpu->env;
1711 int c, len, i;
1712
1713 if (model_id == NULL) {
1714 model_id = "";
1715 }
1716 len = strlen(model_id);
1717 memset(env->cpuid_model, 0, 48);
1718 for (i = 0; i < 48; i++) {
1719 if (i >= len) {
1720 c = '\0';
1721 } else {
1722 c = (uint8_t)model_id[i];
1723 }
1724 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1725 }
1726}
1727
1728static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1729 void *opaque, Error **errp)
1730{
1731 X86CPU *cpu = X86_CPU(obj);
1732 int64_t value;
1733
1734 value = cpu->env.tsc_khz * 1000;
1735 visit_type_int(v, name, &value, errp);
1736}
1737
1738static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1739 void *opaque, Error **errp)
1740{
1741 X86CPU *cpu = X86_CPU(obj);
1742 const int64_t min = 0;
1743 const int64_t max = INT64_MAX;
1744 Error *local_err = NULL;
1745 int64_t value;
1746
1747 visit_type_int(v, name, &value, &local_err);
1748 if (local_err) {
1749 error_propagate(errp, local_err);
1750 return;
1751 }
1752 if (value < min || value > max) {
1753 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1754 name ? name : "null", value, min, max);
1755 return;
1756 }
1757
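    /* The "tsc-frequency" property is expressed in Hz; the internal
     * tsc_khz/user_tsc_khz fields are kept in kHz. */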
1758 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1759}
1760
1761static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1762 void *opaque, Error **errp)
1763{
1764 X86CPU *cpu = X86_CPU(obj);
1765 int64_t value = cpu->apic_id;
1766
1767 visit_type_int(v, name, &value, errp);
1768}
1769
1770static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1771 void *opaque, Error **errp)
1772{
1773 X86CPU *cpu = X86_CPU(obj);
1774 DeviceState *dev = DEVICE(obj);
1775 const int64_t min = 0;
1776 const int64_t max = UINT32_MAX;
1777 Error *error = NULL;
1778 int64_t value;
1779
1780 if (dev->realized) {
1781 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1782 "it was realized", name, object_get_typename(obj));
1783 return;
1784 }
1785
1786 visit_type_int(v, name, &value, &error);
1787 if (error) {
1788 error_propagate(errp, error);
1789 return;
1790 }
1791 if (value < min || value > max) {
1792 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1793 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1794 object_get_typename(obj), name, value, min, max);
1795 return;
1796 }
1797
1798 if ((value != cpu->apic_id) && cpu_exists(value)) {
1799 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1800 return;
1801 }
1802 cpu->apic_id = value;
1803}
1804
1805/* Generic getter for "feature-words" and "filtered-features" properties */
1806static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1807 const char *name, void *opaque,
1808 Error **errp)
1809{
1810 uint32_t *array = (uint32_t *)opaque;
1811 FeatureWord w;
1812 Error *err = NULL;
1813 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1814 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1815 X86CPUFeatureWordInfoList *list = NULL;
1816
1817 for (w = 0; w < FEATURE_WORDS; w++) {
1818 FeatureWordInfo *wi = &feature_word_info[w];
1819 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1820 qwi->cpuid_input_eax = wi->cpuid_eax;
1821 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1822 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1823 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1824 qwi->features = array[w];
1825
1826 /* List will be in reverse order, but order shouldn't matter */
1827 list_entries[w].next = list;
1828 list_entries[w].value = &word_infos[w];
1829 list = &list_entries[w];
1830 }
1831
1832 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1833 error_propagate(errp, err);
1834}
1835
1836static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1837 void *opaque, Error **errp)
1838{
1839 X86CPU *cpu = X86_CPU(obj);
1840 int64_t value = cpu->hyperv_spinlock_attempts;
1841
1842 visit_type_int(v, name, &value, errp);
1843}
1844
1845static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1846 void *opaque, Error **errp)
1847{
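    /* Hyper-V spinlock retry counts below 0xFFF are rejected as bogus; the
     * default is HYPERV_SPINLOCK_NEVER_RETRY (0xFFFFFFFF). */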
1848 const int64_t min = 0xFFF;
1849 const int64_t max = UINT_MAX;
1850 X86CPU *cpu = X86_CPU(obj);
1851 Error *err = NULL;
1852 int64_t value;
1853
1854 visit_type_int(v, name, &value, &err);
1855 if (err) {
1856 error_propagate(errp, err);
1857 return;
1858 }
1859
1860 if (value < min || value > max) {
1861 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1862 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1863 object_get_typename(obj), name ? name : "null",
1864 value, min, max);
1865 return;
1866 }
1867 cpu->hyperv_spinlock_attempts = value;
1868}
1869
1870static PropertyInfo qdev_prop_spinlocks = {
1871 .name = "int",
1872 .get = x86_get_hv_spinlocks,
1873 .set = x86_set_hv_spinlocks,
1874};
1875
 1876 /* Convert all '_' in a feature string option name to '-', so the feature
 1877 * name conforms to the QOM property naming rule, which uses '-' instead of '_'.
1878 */
1879static inline void feat2prop(char *s)
1880{
1881 while ((s = strchr(s, '_'))) {
1882 *s = '-';
1883 }
1884}
1885
1886/* Parse "+feature,-feature,feature=foo" CPU feature string
1887 */
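/* For example, a hypothetical "-cpu qemu64,+avx,-sse4.2,xlevel=0x8000000A"
 * string adds AVX, removes SSE4.2 and sets the "xlevel" property; other
 * "name=value" pairs are handed directly to object_property_parse(), and a
 * bare name is treated as "name=on".
 */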
1888static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1889 Error **errp)
1890{
1891 X86CPU *cpu = X86_CPU(cs);
 1892 char *featurestr; /* Single "key=value" string being parsed */
1893 FeatureWord w;
1894 /* Features to be added */
1895 FeatureWordArray plus_features = { 0 };
1896 /* Features to be removed */
1897 FeatureWordArray minus_features = { 0 };
1898 uint32_t numvalue;
1899 CPUX86State *env = &cpu->env;
1900 Error *local_err = NULL;
1901
1902 featurestr = features ? strtok(features, ",") : NULL;
1903
1904 while (featurestr) {
1905 char *val;
1906 if (featurestr[0] == '+') {
1907 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1908 } else if (featurestr[0] == '-') {
1909 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1910 } else if ((val = strchr(featurestr, '='))) {
1911 *val = 0; val++;
1912 feat2prop(featurestr);
1913 if (!strcmp(featurestr, "xlevel")) {
1914 char *err;
1915 char num[32];
1916
1917 numvalue = strtoul(val, &err, 0);
1918 if (!*val || *err) {
1919 error_setg(errp, "bad numerical value %s", val);
1920 return;
1921 }
1922 if (numvalue < 0x80000000) {
1923 error_report("xlevel value shall always be >= 0x80000000"
1924 ", fixup will be removed in future versions");
1925 numvalue += 0x80000000;
1926 }
1927 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1928 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1929 } else if (!strcmp(featurestr, "tsc-freq")) {
1930 int64_t tsc_freq;
1931 char *err;
1932 char num[32];
1933
1934 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1935 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1936 if (tsc_freq < 0 || *err) {
1937 error_setg(errp, "bad numerical value %s", val);
1938 return;
1939 }
1940 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1941 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1942 &local_err);
1943 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1944 char *err;
1945 const int min = 0xFFF;
1946 char num[32];
1947 numvalue = strtoul(val, &err, 0);
1948 if (!*val || *err) {
1949 error_setg(errp, "bad numerical value %s", val);
1950 return;
1951 }
1952 if (numvalue < min) {
1953 error_report("hv-spinlocks value shall always be >= 0x%x"
1954 ", fixup will be removed in future versions",
1955 min);
1956 numvalue = min;
1957 }
 1958 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1959 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1960 } else {
1961 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1962 }
1963 } else {
1964 feat2prop(featurestr);
1965 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1966 }
1967 if (local_err) {
1968 error_propagate(errp, local_err);
1969 return;
1970 }
1971 featurestr = strtok(NULL, ",");
1972 }
1973
1974 if (cpu->host_features) {
1975 for (w = 0; w < FEATURE_WORDS; w++) {
1976 env->features[w] =
1977 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1978 }
1979 }
1980
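    /* Apply explicit '+'/'-' flags last; if the same bit appears in both
     * maps, the '-' wins because the mask below clears it after the OR. */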
1981 for (w = 0; w < FEATURE_WORDS; w++) {
1982 env->features[w] |= plus_features[w];
1983 env->features[w] &= ~minus_features[w];
1984 }
1985}
1986
1987/* Print all cpuid feature names in featureset
1988 */
1989static void listflags(FILE *f, fprintf_function print, const char **featureset)
1990{
1991 int bit;
1992 bool first = true;
1993
1994 for (bit = 0; bit < 32; bit++) {
1995 if (featureset[bit]) {
1996 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1997 first = false;
1998 }
1999 }
2000}
2001
2002/* generate CPU information. */
2003void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2004{
2005 X86CPUDefinition *def;
2006 char buf[256];
2007 int i;
2008
2009 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2010 def = &builtin_x86_defs[i];
2011 snprintf(buf, sizeof(buf), "%s", def->name);
2012 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2013 }
2014#ifdef CONFIG_KVM
2015 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2016 "KVM processor with all supported host features "
2017 "(only available in KVM mode)");
2018#endif
2019
2020 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2021 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2022 FeatureWordInfo *fw = &feature_word_info[i];
2023
2024 (*cpu_fprintf)(f, " ");
2025 listflags(f, cpu_fprintf, fw->feat_names);
2026 (*cpu_fprintf)(f, "\n");
2027 }
2028}
2029
2030CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2031{
2032 CpuDefinitionInfoList *cpu_list = NULL;
2033 X86CPUDefinition *def;
2034 int i;
2035
2036 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2037 CpuDefinitionInfoList *entry;
2038 CpuDefinitionInfo *info;
2039
2040 def = &builtin_x86_defs[i];
2041 info = g_malloc0(sizeof(*info));
2042 info->name = g_strdup(def->name);
2043
2044 entry = g_malloc0(sizeof(*entry));
2045 entry->value = info;
2046 entry->next = cpu_list;
2047 cpu_list = entry;
2048 }
2049
2050 return cpu_list;
2051}
2052
2053static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2054 bool migratable_only)
2055{
2056 FeatureWordInfo *wi = &feature_word_info[w];
2057 uint32_t r;
2058
2059 if (kvm_enabled()) {
2060 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2061 wi->cpuid_ecx,
2062 wi->cpuid_reg);
2063 } else if (tcg_enabled()) {
2064 r = wi->tcg_features;
2065 } else {
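        /* Neither KVM nor TCG is in use (e.g. qtest); report everything as
         * supported so that no feature filtering takes place. */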
2066 return ~0;
2067 }
2068 if (migratable_only) {
2069 r &= x86_cpu_get_migratable_flags(w);
2070 }
2071 return r;
2072}
2073
2074/*
2075 * Filters CPU feature words based on host availability of each feature.
2076 *
2077 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2078 */
2079static int x86_cpu_filter_features(X86CPU *cpu)
2080{
2081 CPUX86State *env = &cpu->env;
2082 FeatureWord w;
2083 int rv = 0;
2084
2085 for (w = 0; w < FEATURE_WORDS; w++) {
2086 uint32_t host_feat =
2087 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2088 uint32_t requested_features = env->features[w];
2089 env->features[w] &= host_feat;
2090 cpu->filtered_features[w] = requested_features & ~env->features[w];
2091 if (cpu->filtered_features[w]) {
2092 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2093 report_unavailable_features(w, cpu->filtered_features[w]);
2094 }
2095 rv = 1;
2096 }
2097 }
2098
2099 return rv;
2100}
2101
2102static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2103{
2104 PropValue *pv;
2105 for (pv = props; pv->prop; pv++) {
2106 if (!pv->value) {
2107 continue;
2108 }
2109 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2110 &error_abort);
2111 }
2112}
2113
2114/* Load data from X86CPUDefinition
2115 */
2116static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2117{
2118 CPUX86State *env = &cpu->env;
2119 const char *vendor;
2120 char host_vendor[CPUID_VENDOR_SZ + 1];
2121 FeatureWord w;
2122
2123 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2124 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2125 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2126 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2127 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2128 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2129 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2130 for (w = 0; w < FEATURE_WORDS; w++) {
2131 env->features[w] = def->features[w];
2132 }
2133
2134 /* Special cases not set in the X86CPUDefinition structs: */
2135 if (kvm_enabled()) {
2136 if (!kvm_irqchip_in_kernel()) {
2137 x86_cpu_change_kvm_default("x2apic", "off");
2138 }
2139
2140 x86_cpu_apply_props(cpu, kvm_default_props);
2141 }
2142
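    /* CPUID[1].ECX bit 31 tells the guest it is running under a hypervisor. */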
2143 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2144
2145 /* sysenter isn't supported in compatibility mode on AMD,
2146 * syscall isn't supported in compatibility mode on Intel.
2147 * Normally we advertise the actual CPU vendor, but you can
2148 * override this using the 'vendor' property if you want to use
2149 * KVM's sysenter/syscall emulation in compatibility mode and
 2150 * when doing cross-vendor migration.
2151 */
2152 vendor = def->vendor;
2153 if (kvm_enabled()) {
2154 uint32_t ebx = 0, ecx = 0, edx = 0;
2155 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2156 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2157 vendor = host_vendor;
2158 }
2159
2160 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2161
2162}
2163
2164X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2165{
2166 X86CPU *cpu = NULL;
2167 X86CPUClass *xcc;
2168 ObjectClass *oc;
2169 gchar **model_pieces;
2170 char *name, *features;
2171 Error *error = NULL;
2172
2173 model_pieces = g_strsplit(cpu_model, ",", 2);
2174 if (!model_pieces[0]) {
2175 error_setg(&error, "Invalid/empty CPU model name");
2176 goto out;
2177 }
2178 name = model_pieces[0];
2179 features = model_pieces[1];
2180
2181 oc = x86_cpu_class_by_name(name);
2182 if (oc == NULL) {
2183 error_setg(&error, "Unable to find CPU definition: %s", name);
2184 goto out;
2185 }
2186 xcc = X86_CPU_CLASS(oc);
2187
2188 if (xcc->kvm_required && !kvm_enabled()) {
2189 error_setg(&error, "CPU model '%s' requires KVM", name);
2190 goto out;
2191 }
2192
2193 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2194
2195 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2196 if (error) {
2197 goto out;
2198 }
2199
2200out:
2201 if (error != NULL) {
2202 error_propagate(errp, error);
2203 if (cpu) {
2204 object_unref(OBJECT(cpu));
2205 cpu = NULL;
2206 }
2207 }
2208 g_strfreev(model_pieces);
2209 return cpu;
2210}
2211
2212X86CPU *cpu_x86_init(const char *cpu_model)
2213{
2214 Error *error = NULL;
2215 X86CPU *cpu;
2216
2217 cpu = cpu_x86_create(cpu_model, &error);
2218 if (error) {
2219 goto out;
2220 }
2221
2222 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2223
2224out:
2225 if (error) {
2226 error_report_err(error);
2227 if (cpu != NULL) {
2228 object_unref(OBJECT(cpu));
2229 cpu = NULL;
2230 }
2231 }
2232 return cpu;
2233}
2234
2235static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2236{
2237 X86CPUDefinition *cpudef = data;
2238 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2239
2240 xcc->cpu_def = cpudef;
2241}
2242
2243static void x86_register_cpudef_type(X86CPUDefinition *def)
2244{
2245 char *typename = x86_cpu_type_name(def->name);
2246 TypeInfo ti = {
2247 .name = typename,
2248 .parent = TYPE_X86_CPU,
2249 .class_init = x86_cpu_cpudef_class_init,
2250 .class_data = def,
2251 };
2252
2253 type_register(&ti);
2254 g_free(typename);
2255}
2256
2257#if !defined(CONFIG_USER_ONLY)
2258
2259void cpu_clear_apic_feature(CPUX86State *env)
2260{
2261 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2262}
2263
2264#endif /* !CONFIG_USER_ONLY */
2265
2266/* Initialize list of CPU models, filling some non-static fields if necessary
2267 */
2268void x86_cpudef_setup(void)
2269{
2270 int i, j;
2271 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2272
2273 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2274 X86CPUDefinition *def = &builtin_x86_defs[i];
2275
 2276 /* Look for specific "cpudef" models that
 2277 * have the QEMU version in .model_id */
2278 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2279 if (strcmp(model_with_versions[j], def->name) == 0) {
2280 pstrcpy(def->model_id, sizeof(def->model_id),
2281 "QEMU Virtual CPU version ");
2282 pstrcat(def->model_id, sizeof(def->model_id),
2283 qemu_hw_version());
2284 break;
2285 }
2286 }
2287 }
2288}
2289
2290void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2291 uint32_t *eax, uint32_t *ebx,
2292 uint32_t *ecx, uint32_t *edx)
2293{
2294 X86CPU *cpu = x86_env_get_cpu(env);
2295 CPUState *cs = CPU(cpu);
2296
2297 /* test if maximum index reached */
2298 if (index & 0x80000000) {
2299 if (index > env->cpuid_xlevel) {
2300 if (env->cpuid_xlevel2 > 0) {
2301 /* Handle the Centaur's CPUID instruction. */
2302 if (index > env->cpuid_xlevel2) {
2303 index = env->cpuid_xlevel2;
2304 } else if (index < 0xC0000000) {
2305 index = env->cpuid_xlevel;
2306 }
2307 } else {
2308 /* Intel documentation states that invalid EAX input will
2309 * return the same information as EAX=cpuid_level
2310 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2311 */
2312 index = env->cpuid_level;
2313 }
2314 }
2315 } else {
2316 if (index > env->cpuid_level)
2317 index = env->cpuid_level;
2318 }
2319
2320 switch(index) {
2321 case 0:
2322 *eax = env->cpuid_level;
2323 *ebx = env->cpuid_vendor1;
2324 *edx = env->cpuid_vendor2;
2325 *ecx = env->cpuid_vendor3;
2326 break;
2327 case 1:
2328 *eax = env->cpuid_version;
2329 *ebx = (cpu->apic_id << 24) |
 2330 8 << 8; /* CLFLUSH line size, in quadwords: 8 * 8 = 64 bytes. Linux reads it. */
2331 *ecx = env->features[FEAT_1_ECX];
2332 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2333 *ecx |= CPUID_EXT_OSXSAVE;
2334 }
2335 *edx = env->features[FEAT_1_EDX];
2336 if (cs->nr_cores * cs->nr_threads > 1) {
2337 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2338 *edx |= CPUID_HT;
2339 }
2340 break;
2341 case 2:
2342 /* cache info: needed for Pentium Pro compatibility */
2343 if (cpu->cache_info_passthrough) {
2344 host_cpuid(index, 0, eax, ebx, ecx, edx);
2345 break;
2346 }
2347 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2348 *ebx = 0;
2349 *ecx = 0;
2350 *edx = (L1D_DESCRIPTOR << 16) | \
2351 (L1I_DESCRIPTOR << 8) | \
2352 (L2_DESCRIPTOR);
2353 break;
2354 case 4:
2355 /* cache info: needed for Core compatibility */
2356 if (cpu->cache_info_passthrough) {
2357 host_cpuid(index, count, eax, ebx, ecx, edx);
2358 *eax &= ~0xFC000000;
2359 } else {
2360 *eax = 0;
2361 switch (count) {
2362 case 0: /* L1 dcache info */
2363 *eax |= CPUID_4_TYPE_DCACHE | \
2364 CPUID_4_LEVEL(1) | \
2365 CPUID_4_SELF_INIT_LEVEL;
2366 *ebx = (L1D_LINE_SIZE - 1) | \
2367 ((L1D_PARTITIONS - 1) << 12) | \
2368 ((L1D_ASSOCIATIVITY - 1) << 22);
2369 *ecx = L1D_SETS - 1;
2370 *edx = CPUID_4_NO_INVD_SHARING;
2371 break;
2372 case 1: /* L1 icache info */
2373 *eax |= CPUID_4_TYPE_ICACHE | \
2374 CPUID_4_LEVEL(1) | \
2375 CPUID_4_SELF_INIT_LEVEL;
2376 *ebx = (L1I_LINE_SIZE - 1) | \
2377 ((L1I_PARTITIONS - 1) << 12) | \
2378 ((L1I_ASSOCIATIVITY - 1) << 22);
2379 *ecx = L1I_SETS - 1;
2380 *edx = CPUID_4_NO_INVD_SHARING;
2381 break;
2382 case 2: /* L2 cache info */
2383 *eax |= CPUID_4_TYPE_UNIFIED | \
2384 CPUID_4_LEVEL(2) | \
2385 CPUID_4_SELF_INIT_LEVEL;
2386 if (cs->nr_threads > 1) {
2387 *eax |= (cs->nr_threads - 1) << 14;
2388 }
2389 *ebx = (L2_LINE_SIZE - 1) | \
2390 ((L2_PARTITIONS - 1) << 12) | \
2391 ((L2_ASSOCIATIVITY - 1) << 22);
2392 *ecx = L2_SETS - 1;
2393 *edx = CPUID_4_NO_INVD_SHARING;
2394 break;
2395 default: /* end of info */
2396 *eax = 0;
2397 *ebx = 0;
2398 *ecx = 0;
2399 *edx = 0;
2400 break;
2401 }
2402 }
2403
2404 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2405 if ((*eax & 31) && cs->nr_cores > 1) {
2406 *eax |= (cs->nr_cores - 1) << 26;
2407 }
2408 break;
2409 case 5:
2410 /* mwait info: needed for Core compatibility */
2411 *eax = 0; /* Smallest monitor-line size in bytes */
2412 *ebx = 0; /* Largest monitor-line size in bytes */
2413 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2414 *edx = 0;
2415 break;
2416 case 6:
2417 /* Thermal and Power Leaf */
2418 *eax = env->features[FEAT_6_EAX];
2419 *ebx = 0;
2420 *ecx = 0;
2421 *edx = 0;
2422 break;
2423 case 7:
2424 /* Structured Extended Feature Flags Enumeration Leaf */
2425 if (count == 0) {
2426 *eax = 0; /* Maximum ECX value for sub-leaves */
2427 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2428 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2429 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2430 *ecx |= CPUID_7_0_ECX_OSPKE;
2431 }
2432 *edx = 0; /* Reserved */
2433 } else {
2434 *eax = 0;
2435 *ebx = 0;
2436 *ecx = 0;
2437 *edx = 0;
2438 }
2439 break;
2440 case 9:
2441 /* Direct Cache Access Information Leaf */
2442 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2443 *ebx = 0;
2444 *ecx = 0;
2445 *edx = 0;
2446 break;
2447 case 0xA:
2448 /* Architectural Performance Monitoring Leaf */
2449 if (kvm_enabled() && cpu->enable_pmu) {
2450 KVMState *s = cs->kvm_state;
2451
2452 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2453 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2454 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2455 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2456 } else {
2457 *eax = 0;
2458 *ebx = 0;
2459 *ecx = 0;
2460 *edx = 0;
2461 }
2462 break;
2463 case 0xD: {
2464 KVMState *s = cs->kvm_state;
2465 uint64_t ena_mask;
2466 int i;
2467
2468 /* Processor Extended State */
2469 *eax = 0;
2470 *ebx = 0;
2471 *ecx = 0;
2472 *edx = 0;
2473 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2474 break;
2475 }
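        /* ena_mask is the set of XSAVE components the accelerator can expose:
         * CPUID.0DH(0).EDX:EAX when KVM is in use, or all-ones otherwise. */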
2476 if (kvm_enabled()) {
2477 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2478 ena_mask <<= 32;
2479 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2480 } else {
2481 ena_mask = -1;
2482 }
2483
2484 if (count == 0) {
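            /* 0x240 bytes = 512-byte legacy FXSAVE region + 64-byte XSAVE header. */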
2485 *ecx = 0x240;
2486 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2487 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2488 if ((env->features[esa->feature] & esa->bits) == esa->bits
2489 && ((ena_mask >> i) & 1) != 0) {
2490 if (i < 32) {
2491 *eax |= 1u << i;
2492 } else {
2493 *edx |= 1u << (i - 32);
2494 }
2495 *ecx = MAX(*ecx, esa->offset + esa->size);
2496 }
2497 }
2498 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2499 *ebx = *ecx;
2500 } else if (count == 1) {
2501 *eax = env->features[FEAT_XSAVE];
2502 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2503 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2504 if ((env->features[esa->feature] & esa->bits) == esa->bits
2505 && ((ena_mask >> count) & 1) != 0) {
2506 *eax = esa->size;
2507 *ebx = esa->offset;
2508 }
2509 }
2510 break;
2511 }
2512 case 0x80000000:
2513 *eax = env->cpuid_xlevel;
2514 *ebx = env->cpuid_vendor1;
2515 *edx = env->cpuid_vendor2;
2516 *ecx = env->cpuid_vendor3;
2517 break;
2518 case 0x80000001:
2519 *eax = env->cpuid_version;
2520 *ebx = 0;
2521 *ecx = env->features[FEAT_8000_0001_ECX];
2522 *edx = env->features[FEAT_8000_0001_EDX];
2523
2524 /* The Linux kernel checks for the CMPLegacy bit and
2525 * discards multiple thread information if it is set.
2526 * So don't set it here for Intel to make Linux guests happy.
2527 */
2528 if (cs->nr_cores * cs->nr_threads > 1) {
2529 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2530 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2531 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2532 *ecx |= 1 << 1; /* CmpLegacy bit */
2533 }
2534 }
2535 break;
2536 case 0x80000002:
2537 case 0x80000003:
2538 case 0x80000004:
2539 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2540 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2541 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2542 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2543 break;
2544 case 0x80000005:
2545 /* cache info (L1 cache) */
2546 if (cpu->cache_info_passthrough) {
2547 host_cpuid(index, 0, eax, ebx, ecx, edx);
2548 break;
2549 }
2550 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2551 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2552 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2553 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2554 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2555 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2556 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2557 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2558 break;
2559 case 0x80000006:
2560 /* cache info (L2 cache) */
2561 if (cpu->cache_info_passthrough) {
2562 host_cpuid(index, 0, eax, ebx, ecx, edx);
2563 break;
2564 }
2565 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2566 (L2_DTLB_2M_ENTRIES << 16) | \
2567 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2568 (L2_ITLB_2M_ENTRIES);
2569 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2570 (L2_DTLB_4K_ENTRIES << 16) | \
2571 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2572 (L2_ITLB_4K_ENTRIES);
2573 *ecx = (L2_SIZE_KB_AMD << 16) | \
2574 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2575 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2576 *edx = ((L3_SIZE_KB/512) << 18) | \
2577 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2578 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2579 break;
2580 case 0x80000007:
2581 *eax = 0;
2582 *ebx = 0;
2583 *ecx = 0;
2584 *edx = env->features[FEAT_8000_0007_EDX];
2585 break;
2586 case 0x80000008:
2587 /* virtual & phys address size in low 2 bytes. */
 2588 /* XXX: This value must match the one used in the MMU code. */
2589 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2590 /* 64 bit processor */
 2591 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2592 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2593 } else {
2594 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2595 *eax = 0x00000024; /* 36 bits physical */
2596 } else {
2597 *eax = 0x00000020; /* 32 bits physical */
2598 }
2599 }
2600 *ebx = 0;
2601 *ecx = 0;
2602 *edx = 0;
2603 if (cs->nr_cores * cs->nr_threads > 1) {
2604 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2605 }
2606 break;
2607 case 0x8000000A:
2608 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2609 *eax = 0x00000001; /* SVM Revision */
2610 *ebx = 0x00000010; /* nr of ASIDs */
2611 *ecx = 0;
2612 *edx = env->features[FEAT_SVM]; /* optional features */
2613 } else {
2614 *eax = 0;
2615 *ebx = 0;
2616 *ecx = 0;
2617 *edx = 0;
2618 }
2619 break;
2620 case 0xC0000000:
2621 *eax = env->cpuid_xlevel2;
2622 *ebx = 0;
2623 *ecx = 0;
2624 *edx = 0;
2625 break;
2626 case 0xC0000001:
2627 /* Support for VIA CPU's CPUID instruction */
2628 *eax = env->cpuid_version;
2629 *ebx = 0;
2630 *ecx = 0;
2631 *edx = env->features[FEAT_C000_0001_EDX];
2632 break;
2633 case 0xC0000002:
2634 case 0xC0000003:
2635 case 0xC0000004:
2636 /* Reserved for the future, and now filled with zero */
2637 *eax = 0;
2638 *ebx = 0;
2639 *ecx = 0;
2640 *edx = 0;
2641 break;
2642 default:
2643 /* reserved values: zero */
2644 *eax = 0;
2645 *ebx = 0;
2646 *ecx = 0;
2647 *edx = 0;
2648 break;
2649 }
2650}
2651
2652/* CPUClass::reset() */
2653static void x86_cpu_reset(CPUState *s)
2654{
2655 X86CPU *cpu = X86_CPU(s);
2656 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2657 CPUX86State *env = &cpu->env;
2658 target_ulong cr4;
2659 uint64_t xcr0;
2660 int i;
2661
2662 xcc->parent_reset(s);
2663
2664 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2665
2666 tlb_flush(s, 1);
2667
2668 env->old_exception = -1;
2669
2670 /* init to reset state */
2671
2672#ifdef CONFIG_SOFTMMU
2673 env->hflags |= HF_SOFTMMU_MASK;
2674#endif
2675 env->hflags2 |= HF2_GIF_MASK;
2676
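    /* CR0 reset value 0x60000010: CD, NW and ET set, protection and paging off. */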
2677 cpu_x86_update_cr0(env, 0x60000010);
2678 env->a20_mask = ~0x0;
2679 env->smbase = 0x30000;
2680
2681 env->idt.limit = 0xffff;
2682 env->gdt.limit = 0xffff;
2683 env->ldt.limit = 0xffff;
2684 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2685 env->tr.limit = 0xffff;
2686 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2687
2688 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2689 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2690 DESC_R_MASK | DESC_A_MASK);
2691 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2692 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2693 DESC_A_MASK);
2694 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2695 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2696 DESC_A_MASK);
2697 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2698 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2699 DESC_A_MASK);
2700 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2701 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2702 DESC_A_MASK);
2703 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2704 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2705 DESC_A_MASK);
2706
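    /* Reset vector: CS base 0xffff0000 plus EIP 0xfff0 yields 0xfffffff0. */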
2707 env->eip = 0xfff0;
2708 env->regs[R_EDX] = env->cpuid_version;
2709
2710 env->eflags = 0x2;
2711
2712 /* FPU init */
2713 for (i = 0; i < 8; i++) {
2714 env->fptags[i] = 1;
2715 }
2716 cpu_set_fpuc(env, 0x37f);
2717
2718 env->mxcsr = 0x1f80;
2719 /* All units are in INIT state. */
2720 env->xstate_bv = 0;
2721
2722 env->pat = 0x0007040600070406ULL;
2723 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2724
2725 memset(env->dr, 0, sizeof(env->dr));
2726 env->dr[6] = DR6_FIXED_1;
2727 env->dr[7] = DR7_FIXED_1;
2728 cpu_breakpoint_remove_all(s, BP_CPU);
2729 cpu_watchpoint_remove_all(s, BP_CPU);
2730
2731 cr4 = 0;
2732 xcr0 = XSTATE_FP_MASK;
2733
2734#ifdef CONFIG_USER_ONLY
2735 /* Enable all the features for user-mode. */
2736 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2737 xcr0 |= XSTATE_SSE_MASK;
2738 }
2739 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2740 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2741 if ((env->features[esa->feature] & esa->bits) == esa->bits) {
2742 xcr0 |= 1ull << i;
2743 }
2744 }
2745
2746 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2747 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2748 }
2749 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2750 cr4 |= CR4_FSGSBASE_MASK;
2751 }
2752#endif
2753
2754 env->xcr0 = xcr0;
2755 cpu_x86_update_cr4(env, cr4);
2756
2757 /*
2758 * SDM 11.11.5 requires:
2759 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2760 * - IA32_MTRR_PHYSMASKn.V = 0
2761 * All other bits are undefined. For simplification, zero it all.
2762 */
2763 env->mtrr_deftype = 0;
2764 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2765 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2766
2767#if !defined(CONFIG_USER_ONLY)
2768 /* We hard-wire the BSP to the first CPU. */
2769 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2770
2771 s->halted = !cpu_is_bsp(cpu);
2772
2773 if (kvm_enabled()) {
2774 kvm_arch_reset_vcpu(cpu);
2775 }
2776#endif
2777}
2778
2779#ifndef CONFIG_USER_ONLY
2780bool cpu_is_bsp(X86CPU *cpu)
2781{
2782 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2783}
2784
 2785 /* TODO: remove me when reset over the QOM tree is implemented */
2786static void x86_cpu_machine_reset_cb(void *opaque)
2787{
2788 X86CPU *cpu = opaque;
2789 cpu_reset(CPU(cpu));
2790}
2791#endif
2792
2793static void mce_init(X86CPU *cpu)
2794{
2795 CPUX86State *cenv = &cpu->env;
2796 unsigned int bank;
2797
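    /* Expose MCE only for family >= 6 models that advertise both the MCE and
     * MCA feature bits; every bank is enabled with an all-ones control value. */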
2798 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2799 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2800 (CPUID_MCE | CPUID_MCA)) {
2801 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2802 cenv->mcg_ctl = ~(uint64_t)0;
2803 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2804 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2805 }
2806 }
2807}
2808
2809#ifndef CONFIG_USER_ONLY
2810static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2811{
2812 APICCommonState *apic;
2813 const char *apic_type = "apic";
2814
2815 if (kvm_apic_in_kernel()) {
2816 apic_type = "kvm-apic";
2817 } else if (xen_enabled()) {
2818 apic_type = "xen-apic";
2819 }
2820
2821 cpu->apic_state = DEVICE(object_new(apic_type));
2822
2823 object_property_add_child(OBJECT(cpu), "apic",
2824 OBJECT(cpu->apic_state), NULL);
2825 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2826 /* TODO: convert to link<> */
2827 apic = APIC_COMMON(cpu->apic_state);
2828 apic->cpu = cpu;
2829 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2830}
2831
2832static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2833{
2834 APICCommonState *apic;
2835 static bool apic_mmio_map_once;
2836
2837 if (cpu->apic_state == NULL) {
2838 return;
2839 }
2840 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2841 errp);
2842
2843 /* Map APIC MMIO area */
2844 apic = APIC_COMMON(cpu->apic_state);
2845 if (!apic_mmio_map_once) {
2846 memory_region_add_subregion_overlap(get_system_memory(),
2847 apic->apicbase &
2848 MSR_IA32_APICBASE_BASE,
2849 &apic->io_memory,
2850 0x1000);
2851 apic_mmio_map_once = true;
2852 }
2853}
2854
2855static void x86_cpu_machine_done(Notifier *n, void *unused)
2856{
2857 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2858 MemoryRegion *smram =
2859 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2860
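    /* If the machine provides SMRAM, overlay a 4 GiB alias of it on this CPU's
     * address space; it stays disabled here and is presumably toggled when the
     * CPU enters or leaves SMM. */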
2861 if (smram) {
2862 cpu->smram = g_new(MemoryRegion, 1);
2863 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2864 smram, 0, 1ull << 32);
2865 memory_region_set_enabled(cpu->smram, false);
2866 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2867 }
2868}
2869#else
2870static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2871{
2872}
2873#endif
2874
2875
2876#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2877 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2878 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2879#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2880 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2881 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2882static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2883{
2884 CPUState *cs = CPU(dev);
2885 X86CPU *cpu = X86_CPU(dev);
2886 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2887 CPUX86State *env = &cpu->env;
2888 Error *local_err = NULL;
2889 static bool ht_warned;
2890
2891 if (cpu->apic_id < 0) {
2892 error_setg(errp, "apic-id property was not initialized properly");
2893 return;
2894 }
2895
2896 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2897 env->cpuid_level = 7;
2898 }
2899
2900 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2901 error_setg(&local_err,
2902 kvm_enabled() ?
2903 "Host doesn't support requested features" :
2904 "TCG doesn't support requested features");
2905 goto out;
2906 }
2907
2908 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2909 * CPUID[1].EDX.
2910 */
2911 if (IS_AMD_CPU(env)) {
2912 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2913 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2914 & CPUID_EXT2_AMD_ALIASES);
2915 }
2916
2917
2918#ifndef CONFIG_USER_ONLY
2919 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2920
2921 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2922 x86_cpu_apic_create(cpu, &local_err);
2923 if (local_err != NULL) {
2924 goto out;
2925 }
2926 }
2927#endif
2928
2929 mce_init(cpu);
2930
2931#ifndef CONFIG_USER_ONLY
2932 if (tcg_enabled()) {
2933 AddressSpace *newas = g_new(AddressSpace, 1);
2934
2935 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2936 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2937
2938 /* Outer container... */
2939 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2940 memory_region_set_enabled(cpu->cpu_as_root, true);
2941
2942 /* ... with two regions inside: normal system memory with low
2943 * priority, and...
2944 */
2945 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2946 get_system_memory(), 0, ~0ull);
2947 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2948 memory_region_set_enabled(cpu->cpu_as_mem, true);
2949 address_space_init(newas, cpu->cpu_as_root, "CPU");
2950 cs->num_ases = 1;
2951 cpu_address_space_init(cs, newas, 0);
2952
2953 /* ... SMRAM with higher priority, linked from /machine/smram. */
2954 cpu->machine_done.notify = x86_cpu_machine_done;
2955 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2956 }
2957#endif
2958
2959 qemu_init_vcpu(cs);
2960
2961 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2962 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
 2963 * based on inputs (sockets, cores, threads), it is still better to give
2964 * users a warning.
2965 *
2966 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
 2967 * cs->nr_threads hasn't been populated yet and the check is incorrect.
2968 */
2969 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2970 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2971 " -smp options properly.");
2972 ht_warned = true;
2973 }
2974
2975 x86_cpu_apic_realize(cpu, &local_err);
2976 if (local_err != NULL) {
2977 goto out;
2978 }
2979 cpu_reset(cs);
2980
2981 xcc->parent_realize(dev, &local_err);
2982
2983out:
2984 if (local_err != NULL) {
2985 error_propagate(errp, local_err);
2986 return;
2987 }
2988}
2989
2990typedef struct BitProperty {
2991 uint32_t *ptr;
2992 uint32_t mask;
2993} BitProperty;
2994
2995static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
2996 void *opaque, Error **errp)
2997{
2998 BitProperty *fp = opaque;
2999 bool value = (*fp->ptr & fp->mask) == fp->mask;
3000 visit_type_bool(v, name, &value, errp);
3001}
3002
3003static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3004 void *opaque, Error **errp)
3005{
3006 DeviceState *dev = DEVICE(obj);
3007 BitProperty *fp = opaque;
3008 Error *local_err = NULL;
3009 bool value;
3010
3011 if (dev->realized) {
3012 qdev_prop_set_after_realize(dev, name, errp);
3013 return;
3014 }
3015
3016 visit_type_bool(v, name, &value, &local_err);
3017 if (local_err) {
3018 error_propagate(errp, local_err);
3019 return;
3020 }
3021
3022 if (value) {
3023 *fp->ptr |= fp->mask;
3024 } else {
3025 *fp->ptr &= ~fp->mask;
3026 }
3027}
3028
3029static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3030 void *opaque)
3031{
3032 BitProperty *prop = opaque;
3033 g_free(prop);
3034}
3035
3036/* Register a boolean property to get/set a single bit in a uint32_t field.
3037 *
3038 * The same property name can be registered multiple times to make it affect
3039 * multiple bits in the same FeatureWord. In that case, the getter will return
3040 * true only if all bits are set.
3041 */
3042static void x86_cpu_register_bit_prop(X86CPU *cpu,
3043 const char *prop_name,
3044 uint32_t *field,
3045 int bitnr)
3046{
3047 BitProperty *fp;
3048 ObjectProperty *op;
3049 uint32_t mask = (1UL << bitnr);
3050
3051 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3052 if (op) {
3053 fp = op->opaque;
3054 assert(fp->ptr == field);
3055 fp->mask |= mask;
3056 } else {
3057 fp = g_new0(BitProperty, 1);
3058 fp->ptr = field;
3059 fp->mask = mask;
3060 object_property_add(OBJECT(cpu), prop_name, "bool",
3061 x86_cpu_get_bit_prop,
3062 x86_cpu_set_bit_prop,
3063 x86_cpu_release_bit_prop, fp, &error_abort);
3064 }
3065}
3066
3067static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3068 FeatureWord w,
3069 int bitnr)
3070{
3071 Object *obj = OBJECT(cpu);
3072 int i;
3073 char **names;
3074 FeatureWordInfo *fi = &feature_word_info[w];
3075
3076 if (!fi->feat_names) {
3077 return;
3078 }
3079 if (!fi->feat_names[bitnr]) {
3080 return;
3081 }
3082
3083 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3084
3085 feat2prop(names[0]);
3086 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3087
3088 for (i = 1; names[i]; i++) {
3089 feat2prop(names[i]);
3090 object_property_add_alias(obj, names[i], obj, names[0],
3091 &error_abort);
3092 }
3093
3094 g_strfreev(names);
3095}
3096
3097static void x86_cpu_initfn(Object *obj)
3098{
3099 CPUState *cs = CPU(obj);
3100 X86CPU *cpu = X86_CPU(obj);
3101 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3102 CPUX86State *env = &cpu->env;
3103 FeatureWord w;
3104 static int inited;
3105
3106 cs->env_ptr = env;
3107 cpu_exec_init(cs, &error_abort);
3108
3109 object_property_add(obj, "family", "int",
3110 x86_cpuid_version_get_family,
3111 x86_cpuid_version_set_family, NULL, NULL, NULL);
3112 object_property_add(obj, "model", "int",
3113 x86_cpuid_version_get_model,
3114 x86_cpuid_version_set_model, NULL, NULL, NULL);
3115 object_property_add(obj, "stepping", "int",
3116 x86_cpuid_version_get_stepping,
3117 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3118 object_property_add_str(obj, "vendor",
3119 x86_cpuid_get_vendor,
3120 x86_cpuid_set_vendor, NULL);
3121 object_property_add_str(obj, "model-id",
3122 x86_cpuid_get_model_id,
3123 x86_cpuid_set_model_id, NULL);
3124 object_property_add(obj, "tsc-frequency", "int",
3125 x86_cpuid_get_tsc_freq,
3126 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3127 object_property_add(obj, "apic-id", "int",
3128 x86_cpuid_get_apic_id,
3129 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3130 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3131 x86_cpu_get_feature_words,
3132 NULL, NULL, (void *)env->features, NULL);
3133 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3134 x86_cpu_get_feature_words,
3135 NULL, NULL, (void *)cpu->filtered_features, NULL);
3136
3137 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3138
3139#ifndef CONFIG_USER_ONLY
 3140 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3141 cpu->apic_id = -1;
3142#endif
3143
3144 for (w = 0; w < FEATURE_WORDS; w++) {
3145 int bitnr;
3146
3147 for (bitnr = 0; bitnr < 32; bitnr++) {
3148 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3149 }
3150 }
3151
3152 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3153
3154 /* init various static tables used in TCG mode */
3155 if (tcg_enabled() && !inited) {
3156 inited = 1;
3157 tcg_x86_init();
3158 }
3159}
3160
3161static int64_t x86_cpu_get_arch_id(CPUState *cs)
3162{
3163 X86CPU *cpu = X86_CPU(cs);
3164
3165 return cpu->apic_id;
3166}
3167
3168static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3169{
3170 X86CPU *cpu = X86_CPU(cs);
3171
3172 return cpu->env.cr[0] & CR0_PG_MASK;
3173}
3174
3175static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3176{
3177 X86CPU *cpu = X86_CPU(cs);
3178
3179 cpu->env.eip = value;
3180}
3181
3182static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3183{
3184 X86CPU *cpu = X86_CPU(cs);
3185
3186 cpu->env.eip = tb->pc - tb->cs_base;
3187}
3188
3189static bool x86_cpu_has_work(CPUState *cs)
3190{
3191 X86CPU *cpu = X86_CPU(cs);
3192 CPUX86State *env = &cpu->env;
3193
3194 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3195 CPU_INTERRUPT_POLL)) &&
3196 (env->eflags & IF_MASK)) ||
3197 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3198 CPU_INTERRUPT_INIT |
3199 CPU_INTERRUPT_SIPI |
3200 CPU_INTERRUPT_MCE)) ||
3201 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3202 !(env->hflags & HF_SMM_MASK));
3203}
3204
3205static Property x86_cpu_properties[] = {
3206 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3207 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3208 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3209 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3210 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3211 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3212 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3213 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3214 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3215 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3216 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3217 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3218 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3219 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3220 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3221 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3222 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3223 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3224 DEFINE_PROP_END_OF_LIST()
3225};
3226
3227static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3228{
3229 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3230 CPUClass *cc = CPU_CLASS(oc);
3231 DeviceClass *dc = DEVICE_CLASS(oc);
3232
3233 xcc->parent_realize = dc->realize;
3234 dc->realize = x86_cpu_realizefn;
3235 dc->props = x86_cpu_properties;
3236
3237 xcc->parent_reset = cc->reset;
3238 cc->reset = x86_cpu_reset;
3239 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3240
3241 cc->class_by_name = x86_cpu_class_by_name;
3242 cc->parse_features = x86_cpu_parse_featurestr;
3243 cc->has_work = x86_cpu_has_work;
3244 cc->do_interrupt = x86_cpu_do_interrupt;
3245 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3246 cc->dump_state = x86_cpu_dump_state;
3247 cc->set_pc = x86_cpu_set_pc;
3248 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3249 cc->gdb_read_register = x86_cpu_gdb_read_register;
3250 cc->gdb_write_register = x86_cpu_gdb_write_register;
3251 cc->get_arch_id = x86_cpu_get_arch_id;
3252 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3253#ifdef CONFIG_USER_ONLY
3254 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3255#else
3256 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3257 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3258 cc->write_elf64_note = x86_cpu_write_elf64_note;
3259 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3260 cc->write_elf32_note = x86_cpu_write_elf32_note;
3261 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3262 cc->vmsd = &vmstate_x86_cpu;
3263#endif
3264 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3265#ifndef CONFIG_USER_ONLY
3266 cc->debug_excp_handler = breakpoint_handler;
3267#endif
3268 cc->cpu_exec_enter = x86_cpu_exec_enter;
3269 cc->cpu_exec_exit = x86_cpu_exec_exit;
3270
3271 /*
3272 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
 3273 * object on the cpus list, leaving a dangling pointer after the final object_unref().
3274 */
3275 dc->cannot_destroy_with_object_finalize_yet = true;
3276}
3277
3278static const TypeInfo x86_cpu_type_info = {
3279 .name = TYPE_X86_CPU,
3280 .parent = TYPE_CPU,
3281 .instance_size = sizeof(X86CPU),
3282 .instance_init = x86_cpu_initfn,
3283 .abstract = true,
3284 .class_size = sizeof(X86CPUClass),
3285 .class_init = x86_cpu_common_class_init,
3286};
3287
3288static void x86_cpu_register_types(void)
3289{
3290 int i;
3291
3292 type_register_static(&x86_cpu_type_info);
3293 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3294 x86_register_cpudef_type(&builtin_x86_defs[i]);
3295 }
3296#ifdef CONFIG_KVM
3297 type_register_static(&host_x86_cpu_type_info);
3298#endif
3299}
3300
3301type_init(x86_cpu_register_types)