target-i386/cpu.c
1/*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19#include "qemu/osdep.h"
20#include "qemu/cutils.h"
21
22#include "cpu.h"
23#include "exec/exec-all.h"
24#include "sysemu/kvm.h"
25#include "sysemu/cpus.h"
26#include "kvm_i386.h"
27
28#include "qemu/error-report.h"
29#include "qemu/option.h"
30#include "qemu/config-file.h"
31#include "qapi/qmp/qerror.h"
32
33#include "qapi-types.h"
34#include "qapi-visit.h"
35#include "qapi/visitor.h"
36#include "sysemu/arch_init.h"
37
38#if defined(CONFIG_KVM)
39#include <linux/kvm_para.h>
40#endif
41
42#include "sysemu/sysemu.h"
43#include "hw/qdev-properties.h"
44#include "hw/i386/topology.h"
45#ifndef CONFIG_USER_ONLY
46#include "exec/address-spaces.h"
47#include "hw/hw.h"
48#include "hw/xen/xen.h"
49#include "hw/i386/apic_internal.h"
50#endif
51
52
53/* Cache topology CPUID constants: */
54
55/* CPUID Leaf 2 Descriptors */
56
57#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58#define CPUID_2_L1I_32KB_8WAY_64B 0x30
59#define CPUID_2_L2_2MB_8WAY_64B 0x7d
60
61
62/* CPUID Leaf 4 constants: */
63
64/* EAX: */
65#define CPUID_4_TYPE_DCACHE 1
66#define CPUID_4_TYPE_ICACHE 2
67#define CPUID_4_TYPE_UNIFIED 3
68
69#define CPUID_4_LEVEL(l) ((l) << 5)
70
71#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72#define CPUID_4_FULLY_ASSOC (1 << 9)
73
74/* EDX: */
75#define CPUID_4_NO_INVD_SHARING (1 << 0)
76#define CPUID_4_INCLUSIVE (1 << 1)
77#define CPUID_4_COMPLEX_IDX (1 << 2)
78
79#define ASSOC_FULL 0xFF
80
81/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
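/* Worked example of the encoding above: a 16-way set-associative cache is
 * reported as AMD_ENC_ASSOC(16) == 0x8, a fully associative one (ASSOC_FULL)
 * as 0xF, and any associativity not listed falls through to 0 (invalid).
 */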
94
95
96/* Definitions of the hardcoded cache entries we expose: */
97
98/* L1 data cache: */
99#define L1D_LINE_SIZE 64
100#define L1D_ASSOCIATIVITY 8
101#define L1D_SETS 64
102#define L1D_PARTITIONS 1
103/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106#define L1D_LINES_PER_TAG 1
107#define L1D_SIZE_KB_AMD 64
108#define L1D_ASSOCIATIVITY_AMD 2
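/* Worked size check for the leaf-2/4 constants above: 64 B/line * 8 ways *
 * 64 sets * 1 partition = 32 KiB, matching the descriptor, whereas the AMD
 * leaf 0x80000005 values describe a 64 KiB, 2-way cache (the FIXME above).
 */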
109
110/* L1 instruction cache: */
111#define L1I_LINE_SIZE 64
112#define L1I_ASSOCIATIVITY 8
113#define L1I_SETS 64
114#define L1I_PARTITIONS 1
115/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118#define L1I_LINES_PER_TAG 1
119#define L1I_SIZE_KB_AMD 64
120#define L1I_ASSOCIATIVITY_AMD 2
121
122/* Level 2 unified cache: */
123#define L2_LINE_SIZE 64
124#define L2_ASSOCIATIVITY 16
125#define L2_SETS 4096
126#define L2_PARTITIONS 1
127/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131#define L2_LINES_PER_TAG 1
132#define L2_SIZE_KB_AMD 512
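/* Worked size check: 64 B/line * 16 ways * 4096 sets * 1 partition = 4 MiB
 * for leaf 4, while the leaf-2 descriptor (0x7d) advertises 2 MB and the AMD
 * leaf 0x80000006 value is 512 KB, as flagged by the FIXMEs above.
 */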
133
134/* No L3 cache: */
135#define L3_SIZE_KB 0 /* disabled */
136#define L3_ASSOCIATIVITY 0 /* disabled */
137#define L3_LINES_PER_TAG 0 /* disabled */
138#define L3_LINE_SIZE 0 /* disabled */
139
140/* TLB definitions: */
141
142#define L1_DTLB_2M_ASSOC 1
143#define L1_DTLB_2M_ENTRIES 255
144#define L1_DTLB_4K_ASSOC 1
145#define L1_DTLB_4K_ENTRIES 255
146
147#define L1_ITLB_2M_ASSOC 1
148#define L1_ITLB_2M_ENTRIES 255
149#define L1_ITLB_4K_ASSOC 1
150#define L1_ITLB_4K_ENTRIES 255
151
152#define L2_DTLB_2M_ASSOC 0 /* disabled */
153#define L2_DTLB_2M_ENTRIES 0 /* disabled */
154#define L2_DTLB_4K_ASSOC 4
155#define L2_DTLB_4K_ENTRIES 512
156
157#define L2_ITLB_2M_ASSOC 0 /* disabled */
158#define L2_ITLB_2M_ENTRIES 0 /* disabled */
159#define L2_ITLB_4K_ASSOC 4
160#define L2_ITLB_4K_ENTRIES 512
161
162
163
164static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
166{
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
172 }
173 dst[CPUID_VENDOR_SZ] = '\0';
174}
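/* Example: CPUID.0 on an Intel part returns EBX=0x756e6547 ("Genu"),
 * EDX=0x49656e69 ("ineI") and ECX=0x6c65746e ("ntel"); passing them in the
 * order used by host_x86_cpu_class_init() below, i.e.
 * x86_cpu_vendor_words2str(dst, ebx, edx, ecx), yields "GenuineIntel".
 */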
175
176/* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
179 */
180static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
189};
190static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
199};
200/* Feature names that are already defined in feature_name[] but are also set
201 * in CPUID[8000_0001].EDX on AMD CPUs are not repeated in ext2_feature_name[];
202 * they are copied automatically to cpuid_ext2_features if and only if the CPU
203 * vendor is AMD.
204 */
205static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
214};
215static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
224};
225
226static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
235};
236
237static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
246};
247
248static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257};
258
259static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
263 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
264};
265
266static const char *cpuid_7_0_ecx_feature_name[] = {
267 NULL, NULL, NULL, "pku",
268 "ospke", NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
275};
276
277static const char *cpuid_apm_edx_feature_name[] = {
278 NULL, NULL, NULL, NULL,
279 NULL, NULL, NULL, NULL,
280 "invtsc", NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286};
287
288static const char *cpuid_xsave_feature_name[] = {
289 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
297};
298
299static const char *cpuid_6_feature_name[] = {
300 NULL, NULL, "arat", NULL,
301 NULL, NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
305 NULL, NULL, NULL, NULL,
306 NULL, NULL, NULL, NULL,
307 NULL, NULL, NULL, NULL,
308};
309
310#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
311#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
312 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
313#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
314 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
315 CPUID_PSE36 | CPUID_FXSR)
316#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
317#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
318 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
319 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
320 CPUID_PAE | CPUID_SEP | CPUID_APIC)
321
322#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
323 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
324 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
325 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
326 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
327 /* partly implemented:
328 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
329 /* missing:
330 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
331#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
332 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
333 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
334 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
335 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
336 /* missing:
337 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
338 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
339 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
340 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
341 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
342
343#ifdef TARGET_X86_64
344#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
345#else
346#define TCG_EXT2_X86_64_FEATURES 0
347#endif
348
349#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
350 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
351 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
352 TCG_EXT2_X86_64_FEATURES)
353#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
354 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
355#define TCG_EXT4_FEATURES 0
356#define TCG_SVM_FEATURES 0
357#define TCG_KVM_FEATURES 0
358#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
359 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
360 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
361 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
362 /* missing:
363 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
364 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
365 CPUID_7_0_EBX_RDSEED */
366#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
367#define TCG_APM_FEATURES 0
368#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
369#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
370 /* missing:
371 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
372
373typedef struct FeatureWordInfo {
374 const char **feat_names;
375 uint32_t cpuid_eax; /* Input EAX for CPUID */
376 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
377 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
378 int cpuid_reg; /* output register (R_* constant) */
379 uint32_t tcg_features; /* Feature flags supported by TCG */
380 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
381} FeatureWordInfo;
382
383static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
384 [FEAT_1_EDX] = {
385 .feat_names = feature_name,
386 .cpuid_eax = 1, .cpuid_reg = R_EDX,
387 .tcg_features = TCG_FEATURES,
388 },
389 [FEAT_1_ECX] = {
390 .feat_names = ext_feature_name,
391 .cpuid_eax = 1, .cpuid_reg = R_ECX,
392 .tcg_features = TCG_EXT_FEATURES,
393 },
394 [FEAT_8000_0001_EDX] = {
395 .feat_names = ext2_feature_name,
396 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
397 .tcg_features = TCG_EXT2_FEATURES,
398 },
399 [FEAT_8000_0001_ECX] = {
400 .feat_names = ext3_feature_name,
401 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
402 .tcg_features = TCG_EXT3_FEATURES,
403 },
404 [FEAT_C000_0001_EDX] = {
405 .feat_names = ext4_feature_name,
406 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
407 .tcg_features = TCG_EXT4_FEATURES,
408 },
409 [FEAT_KVM] = {
410 .feat_names = kvm_feature_name,
411 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
412 .tcg_features = TCG_KVM_FEATURES,
413 },
414 [FEAT_SVM] = {
415 .feat_names = svm_feature_name,
416 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
417 .tcg_features = TCG_SVM_FEATURES,
418 },
419 [FEAT_7_0_EBX] = {
420 .feat_names = cpuid_7_0_ebx_feature_name,
421 .cpuid_eax = 7,
422 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
423 .cpuid_reg = R_EBX,
424 .tcg_features = TCG_7_0_EBX_FEATURES,
425 },
426 [FEAT_7_0_ECX] = {
427 .feat_names = cpuid_7_0_ecx_feature_name,
428 .cpuid_eax = 7,
429 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
430 .cpuid_reg = R_ECX,
431 .tcg_features = TCG_7_0_ECX_FEATURES,
432 },
433 [FEAT_8000_0007_EDX] = {
434 .feat_names = cpuid_apm_edx_feature_name,
435 .cpuid_eax = 0x80000007,
436 .cpuid_reg = R_EDX,
437 .tcg_features = TCG_APM_FEATURES,
438 .unmigratable_flags = CPUID_APM_INVTSC,
439 },
440 [FEAT_XSAVE] = {
441 .feat_names = cpuid_xsave_feature_name,
442 .cpuid_eax = 0xd,
443 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
444 .cpuid_reg = R_EAX,
445 .tcg_features = TCG_XSAVE_FEATURES,
446 },
447 [FEAT_6_EAX] = {
448 .feat_names = cpuid_6_feature_name,
449 .cpuid_eax = 6, .cpuid_reg = R_EAX,
450 .tcg_features = TCG_6_EAX_FEATURES,
451 },
452};
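/* Illustrative sketch (not code from this file) of how the table above maps a
 * FeatureWord to a CPUID query: FEAT_7_0_EBX is the EBX output of CPUID with
 * EAX=7, ECX=0, so the host value could be read as
 *
 *     uint32_t ebx;
 *     FeatureWordInfo *wi = &feature_word_info[FEAT_7_0_EBX];
 *     host_cpuid(wi->cpuid_eax, wi->cpuid_needs_ecx ? wi->cpuid_ecx : 0,
 *                NULL, &ebx, NULL, NULL);
 *
 * with wi->cpuid_reg saying which of the four outputs (here R_EBX) to keep.
 */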
453
454typedef struct X86RegisterInfo32 {
455 /* Name of register */
456 const char *name;
457 /* QAPI enum value register */
458 X86CPURegister32 qapi_enum;
459} X86RegisterInfo32;
460
461#define REGISTER(reg) \
462 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
463static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
464 REGISTER(EAX),
465 REGISTER(ECX),
466 REGISTER(EDX),
467 REGISTER(EBX),
468 REGISTER(ESP),
469 REGISTER(EBP),
470 REGISTER(ESI),
471 REGISTER(EDI),
472};
473#undef REGISTER
474
475const ExtSaveArea x86_ext_save_areas[] = {
476 [XSTATE_YMM_BIT] =
477 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
478 .offset = offsetof(X86XSaveArea, avx_state),
479 .size = sizeof(XSaveAVX) },
480 [XSTATE_BNDREGS_BIT] =
481 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
482 .offset = offsetof(X86XSaveArea, bndreg_state),
483 .size = sizeof(XSaveBNDREG) },
484 [XSTATE_BNDCSR_BIT] =
485 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
486 .offset = offsetof(X86XSaveArea, bndcsr_state),
487 .size = sizeof(XSaveBNDCSR) },
488 [XSTATE_OPMASK_BIT] =
489 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
490 .offset = offsetof(X86XSaveArea, opmask_state),
491 .size = sizeof(XSaveOpmask) },
492 [XSTATE_ZMM_Hi256_BIT] =
493 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
494 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
495 .size = sizeof(XSaveZMM_Hi256) },
496 [XSTATE_Hi16_ZMM_BIT] =
497 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
498 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
499 .size = sizeof(XSaveHi16_ZMM) },
500 [XSTATE_PKRU_BIT] =
501 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
502 .offset = offsetof(X86XSaveArea, pkru_state),
503 .size = sizeof(XSavePKRU) },
504};
505
506const char *get_register_name_32(unsigned int reg)
507{
508 if (reg >= CPU_NB_REGS32) {
509 return NULL;
510 }
511 return x86_reg_info_32[reg].name;
512}
513
514/*
515 * Returns the set of feature flags that are supported and migratable by
516 * QEMU, for a given FeatureWord.
517 */
518static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
519{
520 FeatureWordInfo *wi = &feature_word_info[w];
521 uint32_t r = 0;
522 int i;
523
524 for (i = 0; i < 32; i++) {
525 uint32_t f = 1U << i;
526 /* If the feature name is unknown, it is not supported by QEMU yet */
527 if (!wi->feat_names[i]) {
528 continue;
529 }
530 /* Skip features known to QEMU, but explicitly marked as unmigratable */
531 if (wi->unmigratable_flags & f) {
532 continue;
533 }
534 r |= f;
535 }
536 return r;
537}
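/* Example: in FEAT_8000_0007_EDX the only named bit is "invtsc", and that bit
 * is also listed in unmigratable_flags (CPUID_APM_INVTSC), so this function
 * returns 0 for that feature word.
 */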
538
539void host_cpuid(uint32_t function, uint32_t count,
540 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
541{
542 uint32_t vec[4];
543
544#ifdef __x86_64__
545 asm volatile("cpuid"
546 : "=a"(vec[0]), "=b"(vec[1]),
547 "=c"(vec[2]), "=d"(vec[3])
548 : "0"(function), "c"(count) : "cc");
549#elif defined(__i386__)
550 asm volatile("pusha \n\t"
551 "cpuid \n\t"
552 "mov %%eax, 0(%2) \n\t"
553 "mov %%ebx, 4(%2) \n\t"
554 "mov %%ecx, 8(%2) \n\t"
555 "mov %%edx, 12(%2) \n\t"
556 "popa"
557 : : "a"(function), "c"(count), "S"(vec)
558 : "memory", "cc");
559#else
560 abort();
561#endif
562
563 if (eax)
564 *eax = vec[0];
565 if (ebx)
566 *ebx = vec[1];
567 if (ecx)
568 *ecx = vec[2];
569 if (edx)
570 *edx = vec[3];
571}
572
573#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
574
575/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
576 * a substring. ex if !NULL points to the first char after a substring,
577 * otherwise the string is assumed to be sized by a terminating nul.
578 * Return lexical ordering of *s1:*s2.
579 */
580static int sstrcmp(const char *s1, const char *e1,
581 const char *s2, const char *e2)
582{
583 for (;;) {
584 if (!*s1 || !*s2 || *s1 != *s2)
585 return (*s1 - *s2);
586 ++s1, ++s2;
587 if (s1 == e1 && s2 == e2)
588 return (0);
589 else if (s1 == e1)
590 return (*s2);
591 else if (s2 == e2)
592 return (*s1);
593 }
594}
595
596/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
597 * '|' delimited (possibly empty) strings in which case search for a match
598 * within the alternatives proceeds left to right. Return 0 for success,
599 * non-zero otherwise.
600 */
601static int altcmp(const char *s, const char *e, const char *altstr)
602{
603 const char *p, *q;
604
605 for (q = p = altstr; ; ) {
606 while (*p && *p != '|')
607 ++p;
608 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
609 return (0);
610 if (!*p)
611 return (1);
612 else
613 q = ++p;
614 }
615}
616
617/* search featureset for flag *[s..e), if found set corresponding bit in
618 * *pval and return true, otherwise return false
619 */
620static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
621 const char **featureset)
622{
623 uint32_t mask;
624 const char **ppc;
625 bool found = false;
626
627 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
628 if (*ppc && !altcmp(s, e, *ppc)) {
629 *pval |= mask;
630 found = true;
631 }
632 }
633 return found;
634}
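/* Example: ext_feature_name[] contains "sse4.1|sse4_1", so either spelling
 * matches via altcmp() and lookup_feature() sets the same CPUID[1].ECX bit;
 * this is how the alias forms mentioned above resolve to one feature flag.
 */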
635
636static void add_flagname_to_bitmaps(const char *flagname,
637 FeatureWordArray words,
638 Error **errp)
639{
640 FeatureWord w;
641 for (w = 0; w < FEATURE_WORDS; w++) {
642 FeatureWordInfo *wi = &feature_word_info[w];
643 if (wi->feat_names &&
644 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
645 break;
646 }
647 }
648 if (w == FEATURE_WORDS) {
649 error_setg(errp, "CPU feature %s not found", flagname);
650 }
651}
652
653/* CPU class name definitions: */
654
655#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
656#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
657
658/* Return type name for a given CPU model name
659 * Caller is responsible for freeing the returned string.
660 */
661static char *x86_cpu_type_name(const char *model_name)
662{
663 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
664}
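/* Example: x86_cpu_type_name("qemu64") returns "qemu64" X86_CPU_TYPE_SUFFIX,
 * i.e. "qemu64" followed by "-" TYPE_X86_CPU (for instance "qemu64-x86_64-cpu"
 * on an x86_64 target; the exact suffix depends on the build target).
 */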
665
666static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
667{
668 ObjectClass *oc;
669 char *typename;
670
671 if (cpu_model == NULL) {
672 return NULL;
673 }
674
675 typename = x86_cpu_type_name(cpu_model);
676 oc = object_class_by_name(typename);
677 g_free(typename);
678 return oc;
679}
680
681struct X86CPUDefinition {
682 const char *name;
683 uint32_t level;
684 uint32_t xlevel;
685 uint32_t xlevel2;
686 /* vendor is zero-terminated, 12 character ASCII string */
687 char vendor[CPUID_VENDOR_SZ + 1];
688 int family;
689 int model;
690 int stepping;
691 FeatureWordArray features;
692 char model_id[48];
693};
694
695static X86CPUDefinition builtin_x86_defs[] = {
696 {
697 .name = "qemu64",
698 .level = 0xd,
699 .vendor = CPUID_VENDOR_AMD,
700 .family = 6,
701 .model = 6,
702 .stepping = 3,
703 .features[FEAT_1_EDX] =
704 PPRO_FEATURES |
705 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
706 CPUID_PSE36,
707 .features[FEAT_1_ECX] =
708 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
709 .features[FEAT_8000_0001_EDX] =
710 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
711 .features[FEAT_8000_0001_ECX] =
712 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
713 .xlevel = 0x8000000A,
714 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
715 },
716 {
717 .name = "phenom",
718 .level = 5,
719 .vendor = CPUID_VENDOR_AMD,
720 .family = 16,
721 .model = 2,
722 .stepping = 3,
723 /* Missing: CPUID_HT */
724 .features[FEAT_1_EDX] =
725 PPRO_FEATURES |
726 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
727 CPUID_PSE36 | CPUID_VME,
728 .features[FEAT_1_ECX] =
729 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
730 CPUID_EXT_POPCNT,
731 .features[FEAT_8000_0001_EDX] =
732 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
733 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
734 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
735 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
736 CPUID_EXT3_CR8LEG,
737 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
738 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
739 .features[FEAT_8000_0001_ECX] =
740 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
741 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
742 /* Missing: CPUID_SVM_LBRV */
743 .features[FEAT_SVM] =
744 CPUID_SVM_NPT,
745 .xlevel = 0x8000001A,
746 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
747 },
748 {
749 .name = "core2duo",
750 .level = 10,
751 .vendor = CPUID_VENDOR_INTEL,
752 .family = 6,
753 .model = 15,
754 .stepping = 11,
755 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
756 .features[FEAT_1_EDX] =
757 PPRO_FEATURES |
758 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
759 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
760 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
761 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
762 .features[FEAT_1_ECX] =
763 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
764 CPUID_EXT_CX16,
765 .features[FEAT_8000_0001_EDX] =
766 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
767 .features[FEAT_8000_0001_ECX] =
768 CPUID_EXT3_LAHF_LM,
769 .xlevel = 0x80000008,
770 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
771 },
772 {
773 .name = "kvm64",
774 .level = 0xd,
775 .vendor = CPUID_VENDOR_INTEL,
776 .family = 15,
777 .model = 6,
778 .stepping = 1,
779 /* Missing: CPUID_HT */
780 .features[FEAT_1_EDX] =
781 PPRO_FEATURES | CPUID_VME |
782 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
783 CPUID_PSE36,
784 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
785 .features[FEAT_1_ECX] =
786 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
787 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
788 .features[FEAT_8000_0001_EDX] =
789 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
790 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
791 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
792 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
793 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
794 .features[FEAT_8000_0001_ECX] =
795 0,
796 .xlevel = 0x80000008,
797 .model_id = "Common KVM processor"
798 },
799 {
800 .name = "qemu32",
801 .level = 4,
802 .vendor = CPUID_VENDOR_INTEL,
803 .family = 6,
804 .model = 6,
805 .stepping = 3,
806 .features[FEAT_1_EDX] =
807 PPRO_FEATURES,
808 .features[FEAT_1_ECX] =
809 CPUID_EXT_SSE3,
810 .xlevel = 0x80000004,
811 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
812 },
813 {
814 .name = "kvm32",
815 .level = 5,
816 .vendor = CPUID_VENDOR_INTEL,
817 .family = 15,
818 .model = 6,
819 .stepping = 1,
820 .features[FEAT_1_EDX] =
821 PPRO_FEATURES | CPUID_VME |
822 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
823 .features[FEAT_1_ECX] =
824 CPUID_EXT_SSE3,
825 .features[FEAT_8000_0001_ECX] =
826 0,
827 .xlevel = 0x80000008,
828 .model_id = "Common 32-bit KVM processor"
829 },
830 {
831 .name = "coreduo",
832 .level = 10,
833 .vendor = CPUID_VENDOR_INTEL,
834 .family = 6,
835 .model = 14,
836 .stepping = 8,
837 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
838 .features[FEAT_1_EDX] =
839 PPRO_FEATURES | CPUID_VME |
840 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
841 CPUID_SS,
842 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
843 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
844 .features[FEAT_1_ECX] =
845 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
846 .features[FEAT_8000_0001_EDX] =
847 CPUID_EXT2_NX,
848 .xlevel = 0x80000008,
849 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
850 },
851 {
852 .name = "486",
853 .level = 1,
854 .vendor = CPUID_VENDOR_INTEL,
855 .family = 4,
856 .model = 8,
857 .stepping = 0,
858 .features[FEAT_1_EDX] =
859 I486_FEATURES,
860 .xlevel = 0,
861 },
862 {
863 .name = "pentium",
864 .level = 1,
865 .vendor = CPUID_VENDOR_INTEL,
866 .family = 5,
867 .model = 4,
868 .stepping = 3,
869 .features[FEAT_1_EDX] =
870 PENTIUM_FEATURES,
871 .xlevel = 0,
872 },
873 {
874 .name = "pentium2",
875 .level = 2,
876 .vendor = CPUID_VENDOR_INTEL,
877 .family = 6,
878 .model = 5,
879 .stepping = 2,
880 .features[FEAT_1_EDX] =
881 PENTIUM2_FEATURES,
882 .xlevel = 0,
883 },
884 {
885 .name = "pentium3",
886 .level = 3,
887 .vendor = CPUID_VENDOR_INTEL,
888 .family = 6,
889 .model = 7,
890 .stepping = 3,
891 .features[FEAT_1_EDX] =
892 PENTIUM3_FEATURES,
893 .xlevel = 0,
894 },
895 {
896 .name = "athlon",
897 .level = 2,
898 .vendor = CPUID_VENDOR_AMD,
899 .family = 6,
900 .model = 2,
901 .stepping = 3,
902 .features[FEAT_1_EDX] =
903 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
904 CPUID_MCA,
905 .features[FEAT_8000_0001_EDX] =
906 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
907 .xlevel = 0x80000008,
908 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
909 },
910 {
911 .name = "n270",
912 .level = 10,
913 .vendor = CPUID_VENDOR_INTEL,
914 .family = 6,
915 .model = 28,
916 .stepping = 2,
917 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
918 .features[FEAT_1_EDX] =
919 PPRO_FEATURES |
920 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
921 CPUID_ACPI | CPUID_SS,
922 /* Some CPUs lack CPUID_SEP */
923 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
924 * CPUID_EXT_XTPR */
925 .features[FEAT_1_ECX] =
926 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
927 CPUID_EXT_MOVBE,
928 .features[FEAT_8000_0001_EDX] =
929 CPUID_EXT2_NX,
930 .features[FEAT_8000_0001_ECX] =
931 CPUID_EXT3_LAHF_LM,
932 .xlevel = 0x80000008,
933 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
934 },
935 {
936 .name = "Conroe",
937 .level = 10,
938 .vendor = CPUID_VENDOR_INTEL,
939 .family = 6,
940 .model = 15,
941 .stepping = 3,
942 .features[FEAT_1_EDX] =
943 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
944 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
945 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
946 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
947 CPUID_DE | CPUID_FP87,
948 .features[FEAT_1_ECX] =
949 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
950 .features[FEAT_8000_0001_EDX] =
951 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
952 .features[FEAT_8000_0001_ECX] =
953 CPUID_EXT3_LAHF_LM,
954 .xlevel = 0x80000008,
955 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
956 },
957 {
958 .name = "Penryn",
959 .level = 10,
960 .vendor = CPUID_VENDOR_INTEL,
961 .family = 6,
962 .model = 23,
963 .stepping = 3,
964 .features[FEAT_1_EDX] =
965 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
966 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
967 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
968 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
969 CPUID_DE | CPUID_FP87,
970 .features[FEAT_1_ECX] =
971 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
972 CPUID_EXT_SSE3,
973 .features[FEAT_8000_0001_EDX] =
974 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
975 .features[FEAT_8000_0001_ECX] =
976 CPUID_EXT3_LAHF_LM,
977 .xlevel = 0x80000008,
978 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
979 },
980 {
981 .name = "Nehalem",
982 .level = 11,
983 .vendor = CPUID_VENDOR_INTEL,
984 .family = 6,
985 .model = 26,
986 .stepping = 3,
987 .features[FEAT_1_EDX] =
988 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
989 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
990 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
991 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
992 CPUID_DE | CPUID_FP87,
993 .features[FEAT_1_ECX] =
994 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
995 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
996 .features[FEAT_8000_0001_EDX] =
997 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
998 .features[FEAT_8000_0001_ECX] =
999 CPUID_EXT3_LAHF_LM,
1000 .xlevel = 0x80000008,
1001 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1002 },
1003 {
1004 .name = "Westmere",
1005 .level = 11,
1006 .vendor = CPUID_VENDOR_INTEL,
1007 .family = 6,
1008 .model = 44,
1009 .stepping = 1,
1010 .features[FEAT_1_EDX] =
1011 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1012 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1013 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1014 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1015 CPUID_DE | CPUID_FP87,
1016 .features[FEAT_1_ECX] =
1017 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1018 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1019 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1020 .features[FEAT_8000_0001_EDX] =
1021 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1022 .features[FEAT_8000_0001_ECX] =
1023 CPUID_EXT3_LAHF_LM,
1024 .features[FEAT_6_EAX] =
1025 CPUID_6_EAX_ARAT,
1026 .xlevel = 0x80000008,
1027 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1028 },
1029 {
1030 .name = "SandyBridge",
1031 .level = 0xd,
1032 .vendor = CPUID_VENDOR_INTEL,
1033 .family = 6,
1034 .model = 42,
1035 .stepping = 1,
1036 .features[FEAT_1_EDX] =
1037 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1038 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1039 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1040 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1041 CPUID_DE | CPUID_FP87,
1042 .features[FEAT_1_ECX] =
1043 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1044 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1045 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1046 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1047 CPUID_EXT_SSE3,
1048 .features[FEAT_8000_0001_EDX] =
1049 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1050 CPUID_EXT2_SYSCALL,
1051 .features[FEAT_8000_0001_ECX] =
1052 CPUID_EXT3_LAHF_LM,
1053 .features[FEAT_XSAVE] =
1054 CPUID_XSAVE_XSAVEOPT,
1055 .features[FEAT_6_EAX] =
1056 CPUID_6_EAX_ARAT,
1057 .xlevel = 0x80000008,
1058 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1059 },
1060 {
1061 .name = "IvyBridge",
1062 .level = 0xd,
1063 .vendor = CPUID_VENDOR_INTEL,
1064 .family = 6,
1065 .model = 58,
1066 .stepping = 9,
1067 .features[FEAT_1_EDX] =
1068 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1069 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1070 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1071 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1072 CPUID_DE | CPUID_FP87,
1073 .features[FEAT_1_ECX] =
1074 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1075 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1076 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1077 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1078 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1079 .features[FEAT_7_0_EBX] =
1080 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1081 CPUID_7_0_EBX_ERMS,
1082 .features[FEAT_8000_0001_EDX] =
1083 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1084 CPUID_EXT2_SYSCALL,
1085 .features[FEAT_8000_0001_ECX] =
1086 CPUID_EXT3_LAHF_LM,
1087 .features[FEAT_XSAVE] =
1088 CPUID_XSAVE_XSAVEOPT,
1089 .features[FEAT_6_EAX] =
1090 CPUID_6_EAX_ARAT,
1091 .xlevel = 0x80000008,
1092 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1093 },
1094 {
1095 .name = "Haswell-noTSX",
1096 .level = 0xd,
1097 .vendor = CPUID_VENDOR_INTEL,
1098 .family = 6,
1099 .model = 60,
1100 .stepping = 1,
1101 .features[FEAT_1_EDX] =
1102 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1103 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1104 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1105 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1106 CPUID_DE | CPUID_FP87,
1107 .features[FEAT_1_ECX] =
1108 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1109 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1110 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1111 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1112 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1113 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1114 .features[FEAT_8000_0001_EDX] =
1115 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1116 CPUID_EXT2_SYSCALL,
1117 .features[FEAT_8000_0001_ECX] =
1118 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1119 .features[FEAT_7_0_EBX] =
1120 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1121 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1122 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1123 .features[FEAT_XSAVE] =
1124 CPUID_XSAVE_XSAVEOPT,
1125 .features[FEAT_6_EAX] =
1126 CPUID_6_EAX_ARAT,
1127 .xlevel = 0x80000008,
1128 .model_id = "Intel Core Processor (Haswell, no TSX)",
1129 }, {
1130 .name = "Haswell",
1131 .level = 0xd,
1132 .vendor = CPUID_VENDOR_INTEL,
1133 .family = 6,
1134 .model = 60,
1135 .stepping = 1,
1136 .features[FEAT_1_EDX] =
1137 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1138 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1139 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1140 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1141 CPUID_DE | CPUID_FP87,
1142 .features[FEAT_1_ECX] =
1143 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1144 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1145 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1146 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1147 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1148 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1149 .features[FEAT_8000_0001_EDX] =
1150 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1151 CPUID_EXT2_SYSCALL,
1152 .features[FEAT_8000_0001_ECX] =
1153 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1154 .features[FEAT_7_0_EBX] =
1155 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1156 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1157 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1158 CPUID_7_0_EBX_RTM,
1159 .features[FEAT_XSAVE] =
1160 CPUID_XSAVE_XSAVEOPT,
1161 .features[FEAT_6_EAX] =
1162 CPUID_6_EAX_ARAT,
1163 .xlevel = 0x80000008,
1164 .model_id = "Intel Core Processor (Haswell)",
1165 },
1166 {
1167 .name = "Broadwell-noTSX",
1168 .level = 0xd,
1169 .vendor = CPUID_VENDOR_INTEL,
1170 .family = 6,
1171 .model = 61,
1172 .stepping = 2,
1173 .features[FEAT_1_EDX] =
1174 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1175 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1176 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1177 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1178 CPUID_DE | CPUID_FP87,
1179 .features[FEAT_1_ECX] =
1180 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1181 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1182 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1183 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1184 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1185 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1186 .features[FEAT_8000_0001_EDX] =
1187 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1188 CPUID_EXT2_SYSCALL,
1189 .features[FEAT_8000_0001_ECX] =
1190 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1191 .features[FEAT_7_0_EBX] =
1192 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1193 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1194 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1195 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1196 CPUID_7_0_EBX_SMAP,
1197 .features[FEAT_XSAVE] =
1198 CPUID_XSAVE_XSAVEOPT,
1199 .features[FEAT_6_EAX] =
1200 CPUID_6_EAX_ARAT,
1201 .xlevel = 0x80000008,
1202 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1203 },
1204 {
1205 .name = "Broadwell",
1206 .level = 0xd,
1207 .vendor = CPUID_VENDOR_INTEL,
1208 .family = 6,
1209 .model = 61,
1210 .stepping = 2,
1211 .features[FEAT_1_EDX] =
1212 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1213 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1214 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1215 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1216 CPUID_DE | CPUID_FP87,
1217 .features[FEAT_1_ECX] =
1218 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1219 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1220 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1221 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1222 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1223 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1224 .features[FEAT_8000_0001_EDX] =
1225 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1226 CPUID_EXT2_SYSCALL,
1227 .features[FEAT_8000_0001_ECX] =
1228 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1229 .features[FEAT_7_0_EBX] =
1230 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1231 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1232 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1233 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1234 CPUID_7_0_EBX_SMAP,
1235 .features[FEAT_XSAVE] =
1236 CPUID_XSAVE_XSAVEOPT,
1237 .features[FEAT_6_EAX] =
1238 CPUID_6_EAX_ARAT,
1239 .xlevel = 0x80000008,
1240 .model_id = "Intel Core Processor (Broadwell)",
1241 },
1242 {
1243 .name = "Skylake-Client",
1244 .level = 0xd,
1245 .vendor = CPUID_VENDOR_INTEL,
1246 .family = 6,
1247 .model = 94,
1248 .stepping = 3,
1249 .features[FEAT_1_EDX] =
1250 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1251 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1252 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1253 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1254 CPUID_DE | CPUID_FP87,
1255 .features[FEAT_1_ECX] =
1256 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1257 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1258 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1259 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1260 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1261 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1262 .features[FEAT_8000_0001_EDX] =
1263 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1264 CPUID_EXT2_SYSCALL,
1265 .features[FEAT_8000_0001_ECX] =
1266 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1267 .features[FEAT_7_0_EBX] =
1268 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1269 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1270 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1271 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1272 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1273 /* Missing: XSAVES (not supported by some Linux versions,
1274 * including v4.1 to v4.6).
1275 * KVM doesn't yet expose any XSAVES state save component,
1276 * and the only one defined in Skylake (processor tracing)
1277 * probably will block migration anyway.
1278 */
1279 .features[FEAT_XSAVE] =
1280 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1281 CPUID_XSAVE_XGETBV1,
1282 .features[FEAT_6_EAX] =
1283 CPUID_6_EAX_ARAT,
1284 .xlevel = 0x80000008,
1285 .model_id = "Intel Core Processor (Skylake)",
1286 },
1287 {
1288 .name = "Opteron_G1",
1289 .level = 5,
1290 .vendor = CPUID_VENDOR_AMD,
1291 .family = 15,
1292 .model = 6,
1293 .stepping = 1,
1294 .features[FEAT_1_EDX] =
1295 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1296 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1297 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1298 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1299 CPUID_DE | CPUID_FP87,
1300 .features[FEAT_1_ECX] =
1301 CPUID_EXT_SSE3,
1302 .features[FEAT_8000_0001_EDX] =
1303 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1304 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1305 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1306 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1307 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1308 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1309 .xlevel = 0x80000008,
1310 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1311 },
1312 {
1313 .name = "Opteron_G2",
1314 .level = 5,
1315 .vendor = CPUID_VENDOR_AMD,
1316 .family = 15,
1317 .model = 6,
1318 .stepping = 1,
1319 .features[FEAT_1_EDX] =
1320 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1321 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1322 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1323 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1324 CPUID_DE | CPUID_FP87,
1325 .features[FEAT_1_ECX] =
1326 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1327 /* Missing: CPUID_EXT2_RDTSCP */
1328 .features[FEAT_8000_0001_EDX] =
1329 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1330 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1331 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1332 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1333 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1334 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1335 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1336 .features[FEAT_8000_0001_ECX] =
1337 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1338 .xlevel = 0x80000008,
1339 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1340 },
1341 {
1342 .name = "Opteron_G3",
1343 .level = 5,
1344 .vendor = CPUID_VENDOR_AMD,
1345 .family = 15,
1346 .model = 6,
1347 .stepping = 1,
1348 .features[FEAT_1_EDX] =
1349 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1350 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1351 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1352 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1353 CPUID_DE | CPUID_FP87,
1354 .features[FEAT_1_ECX] =
1355 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1356 CPUID_EXT_SSE3,
1357 /* Missing: CPUID_EXT2_RDTSCP */
1358 .features[FEAT_8000_0001_EDX] =
1359 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1360 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1361 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1362 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1363 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1364 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1365 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1366 .features[FEAT_8000_0001_ECX] =
1367 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1368 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1369 .xlevel = 0x80000008,
1370 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1371 },
1372 {
1373 .name = "Opteron_G4",
1374 .level = 0xd,
1375 .vendor = CPUID_VENDOR_AMD,
1376 .family = 21,
1377 .model = 1,
1378 .stepping = 2,
1379 .features[FEAT_1_EDX] =
1380 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1381 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1382 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1383 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1384 CPUID_DE | CPUID_FP87,
1385 .features[FEAT_1_ECX] =
1386 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1387 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1388 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1389 CPUID_EXT_SSE3,
1390 /* Missing: CPUID_EXT2_RDTSCP */
1391 .features[FEAT_8000_0001_EDX] =
1392 CPUID_EXT2_LM |
1393 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1394 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1395 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1396 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1397 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1398 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1399 .features[FEAT_8000_0001_ECX] =
1400 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1401 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1402 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1403 CPUID_EXT3_LAHF_LM,
1404 /* no xsaveopt! */
1405 .xlevel = 0x8000001A,
1406 .model_id = "AMD Opteron 62xx class CPU",
1407 },
1408 {
1409 .name = "Opteron_G5",
1410 .level = 0xd,
1411 .vendor = CPUID_VENDOR_AMD,
1412 .family = 21,
1413 .model = 2,
1414 .stepping = 0,
1415 .features[FEAT_1_EDX] =
1416 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1417 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1418 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1419 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1420 CPUID_DE | CPUID_FP87,
1421 .features[FEAT_1_ECX] =
1422 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1423 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1424 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1425 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1426 /* Missing: CPUID_EXT2_RDTSCP */
1427 .features[FEAT_8000_0001_EDX] =
1428 CPUID_EXT2_LM |
1429 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1430 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1431 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1432 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1433 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1434 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1435 .features[FEAT_8000_0001_ECX] =
1436 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1437 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1438 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1439 CPUID_EXT3_LAHF_LM,
1440 /* no xsaveopt! */
1441 .xlevel = 0x8000001A,
1442 .model_id = "AMD Opteron 63xx class CPU",
1443 },
1444};
1445
1446typedef struct PropValue {
1447 const char *prop, *value;
1448} PropValue;
1449
1450/* KVM-specific features that are automatically added/removed
1451 * from all CPU models when KVM is enabled.
1452 */
1453static PropValue kvm_default_props[] = {
1454 { "kvmclock", "on" },
1455 { "kvm-nopiodelay", "on" },
1456 { "kvm-asyncpf", "on" },
1457 { "kvm-steal-time", "on" },
1458 { "kvm-pv-eoi", "on" },
1459 { "kvmclock-stable-bit", "on" },
1460 { "x2apic", "on" },
1461 { "acpi", "off" },
1462 { "monitor", "off" },
1463 { "svm", "off" },
1464 { NULL, NULL },
1465};
1466
1467void x86_cpu_change_kvm_default(const char *prop, const char *value)
1468{
1469 PropValue *pv;
1470 for (pv = kvm_default_props; pv->prop; pv++) {
1471 if (!strcmp(pv->prop, prop)) {
1472 pv->value = value;
1473 break;
1474 }
1475 }
1476
1477 /* It is valid to call this function only for properties that
1478 * are already present in the kvm_default_props table.
1479 */
1480 assert(pv->prop);
1481}
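/* Hypothetical usage sketch (not a call made in this file): a machine type
 * wanting a different KVM default could run, during its init code,
 *
 *     x86_cpu_change_kvm_default("kvm-pv-eoi", "off");
 *
 * Passing a property name that is not already in kvm_default_props would hit
 * the assert above.
 */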
1482
1483static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1484 bool migratable_only);
1485
1486#ifdef CONFIG_KVM
1487
1488static int cpu_x86_fill_model_id(char *str)
1489{
1490 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1491 int i;
1492
1493 for (i = 0; i < 3; i++) {
1494 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1495 memcpy(str + i * 16 + 0, &eax, 4);
1496 memcpy(str + i * 16 + 4, &ebx, 4);
1497 memcpy(str + i * 16 + 8, &ecx, 4);
1498 memcpy(str + i * 16 + 12, &edx, 4);
1499 }
1500 return 0;
1501}
1502
1503static X86CPUDefinition host_cpudef;
1504
1505static Property host_x86_cpu_properties[] = {
1506 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1507 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1508 DEFINE_PROP_END_OF_LIST()
1509};
1510
1511/* class_init for the "host" CPU model
1512 *
1513 * This function may be called before KVM is initialized.
1514 */
1515static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1516{
1517 DeviceClass *dc = DEVICE_CLASS(oc);
1518 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1519 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1520
1521 xcc->kvm_required = true;
1522
1523 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1524 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1525
1526 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1527 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1528 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1529 host_cpudef.stepping = eax & 0x0F;
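    /* Example decode of the three statements above: EAX=0x000306c3 (a Haswell
     * signature) gives family 0x6 + 0x00 = 6, model 0xc | 0x30 = 0x3c (60,
     * matching the "Haswell" entry in builtin_x86_defs) and stepping 3.
     */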
1530
1531 cpu_x86_fill_model_id(host_cpudef.model_id);
1532
1533 xcc->cpu_def = &host_cpudef;
1534
1535 /* level, xlevel, xlevel2, and the feature words are initialized on
1536 * instance_init, because they require KVM to be initialized.
1537 */
1538
1539 dc->props = host_x86_cpu_properties;
1540 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1541 dc->cannot_destroy_with_object_finalize_yet = true;
1542}
1543
1544static void host_x86_cpu_initfn(Object *obj)
1545{
1546 X86CPU *cpu = X86_CPU(obj);
1547 CPUX86State *env = &cpu->env;
1548 KVMState *s = kvm_state;
1549
1550 assert(kvm_enabled());
1551
1552 /* We can't fill the features array here because we don't know yet if
1553 * "migratable" is true or false.
1554 */
1555 cpu->host_features = true;
1556
1557 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1558 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1559 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1560
1561 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1562}
1563
1564static const TypeInfo host_x86_cpu_type_info = {
1565 .name = X86_CPU_TYPE_NAME("host"),
1566 .parent = TYPE_X86_CPU,
1567 .instance_init = host_x86_cpu_initfn,
1568 .class_init = host_x86_cpu_class_init,
1569};
1570
1571#endif
1572
1573static void report_unavailable_features(FeatureWord w, uint32_t mask)
1574{
1575 FeatureWordInfo *f = &feature_word_info[w];
1576 int i;
1577
1578 for (i = 0; i < 32; ++i) {
1579 if ((1UL << i) & mask) {
1580 const char *reg = get_register_name_32(f->cpuid_reg);
1581 assert(reg);
1582 fprintf(stderr, "warning: %s doesn't support requested feature: "
1583 "CPUID.%02XH:%s%s%s [bit %d]\n",
1584 kvm_enabled() ? "host" : "TCG",
1585 f->cpuid_eax, reg,
1586 f->feat_names[i] ? "." : "",
1587 f->feat_names[i] ? f->feat_names[i] : "", i);
1588 }
1589 }
1590}
1591
1592static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1593 const char *name, void *opaque,
1594 Error **errp)
1595{
1596 X86CPU *cpu = X86_CPU(obj);
1597 CPUX86State *env = &cpu->env;
1598 int64_t value;
1599
1600 value = (env->cpuid_version >> 8) & 0xf;
1601 if (value == 0xf) {
1602 value += (env->cpuid_version >> 20) & 0xff;
1603 }
1604 visit_type_int(v, name, &value, errp);
1605}
1606
1607static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1608 const char *name, void *opaque,
1609 Error **errp)
1610{
1611 X86CPU *cpu = X86_CPU(obj);
1612 CPUX86State *env = &cpu->env;
1613 const int64_t min = 0;
1614 const int64_t max = 0xff + 0xf;
1615 Error *local_err = NULL;
1616 int64_t value;
1617
1618 visit_type_int(v, name, &value, &local_err);
1619 if (local_err) {
1620 error_propagate(errp, local_err);
1621 return;
1622 }
1623 if (value < min || value > max) {
1624 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1625 name ? name : "null", value, min, max);
1626 return;
1627 }
1628
1629 env->cpuid_version &= ~0xff00f00;
1630 if (value > 0x0f) {
1631 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1632 } else {
1633 env->cpuid_version |= value << 8;
1634 }
1635}
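/* Example: setting "family" to 21 (0x15, the value used by the Opteron_G4/G5
 * definitions above) takes the value > 0x0f branch, storing 0xf in the base
 * family field and 21 - 15 = 6 in the extended family field (6 << 20).
 */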
1636
1637static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1638 const char *name, void *opaque,
1639 Error **errp)
1640{
1641 X86CPU *cpu = X86_CPU(obj);
1642 CPUX86State *env = &cpu->env;
1643 int64_t value;
1644
1645 value = (env->cpuid_version >> 4) & 0xf;
1646 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1647 visit_type_int(v, name, &value, errp);
1648}
1649
1650static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1651 const char *name, void *opaque,
1652 Error **errp)
1653{
1654 X86CPU *cpu = X86_CPU(obj);
1655 CPUX86State *env = &cpu->env;
1656 const int64_t min = 0;
1657 const int64_t max = 0xff;
1658 Error *local_err = NULL;
1659 int64_t value;
1660
1661 visit_type_int(v, name, &value, &local_err);
1662 if (local_err) {
1663 error_propagate(errp, local_err);
1664 return;
1665 }
1666 if (value < min || value > max) {
1667 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1668 name ? name : "null", value, min, max);
1669 return;
1670 }
1671
1672 env->cpuid_version &= ~0xf00f0;
1673 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1674}
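/* Example: setting "model" to 60 (0x3c) stores the low nibble 0xc in bits 4-7
 * and the high nibble 0x3 in bits 16-19 of cpuid_version, the inverse of the
 * unpacking done by x86_cpuid_version_get_model() above.
 */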
1675
1676static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1677 const char *name, void *opaque,
1678 Error **errp)
1679{
1680 X86CPU *cpu = X86_CPU(obj);
1681 CPUX86State *env = &cpu->env;
1682 int64_t value;
1683
1684 value = env->cpuid_version & 0xf;
1685 visit_type_int(v, name, &value, errp);
1686}
1687
1688static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1689 const char *name, void *opaque,
1690 Error **errp)
1691{
1692 X86CPU *cpu = X86_CPU(obj);
1693 CPUX86State *env = &cpu->env;
1694 const int64_t min = 0;
1695 const int64_t max = 0xf;
1696 Error *local_err = NULL;
1697 int64_t value;
1698
1699 visit_type_int(v, name, &value, &local_err);
1700 if (local_err) {
1701 error_propagate(errp, local_err);
1702 return;
1703 }
1704 if (value < min || value > max) {
1705 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1706 name ? name : "null", value, min, max);
1707 return;
1708 }
1709
1710 env->cpuid_version &= ~0xf;
1711 env->cpuid_version |= value & 0xf;
1712}
1713
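/* The 12-character vendor string is kept as three little-endian 32-bit words
 * (cpuid_vendor1/2/3), exactly as CPUID.00H returns it in EBX/EDX/ECX.
 * Illustrative example: "GenuineIntel" packs as
 *     vendor1 = 0x756e6547  ("Genu")
 *     vendor2 = 0x49656e69  ("ineI")
 *     vendor3 = 0x6c65746e  ("ntel")
 */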
1714static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1715{
1716 X86CPU *cpu = X86_CPU(obj);
1717 CPUX86State *env = &cpu->env;
1718 char *value;
1719
1720 value = g_malloc(CPUID_VENDOR_SZ + 1);
1721 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1722 env->cpuid_vendor3);
1723 return value;
1724}
1725
1726static void x86_cpuid_set_vendor(Object *obj, const char *value,
1727 Error **errp)
1728{
1729 X86CPU *cpu = X86_CPU(obj);
1730 CPUX86State *env = &cpu->env;
1731 int i;
1732
1733 if (strlen(value) != CPUID_VENDOR_SZ) {
1734 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1735 return;
1736 }
1737
1738 env->cpuid_vendor1 = 0;
1739 env->cpuid_vendor2 = 0;
1740 env->cpuid_vendor3 = 0;
1741 for (i = 0; i < 4; i++) {
1742 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1743 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1744 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1745 }
1746}
1747
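/* The "model-id" property is the 48-byte processor brand string that guests
 * read via CPUID leaves 0x80000002..0x80000004 (see cpu_x86_cpuid() below).
 * It is stored four characters per 32-bit word in env->cpuid_model[] and
 * padded with NUL bytes.
 */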
1748static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1749{
1750 X86CPU *cpu = X86_CPU(obj);
1751 CPUX86State *env = &cpu->env;
1752 char *value;
1753 int i;
1754
1755 value = g_malloc(48 + 1);
1756 for (i = 0; i < 48; i++) {
1757 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1758 }
1759 value[48] = '\0';
1760 return value;
1761}
1762
1763static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1764 Error **errp)
1765{
1766 X86CPU *cpu = X86_CPU(obj);
1767 CPUX86State *env = &cpu->env;
1768 int c, len, i;
1769
1770 if (model_id == NULL) {
1771 model_id = "";
1772 }
1773 len = strlen(model_id);
1774 memset(env->cpuid_model, 0, 48);
1775 for (i = 0; i < 48; i++) {
1776 if (i >= len) {
1777 c = '\0';
1778 } else {
1779 c = (uint8_t)model_id[i];
1780 }
1781 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1782 }
1783}
1784
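/* The "tsc-frequency" property is expressed in Hz on the QOM side but stored
 * in kHz (env->tsc_khz) internally, so the getter multiplies and the setter
 * divides by 1000.  Illustrative example: setting tsc-frequency=2000000000
 * leaves env->tsc_khz == 2000000.
 */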
1785static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1786 void *opaque, Error **errp)
1787{
1788 X86CPU *cpu = X86_CPU(obj);
1789 int64_t value;
1790
1791 value = cpu->env.tsc_khz * 1000;
1792 visit_type_int(v, name, &value, errp);
1793}
1794
1795static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1796 void *opaque, Error **errp)
1797{
1798 X86CPU *cpu = X86_CPU(obj);
1799 const int64_t min = 0;
1800 const int64_t max = INT64_MAX;
1801 Error *local_err = NULL;
1802 int64_t value;
1803
1804 visit_type_int(v, name, &value, &local_err);
1805 if (local_err) {
1806 error_propagate(errp, local_err);
1807 return;
1808 }
1809 if (value < min || value > max) {
1810 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1811 name ? name : "null", value, min, max);
1812 return;
1813 }
1814
1815 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1816}
1817
1818static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1819 void *opaque, Error **errp)
1820{
1821 X86CPU *cpu = X86_CPU(obj);
1822 int64_t value = cpu->apic_id;
1823
1824 visit_type_int(v, name, &value, errp);
1825}
1826
1827static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1828 void *opaque, Error **errp)
1829{
1830 X86CPU *cpu = X86_CPU(obj);
1831 DeviceState *dev = DEVICE(obj);
1832 const int64_t min = 0;
1833 const int64_t max = UINT32_MAX;
1834 Error *error = NULL;
1835 int64_t value;
1836
1837 if (dev->realized) {
1838 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1839 "it was realized", name, object_get_typename(obj));
1840 return;
1841 }
1842
1843 visit_type_int(v, name, &value, &error);
1844 if (error) {
1845 error_propagate(errp, error);
1846 return;
1847 }
1848 if (value < min || value > max) {
1849 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1850                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1851 object_get_typename(obj), name, value, min, max);
1852 return;
1853 }
1854
1855 if ((value != cpu->apic_id) && cpu_exists(value)) {
1856 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1857 return;
1858 }
1859 cpu->apic_id = value;
1860}
1861
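/* The "feature-words" and "filtered-features" QOM properties expose the
 * env->features[] and cpu->filtered_features[] arrays as lists of
 * X86CPUFeatureWordInfo structs.  A management client can read them with
 * qom-get, e.g. (QMP sketch, the object path is configuration-dependent):
 *     { "execute": "qom-get",
 *       "arguments": { "path": "/machine/unattached/device[0]",
 *                      "property": "feature-words" } }
 */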
1862/* Generic getter for "feature-words" and "filtered-features" properties */
1863static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1864 const char *name, void *opaque,
1865 Error **errp)
1866{
1867 uint32_t *array = (uint32_t *)opaque;
1868 FeatureWord w;
1869 Error *err = NULL;
1870 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1871 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1872 X86CPUFeatureWordInfoList *list = NULL;
1873
1874 for (w = 0; w < FEATURE_WORDS; w++) {
1875 FeatureWordInfo *wi = &feature_word_info[w];
1876 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1877 qwi->cpuid_input_eax = wi->cpuid_eax;
1878 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1879 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1880 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1881 qwi->features = array[w];
1882
1883 /* List will be in reverse order, but order shouldn't matter */
1884 list_entries[w].next = list;
1885 list_entries[w].value = &word_infos[w];
1886 list = &list_entries[w];
1887 }
1888
1889 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1890 error_propagate(errp, err);
1891}
1892
1893static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1894 void *opaque, Error **errp)
1895{
1896 X86CPU *cpu = X86_CPU(obj);
1897 int64_t value = cpu->hyperv_spinlock_attempts;
1898
1899 visit_type_int(v, name, &value, errp);
1900}
1901
1902static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1903 void *opaque, Error **errp)
1904{
1905 const int64_t min = 0xFFF;
1906 const int64_t max = UINT_MAX;
1907 X86CPU *cpu = X86_CPU(obj);
1908 Error *err = NULL;
1909 int64_t value;
1910
1911 visit_type_int(v, name, &value, &err);
1912 if (err) {
1913 error_propagate(errp, err);
1914 return;
1915 }
1916
1917 if (value < min || value > max) {
1918 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1919 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1920 object_get_typename(obj), name ? name : "null",
1921 value, min, max);
1922 return;
1923 }
1924 cpu->hyperv_spinlock_attempts = value;
1925}
1926
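/* "hv-spinlocks" is the number of times a guest should retry a contended
 * spinlock before notifying the hypervisor; with KVM's Hyper-V enlightenments
 * enabled it is advertised to the guest via CPUID 0x40000004.EBX.  Values
 * below 0xFFF are rejected above, and the default,
 * HYPERV_SPINLOCK_NEVER_RETRY, effectively disables the notification.
 */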
1927static PropertyInfo qdev_prop_spinlocks = {
1928 .name = "int",
1929 .get = x86_get_hv_spinlocks,
1930 .set = x86_set_hv_spinlocks,
1931};
1932
1933/* Convert all '_' in a feature string option name to '-', to make the
1934 * feature name conform to the QOM property naming rule ('-' instead of '_').
1935 */
1936static inline void feat2prop(char *s)
1937{
1938 while ((s = strchr(s, '_'))) {
1939 *s = '-';
1940 }
1941}
1942
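/* Illustrative examples of feature strings accepted by
 * x86_cpu_parse_featurestr() below (assuming the named flags and properties
 * exist in this build):
 *     "+avx2,-vmx"           set/clear individual CPUID feature bits
 *     "tsc-freq=2.5G"        parsed with a size suffix and forwarded to the
 *                            "tsc-frequency" property in Hz
 *     "vendor=GenuineIntel"  any other key=value pair is applied as a QOM
 *                            property of the CPU object
 *     "check"                a bare name is treated as <name>=on
 */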
1943/* Parse "+feature,-feature,feature=foo" CPU feature string
1944 */
1945static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1946 Error **errp)
1947{
1948 X86CPU *cpu = X86_CPU(cs);
1949    char *featurestr; /* Single "key=value" string being parsed */
1950 FeatureWord w;
1951 /* Features to be added */
1952 FeatureWordArray plus_features = { 0 };
1953 /* Features to be removed */
1954 FeatureWordArray minus_features = { 0 };
1955 CPUX86State *env = &cpu->env;
1956 Error *local_err = NULL;
1957
1958 featurestr = features ? strtok(features, ",") : NULL;
1959
1960 while (featurestr) {
1961 char *val;
1962 if (featurestr[0] == '+') {
1963 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1964 } else if (featurestr[0] == '-') {
1965 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1966 } else if ((val = strchr(featurestr, '='))) {
1967 *val = 0; val++;
1968 feat2prop(featurestr);
1969 if (!strcmp(featurestr, "tsc-freq")) {
1970 int64_t tsc_freq;
1971 char *err;
1972 char num[32];
1973
1974 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1975 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1976 if (tsc_freq < 0 || *err) {
1977 error_setg(errp, "bad numerical value %s", val);
1978 return;
1979 }
1980 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1981 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1982 &local_err);
1983 } else {
1984 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1985 }
1986 } else {
1987 feat2prop(featurestr);
1988 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1989 }
1990 if (local_err) {
1991 error_propagate(errp, local_err);
1992 return;
1993 }
1994 featurestr = strtok(NULL, ",");
1995 }
1996
1997 if (cpu->host_features) {
1998 for (w = 0; w < FEATURE_WORDS; w++) {
1999 env->features[w] =
2000 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2001 }
2002 }
2003
2004 for (w = 0; w < FEATURE_WORDS; w++) {
2005 env->features[w] |= plus_features[w];
2006 env->features[w] &= ~minus_features[w];
2007 }
2008}
2009
2010/* Print all CPUID feature names in featureset, separated by spaces.
2011 */
2012static void listflags(FILE *f, fprintf_function print, const char **featureset)
2013{
2014 int bit;
2015 bool first = true;
2016
2017 for (bit = 0; bit < 32; bit++) {
2018 if (featureset[bit]) {
2019 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2020 first = false;
2021 }
2022 }
2023}
2024
2025/* Generate the list of supported CPU models and recognized CPUID flags. */
2026void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2027{
2028 X86CPUDefinition *def;
2029 char buf[256];
2030 int i;
2031
2032 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2033 def = &builtin_x86_defs[i];
2034 snprintf(buf, sizeof(buf), "%s", def->name);
2035 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2036 }
2037#ifdef CONFIG_KVM
2038 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2039 "KVM processor with all supported host features "
2040 "(only available in KVM mode)");
2041#endif
2042
2043 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2044 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2045 FeatureWordInfo *fw = &feature_word_info[i];
2046
2047 (*cpu_fprintf)(f, " ");
2048 listflags(f, cpu_fprintf, fw->feat_names);
2049 (*cpu_fprintf)(f, "\n");
2050 }
2051}
2052
2053CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2054{
2055 CpuDefinitionInfoList *cpu_list = NULL;
2056 X86CPUDefinition *def;
2057 int i;
2058
2059 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2060 CpuDefinitionInfoList *entry;
2061 CpuDefinitionInfo *info;
2062
2063 def = &builtin_x86_defs[i];
2064 info = g_malloc0(sizeof(*info));
2065 info->name = g_strdup(def->name);
2066
2067 entry = g_malloc0(sizeof(*entry));
2068 entry->value = info;
2069 entry->next = cpu_list;
2070 cpu_list = entry;
2071 }
2072
2073 return cpu_list;
2074}
2075
2076static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2077 bool migratable_only)
2078{
2079 FeatureWordInfo *wi = &feature_word_info[w];
2080 uint32_t r;
2081
2082 if (kvm_enabled()) {
2083 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2084 wi->cpuid_ecx,
2085 wi->cpuid_reg);
2086 } else if (tcg_enabled()) {
2087 r = wi->tcg_features;
2088 } else {
2089 return ~0;
2090 }
2091 if (migratable_only) {
2092 r &= x86_cpu_get_migratable_flags(w);
2093 }
2094 return r;
2095}
2096
2097/*
2098 * Filters CPU feature words based on host availability of each feature.
2099 *
2100 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2101 */
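/* Example (illustrative): with "-cpu Haswell,+avx512f,enforce" on a host
 * whose KVM does not report AVX512F, the bit is cleared from
 * env->features[FEAT_7_0_EBX], recorded in cpu->filtered_features[] and
 * reported by report_unavailable_features(); the non-zero return value then
 * makes realize fail because enforce_cpuid is set.
 */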
2102static int x86_cpu_filter_features(X86CPU *cpu)
2103{
2104 CPUX86State *env = &cpu->env;
2105 FeatureWord w;
2106 int rv = 0;
2107
2108 for (w = 0; w < FEATURE_WORDS; w++) {
2109 uint32_t host_feat =
2110 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2111 uint32_t requested_features = env->features[w];
2112 env->features[w] &= host_feat;
2113 cpu->filtered_features[w] = requested_features & ~env->features[w];
2114 if (cpu->filtered_features[w]) {
2115 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2116 report_unavailable_features(w, cpu->filtered_features[w]);
2117 }
2118 rv = 1;
2119 }
2120 }
2121
2122 return rv;
2123}
2124
2125static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2126{
2127 PropValue *pv;
2128 for (pv = props; pv->prop; pv++) {
2129 if (!pv->value) {
2130 continue;
2131 }
2132 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2133 &error_abort);
2134 }
2135}
2136
2137/* Load data from an X86CPUDefinition struct into an X86CPU object
2138 */
2139static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2140{
2141 CPUX86State *env = &cpu->env;
2142 const char *vendor;
2143 char host_vendor[CPUID_VENDOR_SZ + 1];
2144 FeatureWord w;
2145
2146 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2147 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2148 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2149 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2150 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2151 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2152 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2153 for (w = 0; w < FEATURE_WORDS; w++) {
2154 env->features[w] = def->features[w];
2155 }
2156
2157 /* Special cases not set in the X86CPUDefinition structs: */
2158 if (kvm_enabled()) {
2159 if (!kvm_irqchip_in_kernel()) {
2160 x86_cpu_change_kvm_default("x2apic", "off");
2161 }
2162
2163 x86_cpu_apply_props(cpu, kvm_default_props);
2164 }
2165
2166 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2167
2168 /* sysenter isn't supported in compatibility mode on AMD,
2169 * syscall isn't supported in compatibility mode on Intel.
2170 * Normally we advertise the actual CPU vendor, but you can
2171 * override this using the 'vendor' property if you want to use
2172 * KVM's sysenter/syscall emulation in compatibility mode and
2173     * when doing cross-vendor migration.
2174 */
2175 vendor = def->vendor;
2176 if (kvm_enabled()) {
2177 uint32_t ebx = 0, ecx = 0, edx = 0;
2178 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2179 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2180 vendor = host_vendor;
2181 }
2182
2183 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2184
2185}
2186
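/* cpu_x86_create() splits the -cpu model string on the first ',' into a CPU
 * model name and a feature string, e.g. (illustrative):
 *     X86CPU *cpu = cpu_x86_create("Haswell,+avx2,-vmx", &err);
 * The returned CPU is not yet realized; cpu_x86_init() below additionally
 * sets the "realized" property.
 */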
2187X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2188{
2189 X86CPU *cpu = NULL;
2190 X86CPUClass *xcc;
2191 ObjectClass *oc;
2192 gchar **model_pieces;
2193 char *name, *features;
2194 Error *error = NULL;
2195
2196 model_pieces = g_strsplit(cpu_model, ",", 2);
2197 if (!model_pieces[0]) {
2198 error_setg(&error, "Invalid/empty CPU model name");
2199 goto out;
2200 }
2201 name = model_pieces[0];
2202 features = model_pieces[1];
2203
2204 oc = x86_cpu_class_by_name(name);
2205 if (oc == NULL) {
2206 error_setg(&error, "Unable to find CPU definition: %s", name);
2207 goto out;
2208 }
2209 xcc = X86_CPU_CLASS(oc);
2210
2211 if (xcc->kvm_required && !kvm_enabled()) {
2212 error_setg(&error, "CPU model '%s' requires KVM", name);
2213 goto out;
2214 }
2215
2216 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2217
2218 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2219 if (error) {
2220 goto out;
2221 }
2222
2223out:
2224 if (error != NULL) {
2225 error_propagate(errp, error);
2226 if (cpu) {
2227 object_unref(OBJECT(cpu));
2228 cpu = NULL;
2229 }
2230 }
2231 g_strfreev(model_pieces);
2232 return cpu;
2233}
2234
2235X86CPU *cpu_x86_init(const char *cpu_model)
2236{
2237 Error *error = NULL;
2238 X86CPU *cpu;
2239
2240 cpu = cpu_x86_create(cpu_model, &error);
2241 if (error) {
2242 goto out;
2243 }
2244
2245 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2246
2247out:
2248 if (error) {
2249 error_report_err(error);
2250 if (cpu != NULL) {
2251 object_unref(OBJECT(cpu));
2252 cpu = NULL;
2253 }
2254 }
2255 return cpu;
2256}
2257
2258static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2259{
2260 X86CPUDefinition *cpudef = data;
2261 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2262
2263 xcc->cpu_def = cpudef;
2264}
2265
2266static void x86_register_cpudef_type(X86CPUDefinition *def)
2267{
2268 char *typename = x86_cpu_type_name(def->name);
2269 TypeInfo ti = {
2270 .name = typename,
2271 .parent = TYPE_X86_CPU,
2272 .class_init = x86_cpu_cpudef_class_init,
2273 .class_data = def,
2274 };
2275
2276 type_register(&ti);
2277 g_free(typename);
2278}
2279
2280#if !defined(CONFIG_USER_ONLY)
2281
2282void cpu_clear_apic_feature(CPUX86State *env)
2283{
2284 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2285}
2286
2287#endif /* !CONFIG_USER_ONLY */
2288
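/* cpu_x86_cpuid() fills EAX/EBX/ECX/EDX for a given leaf (index) and
 * sub-leaf (count).  Two encoding details worth keeping in mind:
 *
 * - Leaf 0 returns the vendor string in EBX, EDX, ECX order, so
 *   "GenuineIntel" reads back as EBX="Genu", EDX="ineI", ECX="ntel".
 *
 * - Leaf 4 describes each cache as EBX = (line_size - 1) |
 *   ((partitions - 1) << 12) | ((ways - 1) << 22) and ECX = sets - 1.
 *   Illustrative example with the hardcoded L1D values (64-byte lines,
 *   1 partition, 8 ways, 64 sets): EBX = 0x01c0003f, ECX = 0x3f, i.e.
 *   64 * 1 * 8 * 64 = 32 KiB.
 */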
2289void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2290 uint32_t *eax, uint32_t *ebx,
2291 uint32_t *ecx, uint32_t *edx)
2292{
2293 X86CPU *cpu = x86_env_get_cpu(env);
2294 CPUState *cs = CPU(cpu);
2295
2296    /* Clamp out-of-range index values to the highest supported leaf */
2297 if (index & 0x80000000) {
2298 if (index > env->cpuid_xlevel) {
2299 if (env->cpuid_xlevel2 > 0) {
2300                /* Handle Centaur's extended CPUID leaves. */
2301 if (index > env->cpuid_xlevel2) {
2302 index = env->cpuid_xlevel2;
2303 } else if (index < 0xC0000000) {
2304 index = env->cpuid_xlevel;
2305 }
2306 } else {
2307 /* Intel documentation states that invalid EAX input will
2308 * return the same information as EAX=cpuid_level
2309 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2310 */
2311 index = env->cpuid_level;
2312 }
2313 }
2314 } else {
2315 if (index > env->cpuid_level)
2316 index = env->cpuid_level;
2317 }
2318
2319 switch(index) {
2320 case 0:
2321 *eax = env->cpuid_level;
2322 *ebx = env->cpuid_vendor1;
2323 *edx = env->cpuid_vendor2;
2324 *ecx = env->cpuid_vendor3;
2325 break;
2326 case 1:
2327 *eax = env->cpuid_version;
2328 *ebx = (cpu->apic_id << 24) |
2329 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2330 *ecx = env->features[FEAT_1_ECX];
2331 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2332 *ecx |= CPUID_EXT_OSXSAVE;
2333 }
2334 *edx = env->features[FEAT_1_EDX];
2335 if (cs->nr_cores * cs->nr_threads > 1) {
2336 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2337 *edx |= CPUID_HT;
2338 }
2339 break;
2340 case 2:
2341 /* cache info: needed for Pentium Pro compatibility */
2342 if (cpu->cache_info_passthrough) {
2343 host_cpuid(index, 0, eax, ebx, ecx, edx);
2344 break;
2345 }
2346 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2347 *ebx = 0;
2348 *ecx = 0;
2349 *edx = (L1D_DESCRIPTOR << 16) | \
2350 (L1I_DESCRIPTOR << 8) | \
2351 (L2_DESCRIPTOR);
2352 break;
2353 case 4:
2354 /* cache info: needed for Core compatibility */
2355 if (cpu->cache_info_passthrough) {
2356 host_cpuid(index, count, eax, ebx, ecx, edx);
2357 *eax &= ~0xFC000000;
2358 } else {
2359 *eax = 0;
2360 switch (count) {
2361 case 0: /* L1 dcache info */
2362 *eax |= CPUID_4_TYPE_DCACHE | \
2363 CPUID_4_LEVEL(1) | \
2364 CPUID_4_SELF_INIT_LEVEL;
2365 *ebx = (L1D_LINE_SIZE - 1) | \
2366 ((L1D_PARTITIONS - 1) << 12) | \
2367 ((L1D_ASSOCIATIVITY - 1) << 22);
2368 *ecx = L1D_SETS - 1;
2369 *edx = CPUID_4_NO_INVD_SHARING;
2370 break;
2371 case 1: /* L1 icache info */
2372 *eax |= CPUID_4_TYPE_ICACHE | \
2373 CPUID_4_LEVEL(1) | \
2374 CPUID_4_SELF_INIT_LEVEL;
2375 *ebx = (L1I_LINE_SIZE - 1) | \
2376 ((L1I_PARTITIONS - 1) << 12) | \
2377 ((L1I_ASSOCIATIVITY - 1) << 22);
2378 *ecx = L1I_SETS - 1;
2379 *edx = CPUID_4_NO_INVD_SHARING;
2380 break;
2381 case 2: /* L2 cache info */
2382 *eax |= CPUID_4_TYPE_UNIFIED | \
2383 CPUID_4_LEVEL(2) | \
2384 CPUID_4_SELF_INIT_LEVEL;
2385 if (cs->nr_threads > 1) {
2386 *eax |= (cs->nr_threads - 1) << 14;
2387 }
2388 *ebx = (L2_LINE_SIZE - 1) | \
2389 ((L2_PARTITIONS - 1) << 12) | \
2390 ((L2_ASSOCIATIVITY - 1) << 22);
2391 *ecx = L2_SETS - 1;
2392 *edx = CPUID_4_NO_INVD_SHARING;
2393 break;
2394 default: /* end of info */
2395 *eax = 0;
2396 *ebx = 0;
2397 *ecx = 0;
2398 *edx = 0;
2399 break;
2400 }
2401 }
2402
2403 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2404 if ((*eax & 31) && cs->nr_cores > 1) {
2405 *eax |= (cs->nr_cores - 1) << 26;
2406 }
2407 break;
2408 case 5:
2409 /* mwait info: needed for Core compatibility */
2410 *eax = 0; /* Smallest monitor-line size in bytes */
2411 *ebx = 0; /* Largest monitor-line size in bytes */
2412 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2413 *edx = 0;
2414 break;
2415 case 6:
2416 /* Thermal and Power Leaf */
2417 *eax = env->features[FEAT_6_EAX];
2418 *ebx = 0;
2419 *ecx = 0;
2420 *edx = 0;
2421 break;
2422 case 7:
2423 /* Structured Extended Feature Flags Enumeration Leaf */
2424 if (count == 0) {
2425 *eax = 0; /* Maximum ECX value for sub-leaves */
2426 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2427 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2428 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2429 *ecx |= CPUID_7_0_ECX_OSPKE;
2430 }
2431 *edx = 0; /* Reserved */
2432 } else {
2433 *eax = 0;
2434 *ebx = 0;
2435 *ecx = 0;
2436 *edx = 0;
2437 }
2438 break;
2439 case 9:
2440 /* Direct Cache Access Information Leaf */
2441 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2442 *ebx = 0;
2443 *ecx = 0;
2444 *edx = 0;
2445 break;
2446 case 0xA:
2447 /* Architectural Performance Monitoring Leaf */
2448 if (kvm_enabled() && cpu->enable_pmu) {
2449 KVMState *s = cs->kvm_state;
2450
2451 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2452 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2453 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2454 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2455 } else {
2456 *eax = 0;
2457 *ebx = 0;
2458 *ecx = 0;
2459 *edx = 0;
2460 }
2461 break;
2462 case 0xB:
2463 /* Extended Topology Enumeration Leaf */
2464 if (!cpu->enable_cpuid_0xb) {
2465 *eax = *ebx = *ecx = *edx = 0;
2466 break;
2467 }
2468
2469 *ecx = count & 0xff;
2470 *edx = cpu->apic_id;
2471
2472 switch (count) {
2473 case 0:
2474 *eax = apicid_core_offset(smp_cores, smp_threads);
2475 *ebx = smp_threads;
2476 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2477 break;
2478 case 1:
2479 *eax = apicid_pkg_offset(smp_cores, smp_threads);
2480 *ebx = smp_cores * smp_threads;
2481 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2482 break;
2483 default:
2484 *eax = 0;
2485 *ebx = 0;
2486 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2487 }
2488
2489 assert(!(*eax & ~0x1f));
2490 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2491 break;
2492 case 0xD: {
2493 KVMState *s = cs->kvm_state;
2494 uint64_t ena_mask;
2495 int i;
2496
2497 /* Processor Extended State */
2498 *eax = 0;
2499 *ebx = 0;
2500 *ecx = 0;
2501 *edx = 0;
2502 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2503 break;
2504 }
2505 if (kvm_enabled()) {
2506 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2507 ena_mask <<= 32;
2508 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2509 } else {
2510 ena_mask = -1;
2511 }
2512
2513 if (count == 0) {
2514 *ecx = 0x240;
2515 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2516 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2517 if ((env->features[esa->feature] & esa->bits) == esa->bits
2518 && ((ena_mask >> i) & 1) != 0) {
2519 if (i < 32) {
2520 *eax |= 1u << i;
2521 } else {
2522 *edx |= 1u << (i - 32);
2523 }
2524 *ecx = MAX(*ecx, esa->offset + esa->size);
2525 }
2526 }
2527 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2528 *ebx = *ecx;
2529 } else if (count == 1) {
2530 *eax = env->features[FEAT_XSAVE];
2531 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2532 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2533 if ((env->features[esa->feature] & esa->bits) == esa->bits
2534 && ((ena_mask >> count) & 1) != 0) {
2535 *eax = esa->size;
2536 *ebx = esa->offset;
2537 }
2538 }
2539 break;
2540 }
2541 case 0x80000000:
2542 *eax = env->cpuid_xlevel;
2543 *ebx = env->cpuid_vendor1;
2544 *edx = env->cpuid_vendor2;
2545 *ecx = env->cpuid_vendor3;
2546 break;
2547 case 0x80000001:
2548 *eax = env->cpuid_version;
2549 *ebx = 0;
2550 *ecx = env->features[FEAT_8000_0001_ECX];
2551 *edx = env->features[FEAT_8000_0001_EDX];
2552
2553 /* The Linux kernel checks for the CMPLegacy bit and
2554 * discards multiple thread information if it is set.
2555 * So don't set it here for Intel to make Linux guests happy.
2556 */
2557 if (cs->nr_cores * cs->nr_threads > 1) {
2558 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2559 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2560 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2561 *ecx |= 1 << 1; /* CmpLegacy bit */
2562 }
2563 }
2564 break;
2565 case 0x80000002:
2566 case 0x80000003:
2567 case 0x80000004:
2568 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2569 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2570 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2571 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2572 break;
2573 case 0x80000005:
2574 /* cache info (L1 cache) */
2575 if (cpu->cache_info_passthrough) {
2576 host_cpuid(index, 0, eax, ebx, ecx, edx);
2577 break;
2578 }
2579 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2580 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2581 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2582 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2583 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2584 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2585 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2586 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2587 break;
2588 case 0x80000006:
2589 /* cache info (L2 cache) */
2590 if (cpu->cache_info_passthrough) {
2591 host_cpuid(index, 0, eax, ebx, ecx, edx);
2592 break;
2593 }
2594 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2595 (L2_DTLB_2M_ENTRIES << 16) | \
2596 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2597 (L2_ITLB_2M_ENTRIES);
2598 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2599 (L2_DTLB_4K_ENTRIES << 16) | \
2600 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2601 (L2_ITLB_4K_ENTRIES);
2602 *ecx = (L2_SIZE_KB_AMD << 16) | \
2603 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2604 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2605 *edx = ((L3_SIZE_KB/512) << 18) | \
2606 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2607 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2608 break;
2609 case 0x80000007:
2610 *eax = 0;
2611 *ebx = 0;
2612 *ecx = 0;
2613 *edx = env->features[FEAT_8000_0007_EDX];
2614 break;
2615 case 0x80000008:
2616 /* virtual & phys address size in low 2 bytes. */
2617/* XXX: This value must match the one used in the MMU code. */
2618 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2619 /* 64 bit processor */
2620/* XXX: The physical address space is limited to 42 bits in exec.c. */
2621 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2622 } else {
2623 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2624 *eax = 0x00000024; /* 36 bits physical */
2625 } else {
2626 *eax = 0x00000020; /* 32 bits physical */
2627 }
2628 }
2629 *ebx = 0;
2630 *ecx = 0;
2631 *edx = 0;
2632 if (cs->nr_cores * cs->nr_threads > 1) {
2633 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2634 }
2635 break;
2636 case 0x8000000A:
2637 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2638 *eax = 0x00000001; /* SVM Revision */
2639 *ebx = 0x00000010; /* nr of ASIDs */
2640 *ecx = 0;
2641 *edx = env->features[FEAT_SVM]; /* optional features */
2642 } else {
2643 *eax = 0;
2644 *ebx = 0;
2645 *ecx = 0;
2646 *edx = 0;
2647 }
2648 break;
2649 case 0xC0000000:
2650 *eax = env->cpuid_xlevel2;
2651 *ebx = 0;
2652 *ecx = 0;
2653 *edx = 0;
2654 break;
2655 case 0xC0000001:
2656 /* Support for VIA CPU's CPUID instruction */
2657 *eax = env->cpuid_version;
2658 *ebx = 0;
2659 *ecx = 0;
2660 *edx = env->features[FEAT_C000_0001_EDX];
2661 break;
2662 case 0xC0000002:
2663 case 0xC0000003:
2664 case 0xC0000004:
2665        /* Reserved for future use; currently filled with zeros */
2666 *eax = 0;
2667 *ebx = 0;
2668 *ecx = 0;
2669 *edx = 0;
2670 break;
2671 default:
2672 /* reserved values: zero */
2673 *eax = 0;
2674 *ebx = 0;
2675 *ecx = 0;
2676 *edx = 0;
2677 break;
2678 }
2679}
2680
2681/* CPUClass::reset() */
2682static void x86_cpu_reset(CPUState *s)
2683{
2684 X86CPU *cpu = X86_CPU(s);
2685 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2686 CPUX86State *env = &cpu->env;
2687 target_ulong cr4;
2688 uint64_t xcr0;
2689 int i;
2690
2691 xcc->parent_reset(s);
2692
2693 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2694
2695 tlb_flush(s, 1);
2696
2697 env->old_exception = -1;
2698
2699 /* init to reset state */
2700
2701#ifdef CONFIG_SOFTMMU
2702 env->hflags |= HF_SOFTMMU_MASK;
2703#endif
2704 env->hflags2 |= HF2_GIF_MASK;
2705
2706 cpu_x86_update_cr0(env, 0x60000010);
2707 env->a20_mask = ~0x0;
2708 env->smbase = 0x30000;
2709
2710 env->idt.limit = 0xffff;
2711 env->gdt.limit = 0xffff;
2712 env->ldt.limit = 0xffff;
2713 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2714 env->tr.limit = 0xffff;
2715 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2716
2717 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2718 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2719 DESC_R_MASK | DESC_A_MASK);
2720 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2721 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2722 DESC_A_MASK);
2723 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2724 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2725 DESC_A_MASK);
2726 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2727 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2728 DESC_A_MASK);
2729 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2730 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2731 DESC_A_MASK);
2732 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2733 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2734 DESC_A_MASK);
2735
2736 env->eip = 0xfff0;
2737 env->regs[R_EDX] = env->cpuid_version;
2738
2739 env->eflags = 0x2;
2740
2741 /* FPU init */
2742 for (i = 0; i < 8; i++) {
2743 env->fptags[i] = 1;
2744 }
2745 cpu_set_fpuc(env, 0x37f);
2746
2747 env->mxcsr = 0x1f80;
2748 /* All units are in INIT state. */
2749 env->xstate_bv = 0;
2750
2751 env->pat = 0x0007040600070406ULL;
2752 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2753
2754 memset(env->dr, 0, sizeof(env->dr));
2755 env->dr[6] = DR6_FIXED_1;
2756 env->dr[7] = DR7_FIXED_1;
2757 cpu_breakpoint_remove_all(s, BP_CPU);
2758 cpu_watchpoint_remove_all(s, BP_CPU);
2759
2760 cr4 = 0;
2761 xcr0 = XSTATE_FP_MASK;
2762
2763#ifdef CONFIG_USER_ONLY
2764 /* Enable all the features for user-mode. */
2765 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2766 xcr0 |= XSTATE_SSE_MASK;
2767 }
2768 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2769 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2770 if ((env->features[esa->feature] & esa->bits) == esa->bits) {
2771 xcr0 |= 1ull << i;
2772 }
2773 }
2774
2775 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2776 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2777 }
2778 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2779 cr4 |= CR4_FSGSBASE_MASK;
2780 }
2781#endif
2782
2783 env->xcr0 = xcr0;
2784 cpu_x86_update_cr4(env, cr4);
2785
2786 /*
2787 * SDM 11.11.5 requires:
2788 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2789 * - IA32_MTRR_PHYSMASKn.V = 0
2790 * All other bits are undefined. For simplification, zero it all.
2791 */
2792 env->mtrr_deftype = 0;
2793 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2794 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2795
2796#if !defined(CONFIG_USER_ONLY)
2797 /* We hard-wire the BSP to the first CPU. */
2798 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2799
2800 s->halted = !cpu_is_bsp(cpu);
2801
2802 if (kvm_enabled()) {
2803 kvm_arch_reset_vcpu(cpu);
2804 }
2805#endif
2806}
2807
2808#ifndef CONFIG_USER_ONLY
2809bool cpu_is_bsp(X86CPU *cpu)
2810{
2811 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2812}
2813
2814/* TODO: remove me, when reset over QOM tree is implemented */
2815static void x86_cpu_machine_reset_cb(void *opaque)
2816{
2817 X86CPU *cpu = opaque;
2818 cpu_reset(CPU(cpu));
2819}
2820#endif
2821
2822static void mce_init(X86CPU *cpu)
2823{
2824 CPUX86State *cenv = &cpu->env;
2825 unsigned int bank;
2826
2827 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2828 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2829 (CPUID_MCE | CPUID_MCA)) {
2830 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2831 cenv->mcg_ctl = ~(uint64_t)0;
2832 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2833 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2834 }
2835 }
2836}
2837
2838#ifndef CONFIG_USER_ONLY
2839static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2840{
2841 APICCommonState *apic;
2842 const char *apic_type = "apic";
2843
2844 if (kvm_apic_in_kernel()) {
2845 apic_type = "kvm-apic";
2846 } else if (xen_enabled()) {
2847 apic_type = "xen-apic";
2848 }
2849
2850 cpu->apic_state = DEVICE(object_new(apic_type));
2851
2852 object_property_add_child(OBJECT(cpu), "apic",
2853 OBJECT(cpu->apic_state), NULL);
2854 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2855 /* TODO: convert to link<> */
2856 apic = APIC_COMMON(cpu->apic_state);
2857 apic->cpu = cpu;
2858 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2859}
2860
2861static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2862{
2863 APICCommonState *apic;
2864 static bool apic_mmio_map_once;
2865
2866 if (cpu->apic_state == NULL) {
2867 return;
2868 }
2869 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2870 errp);
2871
2872 /* Map APIC MMIO area */
2873 apic = APIC_COMMON(cpu->apic_state);
2874 if (!apic_mmio_map_once) {
2875 memory_region_add_subregion_overlap(get_system_memory(),
2876 apic->apicbase &
2877 MSR_IA32_APICBASE_BASE,
2878 &apic->io_memory,
2879 0x1000);
2880 apic_mmio_map_once = true;
2881 }
2882}
2883
2884static void x86_cpu_machine_done(Notifier *n, void *unused)
2885{
2886 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2887 MemoryRegion *smram =
2888 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2889
2890 if (smram) {
2891 cpu->smram = g_new(MemoryRegion, 1);
2892 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2893 smram, 0, 1ull << 32);
2894 memory_region_set_enabled(cpu->smram, false);
2895 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2896 }
2897}
2898#else
2899static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2900{
2901}
2902#endif
2903
2904
2905#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2906 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2907 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2908#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2909 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2910 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2911static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2912{
2913 CPUState *cs = CPU(dev);
2914 X86CPU *cpu = X86_CPU(dev);
2915 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2916 CPUX86State *env = &cpu->env;
2917 Error *local_err = NULL;
2918 static bool ht_warned;
2919
2920 if (cpu->apic_id < 0) {
2921 error_setg(errp, "apic-id property was not initialized properly");
2922 return;
2923 }
2924
2925 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2926 env->cpuid_level = 7;
2927 }
2928
2929 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2930 error_setg(&local_err,
2931 kvm_enabled() ?
2932 "Host doesn't support requested features" :
2933 "TCG doesn't support requested features");
2934 goto out;
2935 }
2936
2937 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2938 * CPUID[1].EDX.
2939 */
2940 if (IS_AMD_CPU(env)) {
2941 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2942 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2943 & CPUID_EXT2_AMD_ALIASES);
2944 }
2945
2946
2947 cpu_exec_init(cs, &error_abort);
2948
2949 if (tcg_enabled()) {
2950 tcg_x86_init();
2951 }
2952
2953#ifndef CONFIG_USER_ONLY
2954 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2955
2956 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2957 x86_cpu_apic_create(cpu, &local_err);
2958 if (local_err != NULL) {
2959 goto out;
2960 }
2961 }
2962#endif
2963
2964 mce_init(cpu);
2965
2966#ifndef CONFIG_USER_ONLY
2967 if (tcg_enabled()) {
2968 AddressSpace *newas = g_new(AddressSpace, 1);
2969
2970 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2971 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2972
2973 /* Outer container... */
2974 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2975 memory_region_set_enabled(cpu->cpu_as_root, true);
2976
2977 /* ... with two regions inside: normal system memory with low
2978 * priority, and...
2979 */
2980 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2981 get_system_memory(), 0, ~0ull);
2982 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2983 memory_region_set_enabled(cpu->cpu_as_mem, true);
2984 address_space_init(newas, cpu->cpu_as_root, "CPU");
2985 cs->num_ases = 1;
2986 cpu_address_space_init(cs, newas, 0);
2987
2988 /* ... SMRAM with higher priority, linked from /machine/smram. */
2989 cpu->machine_done.notify = x86_cpu_machine_done;
2990 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2991 }
2992#endif
2993
2994 qemu_init_vcpu(cs);
2995
2996 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2997 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2998     * based on inputs (sockets,cores,threads), it is still better to give
2999 * users a warning.
3000 *
3001 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3002     * cs->nr_threads hasn't been populated yet and the check would be wrong.
3003 */
3004 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3005 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3006 " -smp options properly.");
3007 ht_warned = true;
3008 }
3009
3010 x86_cpu_apic_realize(cpu, &local_err);
3011 if (local_err != NULL) {
3012 goto out;
3013 }
3014 cpu_reset(cs);
3015
3016 xcc->parent_realize(dev, &local_err);
3017
3018out:
3019 if (local_err != NULL) {
3020 error_propagate(errp, local_err);
3021 return;
3022 }
3023}
3024
3025typedef struct BitProperty {
3026 uint32_t *ptr;
3027 uint32_t mask;
3028} BitProperty;
3029
3030static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3031 void *opaque, Error **errp)
3032{
3033 BitProperty *fp = opaque;
3034 bool value = (*fp->ptr & fp->mask) == fp->mask;
3035 visit_type_bool(v, name, &value, errp);
3036}
3037
3038static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3039 void *opaque, Error **errp)
3040{
3041 DeviceState *dev = DEVICE(obj);
3042 BitProperty *fp = opaque;
3043 Error *local_err = NULL;
3044 bool value;
3045
3046 if (dev->realized) {
3047 qdev_prop_set_after_realize(dev, name, errp);
3048 return;
3049 }
3050
3051 visit_type_bool(v, name, &value, &local_err);
3052 if (local_err) {
3053 error_propagate(errp, local_err);
3054 return;
3055 }
3056
3057 if (value) {
3058 *fp->ptr |= fp->mask;
3059 } else {
3060 *fp->ptr &= ~fp->mask;
3061 }
3062}
3063
3064static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3065 void *opaque)
3066{
3067 BitProperty *prop = opaque;
3068 g_free(prop);
3069}
3070
3071/* Register a boolean property to get/set a single bit in a uint32_t field.
3072 *
3073 * The same property name can be registered multiple times to make it affect
3074 * multiple bits in the same FeatureWord. In that case, the getter will return
3075 * true only if all bits are set.
3076 */
3077static void x86_cpu_register_bit_prop(X86CPU *cpu,
3078 const char *prop_name,
3079 uint32_t *field,
3080 int bitnr)
3081{
3082 BitProperty *fp;
3083 ObjectProperty *op;
3084 uint32_t mask = (1UL << bitnr);
3085
3086 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3087 if (op) {
3088 fp = op->opaque;
3089 assert(fp->ptr == field);
3090 fp->mask |= mask;
3091 } else {
3092 fp = g_new0(BitProperty, 1);
3093 fp->ptr = field;
3094 fp->mask = mask;
3095 object_property_add(OBJECT(cpu), prop_name, "bool",
3096 x86_cpu_get_bit_prop,
3097 x86_cpu_set_bit_prop,
3098 x86_cpu_release_bit_prop, fp, &error_abort);
3099 }
3100}
3101
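/* Entries in feat_names[] may contain '|'-separated aliases.  The first name
 * becomes the canonical bit property and the remaining names are registered
 * as QOM aliases of it; e.g. (if such an entry is present) "pni|sse3" yields
 * a "pni" bit property plus an "sse3" alias, so both "-cpu ...,+pni" and
 * "-cpu ...,+sse3" work.
 */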
3102static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3103 FeatureWord w,
3104 int bitnr)
3105{
3106 Object *obj = OBJECT(cpu);
3107 int i;
3108 char **names;
3109 FeatureWordInfo *fi = &feature_word_info[w];
3110
3111 if (!fi->feat_names) {
3112 return;
3113 }
3114 if (!fi->feat_names[bitnr]) {
3115 return;
3116 }
3117
3118 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3119
3120 feat2prop(names[0]);
3121 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3122
3123 for (i = 1; names[i]; i++) {
3124 feat2prop(names[i]);
3125 object_property_add_alias(obj, names[i], obj, names[0],
3126 &error_abort);
3127 }
3128
3129 g_strfreev(names);
3130}
3131
3132static void x86_cpu_initfn(Object *obj)
3133{
3134 CPUState *cs = CPU(obj);
3135 X86CPU *cpu = X86_CPU(obj);
3136 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3137 CPUX86State *env = &cpu->env;
3138 FeatureWord w;
3139
3140 cs->env_ptr = env;
3141
3142 object_property_add(obj, "family", "int",
3143 x86_cpuid_version_get_family,
3144 x86_cpuid_version_set_family, NULL, NULL, NULL);
3145 object_property_add(obj, "model", "int",
3146 x86_cpuid_version_get_model,
3147 x86_cpuid_version_set_model, NULL, NULL, NULL);
3148 object_property_add(obj, "stepping", "int",
3149 x86_cpuid_version_get_stepping,
3150 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3151 object_property_add_str(obj, "vendor",
3152 x86_cpuid_get_vendor,
3153 x86_cpuid_set_vendor, NULL);
3154 object_property_add_str(obj, "model-id",
3155 x86_cpuid_get_model_id,
3156 x86_cpuid_set_model_id, NULL);
3157 object_property_add(obj, "tsc-frequency", "int",
3158 x86_cpuid_get_tsc_freq,
3159 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3160 object_property_add(obj, "apic-id", "int",
3161 x86_cpuid_get_apic_id,
3162 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3163 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3164 x86_cpu_get_feature_words,
3165 NULL, NULL, (void *)env->features, NULL);
3166 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3167 x86_cpu_get_feature_words,
3168 NULL, NULL, (void *)cpu->filtered_features, NULL);
3169
3170 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3171
3172#ifndef CONFIG_USER_ONLY
3173    /* Any code creating new X86CPU objects has to set apic-id explicitly */
3174 cpu->apic_id = -1;
3175#endif
3176
3177 for (w = 0; w < FEATURE_WORDS; w++) {
3178 int bitnr;
3179
3180 for (bitnr = 0; bitnr < 32; bitnr++) {
3181 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3182 }
3183 }
3184
3185 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3186}
3187
3188static int64_t x86_cpu_get_arch_id(CPUState *cs)
3189{
3190 X86CPU *cpu = X86_CPU(cs);
3191
3192 return cpu->apic_id;
3193}
3194
3195static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3196{
3197 X86CPU *cpu = X86_CPU(cs);
3198
3199 return cpu->env.cr[0] & CR0_PG_MASK;
3200}
3201
3202static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3203{
3204 X86CPU *cpu = X86_CPU(cs);
3205
3206 cpu->env.eip = value;
3207}
3208
3209static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3210{
3211 X86CPU *cpu = X86_CPU(cs);
3212
3213 cpu->env.eip = tb->pc - tb->cs_base;
3214}
3215
3216static bool x86_cpu_has_work(CPUState *cs)
3217{
3218 X86CPU *cpu = X86_CPU(cs);
3219 CPUX86State *env = &cpu->env;
3220
3221 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3222 CPU_INTERRUPT_POLL)) &&
3223 (env->eflags & IF_MASK)) ||
3224 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3225 CPU_INTERRUPT_INIT |
3226 CPU_INTERRUPT_SIPI |
3227 CPU_INTERRUPT_MCE)) ||
3228 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3229 !(env->hflags & HF_SMM_MASK));
3230}
3231
3232static Property x86_cpu_properties[] = {
3233 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3234 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3235 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3236 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3237 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3238 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3239 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3240 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3241 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3242 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3243 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3244 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3245 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3246 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3247 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3248 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3249 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3250 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3251 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
3252 DEFINE_PROP_END_OF_LIST()
3253};
3254
3255static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3256{
3257 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3258 CPUClass *cc = CPU_CLASS(oc);
3259 DeviceClass *dc = DEVICE_CLASS(oc);
3260
3261 xcc->parent_realize = dc->realize;
3262 dc->realize = x86_cpu_realizefn;
3263 dc->props = x86_cpu_properties;
3264
3265 xcc->parent_reset = cc->reset;
3266 cc->reset = x86_cpu_reset;
3267 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3268
3269 cc->class_by_name = x86_cpu_class_by_name;
3270 cc->parse_features = x86_cpu_parse_featurestr;
3271 cc->has_work = x86_cpu_has_work;
3272 cc->do_interrupt = x86_cpu_do_interrupt;
3273 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3274 cc->dump_state = x86_cpu_dump_state;
3275 cc->set_pc = x86_cpu_set_pc;
3276 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3277 cc->gdb_read_register = x86_cpu_gdb_read_register;
3278 cc->gdb_write_register = x86_cpu_gdb_write_register;
3279 cc->get_arch_id = x86_cpu_get_arch_id;
3280 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3281#ifdef CONFIG_USER_ONLY
3282 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3283#else
3284 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3285 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3286 cc->write_elf64_note = x86_cpu_write_elf64_note;
3287 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3288 cc->write_elf32_note = x86_cpu_write_elf32_note;
3289 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3290 cc->vmsd = &vmstate_x86_cpu;
3291#endif
3292 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3293#ifndef CONFIG_USER_ONLY
3294 cc->debug_excp_handler = breakpoint_handler;
3295#endif
3296 cc->cpu_exec_enter = x86_cpu_exec_enter;
3297 cc->cpu_exec_exit = x86_cpu_exec_exit;
3298
3299 /*
3300 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3301 * object in cpus -> dangling pointer after final object_unref().
3302 */
3303 dc->cannot_destroy_with_object_finalize_yet = true;
3304}
3305
3306static const TypeInfo x86_cpu_type_info = {
3307 .name = TYPE_X86_CPU,
3308 .parent = TYPE_CPU,
3309 .instance_size = sizeof(X86CPU),
3310 .instance_init = x86_cpu_initfn,
3311 .abstract = true,
3312 .class_size = sizeof(X86CPUClass),
3313 .class_init = x86_cpu_common_class_init,
3314};
3315
3316static void x86_cpu_register_types(void)
3317{
3318 int i;
3319
3320 type_register_static(&x86_cpu_type_info);
3321 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3322 x86_register_cpudef_type(&builtin_x86_defs[i]);
3323 }
3324#ifdef CONFIG_KVM
3325 type_register_static(&host_x86_cpu_type_info);
3326#endif
3327}
3328
3329type_init(x86_cpu_register_types)