1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20
21 #include "cpu.h"
22 #include "sysemu/kvm.h"
23 #include "sysemu/cpus.h"
24 #include "kvm_i386.h"
25
26 #include "qemu/error-report.h"
27 #include "qemu/option.h"
28 #include "qemu/config-file.h"
29 #include "qapi/qmp/qerror.h"
30
31 #include "qapi-types.h"
32 #include "qapi-visit.h"
33 #include "qapi/visitor.h"
34 #include "sysemu/arch_init.h"
35
36 #include "hw/hw.h"
37 #if defined(CONFIG_KVM)
38 #include <linux/kvm_para.h>
39 #endif
40
41 #include "sysemu/sysemu.h"
42 #include "hw/qdev-properties.h"
43 #ifndef CONFIG_USER_ONLY
44 #include "exec/address-spaces.h"
45 #include "hw/xen/xen.h"
46 #include "hw/i386/apic_internal.h"
47 #endif
48
49
50 /* Cache topology CPUID constants: */
51
52 /* CPUID Leaf 2 Descriptors */
53
54 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
55 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
56 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
57
58
59 /* CPUID Leaf 4 constants: */
60
61 /* EAX: */
62 #define CPUID_4_TYPE_DCACHE 1
63 #define CPUID_4_TYPE_ICACHE 2
64 #define CPUID_4_TYPE_UNIFIED 3
65
66 #define CPUID_4_LEVEL(l) ((l) << 5)
67
68 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
69 #define CPUID_4_FULLY_ASSOC (1 << 9)
70
71 /* EDX: */
72 #define CPUID_4_NO_INVD_SHARING (1 << 0)
73 #define CPUID_4_INCLUSIVE (1 << 1)
74 #define CPUID_4_COMPLEX_IDX (1 << 2)
75
76 #define ASSOC_FULL 0xFF
77
78 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
79 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
80 a == 2 ? 0x2 : \
81 a == 4 ? 0x4 : \
82 a == 8 ? 0x6 : \
83 a == 16 ? 0x8 : \
84 a == 32 ? 0xA : \
85 a == 48 ? 0xB : \
86 a == 64 ? 0xC : \
87 a == 96 ? 0xD : \
88 a == 128 ? 0xE : \
89 a == ASSOC_FULL ? 0xF : \
90 0 /* invalid value */)
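/* Illustrative note (not part of the original file): AMD_ENC_ASSOC() produces
 * the 4-bit associativity field used by CPUID leaf 0x80000006.  A sketch of
 * how the L2 values defined below are typically packed into ECX of that leaf
 * (field layout per AMD's CPUID documentation; the exact use elsewhere in
 * this file is assumed here):
 *
 *   ecx = (L2_SIZE_KB_AMD << 16) |
 *         (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) |
 *         (L2_LINES_PER_TAG << 8) | L2_LINE_SIZE;
 *
 *   AMD_ENC_ASSOC(16) is 0x8, so 512 KiB / 16-way / 64-byte lines
 *   encodes as 0x02008140.
 */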
91
92
93 /* Definitions of the hardcoded cache entries we expose: */
94
95 /* L1 data cache: */
96 #define L1D_LINE_SIZE 64
97 #define L1D_ASSOCIATIVITY 8
98 #define L1D_SETS 64
99 #define L1D_PARTITIONS 1
100 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
101 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
102 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
103 #define L1D_LINES_PER_TAG 1
104 #define L1D_SIZE_KB_AMD 64
105 #define L1D_ASSOCIATIVITY_AMD 2
106
107 /* L1 instruction cache: */
108 #define L1I_LINE_SIZE 64
109 #define L1I_ASSOCIATIVITY 8
110 #define L1I_SETS 64
111 #define L1I_PARTITIONS 1
112 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
113 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
114 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
115 #define L1I_LINES_PER_TAG 1
116 #define L1I_SIZE_KB_AMD 64
117 #define L1I_ASSOCIATIVITY_AMD 2
118
119 /* Level 2 unified cache: */
120 #define L2_LINE_SIZE 64
121 #define L2_ASSOCIATIVITY 16
122 #define L2_SETS 4096
123 #define L2_PARTITIONS 1
124 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
125 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
126 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
127 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
128 #define L2_LINES_PER_TAG 1
129 #define L2_SIZE_KB_AMD 512
130
131 /* No L3 cache: */
132 #define L3_SIZE_KB 0 /* disabled */
133 #define L3_ASSOCIATIVITY 0 /* disabled */
134 #define L3_LINES_PER_TAG 0 /* disabled */
135 #define L3_LINE_SIZE 0 /* disabled */
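/* Illustrative note (not part of the original file): CPUID leaf 4 reports the
 * cache parameters above minus one, so a plausible encoding of the L1D entry
 * (shown here only as a sketch of how these constants combine) is:
 *
 *   eax = CPUID_4_TYPE_DCACHE | CPUID_4_LEVEL(1) | CPUID_4_SELF_INIT_LEVEL;
 *   ebx = (L1D_LINE_SIZE - 1) |
 *         ((L1D_PARTITIONS - 1) << 12) |
 *         ((L1D_ASSOCIATIVITY - 1) << 22);
 *   ecx = L1D_SETS - 1;
 *
 * which describes the 64 B * 8-way * 64-set * 1-partition = 32 KiB cache.
 */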
136
137 /* TLB definitions: */
138
139 #define L1_DTLB_2M_ASSOC 1
140 #define L1_DTLB_2M_ENTRIES 255
141 #define L1_DTLB_4K_ASSOC 1
142 #define L1_DTLB_4K_ENTRIES 255
143
144 #define L1_ITLB_2M_ASSOC 1
145 #define L1_ITLB_2M_ENTRIES 255
146 #define L1_ITLB_4K_ASSOC 1
147 #define L1_ITLB_4K_ENTRIES 255
148
149 #define L2_DTLB_2M_ASSOC 0 /* disabled */
150 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
151 #define L2_DTLB_4K_ASSOC 4
152 #define L2_DTLB_4K_ENTRIES 512
153
154 #define L2_ITLB_2M_ASSOC 0 /* disabled */
155 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
156 #define L2_ITLB_4K_ASSOC 4
157 #define L2_ITLB_4K_ENTRIES 512
158
159
160
161 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
162 uint32_t vendor2, uint32_t vendor3)
163 {
164 int i;
165 for (i = 0; i < 4; i++) {
166 dst[i] = vendor1 >> (8 * i);
167 dst[i + 4] = vendor2 >> (8 * i);
168 dst[i + 8] = vendor3 >> (8 * i);
169 }
170 dst[CPUID_VENDOR_SZ] = '\0';
171 }
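/* Illustrative example (not part of the original file): CPUID leaf 0 returns
 * the vendor string packed little-endian across EBX, EDX and ECX, which is
 * the argument order x86_cpu_vendor_words2str() expects.  For "GenuineIntel":
 *
 *   char vendor[CPUID_VENDOR_SZ + 1];
 *   x86_cpu_vendor_words2str(vendor,
 *                            0x756e6547,    EBX, bytes "Genu"
 *                            0x49656e69,    EDX, bytes "ineI"
 *                            0x6c65746e);   ECX, bytes "ntel"
 *   vendor now holds "GenuineIntel".
 */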
172
173 /* feature flags taken from "Intel Processor Identification and the CPUID
174 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
175 * between feature naming conventions, aliases may be added.
176 */
177 static const char *feature_name[] = {
178 "fpu", "vme", "de", "pse",
179 "tsc", "msr", "pae", "mce",
180 "cx8", "apic", NULL, "sep",
181 "mtrr", "pge", "mca", "cmov",
182 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
183 NULL, "ds" /* Intel dts */, "acpi", "mmx",
184 "fxsr", "sse", "sse2", "ss",
185 "ht" /* Intel htt */, "tm", "ia64", "pbe",
186 };
187 static const char *ext_feature_name[] = {
188 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
189 "ds_cpl", "vmx", "smx", "est",
190 "tm2", "ssse3", "cid", NULL,
191 "fma", "cx16", "xtpr", "pdcm",
192 NULL, "pcid", "dca", "sse4.1|sse4_1",
193 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
194 "tsc-deadline", "aes", "xsave", "osxsave",
195 "avx", "f16c", "rdrand", "hypervisor",
196 };
197 /* Feature names that are already defined in feature_name[] but are also set
198 * in CPUID[8000_0001].EDX on AMD CPUs don't have their names repeated in
199 * ext2_feature_name[]; they are copied automatically to cpuid_ext2_features
200 * if and only if the CPU vendor is AMD (see the note after the table below).
201 */
202 static const char *ext2_feature_name[] = {
203 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
204 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
205 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
206 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
207 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
208 "nx|xd", NULL, "mmxext", NULL /* mmx */,
209 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
210 NULL, "lm|i64", "3dnowext", "3dnow",
211 };
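/* Illustrative note (not part of the original file): with an AMD vendor, bits
 * such as "mmx" or "fxsr" therefore only need to be requested once.  A sketch
 * of the copy this implies, using CPUID_EXT2_AMD_ALIASES as the mask of
 * CPUID[1].EDX bits that AMD mirrors into CPUID[8000_0001].EDX (the code that
 * actually performs this copy is outside the excerpt shown here):
 *
 *   env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
 *   env->features[FEAT_8000_0001_EDX] |=
 *       env->features[FEAT_1_EDX] & CPUID_EXT2_AMD_ALIASES;
 */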
212 static const char *ext3_feature_name[] = {
213 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
214 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
215 "3dnowprefetch", "osvw", "ibs", "xop",
216 "skinit", "wdt", NULL, "lwp",
217 "fma4", "tce", NULL, "nodeid_msr",
218 NULL, "tbm", "topoext", "perfctr_core",
219 "perfctr_nb", NULL, NULL, NULL,
220 NULL, NULL, NULL, NULL,
221 };
222
223 static const char *ext4_feature_name[] = {
224 NULL, NULL, "xstore", "xstore-en",
225 NULL, NULL, "xcrypt", "xcrypt-en",
226 "ace2", "ace2-en", "phe", "phe-en",
227 "pmm", "pmm-en", NULL, NULL,
228 NULL, NULL, NULL, NULL,
229 NULL, NULL, NULL, NULL,
230 NULL, NULL, NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 };
233
234 static const char *kvm_feature_name[] = {
235 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
236 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
237 NULL, NULL, NULL, NULL,
238 NULL, NULL, NULL, NULL,
239 NULL, NULL, NULL, NULL,
240 NULL, NULL, NULL, NULL,
241 "kvmclock-stable-bit", NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 };
244
245 static const char *svm_feature_name[] = {
246 "npt", "lbrv", "svm_lock", "nrip_save",
247 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
248 NULL, NULL, "pause_filter", NULL,
249 "pfthreshold", NULL, NULL, NULL,
250 NULL, NULL, NULL, NULL,
251 NULL, NULL, NULL, NULL,
252 NULL, NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 };
255
256 static const char *cpuid_7_0_ebx_feature_name[] = {
257 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
258 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
259 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
260 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
261 };
262
263 static const char *cpuid_7_0_ecx_feature_name[] = {
264 NULL, NULL, NULL, "pku",
265 "ospke", NULL, NULL, NULL,
266 NULL, NULL, NULL, NULL,
267 NULL, NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 };
273
274 static const char *cpuid_apm_edx_feature_name[] = {
275 NULL, NULL, NULL, NULL,
276 NULL, NULL, NULL, NULL,
277 "invtsc", NULL, NULL, NULL,
278 NULL, NULL, NULL, NULL,
279 NULL, NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 };
284
285 static const char *cpuid_xsave_feature_name[] = {
286 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
287 NULL, NULL, NULL, NULL,
288 NULL, NULL, NULL, NULL,
289 NULL, NULL, NULL, NULL,
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 };
295
296 static const char *cpuid_6_feature_name[] = {
297 NULL, NULL, "arat", NULL,
298 NULL, NULL, NULL, NULL,
299 NULL, NULL, NULL, NULL,
300 NULL, NULL, NULL, NULL,
301 NULL, NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
305 };
306
307 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
308 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
309 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
310 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
311 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
312 CPUID_PSE36 | CPUID_FXSR)
313 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
314 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
315 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
316 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
317 CPUID_PAE | CPUID_SEP | CPUID_APIC)
318
319 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
320 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
321 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
322 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
323 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
324 /* partly implemented:
325 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
326 /* missing:
327 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
328 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
329 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
330 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
331 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
332 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
333 /* missing:
334 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
335 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
336 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
337 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
338 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
339
340 #ifdef TARGET_X86_64
341 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
342 #else
343 #define TCG_EXT2_X86_64_FEATURES 0
344 #endif
345
346 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
347 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
348 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
349 TCG_EXT2_X86_64_FEATURES)
350 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
351 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
352 #define TCG_EXT4_FEATURES 0
353 #define TCG_SVM_FEATURES 0
354 #define TCG_KVM_FEATURES 0
355 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
356 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
357 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
358 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
359 /* missing:
360 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
361 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
362 CPUID_7_0_EBX_RDSEED */
363 #define TCG_7_0_ECX_FEATURES 0
364 #define TCG_APM_FEATURES 0
365 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
366 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
367 /* missing:
368 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
369
370 typedef struct FeatureWordInfo {
371 const char **feat_names;
372 uint32_t cpuid_eax; /* Input EAX for CPUID */
373 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
374 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
375 int cpuid_reg; /* output register (R_* constant) */
376 uint32_t tcg_features; /* Feature flags supported by TCG */
377 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
378 } FeatureWordInfo;
379
380 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
381 [FEAT_1_EDX] = {
382 .feat_names = feature_name,
383 .cpuid_eax = 1, .cpuid_reg = R_EDX,
384 .tcg_features = TCG_FEATURES,
385 },
386 [FEAT_1_ECX] = {
387 .feat_names = ext_feature_name,
388 .cpuid_eax = 1, .cpuid_reg = R_ECX,
389 .tcg_features = TCG_EXT_FEATURES,
390 },
391 [FEAT_8000_0001_EDX] = {
392 .feat_names = ext2_feature_name,
393 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
394 .tcg_features = TCG_EXT2_FEATURES,
395 },
396 [FEAT_8000_0001_ECX] = {
397 .feat_names = ext3_feature_name,
398 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
399 .tcg_features = TCG_EXT3_FEATURES,
400 },
401 [FEAT_C000_0001_EDX] = {
402 .feat_names = ext4_feature_name,
403 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
404 .tcg_features = TCG_EXT4_FEATURES,
405 },
406 [FEAT_KVM] = {
407 .feat_names = kvm_feature_name,
408 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
409 .tcg_features = TCG_KVM_FEATURES,
410 },
411 [FEAT_SVM] = {
412 .feat_names = svm_feature_name,
413 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
414 .tcg_features = TCG_SVM_FEATURES,
415 },
416 [FEAT_7_0_EBX] = {
417 .feat_names = cpuid_7_0_ebx_feature_name,
418 .cpuid_eax = 7,
419 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
420 .cpuid_reg = R_EBX,
421 .tcg_features = TCG_7_0_EBX_FEATURES,
422 },
423 [FEAT_7_0_ECX] = {
424 .feat_names = cpuid_7_0_ecx_feature_name,
425 .cpuid_eax = 7,
426 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
427 .cpuid_reg = R_ECX,
428 .tcg_features = TCG_7_0_ECX_FEATURES,
429 },
430 [FEAT_8000_0007_EDX] = {
431 .feat_names = cpuid_apm_edx_feature_name,
432 .cpuid_eax = 0x80000007,
433 .cpuid_reg = R_EDX,
434 .tcg_features = TCG_APM_FEATURES,
435 .unmigratable_flags = CPUID_APM_INVTSC,
436 },
437 [FEAT_XSAVE] = {
438 .feat_names = cpuid_xsave_feature_name,
439 .cpuid_eax = 0xd,
440 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
441 .cpuid_reg = R_EAX,
442 .tcg_features = TCG_XSAVE_FEATURES,
443 },
444 [FEAT_6_EAX] = {
445 .feat_names = cpuid_6_feature_name,
446 .cpuid_eax = 6, .cpuid_reg = R_EAX,
447 .tcg_features = TCG_6_EAX_FEATURES,
448 },
449 };
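/* Illustrative sketch (not part of the original file): how a FeatureWordInfo
 * entry is meant to be consumed.  The helper below is hypothetical and only
 * shows the idea: query the CPUID leaf/sub-leaf named by the table with
 * host_cpuid() (defined further down in this file) and pick the register the
 * table points at.
 *
 *   static uint32_t example_host_feature_word(FeatureWord w)
 *   {
 *       FeatureWordInfo *wi = &feature_word_info[w];
 *       uint32_t regs[4] = { 0 };    indexed by R_EAX..R_EBX (0..3)
 *       host_cpuid(wi->cpuid_eax, wi->cpuid_needs_ecx ? wi->cpuid_ecx : 0,
 *                  &regs[R_EAX], &regs[R_EBX], &regs[R_ECX], &regs[R_EDX]);
 *       return regs[wi->cpuid_reg];
 *   }
 */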
450
451 typedef struct X86RegisterInfo32 {
452 /* Name of register */
453 const char *name;
454 /* QAPI enum value register */
455 X86CPURegister32 qapi_enum;
456 } X86RegisterInfo32;
457
458 #define REGISTER(reg) \
459 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
460 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
461 REGISTER(EAX),
462 REGISTER(ECX),
463 REGISTER(EDX),
464 REGISTER(EBX),
465 REGISTER(ESP),
466 REGISTER(EBP),
467 REGISTER(ESI),
468 REGISTER(EDI),
469 };
470 #undef REGISTER
471
472 const ExtSaveArea x86_ext_save_areas[] = {
473 [XSTATE_YMM_BIT] =
474 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
475 .offset = 0x240, .size = 0x100 },
476 [XSTATE_BNDREGS_BIT] =
477 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
478 .offset = 0x3c0, .size = 0x40 },
479 [XSTATE_BNDCSR_BIT] =
480 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
481 .offset = 0x400, .size = 0x40 },
482 [XSTATE_OPMASK_BIT] =
483 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
484 .offset = 0x440, .size = 0x40 },
485 [XSTATE_ZMM_Hi256_BIT] =
486 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
487 .offset = 0x480, .size = 0x200 },
488 [XSTATE_Hi16_ZMM_BIT] =
489 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
490 .offset = 0x680, .size = 0x400 },
491 [XSTATE_PKRU_BIT] =
492 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
493 .offset = 0xA80, .size = 0x8 },
494 };
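/* Illustrative note (not part of the original file): the offsets above follow
 * the standard (non-compacted) XSAVE layout, i.e. what CPUID leaf 0xD reports
 * in EBX for each sub-leaf.  The first extended component starts right after
 * the legacy FXSAVE region plus the XSAVE header:
 *
 *   0x200 (legacy area) + 0x40 (XSAVE header) = 0x240  ->  XSTATE_YMM offset
 *
 * and the last byte used is 0xA80 + 0x8 = 0xA88 (end of the PKRU state).
 */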
495
496 const char *get_register_name_32(unsigned int reg)
497 {
498 if (reg >= CPU_NB_REGS32) {
499 return NULL;
500 }
501 return x86_reg_info_32[reg].name;
502 }
503
504 /*
505 * Returns the set of feature flags that are supported and migratable by
506 * QEMU, for a given FeatureWord.
507 */
508 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
509 {
510 FeatureWordInfo *wi = &feature_word_info[w];
511 uint32_t r = 0;
512 int i;
513
514 for (i = 0; i < 32; i++) {
515 uint32_t f = 1U << i;
516 /* If the feature name is unknown, it is not supported by QEMU yet */
517 if (!wi->feat_names[i]) {
518 continue;
519 }
520 /* Skip features known to QEMU, but explicitly marked as unmigratable */
521 if (wi->unmigratable_flags & f) {
522 continue;
523 }
524 r |= f;
525 }
526 return r;
527 }
528
529 void host_cpuid(uint32_t function, uint32_t count,
530 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
531 {
532 uint32_t vec[4];
533
534 #ifdef __x86_64__
535 asm volatile("cpuid"
536 : "=a"(vec[0]), "=b"(vec[1]),
537 "=c"(vec[2]), "=d"(vec[3])
538 : "0"(function), "c"(count) : "cc");
539 #elif defined(__i386__)
540 asm volatile("pusha \n\t"
541 "cpuid \n\t"
542 "mov %%eax, 0(%2) \n\t"
543 "mov %%ebx, 4(%2) \n\t"
544 "mov %%ecx, 8(%2) \n\t"
545 "mov %%edx, 12(%2) \n\t"
546 "popa"
547 : : "a"(function), "c"(count), "S"(vec)
548 : "memory", "cc");
549 #else
550 abort();
551 #endif
552
553 if (eax)
554 *eax = vec[0];
555 if (ebx)
556 *ebx = vec[1];
557 if (ecx)
558 *ecx = vec[2];
559 if (edx)
560 *edx = vec[3];
561 }
562
563 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
564
565 /* general substring compare of *[s1..e1) and *[s2..e2). sx is the start of
566 * a substring; ex, if not NULL, points to the first char after the substring,
567 * otherwise the string is assumed to be sized by a terminating nul.
568 * Return lexical ordering of *s1:*s2.
569 */
570 static int sstrcmp(const char *s1, const char *e1,
571 const char *s2, const char *e2)
572 {
573 for (;;) {
574 if (!*s1 || !*s2 || *s1 != *s2)
575 return (*s1 - *s2);
576 ++s1, ++s2;
577 if (s1 == e1 && s2 == e2)
578 return (0);
579 else if (s1 == e1)
580 return (*s2);
581 else if (s2 == e2)
582 return (*s1);
583 }
584 }
585
586 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
587 * '|' delimited (possibly empty) strings, in which case the search for a match
588 * within the alternatives proceeds left to right. Return 0 for success,
589 * non-zero otherwise.
590 */
591 static int altcmp(const char *s, const char *e, const char *altstr)
592 {
593 const char *p, *q;
594
595 for (q = p = altstr; ; ) {
596 while (*p && *p != '|')
597 ++p;
598 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
599 return (0);
600 if (!*p)
601 return (1);
602 else
603 q = ++p;
604 }
605 }
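/* Illustrative example (not part of the original file): with the alias syntax
 * used by the feature name tables above, both spellings of a flag match:
 *
 *   const char *alt = "sse4.1|sse4_1";
 *   altcmp("sse4.1", NULL, alt)   ->  0 (matches the first alternative)
 *   altcmp("sse4_1", NULL, alt)   ->  0 (matches the second alternative)
 *   altcmp("sse4",   NULL, alt)   ->  non-zero (no alternative matches)
 */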
606
607 /* search featureset for flag *[s..e), if found set corresponding bit in
608 * *pval and return true, otherwise return false
609 */
610 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
611 const char **featureset)
612 {
613 uint32_t mask;
614 const char **ppc;
615 bool found = false;
616
617 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
618 if (*ppc && !altcmp(s, e, *ppc)) {
619 *pval |= mask;
620 found = true;
621 }
622 }
623 return found;
624 }
625
626 static void add_flagname_to_bitmaps(const char *flagname,
627 FeatureWordArray words,
628 Error **errp)
629 {
630 FeatureWord w;
631 for (w = 0; w < FEATURE_WORDS; w++) {
632 FeatureWordInfo *wi = &feature_word_info[w];
633 if (wi->feat_names &&
634 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
635 break;
636 }
637 }
638 if (w == FEATURE_WORDS) {
639 error_setg(errp, "CPU feature %s not found", flagname);
640 }
641 }
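/* Illustrative example (not part of the original file): how the lookup above
 * is typically used when parsing "+flag" style feature strings (the
 * surrounding parsing code is assumed here and not shown in this excerpt):
 *
 *   FeatureWordArray plus_features = { 0 };
 *   Error *err = NULL;
 *   add_flagname_to_bitmaps("sse4.2", plus_features, &err);
 *       sets CPUID_EXT_SSE42 in plus_features[FEAT_1_ECX]
 *   add_flagname_to_bitmaps("no-such-flag", plus_features, &err);
 *       leaves the array untouched and sets err
 */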
642
643 /* CPU class name definitions: */
644
645 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
646 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
647
648 /* Return type name for a given CPU model name
649 * Caller is responsible for freeing the returned string.
650 */
651 static char *x86_cpu_type_name(const char *model_name)
652 {
653 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
654 }
655
656 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
657 {
658 ObjectClass *oc;
659 char *typename;
660
661 if (cpu_model == NULL) {
662 return NULL;
663 }
664
665 typename = x86_cpu_type_name(cpu_model);
666 oc = object_class_by_name(typename);
667 g_free(typename);
668 return oc;
669 }
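/* Illustrative example (not part of the original file): on a 64-bit target,
 * where TYPE_X86_CPU is assumed to be "x86_64-cpu" (its definition lives in
 * cpu-qom.h, outside this excerpt), the helpers above behave as:
 *
 *   char *t = x86_cpu_type_name("qemu64");       t is "qemu64-x86_64-cpu"
 *   ObjectClass *oc = x86_cpu_class_by_name("qemu64");
 *   g_free(t);                                   caller frees the string
 */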
670
671 struct X86CPUDefinition {
672 const char *name;
673 uint32_t level;
674 uint32_t xlevel;
675 uint32_t xlevel2;
676 /* vendor is zero-terminated, 12 character ASCII string */
677 char vendor[CPUID_VENDOR_SZ + 1];
678 int family;
679 int model;
680 int stepping;
681 FeatureWordArray features;
682 char model_id[48];
683 };
684
685 static X86CPUDefinition builtin_x86_defs[] = {
686 {
687 .name = "qemu64",
688 .level = 0xd,
689 .vendor = CPUID_VENDOR_AMD,
690 .family = 6,
691 .model = 6,
692 .stepping = 3,
693 .features[FEAT_1_EDX] =
694 PPRO_FEATURES |
695 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
696 CPUID_PSE36,
697 .features[FEAT_1_ECX] =
698 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
699 .features[FEAT_8000_0001_EDX] =
700 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
701 .features[FEAT_8000_0001_ECX] =
702 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
703 .xlevel = 0x8000000A,
704 },
705 {
706 .name = "phenom",
707 .level = 5,
708 .vendor = CPUID_VENDOR_AMD,
709 .family = 16,
710 .model = 2,
711 .stepping = 3,
712 /* Missing: CPUID_HT */
713 .features[FEAT_1_EDX] =
714 PPRO_FEATURES |
715 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
716 CPUID_PSE36 | CPUID_VME,
717 .features[FEAT_1_ECX] =
718 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
719 CPUID_EXT_POPCNT,
720 .features[FEAT_8000_0001_EDX] =
721 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
722 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
723 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
724 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
725 CPUID_EXT3_CR8LEG,
726 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
727 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
728 .features[FEAT_8000_0001_ECX] =
729 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
730 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
731 /* Missing: CPUID_SVM_LBRV */
732 .features[FEAT_SVM] =
733 CPUID_SVM_NPT,
734 .xlevel = 0x8000001A,
735 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
736 },
737 {
738 .name = "core2duo",
739 .level = 10,
740 .vendor = CPUID_VENDOR_INTEL,
741 .family = 6,
742 .model = 15,
743 .stepping = 11,
744 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
745 .features[FEAT_1_EDX] =
746 PPRO_FEATURES |
747 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
748 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
749 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
750 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
751 .features[FEAT_1_ECX] =
752 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
753 CPUID_EXT_CX16,
754 .features[FEAT_8000_0001_EDX] =
755 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
756 .features[FEAT_8000_0001_ECX] =
757 CPUID_EXT3_LAHF_LM,
758 .xlevel = 0x80000008,
759 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
760 },
761 {
762 .name = "kvm64",
763 .level = 0xd,
764 .vendor = CPUID_VENDOR_INTEL,
765 .family = 15,
766 .model = 6,
767 .stepping = 1,
768 /* Missing: CPUID_HT */
769 .features[FEAT_1_EDX] =
770 PPRO_FEATURES | CPUID_VME |
771 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
772 CPUID_PSE36,
773 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
774 .features[FEAT_1_ECX] =
775 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
776 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
777 .features[FEAT_8000_0001_EDX] =
778 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
779 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
780 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
781 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
782 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
783 .features[FEAT_8000_0001_ECX] =
784 0,
785 .xlevel = 0x80000008,
786 .model_id = "Common KVM processor"
787 },
788 {
789 .name = "qemu32",
790 .level = 4,
791 .vendor = CPUID_VENDOR_INTEL,
792 .family = 6,
793 .model = 6,
794 .stepping = 3,
795 .features[FEAT_1_EDX] =
796 PPRO_FEATURES,
797 .features[FEAT_1_ECX] =
798 CPUID_EXT_SSE3,
799 .xlevel = 0x80000004,
800 },
801 {
802 .name = "kvm32",
803 .level = 5,
804 .vendor = CPUID_VENDOR_INTEL,
805 .family = 15,
806 .model = 6,
807 .stepping = 1,
808 .features[FEAT_1_EDX] =
809 PPRO_FEATURES | CPUID_VME |
810 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
811 .features[FEAT_1_ECX] =
812 CPUID_EXT_SSE3,
813 .features[FEAT_8000_0001_ECX] =
814 0,
815 .xlevel = 0x80000008,
816 .model_id = "Common 32-bit KVM processor"
817 },
818 {
819 .name = "coreduo",
820 .level = 10,
821 .vendor = CPUID_VENDOR_INTEL,
822 .family = 6,
823 .model = 14,
824 .stepping = 8,
825 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
826 .features[FEAT_1_EDX] =
827 PPRO_FEATURES | CPUID_VME |
828 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
829 CPUID_SS,
830 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
831 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
832 .features[FEAT_1_ECX] =
833 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
834 .features[FEAT_8000_0001_EDX] =
835 CPUID_EXT2_NX,
836 .xlevel = 0x80000008,
837 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
838 },
839 {
840 .name = "486",
841 .level = 1,
842 .vendor = CPUID_VENDOR_INTEL,
843 .family = 4,
844 .model = 8,
845 .stepping = 0,
846 .features[FEAT_1_EDX] =
847 I486_FEATURES,
848 .xlevel = 0,
849 },
850 {
851 .name = "pentium",
852 .level = 1,
853 .vendor = CPUID_VENDOR_INTEL,
854 .family = 5,
855 .model = 4,
856 .stepping = 3,
857 .features[FEAT_1_EDX] =
858 PENTIUM_FEATURES,
859 .xlevel = 0,
860 },
861 {
862 .name = "pentium2",
863 .level = 2,
864 .vendor = CPUID_VENDOR_INTEL,
865 .family = 6,
866 .model = 5,
867 .stepping = 2,
868 .features[FEAT_1_EDX] =
869 PENTIUM2_FEATURES,
870 .xlevel = 0,
871 },
872 {
873 .name = "pentium3",
874 .level = 3,
875 .vendor = CPUID_VENDOR_INTEL,
876 .family = 6,
877 .model = 7,
878 .stepping = 3,
879 .features[FEAT_1_EDX] =
880 PENTIUM3_FEATURES,
881 .xlevel = 0,
882 },
883 {
884 .name = "athlon",
885 .level = 2,
886 .vendor = CPUID_VENDOR_AMD,
887 .family = 6,
888 .model = 2,
889 .stepping = 3,
890 .features[FEAT_1_EDX] =
891 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
892 CPUID_MCA,
893 .features[FEAT_8000_0001_EDX] =
894 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
895 .xlevel = 0x80000008,
896 },
897 {
898 .name = "n270",
899 .level = 10,
900 .vendor = CPUID_VENDOR_INTEL,
901 .family = 6,
902 .model = 28,
903 .stepping = 2,
904 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
905 .features[FEAT_1_EDX] =
906 PPRO_FEATURES |
907 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
908 CPUID_ACPI | CPUID_SS,
909 /* Some CPUs have no CPUID_SEP */
910 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
911 * CPUID_EXT_XTPR */
912 .features[FEAT_1_ECX] =
913 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
914 CPUID_EXT_MOVBE,
915 .features[FEAT_8000_0001_EDX] =
916 CPUID_EXT2_NX,
917 .features[FEAT_8000_0001_ECX] =
918 CPUID_EXT3_LAHF_LM,
919 .xlevel = 0x80000008,
920 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
921 },
922 {
923 .name = "Conroe",
924 .level = 10,
925 .vendor = CPUID_VENDOR_INTEL,
926 .family = 6,
927 .model = 15,
928 .stepping = 3,
929 .features[FEAT_1_EDX] =
930 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
931 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
932 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
933 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
934 CPUID_DE | CPUID_FP87,
935 .features[FEAT_1_ECX] =
936 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
937 .features[FEAT_8000_0001_EDX] =
938 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
939 .features[FEAT_8000_0001_ECX] =
940 CPUID_EXT3_LAHF_LM,
941 .xlevel = 0x80000008,
942 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
943 },
944 {
945 .name = "Penryn",
946 .level = 10,
947 .vendor = CPUID_VENDOR_INTEL,
948 .family = 6,
949 .model = 23,
950 .stepping = 3,
951 .features[FEAT_1_EDX] =
952 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
953 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
954 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
955 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
956 CPUID_DE | CPUID_FP87,
957 .features[FEAT_1_ECX] =
958 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
959 CPUID_EXT_SSE3,
960 .features[FEAT_8000_0001_EDX] =
961 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
962 .features[FEAT_8000_0001_ECX] =
963 CPUID_EXT3_LAHF_LM,
964 .xlevel = 0x80000008,
965 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
966 },
967 {
968 .name = "Nehalem",
969 .level = 11,
970 .vendor = CPUID_VENDOR_INTEL,
971 .family = 6,
972 .model = 26,
973 .stepping = 3,
974 .features[FEAT_1_EDX] =
975 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
976 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
977 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
978 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
979 CPUID_DE | CPUID_FP87,
980 .features[FEAT_1_ECX] =
981 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
982 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
983 .features[FEAT_8000_0001_EDX] =
984 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
985 .features[FEAT_8000_0001_ECX] =
986 CPUID_EXT3_LAHF_LM,
987 .xlevel = 0x80000008,
988 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
989 },
990 {
991 .name = "Westmere",
992 .level = 11,
993 .vendor = CPUID_VENDOR_INTEL,
994 .family = 6,
995 .model = 44,
996 .stepping = 1,
997 .features[FEAT_1_EDX] =
998 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
999 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1000 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1001 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1002 CPUID_DE | CPUID_FP87,
1003 .features[FEAT_1_ECX] =
1004 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1005 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1006 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1007 .features[FEAT_8000_0001_EDX] =
1008 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1009 .features[FEAT_8000_0001_ECX] =
1010 CPUID_EXT3_LAHF_LM,
1011 .features[FEAT_6_EAX] =
1012 CPUID_6_EAX_ARAT,
1013 .xlevel = 0x80000008,
1014 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1015 },
1016 {
1017 .name = "SandyBridge",
1018 .level = 0xd,
1019 .vendor = CPUID_VENDOR_INTEL,
1020 .family = 6,
1021 .model = 42,
1022 .stepping = 1,
1023 .features[FEAT_1_EDX] =
1024 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1025 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1026 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1027 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1028 CPUID_DE | CPUID_FP87,
1029 .features[FEAT_1_ECX] =
1030 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1031 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1032 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1033 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1034 CPUID_EXT_SSE3,
1035 .features[FEAT_8000_0001_EDX] =
1036 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1037 CPUID_EXT2_SYSCALL,
1038 .features[FEAT_8000_0001_ECX] =
1039 CPUID_EXT3_LAHF_LM,
1040 .features[FEAT_XSAVE] =
1041 CPUID_XSAVE_XSAVEOPT,
1042 .features[FEAT_6_EAX] =
1043 CPUID_6_EAX_ARAT,
1044 .xlevel = 0x80000008,
1045 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1046 },
1047 {
1048 .name = "IvyBridge",
1049 .level = 0xd,
1050 .vendor = CPUID_VENDOR_INTEL,
1051 .family = 6,
1052 .model = 58,
1053 .stepping = 9,
1054 .features[FEAT_1_EDX] =
1055 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1056 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1057 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1058 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1059 CPUID_DE | CPUID_FP87,
1060 .features[FEAT_1_ECX] =
1061 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1062 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1063 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1064 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1065 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1066 .features[FEAT_7_0_EBX] =
1067 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1068 CPUID_7_0_EBX_ERMS,
1069 .features[FEAT_8000_0001_EDX] =
1070 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1071 CPUID_EXT2_SYSCALL,
1072 .features[FEAT_8000_0001_ECX] =
1073 CPUID_EXT3_LAHF_LM,
1074 .features[FEAT_XSAVE] =
1075 CPUID_XSAVE_XSAVEOPT,
1076 .features[FEAT_6_EAX] =
1077 CPUID_6_EAX_ARAT,
1078 .xlevel = 0x80000008,
1079 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1080 },
1081 {
1082 .name = "Haswell-noTSX",
1083 .level = 0xd,
1084 .vendor = CPUID_VENDOR_INTEL,
1085 .family = 6,
1086 .model = 60,
1087 .stepping = 1,
1088 .features[FEAT_1_EDX] =
1089 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1090 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1091 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1092 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1093 CPUID_DE | CPUID_FP87,
1094 .features[FEAT_1_ECX] =
1095 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1096 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1097 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1098 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1099 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1100 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1101 .features[FEAT_8000_0001_EDX] =
1102 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1103 CPUID_EXT2_SYSCALL,
1104 .features[FEAT_8000_0001_ECX] =
1105 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1106 .features[FEAT_7_0_EBX] =
1107 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1108 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1109 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1110 .features[FEAT_XSAVE] =
1111 CPUID_XSAVE_XSAVEOPT,
1112 .features[FEAT_6_EAX] =
1113 CPUID_6_EAX_ARAT,
1114 .xlevel = 0x80000008,
1115 .model_id = "Intel Core Processor (Haswell, no TSX)",
1116 }, {
1117 .name = "Haswell",
1118 .level = 0xd,
1119 .vendor = CPUID_VENDOR_INTEL,
1120 .family = 6,
1121 .model = 60,
1122 .stepping = 1,
1123 .features[FEAT_1_EDX] =
1124 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1125 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1126 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1127 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1128 CPUID_DE | CPUID_FP87,
1129 .features[FEAT_1_ECX] =
1130 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1131 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1132 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1133 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1134 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1135 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1136 .features[FEAT_8000_0001_EDX] =
1137 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1138 CPUID_EXT2_SYSCALL,
1139 .features[FEAT_8000_0001_ECX] =
1140 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1141 .features[FEAT_7_0_EBX] =
1142 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1143 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1144 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1145 CPUID_7_0_EBX_RTM,
1146 .features[FEAT_XSAVE] =
1147 CPUID_XSAVE_XSAVEOPT,
1148 .features[FEAT_6_EAX] =
1149 CPUID_6_EAX_ARAT,
1150 .xlevel = 0x80000008,
1151 .model_id = "Intel Core Processor (Haswell)",
1152 },
1153 {
1154 .name = "Broadwell-noTSX",
1155 .level = 0xd,
1156 .vendor = CPUID_VENDOR_INTEL,
1157 .family = 6,
1158 .model = 61,
1159 .stepping = 2,
1160 .features[FEAT_1_EDX] =
1161 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1162 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1163 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1164 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1165 CPUID_DE | CPUID_FP87,
1166 .features[FEAT_1_ECX] =
1167 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1168 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1169 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1170 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1171 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1172 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1173 .features[FEAT_8000_0001_EDX] =
1174 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1175 CPUID_EXT2_SYSCALL,
1176 .features[FEAT_8000_0001_ECX] =
1177 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1178 .features[FEAT_7_0_EBX] =
1179 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1180 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1181 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1182 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1183 CPUID_7_0_EBX_SMAP,
1184 .features[FEAT_XSAVE] =
1185 CPUID_XSAVE_XSAVEOPT,
1186 .features[FEAT_6_EAX] =
1187 CPUID_6_EAX_ARAT,
1188 .xlevel = 0x80000008,
1189 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1190 },
1191 {
1192 .name = "Broadwell",
1193 .level = 0xd,
1194 .vendor = CPUID_VENDOR_INTEL,
1195 .family = 6,
1196 .model = 61,
1197 .stepping = 2,
1198 .features[FEAT_1_EDX] =
1199 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1200 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1201 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1202 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1203 CPUID_DE | CPUID_FP87,
1204 .features[FEAT_1_ECX] =
1205 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1206 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1207 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1208 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1209 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1210 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1211 .features[FEAT_8000_0001_EDX] =
1212 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1213 CPUID_EXT2_SYSCALL,
1214 .features[FEAT_8000_0001_ECX] =
1215 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1216 .features[FEAT_7_0_EBX] =
1217 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1218 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1219 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1220 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1221 CPUID_7_0_EBX_SMAP,
1222 .features[FEAT_XSAVE] =
1223 CPUID_XSAVE_XSAVEOPT,
1224 .features[FEAT_6_EAX] =
1225 CPUID_6_EAX_ARAT,
1226 .xlevel = 0x80000008,
1227 .model_id = "Intel Core Processor (Broadwell)",
1228 },
1229 {
1230 .name = "Opteron_G1",
1231 .level = 5,
1232 .vendor = CPUID_VENDOR_AMD,
1233 .family = 15,
1234 .model = 6,
1235 .stepping = 1,
1236 .features[FEAT_1_EDX] =
1237 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1238 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1239 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1240 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1241 CPUID_DE | CPUID_FP87,
1242 .features[FEAT_1_ECX] =
1243 CPUID_EXT_SSE3,
1244 .features[FEAT_8000_0001_EDX] =
1245 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1246 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1247 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1248 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1249 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1250 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1251 .xlevel = 0x80000008,
1252 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1253 },
1254 {
1255 .name = "Opteron_G2",
1256 .level = 5,
1257 .vendor = CPUID_VENDOR_AMD,
1258 .family = 15,
1259 .model = 6,
1260 .stepping = 1,
1261 .features[FEAT_1_EDX] =
1262 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1263 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1264 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1265 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1266 CPUID_DE | CPUID_FP87,
1267 .features[FEAT_1_ECX] =
1268 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1269 /* Missing: CPUID_EXT2_RDTSCP */
1270 .features[FEAT_8000_0001_EDX] =
1271 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1272 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1273 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1274 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1275 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1276 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1277 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1278 .features[FEAT_8000_0001_ECX] =
1279 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1280 .xlevel = 0x80000008,
1281 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1282 },
1283 {
1284 .name = "Opteron_G3",
1285 .level = 5,
1286 .vendor = CPUID_VENDOR_AMD,
1287 .family = 15,
1288 .model = 6,
1289 .stepping = 1,
1290 .features[FEAT_1_EDX] =
1291 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1292 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1293 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1294 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1295 CPUID_DE | CPUID_FP87,
1296 .features[FEAT_1_ECX] =
1297 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1298 CPUID_EXT_SSE3,
1299 /* Missing: CPUID_EXT2_RDTSCP */
1300 .features[FEAT_8000_0001_EDX] =
1301 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1302 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1303 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1304 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1305 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1306 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1307 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1308 .features[FEAT_8000_0001_ECX] =
1309 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1310 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1311 .xlevel = 0x80000008,
1312 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1313 },
1314 {
1315 .name = "Opteron_G4",
1316 .level = 0xd,
1317 .vendor = CPUID_VENDOR_AMD,
1318 .family = 21,
1319 .model = 1,
1320 .stepping = 2,
1321 .features[FEAT_1_EDX] =
1322 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1323 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1324 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1325 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1326 CPUID_DE | CPUID_FP87,
1327 .features[FEAT_1_ECX] =
1328 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1329 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1330 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1331 CPUID_EXT_SSE3,
1332 /* Missing: CPUID_EXT2_RDTSCP */
1333 .features[FEAT_8000_0001_EDX] =
1334 CPUID_EXT2_LM |
1335 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1336 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1337 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1338 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1339 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1340 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1341 .features[FEAT_8000_0001_ECX] =
1342 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1343 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1344 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1345 CPUID_EXT3_LAHF_LM,
1346 /* no xsaveopt! */
1347 .xlevel = 0x8000001A,
1348 .model_id = "AMD Opteron 62xx class CPU",
1349 },
1350 {
1351 .name = "Opteron_G5",
1352 .level = 0xd,
1353 .vendor = CPUID_VENDOR_AMD,
1354 .family = 21,
1355 .model = 2,
1356 .stepping = 0,
1357 .features[FEAT_1_EDX] =
1358 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1359 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1360 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1361 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1362 CPUID_DE | CPUID_FP87,
1363 .features[FEAT_1_ECX] =
1364 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1365 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1366 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1367 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1368 /* Missing: CPUID_EXT2_RDTSCP */
1369 .features[FEAT_8000_0001_EDX] =
1370 CPUID_EXT2_LM |
1371 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1372 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1373 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1374 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1375 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1376 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1377 .features[FEAT_8000_0001_ECX] =
1378 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1379 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1380 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1381 CPUID_EXT3_LAHF_LM,
1382 /* no xsaveopt! */
1383 .xlevel = 0x8000001A,
1384 .model_id = "AMD Opteron 63xx class CPU",
1385 },
1386 };
1387
1388 typedef struct PropValue {
1389 const char *prop, *value;
1390 } PropValue;
1391
1392 /* KVM-specific features that are automatically added/removed
1393 * from all CPU models when KVM is enabled.
1394 */
1395 static PropValue kvm_default_props[] = {
1396 { "kvmclock", "on" },
1397 { "kvm-nopiodelay", "on" },
1398 { "kvm-asyncpf", "on" },
1399 { "kvm-steal-time", "on" },
1400 { "kvm-pv-eoi", "on" },
1401 { "kvmclock-stable-bit", "on" },
1402 { "x2apic", "on" },
1403 { "acpi", "off" },
1404 { "monitor", "off" },
1405 { "svm", "off" },
1406 { NULL, NULL },
1407 };
1408
1409 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1410 {
1411 PropValue *pv;
1412 for (pv = kvm_default_props; pv->prop; pv++) {
1413 if (!strcmp(pv->prop, prop)) {
1414 pv->value = value;
1415 break;
1416 }
1417 }
1418
1419 /* It is valid to call this function only for properties that
1420 * are already present in the kvm_default_props table.
1421 */
1422 assert(pv->prop);
1423 }
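/* Illustrative example (not part of the original file): machine compat code
 * can use this hook to override one of the KVM defaults above, e.g.:
 *
 *   x86_cpu_change_kvm_default("kvm-pv-eoi", "off");
 *
 * The property name must already be listed in kvm_default_props; passing an
 * unknown property is a programming error and trips the assert.
 */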
1424
1425 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1426 bool migratable_only);
1427
1428 #ifdef CONFIG_KVM
1429
1430 static int cpu_x86_fill_model_id(char *str)
1431 {
1432 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1433 int i;
1434
1435 for (i = 0; i < 3; i++) {
1436 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1437 memcpy(str + i * 16 + 0, &eax, 4);
1438 memcpy(str + i * 16 + 4, &ebx, 4);
1439 memcpy(str + i * 16 + 8, &ecx, 4);
1440 memcpy(str + i * 16 + 12, &edx, 4);
1441 }
1442 return 0;
1443 }
1444
1445 static X86CPUDefinition host_cpudef;
1446
1447 static Property host_x86_cpu_properties[] = {
1448 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1449 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1450 DEFINE_PROP_END_OF_LIST()
1451 };
1452
1453 /* class_init for the "host" CPU model
1454 *
1455 * This function may be called before KVM is initialized.
1456 */
1457 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1458 {
1459 DeviceClass *dc = DEVICE_CLASS(oc);
1460 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1461 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1462
1463 xcc->kvm_required = true;
1464
1465 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1466 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1467
1468 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1469 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1470 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1471 host_cpudef.stepping = eax & 0x0F;
1472
1473 cpu_x86_fill_model_id(host_cpudef.model_id);
1474
1475 xcc->cpu_def = &host_cpudef;
1476
1477 /* level, xlevel, xlevel2, and the feature words are initialized on
1478 * instance_init, because they require KVM to be initialized.
1479 */
1480
1481 dc->props = host_x86_cpu_properties;
1482 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1483 dc->cannot_destroy_with_object_finalize_yet = true;
1484 }
1485
1486 static void host_x86_cpu_initfn(Object *obj)
1487 {
1488 X86CPU *cpu = X86_CPU(obj);
1489 CPUX86State *env = &cpu->env;
1490 KVMState *s = kvm_state;
1491
1492 assert(kvm_enabled());
1493
1494 /* We can't fill the features array here because we don't know yet if
1495 * "migratable" is true or false.
1496 */
1497 cpu->host_features = true;
1498
1499 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1500 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1501 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1502
1503 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1504 }
1505
1506 static const TypeInfo host_x86_cpu_type_info = {
1507 .name = X86_CPU_TYPE_NAME("host"),
1508 .parent = TYPE_X86_CPU,
1509 .instance_init = host_x86_cpu_initfn,
1510 .class_init = host_x86_cpu_class_init,
1511 };
1512
1513 #endif
1514
1515 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1516 {
1517 FeatureWordInfo *f = &feature_word_info[w];
1518 int i;
1519
1520 for (i = 0; i < 32; ++i) {
1521 if ((1UL << i) & mask) {
1522 const char *reg = get_register_name_32(f->cpuid_reg);
1523 assert(reg);
1524 fprintf(stderr, "warning: %s doesn't support requested feature: "
1525 "CPUID.%02XH:%s%s%s [bit %d]\n",
1526 kvm_enabled() ? "host" : "TCG",
1527 f->cpuid_eax, reg,
1528 f->feat_names[i] ? "." : "",
1529 f->feat_names[i] ? f->feat_names[i] : "", i);
1530 }
1531 }
1532 }
1533
1534 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1535 const char *name, void *opaque,
1536 Error **errp)
1537 {
1538 X86CPU *cpu = X86_CPU(obj);
1539 CPUX86State *env = &cpu->env;
1540 int64_t value;
1541
1542 value = (env->cpuid_version >> 8) & 0xf;
1543 if (value == 0xf) {
1544 value += (env->cpuid_version >> 20) & 0xff;
1545 }
1546 visit_type_int(v, name, &value, errp);
1547 }
1548
1549 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1550 const char *name, void *opaque,
1551 Error **errp)
1552 {
1553 X86CPU *cpu = X86_CPU(obj);
1554 CPUX86State *env = &cpu->env;
1555 const int64_t min = 0;
1556 const int64_t max = 0xff + 0xf;
1557 Error *local_err = NULL;
1558 int64_t value;
1559
1560 visit_type_int(v, name, &value, &local_err);
1561 if (local_err) {
1562 error_propagate(errp, local_err);
1563 return;
1564 }
1565 if (value < min || value > max) {
1566 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1567 name ? name : "null", value, min, max);
1568 return;
1569 }
1570
1571 env->cpuid_version &= ~0xff00f00;
1572 if (value > 0x0f) {
1573 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1574 } else {
1575 env->cpuid_version |= value << 8;
1576 }
1577 }
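/* Illustrative worked example (not part of the original file): "family" uses
 * the usual CPUID extended-family encoding.  Setting family=21 (0x15, as the
 * Opteron_G4/G5 definitions above do):
 *
 *   value > 0x0f, so:  cpuid_version |= 0xf00 | ((21 - 0x0f) << 20)
 *                      which is 0x00600f00
 *
 * and the getter recovers 21 again:
 *
 *   ((0x00600f00 >> 8) & 0xf) is 0xf, so add ((0x00600f00 >> 20) & 0xff) = 6,
 *   giving base 0xf + extended 6 = 21.
 */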
1578
1579 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1580 const char *name, void *opaque,
1581 Error **errp)
1582 {
1583 X86CPU *cpu = X86_CPU(obj);
1584 CPUX86State *env = &cpu->env;
1585 int64_t value;
1586
1587 value = (env->cpuid_version >> 4) & 0xf;
1588 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1589 visit_type_int(v, name, &value, errp);
1590 }
1591
1592 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1593 const char *name, void *opaque,
1594 Error **errp)
1595 {
1596 X86CPU *cpu = X86_CPU(obj);
1597 CPUX86State *env = &cpu->env;
1598 const int64_t min = 0;
1599 const int64_t max = 0xff;
1600 Error *local_err = NULL;
1601 int64_t value;
1602
1603 visit_type_int(v, name, &value, &local_err);
1604 if (local_err) {
1605 error_propagate(errp, local_err);
1606 return;
1607 }
1608 if (value < min || value > max) {
1609 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1610 name ? name : "null", value, min, max);
1611 return;
1612 }
1613
1614 env->cpuid_version &= ~0xf00f0;
1615 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1616 }
1617
1618 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1619 const char *name, void *opaque,
1620 Error **errp)
1621 {
1622 X86CPU *cpu = X86_CPU(obj);
1623 CPUX86State *env = &cpu->env;
1624 int64_t value;
1625
1626 value = env->cpuid_version & 0xf;
1627 visit_type_int(v, name, &value, errp);
1628 }
1629
1630 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1631 const char *name, void *opaque,
1632 Error **errp)
1633 {
1634 X86CPU *cpu = X86_CPU(obj);
1635 CPUX86State *env = &cpu->env;
1636 const int64_t min = 0;
1637 const int64_t max = 0xf;
1638 Error *local_err = NULL;
1639 int64_t value;
1640
1641 visit_type_int(v, name, &value, &local_err);
1642 if (local_err) {
1643 error_propagate(errp, local_err);
1644 return;
1645 }
1646 if (value < min || value > max) {
1647 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1648 name ? name : "null", value, min, max);
1649 return;
1650 }
1651
1652 env->cpuid_version &= ~0xf;
1653 env->cpuid_version |= value & 0xf;
1654 }
1655
1656 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1657 {
1658 X86CPU *cpu = X86_CPU(obj);
1659 CPUX86State *env = &cpu->env;
1660 char *value;
1661
1662 value = g_malloc(CPUID_VENDOR_SZ + 1);
1663 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1664 env->cpuid_vendor3);
1665 return value;
1666 }
1667
1668 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1669 Error **errp)
1670 {
1671 X86CPU *cpu = X86_CPU(obj);
1672 CPUX86State *env = &cpu->env;
1673 int i;
1674
1675 if (strlen(value) != CPUID_VENDOR_SZ) {
1676 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1677 return;
1678 }
1679
1680 env->cpuid_vendor1 = 0;
1681 env->cpuid_vendor2 = 0;
1682 env->cpuid_vendor3 = 0;
1683 for (i = 0; i < 4; i++) {
1684 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1685 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1686 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1687 }
1688 }
1689
1690 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1691 {
1692 X86CPU *cpu = X86_CPU(obj);
1693 CPUX86State *env = &cpu->env;
1694 char *value;
1695 int i;
1696
1697 value = g_malloc(48 + 1);
1698 for (i = 0; i < 48; i++) {
1699 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1700 }
1701 value[48] = '\0';
1702 return value;
1703 }
1704
1705 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1706 Error **errp)
1707 {
1708 X86CPU *cpu = X86_CPU(obj);
1709 CPUX86State *env = &cpu->env;
1710 int c, len, i;
1711
1712 if (model_id == NULL) {
1713 model_id = "";
1714 }
1715 len = strlen(model_id);
1716 memset(env->cpuid_model, 0, 48);
1717 for (i = 0; i < 48; i++) {
1718 if (i >= len) {
1719 c = '\0';
1720 } else {
1721 c = (uint8_t)model_id[i];
1722 }
1723 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1724 }
1725 }
1726
1727 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1728 void *opaque, Error **errp)
1729 {
1730 X86CPU *cpu = X86_CPU(obj);
1731 int64_t value;
1732
1733 value = cpu->env.tsc_khz * 1000;
1734 visit_type_int(v, name, &value, errp);
1735 }
1736
1737 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1738 void *opaque, Error **errp)
1739 {
1740 X86CPU *cpu = X86_CPU(obj);
1741 const int64_t min = 0;
1742 const int64_t max = INT64_MAX;
1743 Error *local_err = NULL;
1744 int64_t value;
1745
1746 visit_type_int(v, name, &value, &local_err);
1747 if (local_err) {
1748 error_propagate(errp, local_err);
1749 return;
1750 }
1751 if (value < min || value > max) {
1752 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1753 name ? name : "null", value, min, max);
1754 return;
1755 }
1756
1757 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1758 }
1759
1760 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1761 void *opaque, Error **errp)
1762 {
1763 X86CPU *cpu = X86_CPU(obj);
1764 int64_t value = cpu->apic_id;
1765
1766 visit_type_int(v, name, &value, errp);
1767 }
1768
1769 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1770 void *opaque, Error **errp)
1771 {
1772 X86CPU *cpu = X86_CPU(obj);
1773 DeviceState *dev = DEVICE(obj);
1774 const int64_t min = 0;
1775 const int64_t max = UINT32_MAX;
1776 Error *error = NULL;
1777 int64_t value;
1778
1779 if (dev->realized) {
1780 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1781 "it was realized", name, object_get_typename(obj));
1782 return;
1783 }
1784
1785 visit_type_int(v, name, &value, &error);
1786 if (error) {
1787 error_propagate(errp, error);
1788 return;
1789 }
1790 if (value < min || value > max) {
1791 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1792 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1793 object_get_typename(obj), name, value, min, max);
1794 return;
1795 }
1796
1797 if ((value != cpu->apic_id) && cpu_exists(value)) {
1798 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1799 return;
1800 }
1801 cpu->apic_id = value;
1802 }
1803
1804 /* Generic getter for "feature-words" and "filtered-features" properties */
1805 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1806 const char *name, void *opaque,
1807 Error **errp)
1808 {
1809 uint32_t *array = (uint32_t *)opaque;
1810 FeatureWord w;
1811 Error *err = NULL;
1812 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1813 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1814 X86CPUFeatureWordInfoList *list = NULL;
1815
1816 for (w = 0; w < FEATURE_WORDS; w++) {
1817 FeatureWordInfo *wi = &feature_word_info[w];
1818 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1819 qwi->cpuid_input_eax = wi->cpuid_eax;
1820 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1821 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1822 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1823 qwi->features = array[w];
1824
1825 /* List will be in reverse order, but order shouldn't matter */
1826 list_entries[w].next = list;
1827 list_entries[w].value = &word_infos[w];
1828 list = &list_entries[w];
1829 }
1830
1831 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1832 error_propagate(errp, err);
1833 }
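/*
 * The "feature-words" and "filtered-features" properties registered with
 * this getter in x86_cpu_initfn() can be read back over QOM; a rough QMP
 * sketch (the object path is only an example and depends on how the CPU
 * was created):
 *
 *   { "execute": "qom-get",
 *     "arguments": { "path": "/machine/unattached/device[0]",
 *                    "property": "feature-words" } }
 */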
1834
1835 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1836 void *opaque, Error **errp)
1837 {
1838 X86CPU *cpu = X86_CPU(obj);
1839 int64_t value = cpu->hyperv_spinlock_attempts;
1840
1841 visit_type_int(v, name, &value, errp);
1842 }
1843
1844 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1845 void *opaque, Error **errp)
1846 {
1847 const int64_t min = 0xFFF;
1848 const int64_t max = UINT_MAX;
1849 X86CPU *cpu = X86_CPU(obj);
1850 Error *err = NULL;
1851 int64_t value;
1852
1853 visit_type_int(v, name, &value, &err);
1854 if (err) {
1855 error_propagate(errp, err);
1856 return;
1857 }
1858
1859 if (value < min || value > max) {
1860 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1861 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1862 object_get_typename(obj), name ? name : "null",
1863 value, min, max);
1864 return;
1865 }
1866 cpu->hyperv_spinlock_attempts = value;
1867 }
1868
1869 static PropertyInfo qdev_prop_spinlocks = {
1870 .name = "int",
1871 .get = x86_get_hv_spinlocks,
1872 .set = x86_set_hv_spinlocks,
1873 };
1874
1875 /* Convert all '_' in a feature string option name to '-', so that the feature
1876 * name conforms to the QOM property naming rule, which uses '-' instead of '_'.
1877 */
1878 static inline void feat2prop(char *s)
1879 {
1880 while ((s = strchr(s, '_'))) {
1881 *s = '-';
1882 }
1883 }
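/*
 * For example, an option key written as "lahf_lm" on the command line is
 * looked up as the QOM property "lahf-lm"; the same rewriting is applied to
 * the names taken from feat_names when the bit properties are registered,
 * so both spellings end up referring to the same property.
 */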
1884
1885 /* Parse "+feature,-feature,feature=foo" CPU feature string
1886 */
1887 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1888 Error **errp)
1889 {
1890 X86CPU *cpu = X86_CPU(cs);
1891 char *featurestr; /* Single "key=value" string being parsed */
1892 FeatureWord w;
1893 /* Features to be added */
1894 FeatureWordArray plus_features = { 0 };
1895 /* Features to be removed */
1896 FeatureWordArray minus_features = { 0 };
1897 uint32_t numvalue;
1898 CPUX86State *env = &cpu->env;
1899 Error *local_err = NULL;
1900
1901 featurestr = features ? strtok(features, ",") : NULL;
1902
1903 while (featurestr) {
1904 char *val;
1905 if (featurestr[0] == '+') {
1906 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1907 } else if (featurestr[0] == '-') {
1908 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1909 } else if ((val = strchr(featurestr, '='))) {
1910 *val = 0; val++;
1911 feat2prop(featurestr);
1912 if (!strcmp(featurestr, "xlevel")) {
1913 char *err;
1914 char num[32];
1915
1916 numvalue = strtoul(val, &err, 0);
1917 if (!*val || *err) {
1918 error_setg(errp, "bad numerical value %s", val);
1919 return;
1920 }
1921 if (numvalue < 0x80000000) {
1922 error_report("xlevel value shall always be >= 0x80000000"
1923 ", fixup will be removed in future versions");
1924 numvalue += 0x80000000;
1925 }
1926 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1927 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1928 } else if (!strcmp(featurestr, "tsc-freq")) {
1929 int64_t tsc_freq;
1930 char *err;
1931 char num[32];
1932
1933 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1934 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1935 if (tsc_freq < 0 || *err) {
1936 error_setg(errp, "bad numerical value %s", val);
1937 return;
1938 }
1939 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1940 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1941 &local_err);
1942 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1943 char *err;
1944 const int min = 0xFFF;
1945 char num[32];
1946 numvalue = strtoul(val, &err, 0);
1947 if (!*val || *err) {
1948 error_setg(errp, "bad numerical value %s", val);
1949 return;
1950 }
1951 if (numvalue < min) {
1952 error_report("hv-spinlocks value shall always be >= 0x%x"
1953 ", fixup will be removed in future versions",
1954 min);
1955 numvalue = min;
1956 }
1957 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1958 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1959 } else {
1960 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1961 }
1962 } else {
1963 feat2prop(featurestr);
1964 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1965 }
1966 if (local_err) {
1967 error_propagate(errp, local_err);
1968 return;
1969 }
1970 featurestr = strtok(NULL, ",");
1971 }
1972
1973 if (cpu->host_features) {
1974 for (w = 0; w < FEATURE_WORDS; w++) {
1975 env->features[w] =
1976 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1977 }
1978 }
1979
1980 for (w = 0; w < FEATURE_WORDS; w++) {
1981 env->features[w] |= plus_features[w];
1982 env->features[w] &= ~minus_features[w];
1983 }
1984 }
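/*
 * A sketch of how a typical -cpu option is consumed by the parser above
 * (flag names assumed to exist in feature_word_info):
 *
 *   -cpu qemu64,+avx,-svm,xlevel=0x80000008,tsc-freq=1000000000
 *
 * "+avx" and "-svm" are collected into plus_features/minus_features via
 * add_flagname_to_bitmaps(), "xlevel" and "tsc-freq" are converted to
 * numbers and forwarded to the matching QOM properties, and every other
 * "key=value" or bare "key" entry is handed to object_property_parse()
 * (bare keys are parsed as "on").
 */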
1985
1986 /* Print all CPUID feature names in featureset
1987 */
1988 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1989 {
1990 int bit;
1991 bool first = true;
1992
1993 for (bit = 0; bit < 32; bit++) {
1994 if (featureset[bit]) {
1995 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1996 first = false;
1997 }
1998 }
1999 }
2000
2001 /* Generate CPU information. */
2002 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2003 {
2004 X86CPUDefinition *def;
2005 char buf[256];
2006 int i;
2007
2008 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2009 def = &builtin_x86_defs[i];
2010 snprintf(buf, sizeof(buf), "%s", def->name);
2011 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2012 }
2013 #ifdef CONFIG_KVM
2014 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2015 "KVM processor with all supported host features "
2016 "(only available in KVM mode)");
2017 #endif
2018
2019 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2020 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2021 FeatureWordInfo *fw = &feature_word_info[i];
2022
2023 (*cpu_fprintf)(f, " ");
2024 listflags(f, cpu_fprintf, fw->feat_names);
2025 (*cpu_fprintf)(f, "\n");
2026 }
2027 }
2028
2029 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2030 {
2031 CpuDefinitionInfoList *cpu_list = NULL;
2032 X86CPUDefinition *def;
2033 int i;
2034
2035 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2036 CpuDefinitionInfoList *entry;
2037 CpuDefinitionInfo *info;
2038
2039 def = &builtin_x86_defs[i];
2040 info = g_malloc0(sizeof(*info));
2041 info->name = g_strdup(def->name);
2042
2043 entry = g_malloc0(sizeof(*entry));
2044 entry->value = info;
2045 entry->next = cpu_list;
2046 cpu_list = entry;
2047 }
2048
2049 return cpu_list;
2050 }
2051
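/*
 * Returns the bits of feature word 'w' that the current accelerator can
 * actually provide: KVM asks the kernel through
 * kvm_arch_get_supported_cpuid(), TCG uses the static tcg_features mask,
 * and any other configuration is treated as fully supported. When
 * migratable_only is set, bits not known to be migration-safe are masked
 * out as well.
 */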
2052 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2053 bool migratable_only)
2054 {
2055 FeatureWordInfo *wi = &feature_word_info[w];
2056 uint32_t r;
2057
2058 if (kvm_enabled()) {
2059 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2060 wi->cpuid_ecx,
2061 wi->cpuid_reg);
2062 } else if (tcg_enabled()) {
2063 r = wi->tcg_features;
2064 } else {
2065 return ~0;
2066 }
2067 if (migratable_only) {
2068 r &= x86_cpu_get_migratable_flags(w);
2069 }
2070 return r;
2071 }
2072
2073 /*
2074 * Filters CPU feature words based on host availability of each feature.
2075 *
2076 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2077 */
2078 static int x86_cpu_filter_features(X86CPU *cpu)
2079 {
2080 CPUX86State *env = &cpu->env;
2081 FeatureWord w;
2082 int rv = 0;
2083
2084 for (w = 0; w < FEATURE_WORDS; w++) {
2085 uint32_t host_feat =
2086 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2087 uint32_t requested_features = env->features[w];
2088 env->features[w] &= host_feat;
2089 cpu->filtered_features[w] = requested_features & ~env->features[w];
2090 if (cpu->filtered_features[w]) {
2091 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2092 report_unavailable_features(w, cpu->filtered_features[w]);
2093 }
2094 rv = 1;
2095 }
2096 }
2097
2098 return rv;
2099 }
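/*
 * The bits removed here are remembered in cpu->filtered_features, which is
 * what the "filtered-features" QOM property reports; with "-cpu ...,enforce"
 * the non-zero return value makes x86_cpu_realizefn() fail instead of
 * silently dropping the flags.
 */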
2100
2101 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2102 {
2103 PropValue *pv;
2104 for (pv = props; pv->prop; pv++) {
2105 if (!pv->value) {
2106 continue;
2107 }
2108 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2109 &error_abort);
2110 }
2111 }
2112
2113 /* Load data from X86CPUDefinition
2114 */
2115 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2116 {
2117 CPUX86State *env = &cpu->env;
2118 const char *vendor;
2119 char host_vendor[CPUID_VENDOR_SZ + 1];
2120 FeatureWord w;
2121
2122 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2123 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2124 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2125 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2126 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2127 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2128 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2129 for (w = 0; w < FEATURE_WORDS; w++) {
2130 env->features[w] = def->features[w];
2131 }
2132
2133 /* Special cases not set in the X86CPUDefinition structs: */
2134 if (kvm_enabled()) {
2135 if (!kvm_irqchip_in_kernel()) {
2136 x86_cpu_change_kvm_default("x2apic", "off");
2137 }
2138
2139 x86_cpu_apply_props(cpu, kvm_default_props);
2140 }
2141
2142 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2143
2144 /* sysenter isn't supported in compatibility mode on AMD,
2145 * syscall isn't supported in compatibility mode on Intel.
2146 * Normally we advertise the actual CPU vendor, but you can
2147 * override this using the 'vendor' property if you want to use
2148 * KVM's sysenter/syscall emulation in compatibility mode and
2149 * when doing cross-vendor migration.
2150 */
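/* For example, "-cpu qemu64,vendor=GenuineIntel" (exactly 12 characters,
 * as required by x86_cpuid_set_vendor()) keeps the Intel vendor string
 * even when KVM would otherwise advertise the host vendor below.
 */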
2151 vendor = def->vendor;
2152 if (kvm_enabled()) {
2153 uint32_t ebx = 0, ecx = 0, edx = 0;
2154 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2155 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2156 vendor = host_vendor;
2157 }
2158
2159 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2160
2161 }
2162
2163 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2164 {
2165 X86CPU *cpu = NULL;
2166 X86CPUClass *xcc;
2167 ObjectClass *oc;
2168 gchar **model_pieces;
2169 char *name, *features;
2170 Error *error = NULL;
2171
2172 model_pieces = g_strsplit(cpu_model, ",", 2);
2173 if (!model_pieces[0]) {
2174 error_setg(&error, "Invalid/empty CPU model name");
2175 goto out;
2176 }
2177 name = model_pieces[0];
2178 features = model_pieces[1];
2179
2180 oc = x86_cpu_class_by_name(name);
2181 if (oc == NULL) {
2182 error_setg(&error, "Unable to find CPU definition: %s", name);
2183 goto out;
2184 }
2185 xcc = X86_CPU_CLASS(oc);
2186
2187 if (xcc->kvm_required && !kvm_enabled()) {
2188 error_setg(&error, "CPU model '%s' requires KVM", name);
2189 goto out;
2190 }
2191
2192 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2193
2194 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2195 if (error) {
2196 goto out;
2197 }
2198
2199 out:
2200 if (error != NULL) {
2201 error_propagate(errp, error);
2202 if (cpu) {
2203 object_unref(OBJECT(cpu));
2204 cpu = NULL;
2205 }
2206 }
2207 g_strfreev(model_pieces);
2208 return cpu;
2209 }
2210
2211 X86CPU *cpu_x86_init(const char *cpu_model)
2212 {
2213 Error *error = NULL;
2214 X86CPU *cpu;
2215
2216 cpu = cpu_x86_create(cpu_model, &error);
2217 if (error) {
2218 goto out;
2219 }
2220
2221 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2222
2223 out:
2224 if (error) {
2225 error_report_err(error);
2226 if (cpu != NULL) {
2227 object_unref(OBJECT(cpu));
2228 cpu = NULL;
2229 }
2230 }
2231 return cpu;
2232 }
2233
2234 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2235 {
2236 X86CPUDefinition *cpudef = data;
2237 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2238
2239 xcc->cpu_def = cpudef;
2240 }
2241
2242 static void x86_register_cpudef_type(X86CPUDefinition *def)
2243 {
2244 char *typename = x86_cpu_type_name(def->name);
2245 TypeInfo ti = {
2246 .name = typename,
2247 .parent = TYPE_X86_CPU,
2248 .class_init = x86_cpu_cpudef_class_init,
2249 .class_data = def,
2250 };
2251
2252 type_register(&ti);
2253 g_free(typename);
2254 }
2255
2256 #if !defined(CONFIG_USER_ONLY)
2257
2258 void cpu_clear_apic_feature(CPUX86State *env)
2259 {
2260 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2261 }
2262
2263 #endif /* !CONFIG_USER_ONLY */
2264
2265 /* Initialize list of CPU models, filling some non-static fields if necessary
2266 */
2267 void x86_cpudef_setup(void)
2268 {
2269 int i, j;
2270 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2271
2272 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2273 X86CPUDefinition *def = &builtin_x86_defs[i];
2274
2275 /* Look for specific "cpudef" models that have
2276 * the QEMU version in .model_id */
2277 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2278 if (strcmp(model_with_versions[j], def->name) == 0) {
2279 pstrcpy(def->model_id, sizeof(def->model_id),
2280 "QEMU Virtual CPU version ");
2281 pstrcat(def->model_id, sizeof(def->model_id),
2282 qemu_hw_version());
2283 break;
2284 }
2285 }
2286 }
2287 }
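/*
 * For these models the resulting model_id reads
 * "QEMU Virtual CPU version <version>", where the suffix is whatever
 * qemu_hw_version() returns for the machine type in use.
 */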
2288
2289 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2290 uint32_t *eax, uint32_t *ebx,
2291 uint32_t *ecx, uint32_t *edx)
2292 {
2293 X86CPU *cpu = x86_env_get_cpu(env);
2294 CPUState *cs = CPU(cpu);
2295
2296 /* test if maximum index reached */
2297 if (index & 0x80000000) {
2298 if (index > env->cpuid_xlevel) {
2299 if (env->cpuid_xlevel2 > 0) {
2300 /* Handle Centaur's CPUID instruction. */
2301 if (index > env->cpuid_xlevel2) {
2302 index = env->cpuid_xlevel2;
2303 } else if (index < 0xC0000000) {
2304 index = env->cpuid_xlevel;
2305 }
2306 } else {
2307 /* Intel documentation states that invalid EAX input will
2308 * return the same information as EAX=cpuid_level
2309 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2310 */
2311 index = env->cpuid_level;
2312 }
2313 }
2314 } else {
2315 if (index > env->cpuid_level)
2316 index = env->cpuid_level;
2317 }
2318
2319 switch(index) {
2320 case 0:
2321 *eax = env->cpuid_level;
2322 *ebx = env->cpuid_vendor1;
2323 *edx = env->cpuid_vendor2;
2324 *ecx = env->cpuid_vendor3;
2325 break;
2326 case 1:
2327 *eax = env->cpuid_version;
2328 *ebx = (cpu->apic_id << 24) |
2329 8 << 8; /* CLFLUSH line size, in quadwords; Linux wants it. */
2330 *ecx = env->features[FEAT_1_ECX];
2331 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2332 *ecx |= CPUID_EXT_OSXSAVE;
2333 }
2334 *edx = env->features[FEAT_1_EDX];
2335 if (cs->nr_cores * cs->nr_threads > 1) {
2336 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2337 *edx |= CPUID_HT;
2338 }
2339 break;
2340 case 2:
2341 /* cache info: needed for Pentium Pro compatibility */
2342 if (cpu->cache_info_passthrough) {
2343 host_cpuid(index, 0, eax, ebx, ecx, edx);
2344 break;
2345 }
2346 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2347 *ebx = 0;
2348 *ecx = 0;
2349 *edx = (L1D_DESCRIPTOR << 16) | \
2350 (L1I_DESCRIPTOR << 8) | \
2351 (L2_DESCRIPTOR);
2352 break;
2353 case 4:
2354 /* cache info: needed for Core compatibility */
2355 if (cpu->cache_info_passthrough) {
2356 host_cpuid(index, count, eax, ebx, ecx, edx);
2357 *eax &= ~0xFC000000;
2358 } else {
2359 *eax = 0;
2360 switch (count) {
2361 case 0: /* L1 dcache info */
2362 *eax |= CPUID_4_TYPE_DCACHE | \
2363 CPUID_4_LEVEL(1) | \
2364 CPUID_4_SELF_INIT_LEVEL;
2365 *ebx = (L1D_LINE_SIZE - 1) | \
2366 ((L1D_PARTITIONS - 1) << 12) | \
2367 ((L1D_ASSOCIATIVITY - 1) << 22);
2368 *ecx = L1D_SETS - 1;
2369 *edx = CPUID_4_NO_INVD_SHARING;
2370 break;
2371 case 1: /* L1 icache info */
2372 *eax |= CPUID_4_TYPE_ICACHE | \
2373 CPUID_4_LEVEL(1) | \
2374 CPUID_4_SELF_INIT_LEVEL;
2375 *ebx = (L1I_LINE_SIZE - 1) | \
2376 ((L1I_PARTITIONS - 1) << 12) | \
2377 ((L1I_ASSOCIATIVITY - 1) << 22);
2378 *ecx = L1I_SETS - 1;
2379 *edx = CPUID_4_NO_INVD_SHARING;
2380 break;
2381 case 2: /* L2 cache info */
2382 *eax |= CPUID_4_TYPE_UNIFIED | \
2383 CPUID_4_LEVEL(2) | \
2384 CPUID_4_SELF_INIT_LEVEL;
2385 if (cs->nr_threads > 1) {
2386 *eax |= (cs->nr_threads - 1) << 14;
2387 }
2388 *ebx = (L2_LINE_SIZE - 1) | \
2389 ((L2_PARTITIONS - 1) << 12) | \
2390 ((L2_ASSOCIATIVITY - 1) << 22);
2391 *ecx = L2_SETS - 1;
2392 *edx = CPUID_4_NO_INVD_SHARING;
2393 break;
2394 default: /* end of info */
2395 *eax = 0;
2396 *ebx = 0;
2397 *ecx = 0;
2398 *edx = 0;
2399 break;
2400 }
2401 }
2402
2403 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2404 if ((*eax & 31) && cs->nr_cores > 1) {
2405 *eax |= (cs->nr_cores - 1) << 26;
2406 }
2407 break;
2408 case 5:
2409 /* mwait info: needed for Core compatibility */
2410 *eax = 0; /* Smallest monitor-line size in bytes */
2411 *ebx = 0; /* Largest monitor-line size in bytes */
2412 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2413 *edx = 0;
2414 break;
2415 case 6:
2416 /* Thermal and Power Leaf */
2417 *eax = env->features[FEAT_6_EAX];
2418 *ebx = 0;
2419 *ecx = 0;
2420 *edx = 0;
2421 break;
2422 case 7:
2423 /* Structured Extended Feature Flags Enumeration Leaf */
2424 if (count == 0) {
2425 *eax = 0; /* Maximum ECX value for sub-leaves */
2426 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2427 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2428 *edx = 0; /* Reserved */
2429 } else {
2430 *eax = 0;
2431 *ebx = 0;
2432 *ecx = 0;
2433 *edx = 0;
2434 }
2435 break;
2436 case 9:
2437 /* Direct Cache Access Information Leaf */
2438 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2439 *ebx = 0;
2440 *ecx = 0;
2441 *edx = 0;
2442 break;
2443 case 0xA:
2444 /* Architectural Performance Monitoring Leaf */
2445 if (kvm_enabled() && cpu->enable_pmu) {
2446 KVMState *s = cs->kvm_state;
2447
2448 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2449 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2450 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2451 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2452 } else {
2453 *eax = 0;
2454 *ebx = 0;
2455 *ecx = 0;
2456 *edx = 0;
2457 }
2458 break;
2459 case 0xD: {
2460 KVMState *s = cs->kvm_state;
2461 uint64_t ena_mask;
2462 int i;
2463
2464 /* Processor Extended State */
2465 *eax = 0;
2466 *ebx = 0;
2467 *ecx = 0;
2468 *edx = 0;
2469 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2470 break;
2471 }
2472 if (kvm_enabled()) {
2473 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2474 ena_mask <<= 32;
2475 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2476 } else {
2477 ena_mask = -1;
2478 }
2479
2480 if (count == 0) {
2481 *ecx = 0x240;
2482 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2483 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2484 if ((env->features[esa->feature] & esa->bits) == esa->bits
2485 && ((ena_mask >> i) & 1) != 0) {
2486 if (i < 32) {
2487 *eax |= 1u << i;
2488 } else {
2489 *edx |= 1u << (i - 32);
2490 }
2491 *ecx = MAX(*ecx, esa->offset + esa->size);
2492 }
2493 }
2494 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2495 *ebx = *ecx;
2496 } else if (count == 1) {
2497 *eax = env->features[FEAT_XSAVE];
2498 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2499 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2500 if ((env->features[esa->feature] & esa->bits) == esa->bits
2501 && ((ena_mask >> count) & 1) != 0) {
2502 *eax = esa->size;
2503 *ebx = esa->offset;
2504 }
2505 }
2506 break;
2507 }
2508 case 0x80000000:
2509 *eax = env->cpuid_xlevel;
2510 *ebx = env->cpuid_vendor1;
2511 *edx = env->cpuid_vendor2;
2512 *ecx = env->cpuid_vendor3;
2513 break;
2514 case 0x80000001:
2515 *eax = env->cpuid_version;
2516 *ebx = 0;
2517 *ecx = env->features[FEAT_8000_0001_ECX];
2518 *edx = env->features[FEAT_8000_0001_EDX];
2519
2520 /* The Linux kernel checks for the CMPLegacy bit and
2521 * discards multiple thread information if it is set.
2522 * So don't set it here for Intel, to keep Linux guests happy.
2523 */
2524 if (cs->nr_cores * cs->nr_threads > 1) {
2525 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2526 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2527 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2528 *ecx |= 1 << 1; /* CmpLegacy bit */
2529 }
2530 }
2531 break;
2532 case 0x80000002:
2533 case 0x80000003:
2534 case 0x80000004:
2535 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2536 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2537 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2538 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2539 break;
2540 case 0x80000005:
2541 /* cache info (L1 cache) */
2542 if (cpu->cache_info_passthrough) {
2543 host_cpuid(index, 0, eax, ebx, ecx, edx);
2544 break;
2545 }
2546 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2547 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2548 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2549 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2550 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2551 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2552 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2553 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2554 break;
2555 case 0x80000006:
2556 /* cache info (L2 cache) */
2557 if (cpu->cache_info_passthrough) {
2558 host_cpuid(index, 0, eax, ebx, ecx, edx);
2559 break;
2560 }
2561 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2562 (L2_DTLB_2M_ENTRIES << 16) | \
2563 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2564 (L2_ITLB_2M_ENTRIES);
2565 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2566 (L2_DTLB_4K_ENTRIES << 16) | \
2567 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2568 (L2_ITLB_4K_ENTRIES);
2569 *ecx = (L2_SIZE_KB_AMD << 16) | \
2570 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2571 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2572 *edx = ((L3_SIZE_KB/512) << 18) | \
2573 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2574 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2575 break;
2576 case 0x80000007:
2577 *eax = 0;
2578 *ebx = 0;
2579 *ecx = 0;
2580 *edx = env->features[FEAT_8000_0007_EDX];
2581 break;
2582 case 0x80000008:
2583 /* virtual & phys address size in low 2 bytes. */
2584 /* XXX: This value must match the one used in the MMU code. */
2585 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2586 /* 64 bit processor */
2587 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2588 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2589 } else {
2590 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2591 *eax = 0x00000024; /* 36 bits physical */
2592 } else {
2593 *eax = 0x00000020; /* 32 bits physical */
2594 }
2595 }
2596 *ebx = 0;
2597 *ecx = 0;
2598 *edx = 0;
2599 if (cs->nr_cores * cs->nr_threads > 1) {
2600 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2601 }
2602 break;
2603 case 0x8000000A:
2604 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2605 *eax = 0x00000001; /* SVM Revision */
2606 *ebx = 0x00000010; /* nr of ASIDs */
2607 *ecx = 0;
2608 *edx = env->features[FEAT_SVM]; /* optional features */
2609 } else {
2610 *eax = 0;
2611 *ebx = 0;
2612 *ecx = 0;
2613 *edx = 0;
2614 }
2615 break;
2616 case 0xC0000000:
2617 *eax = env->cpuid_xlevel2;
2618 *ebx = 0;
2619 *ecx = 0;
2620 *edx = 0;
2621 break;
2622 case 0xC0000001:
2623 /* Support for VIA CPU's CPUID instruction */
2624 *eax = env->cpuid_version;
2625 *ebx = 0;
2626 *ecx = 0;
2627 *edx = env->features[FEAT_C000_0001_EDX];
2628 break;
2629 case 0xC0000002:
2630 case 0xC0000003:
2631 case 0xC0000004:
2632 /* Reserved for future use; currently filled with zero */
2633 *eax = 0;
2634 *ebx = 0;
2635 *ecx = 0;
2636 *edx = 0;
2637 break;
2638 default:
2639 /* reserved values: zero */
2640 *eax = 0;
2641 *ebx = 0;
2642 *ecx = 0;
2643 *edx = 0;
2644 break;
2645 }
2646 }
2647
2648 /* CPUClass::reset() */
2649 static void x86_cpu_reset(CPUState *s)
2650 {
2651 X86CPU *cpu = X86_CPU(s);
2652 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2653 CPUX86State *env = &cpu->env;
2654 target_ulong cr4;
2655 uint64_t xcr0;
2656 int i;
2657
2658 xcc->parent_reset(s);
2659
2660 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2661
2662 tlb_flush(s, 1);
2663
2664 env->old_exception = -1;
2665
2666 /* init to reset state */
2667
2668 #ifdef CONFIG_SOFTMMU
2669 env->hflags |= HF_SOFTMMU_MASK;
2670 #endif
2671 env->hflags2 |= HF2_GIF_MASK;
2672
2673 cpu_x86_update_cr0(env, 0x60000010);
2674 env->a20_mask = ~0x0;
2675 env->smbase = 0x30000;
2676
2677 env->idt.limit = 0xffff;
2678 env->gdt.limit = 0xffff;
2679 env->ldt.limit = 0xffff;
2680 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2681 env->tr.limit = 0xffff;
2682 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2683
2684 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2685 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2686 DESC_R_MASK | DESC_A_MASK);
2687 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2688 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2689 DESC_A_MASK);
2690 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2691 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2692 DESC_A_MASK);
2693 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2694 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2695 DESC_A_MASK);
2696 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2697 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2698 DESC_A_MASK);
2699 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2700 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2701 DESC_A_MASK);
2702
2703 env->eip = 0xfff0;
2704 env->regs[R_EDX] = env->cpuid_version;
2705
2706 env->eflags = 0x2;
2707
2708 /* FPU init */
2709 for (i = 0; i < 8; i++) {
2710 env->fptags[i] = 1;
2711 }
2712 cpu_set_fpuc(env, 0x37f);
2713
2714 env->mxcsr = 0x1f80;
2715 /* All units are in INIT state. */
2716 env->xstate_bv = 0;
2717
2718 env->pat = 0x0007040600070406ULL;
2719 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2720
2721 memset(env->dr, 0, sizeof(env->dr));
2722 env->dr[6] = DR6_FIXED_1;
2723 env->dr[7] = DR7_FIXED_1;
2724 cpu_breakpoint_remove_all(s, BP_CPU);
2725 cpu_watchpoint_remove_all(s, BP_CPU);
2726
2727 cr4 = 0;
2728 xcr0 = XSTATE_FP_MASK;
2729
2730 #ifdef CONFIG_USER_ONLY
2731 /* Enable all the features for user-mode. */
2732 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2733 xcr0 |= XSTATE_SSE_MASK;
2734 }
2735 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_MPX) {
2736 xcr0 |= XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK;
2737 }
2738 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2739 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2740 }
2741 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2742 cr4 |= CR4_FSGSBASE_MASK;
2743 }
2744 #endif
2745
2746 env->xcr0 = xcr0;
2747 cpu_x86_update_cr4(env, cr4);
2748
2749 /*
2750 * SDM 11.11.5 requires:
2751 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2752 * - IA32_MTRR_PHYSMASKn.V = 0
2753 * All other bits are undefined. For simplification, zero it all.
2754 */
2755 env->mtrr_deftype = 0;
2756 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2757 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2758
2759 #if !defined(CONFIG_USER_ONLY)
2760 /* We hard-wire the BSP to the first CPU. */
2761 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2762
2763 s->halted = !cpu_is_bsp(cpu);
2764
2765 if (kvm_enabled()) {
2766 kvm_arch_reset_vcpu(cpu);
2767 }
2768 #endif
2769 }
2770
2771 #ifndef CONFIG_USER_ONLY
2772 bool cpu_is_bsp(X86CPU *cpu)
2773 {
2774 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2775 }
2776
2777 /* TODO: remove me when reset over the QOM tree is implemented */
2778 static void x86_cpu_machine_reset_cb(void *opaque)
2779 {
2780 X86CPU *cpu = opaque;
2781 cpu_reset(CPU(cpu));
2782 }
2783 #endif
2784
2785 static void mce_init(X86CPU *cpu)
2786 {
2787 CPUX86State *cenv = &cpu->env;
2788 unsigned int bank;
2789
2790 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2791 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2792 (CPUID_MCE | CPUID_MCA)) {
2793 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2794 cenv->mcg_ctl = ~(uint64_t)0;
2795 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2796 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2797 }
2798 }
2799 }
2800
2801 #ifndef CONFIG_USER_ONLY
2802 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2803 {
2804 APICCommonState *apic;
2805 const char *apic_type = "apic";
2806
2807 if (kvm_apic_in_kernel()) {
2808 apic_type = "kvm-apic";
2809 } else if (xen_enabled()) {
2810 apic_type = "xen-apic";
2811 }
2812
2813 cpu->apic_state = DEVICE(object_new(apic_type));
2814
2815 object_property_add_child(OBJECT(cpu), "apic",
2816 OBJECT(cpu->apic_state), NULL);
2817 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2818 /* TODO: convert to link<> */
2819 apic = APIC_COMMON(cpu->apic_state);
2820 apic->cpu = cpu;
2821 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2822 }
2823
2824 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2825 {
2826 APICCommonState *apic;
2827 static bool apic_mmio_map_once;
2828
2829 if (cpu->apic_state == NULL) {
2830 return;
2831 }
2832 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2833 errp);
2834
2835 /* Map APIC MMIO area */
2836 apic = APIC_COMMON(cpu->apic_state);
2837 if (!apic_mmio_map_once) {
2838 memory_region_add_subregion_overlap(get_system_memory(),
2839 apic->apicbase &
2840 MSR_IA32_APICBASE_BASE,
2841 &apic->io_memory,
2842 0x1000);
2843 apic_mmio_map_once = true;
2844 }
2845 }
2846
2847 static void x86_cpu_machine_done(Notifier *n, void *unused)
2848 {
2849 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2850 MemoryRegion *smram =
2851 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2852
2853 if (smram) {
2854 cpu->smram = g_new(MemoryRegion, 1);
2855 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2856 smram, 0, 1ull << 32);
2857 memory_region_set_enabled(cpu->smram, false);
2858 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2859 }
2860 }
2861 #else
2862 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2863 {
2864 }
2865 #endif
2866
2867
2868 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2869 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2870 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2871 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2872 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2873 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2874 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2875 {
2876 CPUState *cs = CPU(dev);
2877 X86CPU *cpu = X86_CPU(dev);
2878 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2879 CPUX86State *env = &cpu->env;
2880 Error *local_err = NULL;
2881 static bool ht_warned;
2882
2883 if (cpu->apic_id < 0) {
2884 error_setg(errp, "apic-id property was not initialized properly");
2885 return;
2886 }
2887
2888 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2889 env->cpuid_level = 7;
2890 }
2891
2892 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2893 * CPUID[1].EDX.
2894 */
2895 if (IS_AMD_CPU(env)) {
2896 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2897 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2898 & CPUID_EXT2_AMD_ALIASES);
2899 }
2900
2901
2902 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2903 error_setg(&local_err,
2904 kvm_enabled() ?
2905 "Host doesn't support requested features" :
2906 "TCG doesn't support requested features");
2907 goto out;
2908 }
2909
2910 #ifndef CONFIG_USER_ONLY
2911 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2912
2913 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2914 x86_cpu_apic_create(cpu, &local_err);
2915 if (local_err != NULL) {
2916 goto out;
2917 }
2918 }
2919 #endif
2920
2921 mce_init(cpu);
2922
2923 #ifndef CONFIG_USER_ONLY
2924 if (tcg_enabled()) {
2925 AddressSpace *newas = g_new(AddressSpace, 1);
2926
2927 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2928 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2929
2930 /* Outer container... */
2931 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2932 memory_region_set_enabled(cpu->cpu_as_root, true);
2933
2934 /* ... with two regions inside: normal system memory with low
2935 * priority, and...
2936 */
2937 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2938 get_system_memory(), 0, ~0ull);
2939 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2940 memory_region_set_enabled(cpu->cpu_as_mem, true);
2941 address_space_init(newas, cpu->cpu_as_root, "CPU");
2942 cs->num_ases = 1;
2943 cpu_address_space_init(cs, newas, 0);
2944
2945 /* ... SMRAM with higher priority, linked from /machine/smram. */
2946 cpu->machine_done.notify = x86_cpu_machine_done;
2947 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2948 }
2949 #endif
2950
2951 qemu_init_vcpu(cs);
2952
2953 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2954 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2955 * based on inputs (sockets, cores, threads), it is still better to give
2956 * users a warning.
2957 *
2958 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2959 * cs->nr_threads hasn't been populated yet and the check is incorrect.
2960 */
2961 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2962 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2963 " -smp options properly.");
2964 ht_warned = true;
2965 }
2966
2967 x86_cpu_apic_realize(cpu, &local_err);
2968 if (local_err != NULL) {
2969 goto out;
2970 }
2971 cpu_reset(cs);
2972
2973 xcc->parent_realize(dev, &local_err);
2974
2975 out:
2976 if (local_err != NULL) {
2977 error_propagate(errp, local_err);
2978 return;
2979 }
2980 }
2981
2982 typedef struct BitProperty {
2983 uint32_t *ptr;
2984 uint32_t mask;
2985 } BitProperty;
2986
2987 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
2988 void *opaque, Error **errp)
2989 {
2990 BitProperty *fp = opaque;
2991 bool value = (*fp->ptr & fp->mask) == fp->mask;
2992 visit_type_bool(v, name, &value, errp);
2993 }
2994
2995 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
2996 void *opaque, Error **errp)
2997 {
2998 DeviceState *dev = DEVICE(obj);
2999 BitProperty *fp = opaque;
3000 Error *local_err = NULL;
3001 bool value;
3002
3003 if (dev->realized) {
3004 qdev_prop_set_after_realize(dev, name, errp);
3005 return;
3006 }
3007
3008 visit_type_bool(v, name, &value, &local_err);
3009 if (local_err) {
3010 error_propagate(errp, local_err);
3011 return;
3012 }
3013
3014 if (value) {
3015 *fp->ptr |= fp->mask;
3016 } else {
3017 *fp->ptr &= ~fp->mask;
3018 }
3019 }
3020
3021 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3022 void *opaque)
3023 {
3024 BitProperty *prop = opaque;
3025 g_free(prop);
3026 }
3027
3028 /* Register a boolean property to get/set a single bit in a uint32_t field.
3029 *
3030 * The same property name can be registered multiple times to make it affect
3031 * multiple bits in the same FeatureWord. In that case, the getter will return
3032 * true only if all bits are set.
3033 */
3034 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3035 const char *prop_name,
3036 uint32_t *field,
3037 int bitnr)
3038 {
3039 BitProperty *fp;
3040 ObjectProperty *op;
3041 uint32_t mask = (1UL << bitnr);
3042
3043 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3044 if (op) {
3045 fp = op->opaque;
3046 assert(fp->ptr == field);
3047 fp->mask |= mask;
3048 } else {
3049 fp = g_new0(BitProperty, 1);
3050 fp->ptr = field;
3051 fp->mask = mask;
3052 object_property_add(OBJECT(cpu), prop_name, "bool",
3053 x86_cpu_get_bit_prop,
3054 x86_cpu_set_bit_prop,
3055 x86_cpu_release_bit_prop, fp, &error_abort);
3056 }
3057 }
3058
3059 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3060 FeatureWord w,
3061 int bitnr)
3062 {
3063 Object *obj = OBJECT(cpu);
3064 int i;
3065 char **names;
3066 FeatureWordInfo *fi = &feature_word_info[w];
3067
3068 if (!fi->feat_names) {
3069 return;
3070 }
3071 if (!fi->feat_names[bitnr]) {
3072 return;
3073 }
3074
3075 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3076
3077 feat2prop(names[0]);
3078 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3079
3080 for (i = 1; names[i]; i++) {
3081 feat2prop(names[i]);
3082 object_property_add_alias(obj, names[i], obj, names[0],
3083 &error_abort);
3084 }
3085
3086 g_strfreev(names);
3087 }
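/*
 * For example, given a hypothetical feat_names entry "foo|foo_alt", the
 * first name becomes the bit property ("foo") and every following name is
 * registered as an alias to it ("foo-alt" after feat2prop()), so either
 * spelling flips the same bit in env->features[w].
 */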
3088
3089 static void x86_cpu_initfn(Object *obj)
3090 {
3091 CPUState *cs = CPU(obj);
3092 X86CPU *cpu = X86_CPU(obj);
3093 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3094 CPUX86State *env = &cpu->env;
3095 FeatureWord w;
3096 static int inited;
3097
3098 cs->env_ptr = env;
3099 cpu_exec_init(cs, &error_abort);
3100
3101 object_property_add(obj, "family", "int",
3102 x86_cpuid_version_get_family,
3103 x86_cpuid_version_set_family, NULL, NULL, NULL);
3104 object_property_add(obj, "model", "int",
3105 x86_cpuid_version_get_model,
3106 x86_cpuid_version_set_model, NULL, NULL, NULL);
3107 object_property_add(obj, "stepping", "int",
3108 x86_cpuid_version_get_stepping,
3109 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3110 object_property_add_str(obj, "vendor",
3111 x86_cpuid_get_vendor,
3112 x86_cpuid_set_vendor, NULL);
3113 object_property_add_str(obj, "model-id",
3114 x86_cpuid_get_model_id,
3115 x86_cpuid_set_model_id, NULL);
3116 object_property_add(obj, "tsc-frequency", "int",
3117 x86_cpuid_get_tsc_freq,
3118 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3119 object_property_add(obj, "apic-id", "int",
3120 x86_cpuid_get_apic_id,
3121 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3122 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3123 x86_cpu_get_feature_words,
3124 NULL, NULL, (void *)env->features, NULL);
3125 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3126 x86_cpu_get_feature_words,
3127 NULL, NULL, (void *)cpu->filtered_features, NULL);
3128
3129 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3130
3131 #ifndef CONFIG_USER_ONLY
3132 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3133 cpu->apic_id = -1;
3134 #endif
3135
3136 for (w = 0; w < FEATURE_WORDS; w++) {
3137 int bitnr;
3138
3139 for (bitnr = 0; bitnr < 32; bitnr++) {
3140 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3141 }
3142 }
3143
3144 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3145
3146 /* init various static tables used in TCG mode */
3147 if (tcg_enabled() && !inited) {
3148 inited = 1;
3149 tcg_x86_init();
3150 }
3151 }
3152
3153 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3154 {
3155 X86CPU *cpu = X86_CPU(cs);
3156
3157 return cpu->apic_id;
3158 }
3159
3160 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3161 {
3162 X86CPU *cpu = X86_CPU(cs);
3163
3164 return cpu->env.cr[0] & CR0_PG_MASK;
3165 }
3166
3167 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3168 {
3169 X86CPU *cpu = X86_CPU(cs);
3170
3171 cpu->env.eip = value;
3172 }
3173
3174 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3175 {
3176 X86CPU *cpu = X86_CPU(cs);
3177
3178 cpu->env.eip = tb->pc - tb->cs_base;
3179 }
3180
3181 static bool x86_cpu_has_work(CPUState *cs)
3182 {
3183 X86CPU *cpu = X86_CPU(cs);
3184 CPUX86State *env = &cpu->env;
3185
3186 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3187 CPU_INTERRUPT_POLL)) &&
3188 (env->eflags & IF_MASK)) ||
3189 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3190 CPU_INTERRUPT_INIT |
3191 CPU_INTERRUPT_SIPI |
3192 CPU_INTERRUPT_MCE)) ||
3193 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3194 !(env->hflags & HF_SMM_MASK));
3195 }
3196
3197 static Property x86_cpu_properties[] = {
3198 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3199 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3200 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3201 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3202 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3203 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3204 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3205 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3206 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3207 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3208 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3209 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3210 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3211 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3212 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3213 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3214 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3215 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3216 DEFINE_PROP_END_OF_LIST()
3217 };
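/*
 * These qdev properties are what the -cpu feature string ultimately sets;
 * a rough sketch (spellings as accepted by x86_cpu_parse_featurestr()
 * above):
 *
 *   -cpu kvm64,hv-relaxed,hv-spinlocks=0x1fff,enforce
 *
 * turns on hyperv_relaxed_timing and enforce_cpuid and stores 0x1fff in
 * hyperv_spinlock_attempts before the CPU is realized.
 */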
3218
3219 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3220 {
3221 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3222 CPUClass *cc = CPU_CLASS(oc);
3223 DeviceClass *dc = DEVICE_CLASS(oc);
3224
3225 xcc->parent_realize = dc->realize;
3226 dc->realize = x86_cpu_realizefn;
3227 dc->props = x86_cpu_properties;
3228
3229 xcc->parent_reset = cc->reset;
3230 cc->reset = x86_cpu_reset;
3231 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3232
3233 cc->class_by_name = x86_cpu_class_by_name;
3234 cc->parse_features = x86_cpu_parse_featurestr;
3235 cc->has_work = x86_cpu_has_work;
3236 cc->do_interrupt = x86_cpu_do_interrupt;
3237 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3238 cc->dump_state = x86_cpu_dump_state;
3239 cc->set_pc = x86_cpu_set_pc;
3240 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3241 cc->gdb_read_register = x86_cpu_gdb_read_register;
3242 cc->gdb_write_register = x86_cpu_gdb_write_register;
3243 cc->get_arch_id = x86_cpu_get_arch_id;
3244 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3245 #ifdef CONFIG_USER_ONLY
3246 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3247 #else
3248 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3249 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3250 cc->write_elf64_note = x86_cpu_write_elf64_note;
3251 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3252 cc->write_elf32_note = x86_cpu_write_elf32_note;
3253 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3254 cc->vmsd = &vmstate_x86_cpu;
3255 #endif
3256 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3257 #ifndef CONFIG_USER_ONLY
3258 cc->debug_excp_handler = breakpoint_handler;
3259 #endif
3260 cc->cpu_exec_enter = x86_cpu_exec_enter;
3261 cc->cpu_exec_exit = x86_cpu_exec_exit;
3262
3263 /*
3264 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3265 * object in cpus -> dangling pointer after final object_unref().
3266 */
3267 dc->cannot_destroy_with_object_finalize_yet = true;
3268 }
3269
3270 static const TypeInfo x86_cpu_type_info = {
3271 .name = TYPE_X86_CPU,
3272 .parent = TYPE_CPU,
3273 .instance_size = sizeof(X86CPU),
3274 .instance_init = x86_cpu_initfn,
3275 .abstract = true,
3276 .class_size = sizeof(X86CPUClass),
3277 .class_init = x86_cpu_common_class_init,
3278 };
3279
3280 static void x86_cpu_register_types(void)
3281 {
3282 int i;
3283
3284 type_register_static(&x86_cpu_type_info);
3285 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3286 x86_register_cpudef_type(&builtin_x86_defs[i]);
3287 }
3288 #ifdef CONFIG_KVM
3289 type_register_static(&host_x86_cpu_type_info);
3290 #endif
3291 }
3292
3293 type_init(x86_cpu_register_types)