target-i386: fix confusion in xcr0 bit position vs. mask
target-i386/cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
23
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
33
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
38
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
43
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
53 /* Cache topology CPUID constants: */
54
55 /* CPUID Leaf 2 Descriptors */
56
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60
61
62 /* CPUID Leaf 4 constants: */
63
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
68
69 #define CPUID_4_LEVEL(l) ((l) << 5)
70
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
73
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
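/*
 * These EAX field constants follow the CPUID leaf 4 layout: bits 4:0 hold
 * the cache type, bits 7:5 the cache level, bit 8 "self initializing" and
 * bit 9 "fully associative".  A leaf-4 EAX word for the L1 data cache is
 * therefore assembled roughly as (illustrative sketch, not a quote of the
 * leaf-4 code):
 *
 *     uint32_t eax = CPUID_4_TYPE_DCACHE      // type  = data cache
 *                  | CPUID_4_LEVEL(1)         // level = 1
 *                  | CPUID_4_SELF_INIT_LEVEL; // self-initializing
 */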
78
79 #define ASSOC_FULL 0xFF
80
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
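/*
 * Worked example of the encoding above: only the listed associativities
 * have a defined value, anything else collapses to 0 ("invalid"):
 *
 *     AMD_ENC_ASSOC(16)         == 0x8
 *     AMD_ENC_ASSOC(ASSOC_FULL) == 0xF
 *     AMD_ENC_ASSOC(3)          == 0x0   (no defined encoding)
 */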
94
95
96 /* Definitions of the hardcoded cache entries we expose: */
97
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
109
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
121
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
133
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
139
140 /* TLB definitions: */
141
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
146
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
151
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
156
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
161
162
163
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
166 {
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
172 }
173 dst[CPUID_VENDOR_SZ] = '\0';
174 }
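/*
 * Worked example: CPUID leaf 0 returns the vendor string in EBX, EDX, ECX
 * order, four little-endian bytes per register, so on an Intel host:
 *
 *     char dst[CPUID_VENDOR_SZ + 1];
 *     x86_cpu_vendor_words2str(dst, 0x756e6547,   // "Genu"
 *                                   0x49656e69,   // "ineI"
 *                                   0x6c65746e);  // "ntel"
 *     // dst == "GenuineIntel"
 */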
175
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
179 */
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
189 };
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
199 };
200 /* Feature names that are already defined in feature_name[] but are also set
201 * in CPUID[8000_0001].EDX on AMD CPUs are not repeated in ext2_feature_name[].
202 * Instead, they are copied automatically into cpuid_ext2_features if and only
203 * if the CPU vendor is AMD (see the sketch after the table below).
204 */
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
214 };
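/*
 * Minimal sketch of the alias copy described above (the real code lives in
 * the realize path and is only approximated here): for an AMD-vendor CPU,
 * the CPUID[1].EDX bits that AMD mirrors into CPUID[8000_0001].EDX are
 * copied over wholesale:
 *
 *     if (IS_AMD_CPU(env)) {
 *         env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
 *         env->features[FEAT_8000_0001_EDX] |=
 *             env->features[FEAT_1_EDX] & CPUID_EXT2_AMD_ALIASES;
 *     }
 */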
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
224 };
225
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
235 };
236
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
246 };
247
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257 };
258
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
263 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
264 };
265
266 static const char *cpuid_7_0_ecx_feature_name[] = {
267 NULL, NULL, NULL, "pku",
268 "ospke", NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
275 };
276
277 static const char *cpuid_apm_edx_feature_name[] = {
278 NULL, NULL, NULL, NULL,
279 NULL, NULL, NULL, NULL,
280 "invtsc", NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286 };
287
288 static const char *cpuid_xsave_feature_name[] = {
289 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
297 };
298
299 static const char *cpuid_6_feature_name[] = {
300 NULL, NULL, "arat", NULL,
301 NULL, NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
305 NULL, NULL, NULL, NULL,
306 NULL, NULL, NULL, NULL,
307 NULL, NULL, NULL, NULL,
308 };
309
310 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
311 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
312 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
313 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
314 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
315 CPUID_PSE36 | CPUID_FXSR)
316 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
317 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
318 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
319 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
320 CPUID_PAE | CPUID_SEP | CPUID_APIC)
321
322 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
323 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
324 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
325 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
326 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
327 /* partly implemented:
328 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
329 /* missing:
330 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
331 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
332 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
333 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
334 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
335 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
336 /* missing:
337 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
338 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
339 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
340 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
341 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
342
343 #ifdef TARGET_X86_64
344 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
345 #else
346 #define TCG_EXT2_X86_64_FEATURES 0
347 #endif
348
349 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
350 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
351 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
352 TCG_EXT2_X86_64_FEATURES)
353 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
354 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
355 #define TCG_EXT4_FEATURES 0
356 #define TCG_SVM_FEATURES 0
357 #define TCG_KVM_FEATURES 0
358 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
359 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
360 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
361 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
362 /* missing:
363 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
364 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
365 CPUID_7_0_EBX_RDSEED */
366 #define TCG_7_0_ECX_FEATURES 0
367 #define TCG_APM_FEATURES 0
368 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
369 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
370 /* missing:
371 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
372
373 typedef struct FeatureWordInfo {
374 const char **feat_names;
375 uint32_t cpuid_eax; /* Input EAX for CPUID */
376 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
377 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
378 int cpuid_reg; /* output register (R_* constant) */
379 uint32_t tcg_features; /* Feature flags supported by TCG */
380 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
381 } FeatureWordInfo;
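/*
 * Sketch of how this table is consumed: each feature word names a CPUID
 * leaf (plus an optional ECX index) and one output register, so fetching
 * the host's raw value for word w reduces to something like (illustrative,
 * not a quote of existing code):
 *
 *     FeatureWordInfo *wi = &feature_word_info[w];
 *     uint32_t regs[CPU_NB_REGS32] = { 0 };
 *     host_cpuid(wi->cpuid_eax, wi->cpuid_needs_ecx ? wi->cpuid_ecx : 0,
 *                &regs[R_EAX], &regs[R_EBX], &regs[R_ECX], &regs[R_EDX]);
 *     uint32_t host_bits = regs[wi->cpuid_reg];
 */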
382
383 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
384 [FEAT_1_EDX] = {
385 .feat_names = feature_name,
386 .cpuid_eax = 1, .cpuid_reg = R_EDX,
387 .tcg_features = TCG_FEATURES,
388 },
389 [FEAT_1_ECX] = {
390 .feat_names = ext_feature_name,
391 .cpuid_eax = 1, .cpuid_reg = R_ECX,
392 .tcg_features = TCG_EXT_FEATURES,
393 },
394 [FEAT_8000_0001_EDX] = {
395 .feat_names = ext2_feature_name,
396 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
397 .tcg_features = TCG_EXT2_FEATURES,
398 },
399 [FEAT_8000_0001_ECX] = {
400 .feat_names = ext3_feature_name,
401 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
402 .tcg_features = TCG_EXT3_FEATURES,
403 },
404 [FEAT_C000_0001_EDX] = {
405 .feat_names = ext4_feature_name,
406 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
407 .tcg_features = TCG_EXT4_FEATURES,
408 },
409 [FEAT_KVM] = {
410 .feat_names = kvm_feature_name,
411 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
412 .tcg_features = TCG_KVM_FEATURES,
413 },
414 [FEAT_SVM] = {
415 .feat_names = svm_feature_name,
416 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
417 .tcg_features = TCG_SVM_FEATURES,
418 },
419 [FEAT_7_0_EBX] = {
420 .feat_names = cpuid_7_0_ebx_feature_name,
421 .cpuid_eax = 7,
422 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
423 .cpuid_reg = R_EBX,
424 .tcg_features = TCG_7_0_EBX_FEATURES,
425 },
426 [FEAT_7_0_ECX] = {
427 .feat_names = cpuid_7_0_ecx_feature_name,
428 .cpuid_eax = 7,
429 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
430 .cpuid_reg = R_ECX,
431 .tcg_features = TCG_7_0_ECX_FEATURES,
432 },
433 [FEAT_8000_0007_EDX] = {
434 .feat_names = cpuid_apm_edx_feature_name,
435 .cpuid_eax = 0x80000007,
436 .cpuid_reg = R_EDX,
437 .tcg_features = TCG_APM_FEATURES,
438 .unmigratable_flags = CPUID_APM_INVTSC,
439 },
440 [FEAT_XSAVE] = {
441 .feat_names = cpuid_xsave_feature_name,
442 .cpuid_eax = 0xd,
443 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
444 .cpuid_reg = R_EAX,
445 .tcg_features = TCG_XSAVE_FEATURES,
446 },
447 [FEAT_6_EAX] = {
448 .feat_names = cpuid_6_feature_name,
449 .cpuid_eax = 6, .cpuid_reg = R_EAX,
450 .tcg_features = TCG_6_EAX_FEATURES,
451 },
452 };
453
454 typedef struct X86RegisterInfo32 {
455 /* Name of register */
456 const char *name;
457 /* QAPI enum value register */
458 X86CPURegister32 qapi_enum;
459 } X86RegisterInfo32;
460
461 #define REGISTER(reg) \
462 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
463 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
464 REGISTER(EAX),
465 REGISTER(ECX),
466 REGISTER(EDX),
467 REGISTER(EBX),
468 REGISTER(ESP),
469 REGISTER(EBP),
470 REGISTER(ESI),
471 REGISTER(EDI),
472 };
473 #undef REGISTER
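/*
 * For reference, REGISTER(EAX) expands to the designated initializer
 *
 *     [R_EAX] = { .name = "EAX", .qapi_enum = X86_CPU_REGISTER32_EAX },
 *
 * so the table is indexed by the R_* constants and does not depend on the
 * order in which the entries are written.
 */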
474
475 const ExtSaveArea x86_ext_save_areas[] = {
476 [XSTATE_YMM_BIT] =
477 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
478 .offset = 0x240, .size = 0x100 },
479 [XSTATE_BNDREGS_BIT] =
480 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
481 .offset = 0x3c0, .size = 0x40 },
482 [XSTATE_BNDCSR_BIT] =
483 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
484 .offset = 0x400, .size = 0x40 },
485 [XSTATE_OPMASK_BIT] =
486 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
487 .offset = 0x440, .size = 0x40 },
488 [XSTATE_ZMM_Hi256_BIT] =
489 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
490 .offset = 0x480, .size = 0x200 },
491 [XSTATE_Hi16_ZMM_BIT] =
492 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
493 .offset = 0x680, .size = 0x400 },
494 [XSTATE_PKRU_BIT] =
495 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
496 .offset = 0xA80, .size = 0x8 },
497 };
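/*
 * Worked example of the bit-position vs. mask distinction this table relies
 * on: entries are indexed by XCR0 *bit numbers*, while the value tested
 * against XCR0 is the corresponding *mask*:
 *
 *     unsigned bit  = XSTATE_YMM_BIT;         // 2 for AVX state
 *     uint64_t mask = 1ULL << XSTATE_YMM_BIT; // 0x4
 *
 * The AVX area itself occupies bytes 0x240..0x33f of the standard XSAVE
 * layout (offset 0x240, size 0x100, as listed above).
 */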
498
499 const char *get_register_name_32(unsigned int reg)
500 {
501 if (reg >= CPU_NB_REGS32) {
502 return NULL;
503 }
504 return x86_reg_info_32[reg].name;
505 }
506
507 /*
508 * Returns the set of feature flags that are supported and migratable by
509 * QEMU, for a given FeatureWord.
510 */
511 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
512 {
513 FeatureWordInfo *wi = &feature_word_info[w];
514 uint32_t r = 0;
515 int i;
516
517 for (i = 0; i < 32; i++) {
518 uint32_t f = 1U << i;
519 /* If the feature name is unknown, it is not supported by QEMU yet */
520 if (!wi->feat_names[i]) {
521 continue;
522 }
523 /* Skip features known to QEMU, but explicitly marked as unmigratable */
524 if (wi->unmigratable_flags & f) {
525 continue;
526 }
527 r |= f;
528 }
529 return r;
530 }
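/*
 * Typical use (sketch): callers intersect whatever the accelerator reports
 * as supported with this mask, e.g.
 *
 *     supported &= x86_cpu_get_migratable_flags(w);
 *
 * so that bits QEMU cannot name, or has explicitly marked unmigratable, are
 * dropped whenever only migratable features are wanted.
 */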
531
532 void host_cpuid(uint32_t function, uint32_t count,
533 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
534 {
535 uint32_t vec[4];
536
537 #ifdef __x86_64__
538 asm volatile("cpuid"
539 : "=a"(vec[0]), "=b"(vec[1]),
540 "=c"(vec[2]), "=d"(vec[3])
541 : "0"(function), "c"(count) : "cc");
542 #elif defined(__i386__)
543 asm volatile("pusha \n\t"
544 "cpuid \n\t"
545 "mov %%eax, 0(%2) \n\t"
546 "mov %%ebx, 4(%2) \n\t"
547 "mov %%ecx, 8(%2) \n\t"
548 "mov %%edx, 12(%2) \n\t"
549 "popa"
550 : : "a"(function), "c"(count), "S"(vec)
551 : "memory", "cc");
552 #else
553 abort();
554 #endif
555
556 if (eax)
557 *eax = vec[0];
558 if (ebx)
559 *ebx = vec[1];
560 if (ecx)
561 *ecx = vec[2];
562 if (edx)
563 *edx = vec[3];
564 }
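/*
 * Usage sketch: sub-leaves are selected through "count", and any output
 * pointer may be NULL when that register is not needed:
 *
 *     uint32_t ebx;
 *     host_cpuid(7, 0, NULL, &ebx, NULL, NULL);   // CPUID.(EAX=07H,ECX=0):EBX
 */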
565
566 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
567
568 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
569 * a substring. ex if !NULL points to the first char after a substring,
570 * otherwise the string is assumed to be sized by a terminating nul.
571 * Return lexical ordering of *s1:*s2.
572 */
573 static int sstrcmp(const char *s1, const char *e1,
574 const char *s2, const char *e2)
575 {
576 for (;;) {
577 if (!*s1 || !*s2 || *s1 != *s2)
578 return (*s1 - *s2);
579 ++s1, ++s2;
580 if (s1 == e1 && s2 == e2)
581 return (0);
582 else if (s1 == e1)
583 return (*s2);
584 else if (s2 == e2)
585 return (*s1);
586 }
587 }
588
589 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
590 * '|' delimited (possibly empty) strings in which case search for a match
591 * within the alternatives proceeds left to right. Return 0 for success,
592 * non-zero otherwise.
593 */
594 static int altcmp(const char *s, const char *e, const char *altstr)
595 {
596 const char *p, *q;
597
598 for (q = p = altstr; ; ) {
599 while (*p && *p != '|')
600 ++p;
601 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
602 return (0);
603 if (!*p)
604 return (1);
605 else
606 q = ++p;
607 }
608 }
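/*
 * Example: this alternatives mechanism is what lets a flag table entry such
 * as "sse4.1|sse4_1" accept either spelling:
 *
 *     altcmp(s, e, "sse4.1|sse4_1") == 0   when [s..e) is "sse4.1" or "sse4_1"
 *
 * and returns non-zero for any other name.
 */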
609
610 /* search featureset for flag *[s..e), if found set corresponding bit in
611 * *pval and return true, otherwise return false
612 */
613 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
614 const char **featureset)
615 {
616 uint32_t mask;
617 const char **ppc;
618 bool found = false;
619
620 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
621 if (*ppc && !altcmp(s, e, *ppc)) {
622 *pval |= mask;
623 found = true;
624 }
625 }
626 return found;
627 }
628
629 static void add_flagname_to_bitmaps(const char *flagname,
630 FeatureWordArray words,
631 Error **errp)
632 {
633 FeatureWord w;
634 for (w = 0; w < FEATURE_WORDS; w++) {
635 FeatureWordInfo *wi = &feature_word_info[w];
636 if (wi->feat_names &&
637 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
638 break;
639 }
640 }
641 if (w == FEATURE_WORDS) {
642 error_setg(errp, "CPU feature %s not found", flagname);
643 }
644 }
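/*
 * Usage sketch: a feature name coming from a "-cpu ...,+avx"-style option
 * ends up here as flagname == "avx", which sets CPUID_EXT_AVX in
 * words[FEAT_1_ECX]; an unknown name leaves the array untouched and reports
 * an error instead:
 *
 *     Error *err = NULL;
 *     FeatureWordArray plus_features = { 0 };
 *     add_flagname_to_bitmaps("avx", plus_features, &err);
 *     // plus_features[FEAT_1_ECX] now has CPUID_EXT_AVX set
 */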
645
646 /* CPU class name definitions: */
647
648 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
649 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
650
651 /* Return the QOM type name for a given CPU model name.
652 * Caller is responsible for freeing the returned string.
653 */
654 static char *x86_cpu_type_name(const char *model_name)
655 {
656 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
657 }
658
659 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
660 {
661 ObjectClass *oc;
662 char *typename;
663
664 if (cpu_model == NULL) {
665 return NULL;
666 }
667
668 typename = x86_cpu_type_name(cpu_model);
669 oc = object_class_by_name(typename);
670 g_free(typename);
671 return oc;
672 }
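/*
 * Example: on an x86_64 target TYPE_X86_CPU is "x86_64-cpu", so
 * x86_cpu_type_name("qemu64") returns "qemu64-x86_64-cpu" and
 * x86_cpu_class_by_name("qemu64") resolves to the class registered under
 * that type name (or NULL when no such model exists).
 */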
673
674 struct X86CPUDefinition {
675 const char *name;
676 uint32_t level;
677 uint32_t xlevel;
678 uint32_t xlevel2;
679 /* vendor is zero-terminated, 12 character ASCII string */
680 char vendor[CPUID_VENDOR_SZ + 1];
681 int family;
682 int model;
683 int stepping;
684 FeatureWordArray features;
685 char model_id[48];
686 };
687
688 static X86CPUDefinition builtin_x86_defs[] = {
689 {
690 .name = "qemu64",
691 .level = 0xd,
692 .vendor = CPUID_VENDOR_AMD,
693 .family = 6,
694 .model = 6,
695 .stepping = 3,
696 .features[FEAT_1_EDX] =
697 PPRO_FEATURES |
698 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
699 CPUID_PSE36,
700 .features[FEAT_1_ECX] =
701 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
702 .features[FEAT_8000_0001_EDX] =
703 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
704 .features[FEAT_8000_0001_ECX] =
705 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
706 .xlevel = 0x8000000A,
707 },
708 {
709 .name = "phenom",
710 .level = 5,
711 .vendor = CPUID_VENDOR_AMD,
712 .family = 16,
713 .model = 2,
714 .stepping = 3,
715 /* Missing: CPUID_HT */
716 .features[FEAT_1_EDX] =
717 PPRO_FEATURES |
718 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
719 CPUID_PSE36 | CPUID_VME,
720 .features[FEAT_1_ECX] =
721 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
722 CPUID_EXT_POPCNT,
723 .features[FEAT_8000_0001_EDX] =
724 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
725 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
726 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
727 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
728 CPUID_EXT3_CR8LEG,
729 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
730 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
731 .features[FEAT_8000_0001_ECX] =
732 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
733 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
734 /* Missing: CPUID_SVM_LBRV */
735 .features[FEAT_SVM] =
736 CPUID_SVM_NPT,
737 .xlevel = 0x8000001A,
738 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
739 },
740 {
741 .name = "core2duo",
742 .level = 10,
743 .vendor = CPUID_VENDOR_INTEL,
744 .family = 6,
745 .model = 15,
746 .stepping = 11,
747 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
748 .features[FEAT_1_EDX] =
749 PPRO_FEATURES |
750 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
751 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
752 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
753 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
754 .features[FEAT_1_ECX] =
755 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
756 CPUID_EXT_CX16,
757 .features[FEAT_8000_0001_EDX] =
758 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
759 .features[FEAT_8000_0001_ECX] =
760 CPUID_EXT3_LAHF_LM,
761 .xlevel = 0x80000008,
762 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
763 },
764 {
765 .name = "kvm64",
766 .level = 0xd,
767 .vendor = CPUID_VENDOR_INTEL,
768 .family = 15,
769 .model = 6,
770 .stepping = 1,
771 /* Missing: CPUID_HT */
772 .features[FEAT_1_EDX] =
773 PPRO_FEATURES | CPUID_VME |
774 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
775 CPUID_PSE36,
776 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
777 .features[FEAT_1_ECX] =
778 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
779 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
780 .features[FEAT_8000_0001_EDX] =
781 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
782 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
783 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
784 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
785 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
786 .features[FEAT_8000_0001_ECX] =
787 0,
788 .xlevel = 0x80000008,
789 .model_id = "Common KVM processor"
790 },
791 {
792 .name = "qemu32",
793 .level = 4,
794 .vendor = CPUID_VENDOR_INTEL,
795 .family = 6,
796 .model = 6,
797 .stepping = 3,
798 .features[FEAT_1_EDX] =
799 PPRO_FEATURES,
800 .features[FEAT_1_ECX] =
801 CPUID_EXT_SSE3,
802 .xlevel = 0x80000004,
803 },
804 {
805 .name = "kvm32",
806 .level = 5,
807 .vendor = CPUID_VENDOR_INTEL,
808 .family = 15,
809 .model = 6,
810 .stepping = 1,
811 .features[FEAT_1_EDX] =
812 PPRO_FEATURES | CPUID_VME |
813 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
814 .features[FEAT_1_ECX] =
815 CPUID_EXT_SSE3,
816 .features[FEAT_8000_0001_ECX] =
817 0,
818 .xlevel = 0x80000008,
819 .model_id = "Common 32-bit KVM processor"
820 },
821 {
822 .name = "coreduo",
823 .level = 10,
824 .vendor = CPUID_VENDOR_INTEL,
825 .family = 6,
826 .model = 14,
827 .stepping = 8,
828 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
829 .features[FEAT_1_EDX] =
830 PPRO_FEATURES | CPUID_VME |
831 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
832 CPUID_SS,
833 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
834 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
835 .features[FEAT_1_ECX] =
836 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
837 .features[FEAT_8000_0001_EDX] =
838 CPUID_EXT2_NX,
839 .xlevel = 0x80000008,
840 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
841 },
842 {
843 .name = "486",
844 .level = 1,
845 .vendor = CPUID_VENDOR_INTEL,
846 .family = 4,
847 .model = 8,
848 .stepping = 0,
849 .features[FEAT_1_EDX] =
850 I486_FEATURES,
851 .xlevel = 0,
852 },
853 {
854 .name = "pentium",
855 .level = 1,
856 .vendor = CPUID_VENDOR_INTEL,
857 .family = 5,
858 .model = 4,
859 .stepping = 3,
860 .features[FEAT_1_EDX] =
861 PENTIUM_FEATURES,
862 .xlevel = 0,
863 },
864 {
865 .name = "pentium2",
866 .level = 2,
867 .vendor = CPUID_VENDOR_INTEL,
868 .family = 6,
869 .model = 5,
870 .stepping = 2,
871 .features[FEAT_1_EDX] =
872 PENTIUM2_FEATURES,
873 .xlevel = 0,
874 },
875 {
876 .name = "pentium3",
877 .level = 3,
878 .vendor = CPUID_VENDOR_INTEL,
879 .family = 6,
880 .model = 7,
881 .stepping = 3,
882 .features[FEAT_1_EDX] =
883 PENTIUM3_FEATURES,
884 .xlevel = 0,
885 },
886 {
887 .name = "athlon",
888 .level = 2,
889 .vendor = CPUID_VENDOR_AMD,
890 .family = 6,
891 .model = 2,
892 .stepping = 3,
893 .features[FEAT_1_EDX] =
894 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
895 CPUID_MCA,
896 .features[FEAT_8000_0001_EDX] =
897 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
898 .xlevel = 0x80000008,
899 },
900 {
901 .name = "n270",
902 .level = 10,
903 .vendor = CPUID_VENDOR_INTEL,
904 .family = 6,
905 .model = 28,
906 .stepping = 2,
907 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
908 .features[FEAT_1_EDX] =
909 PPRO_FEATURES |
910 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
911 CPUID_ACPI | CPUID_SS,
912 /* Some CPUs lack CPUID_SEP */
913 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
914 * CPUID_EXT_XTPR */
915 .features[FEAT_1_ECX] =
916 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
917 CPUID_EXT_MOVBE,
918 .features[FEAT_8000_0001_EDX] =
919 CPUID_EXT2_NX,
920 .features[FEAT_8000_0001_ECX] =
921 CPUID_EXT3_LAHF_LM,
922 .xlevel = 0x80000008,
923 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
924 },
925 {
926 .name = "Conroe",
927 .level = 10,
928 .vendor = CPUID_VENDOR_INTEL,
929 .family = 6,
930 .model = 15,
931 .stepping = 3,
932 .features[FEAT_1_EDX] =
933 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
934 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
935 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
936 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
937 CPUID_DE | CPUID_FP87,
938 .features[FEAT_1_ECX] =
939 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
940 .features[FEAT_8000_0001_EDX] =
941 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
942 .features[FEAT_8000_0001_ECX] =
943 CPUID_EXT3_LAHF_LM,
944 .xlevel = 0x80000008,
945 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
946 },
947 {
948 .name = "Penryn",
949 .level = 10,
950 .vendor = CPUID_VENDOR_INTEL,
951 .family = 6,
952 .model = 23,
953 .stepping = 3,
954 .features[FEAT_1_EDX] =
955 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
956 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
957 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
958 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
959 CPUID_DE | CPUID_FP87,
960 .features[FEAT_1_ECX] =
961 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
962 CPUID_EXT_SSE3,
963 .features[FEAT_8000_0001_EDX] =
964 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
965 .features[FEAT_8000_0001_ECX] =
966 CPUID_EXT3_LAHF_LM,
967 .xlevel = 0x80000008,
968 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
969 },
970 {
971 .name = "Nehalem",
972 .level = 11,
973 .vendor = CPUID_VENDOR_INTEL,
974 .family = 6,
975 .model = 26,
976 .stepping = 3,
977 .features[FEAT_1_EDX] =
978 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
979 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
980 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
981 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
982 CPUID_DE | CPUID_FP87,
983 .features[FEAT_1_ECX] =
984 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
985 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
986 .features[FEAT_8000_0001_EDX] =
987 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
988 .features[FEAT_8000_0001_ECX] =
989 CPUID_EXT3_LAHF_LM,
990 .xlevel = 0x80000008,
991 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
992 },
993 {
994 .name = "Westmere",
995 .level = 11,
996 .vendor = CPUID_VENDOR_INTEL,
997 .family = 6,
998 .model = 44,
999 .stepping = 1,
1000 .features[FEAT_1_EDX] =
1001 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1002 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1003 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1004 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1005 CPUID_DE | CPUID_FP87,
1006 .features[FEAT_1_ECX] =
1007 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1008 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1009 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1010 .features[FEAT_8000_0001_EDX] =
1011 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1012 .features[FEAT_8000_0001_ECX] =
1013 CPUID_EXT3_LAHF_LM,
1014 .features[FEAT_6_EAX] =
1015 CPUID_6_EAX_ARAT,
1016 .xlevel = 0x80000008,
1017 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1018 },
1019 {
1020 .name = "SandyBridge",
1021 .level = 0xd,
1022 .vendor = CPUID_VENDOR_INTEL,
1023 .family = 6,
1024 .model = 42,
1025 .stepping = 1,
1026 .features[FEAT_1_EDX] =
1027 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1028 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1029 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1030 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1031 CPUID_DE | CPUID_FP87,
1032 .features[FEAT_1_ECX] =
1033 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1034 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1035 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1036 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1037 CPUID_EXT_SSE3,
1038 .features[FEAT_8000_0001_EDX] =
1039 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1040 CPUID_EXT2_SYSCALL,
1041 .features[FEAT_8000_0001_ECX] =
1042 CPUID_EXT3_LAHF_LM,
1043 .features[FEAT_XSAVE] =
1044 CPUID_XSAVE_XSAVEOPT,
1045 .features[FEAT_6_EAX] =
1046 CPUID_6_EAX_ARAT,
1047 .xlevel = 0x80000008,
1048 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1049 },
1050 {
1051 .name = "IvyBridge",
1052 .level = 0xd,
1053 .vendor = CPUID_VENDOR_INTEL,
1054 .family = 6,
1055 .model = 58,
1056 .stepping = 9,
1057 .features[FEAT_1_EDX] =
1058 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1059 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1060 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1061 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1062 CPUID_DE | CPUID_FP87,
1063 .features[FEAT_1_ECX] =
1064 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1065 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1066 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1067 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1068 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1069 .features[FEAT_7_0_EBX] =
1070 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1071 CPUID_7_0_EBX_ERMS,
1072 .features[FEAT_8000_0001_EDX] =
1073 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1074 CPUID_EXT2_SYSCALL,
1075 .features[FEAT_8000_0001_ECX] =
1076 CPUID_EXT3_LAHF_LM,
1077 .features[FEAT_XSAVE] =
1078 CPUID_XSAVE_XSAVEOPT,
1079 .features[FEAT_6_EAX] =
1080 CPUID_6_EAX_ARAT,
1081 .xlevel = 0x80000008,
1082 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1083 },
1084 {
1085 .name = "Haswell-noTSX",
1086 .level = 0xd,
1087 .vendor = CPUID_VENDOR_INTEL,
1088 .family = 6,
1089 .model = 60,
1090 .stepping = 1,
1091 .features[FEAT_1_EDX] =
1092 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1093 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1094 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1095 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1096 CPUID_DE | CPUID_FP87,
1097 .features[FEAT_1_ECX] =
1098 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1099 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1100 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1101 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1102 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1103 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1104 .features[FEAT_8000_0001_EDX] =
1105 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1106 CPUID_EXT2_SYSCALL,
1107 .features[FEAT_8000_0001_ECX] =
1108 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1109 .features[FEAT_7_0_EBX] =
1110 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1111 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1112 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1113 .features[FEAT_XSAVE] =
1114 CPUID_XSAVE_XSAVEOPT,
1115 .features[FEAT_6_EAX] =
1116 CPUID_6_EAX_ARAT,
1117 .xlevel = 0x80000008,
1118 .model_id = "Intel Core Processor (Haswell, no TSX)",
1119 }, {
1120 .name = "Haswell",
1121 .level = 0xd,
1122 .vendor = CPUID_VENDOR_INTEL,
1123 .family = 6,
1124 .model = 60,
1125 .stepping = 1,
1126 .features[FEAT_1_EDX] =
1127 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1128 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1129 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1130 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1131 CPUID_DE | CPUID_FP87,
1132 .features[FEAT_1_ECX] =
1133 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1134 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1135 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1136 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1137 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1138 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1139 .features[FEAT_8000_0001_EDX] =
1140 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1141 CPUID_EXT2_SYSCALL,
1142 .features[FEAT_8000_0001_ECX] =
1143 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1144 .features[FEAT_7_0_EBX] =
1145 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1146 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1147 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1148 CPUID_7_0_EBX_RTM,
1149 .features[FEAT_XSAVE] =
1150 CPUID_XSAVE_XSAVEOPT,
1151 .features[FEAT_6_EAX] =
1152 CPUID_6_EAX_ARAT,
1153 .xlevel = 0x80000008,
1154 .model_id = "Intel Core Processor (Haswell)",
1155 },
1156 {
1157 .name = "Broadwell-noTSX",
1158 .level = 0xd,
1159 .vendor = CPUID_VENDOR_INTEL,
1160 .family = 6,
1161 .model = 61,
1162 .stepping = 2,
1163 .features[FEAT_1_EDX] =
1164 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1165 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1166 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1167 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1168 CPUID_DE | CPUID_FP87,
1169 .features[FEAT_1_ECX] =
1170 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1171 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1172 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1173 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1174 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1175 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1176 .features[FEAT_8000_0001_EDX] =
1177 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1178 CPUID_EXT2_SYSCALL,
1179 .features[FEAT_8000_0001_ECX] =
1180 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1181 .features[FEAT_7_0_EBX] =
1182 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1183 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1184 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1185 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1186 CPUID_7_0_EBX_SMAP,
1187 .features[FEAT_XSAVE] =
1188 CPUID_XSAVE_XSAVEOPT,
1189 .features[FEAT_6_EAX] =
1190 CPUID_6_EAX_ARAT,
1191 .xlevel = 0x80000008,
1192 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1193 },
1194 {
1195 .name = "Broadwell",
1196 .level = 0xd,
1197 .vendor = CPUID_VENDOR_INTEL,
1198 .family = 6,
1199 .model = 61,
1200 .stepping = 2,
1201 .features[FEAT_1_EDX] =
1202 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1203 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1204 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1205 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1206 CPUID_DE | CPUID_FP87,
1207 .features[FEAT_1_ECX] =
1208 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1209 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1210 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1211 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1212 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1213 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1214 .features[FEAT_8000_0001_EDX] =
1215 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1216 CPUID_EXT2_SYSCALL,
1217 .features[FEAT_8000_0001_ECX] =
1218 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1219 .features[FEAT_7_0_EBX] =
1220 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1221 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1222 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1223 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1224 CPUID_7_0_EBX_SMAP,
1225 .features[FEAT_XSAVE] =
1226 CPUID_XSAVE_XSAVEOPT,
1227 .features[FEAT_6_EAX] =
1228 CPUID_6_EAX_ARAT,
1229 .xlevel = 0x80000008,
1230 .model_id = "Intel Core Processor (Broadwell)",
1231 },
1232 {
1233 .name = "Opteron_G1",
1234 .level = 5,
1235 .vendor = CPUID_VENDOR_AMD,
1236 .family = 15,
1237 .model = 6,
1238 .stepping = 1,
1239 .features[FEAT_1_EDX] =
1240 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1241 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1242 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1243 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1244 CPUID_DE | CPUID_FP87,
1245 .features[FEAT_1_ECX] =
1246 CPUID_EXT_SSE3,
1247 .features[FEAT_8000_0001_EDX] =
1248 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1249 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1250 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1251 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1252 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1253 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1254 .xlevel = 0x80000008,
1255 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1256 },
1257 {
1258 .name = "Opteron_G2",
1259 .level = 5,
1260 .vendor = CPUID_VENDOR_AMD,
1261 .family = 15,
1262 .model = 6,
1263 .stepping = 1,
1264 .features[FEAT_1_EDX] =
1265 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1266 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1267 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1268 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1269 CPUID_DE | CPUID_FP87,
1270 .features[FEAT_1_ECX] =
1271 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1272 /* Missing: CPUID_EXT2_RDTSCP */
1273 .features[FEAT_8000_0001_EDX] =
1274 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1275 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1276 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1277 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1278 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1279 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1280 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1281 .features[FEAT_8000_0001_ECX] =
1282 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1283 .xlevel = 0x80000008,
1284 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1285 },
1286 {
1287 .name = "Opteron_G3",
1288 .level = 5,
1289 .vendor = CPUID_VENDOR_AMD,
1290 .family = 15,
1291 .model = 6,
1292 .stepping = 1,
1293 .features[FEAT_1_EDX] =
1294 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1295 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1296 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1297 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1298 CPUID_DE | CPUID_FP87,
1299 .features[FEAT_1_ECX] =
1300 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1301 CPUID_EXT_SSE3,
1302 /* Missing: CPUID_EXT2_RDTSCP */
1303 .features[FEAT_8000_0001_EDX] =
1304 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1305 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1306 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1307 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1308 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1309 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1310 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1311 .features[FEAT_8000_0001_ECX] =
1312 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1313 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1314 .xlevel = 0x80000008,
1315 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1316 },
1317 {
1318 .name = "Opteron_G4",
1319 .level = 0xd,
1320 .vendor = CPUID_VENDOR_AMD,
1321 .family = 21,
1322 .model = 1,
1323 .stepping = 2,
1324 .features[FEAT_1_EDX] =
1325 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1326 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1327 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1328 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1329 CPUID_DE | CPUID_FP87,
1330 .features[FEAT_1_ECX] =
1331 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1332 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1333 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1334 CPUID_EXT_SSE3,
1335 /* Missing: CPUID_EXT2_RDTSCP */
1336 .features[FEAT_8000_0001_EDX] =
1337 CPUID_EXT2_LM |
1338 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1339 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1340 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1341 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1342 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1343 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1344 .features[FEAT_8000_0001_ECX] =
1345 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1346 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1347 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1348 CPUID_EXT3_LAHF_LM,
1349 /* no xsaveopt! */
1350 .xlevel = 0x8000001A,
1351 .model_id = "AMD Opteron 62xx class CPU",
1352 },
1353 {
1354 .name = "Opteron_G5",
1355 .level = 0xd,
1356 .vendor = CPUID_VENDOR_AMD,
1357 .family = 21,
1358 .model = 2,
1359 .stepping = 0,
1360 .features[FEAT_1_EDX] =
1361 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1362 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1363 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1364 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1365 CPUID_DE | CPUID_FP87,
1366 .features[FEAT_1_ECX] =
1367 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1368 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1369 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1370 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1371 /* Missing: CPUID_EXT2_RDTSCP */
1372 .features[FEAT_8000_0001_EDX] =
1373 CPUID_EXT2_LM |
1374 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1375 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1376 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1377 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1378 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1379 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1380 .features[FEAT_8000_0001_ECX] =
1381 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1382 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1383 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1384 CPUID_EXT3_LAHF_LM,
1385 /* no xsaveopt! */
1386 .xlevel = 0x8000001A,
1387 .model_id = "AMD Opteron 63xx class CPU",
1388 },
1389 };
1390
1391 typedef struct PropValue {
1392 const char *prop, *value;
1393 } PropValue;
1394
1395 /* KVM-specific features that are automatically added/removed
1396 * from all CPU models when KVM is enabled.
1397 */
1398 static PropValue kvm_default_props[] = {
1399 { "kvmclock", "on" },
1400 { "kvm-nopiodelay", "on" },
1401 { "kvm-asyncpf", "on" },
1402 { "kvm-steal-time", "on" },
1403 { "kvm-pv-eoi", "on" },
1404 { "kvmclock-stable-bit", "on" },
1405 { "x2apic", "on" },
1406 { "acpi", "off" },
1407 { "monitor", "off" },
1408 { "svm", "off" },
1409 { NULL, NULL },
1410 };
1411
1412 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1413 {
1414 PropValue *pv;
1415 for (pv = kvm_default_props; pv->prop; pv++) {
1416 if (!strcmp(pv->prop, prop)) {
1417 pv->value = value;
1418 break;
1419 }
1420 }
1421
1422 /* It is valid to call this function only for properties that
1423 * are already present in the kvm_default_props table.
1424 */
1425 assert(pv->prop);
1426 }
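/*
 * Usage sketch (hypothetical values): board code that wants a different KVM
 * default for an existing entry can call, e.g.,
 *
 *     x86_cpu_change_kvm_default("x2apic", "off");
 *
 * Passing a property name that is not already in kvm_default_props trips
 * the assertion above by design.
 */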
1427
1428 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1429 bool migratable_only);
1430
1431 #ifdef CONFIG_KVM
1432
1433 static int cpu_x86_fill_model_id(char *str)
1434 {
1435 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1436 int i;
1437
1438 for (i = 0; i < 3; i++) {
1439 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1440 memcpy(str + i * 16 + 0, &eax, 4);
1441 memcpy(str + i * 16 + 4, &ebx, 4);
1442 memcpy(str + i * 16 + 8, &ecx, 4);
1443 memcpy(str + i * 16 + 12, &edx, 4);
1444 }
1445 return 0;
1446 }
1447
1448 static X86CPUDefinition host_cpudef;
1449
1450 static Property host_x86_cpu_properties[] = {
1451 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1452 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1453 DEFINE_PROP_END_OF_LIST()
1454 };
1455
1456 /* class_init for the "host" CPU model
1457 *
1458 * This function may be called before KVM is initialized.
1459 */
1460 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1461 {
1462 DeviceClass *dc = DEVICE_CLASS(oc);
1463 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1464 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1465
1466 xcc->kvm_required = true;
1467
1468 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1469 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1470
1471 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1472 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1473 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1474 host_cpudef.stepping = eax & 0x0F;
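    /*
     * Worked example: eax == 0x000306c3 (a common Haswell signature) decodes
     * to family = 0x6 + 0x00 = 6, model = 0xc | 0x30 = 0x3c (60) and
     * stepping = 3, i.e. the extended family/model fields are folded in.
     */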
1475
1476 cpu_x86_fill_model_id(host_cpudef.model_id);
1477
1478 xcc->cpu_def = &host_cpudef;
1479
1480 /* level, xlevel, xlevel2, and the feature words are initialized on
1481 * instance_init, because they require KVM to be initialized.
1482 */
1483
1484 dc->props = host_x86_cpu_properties;
1485 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1486 dc->cannot_destroy_with_object_finalize_yet = true;
1487 }
1488
1489 static void host_x86_cpu_initfn(Object *obj)
1490 {
1491 X86CPU *cpu = X86_CPU(obj);
1492 CPUX86State *env = &cpu->env;
1493 KVMState *s = kvm_state;
1494
1495 assert(kvm_enabled());
1496
1497 /* We can't fill the features array here because we don't know yet if
1498 * "migratable" is true or false.
1499 */
1500 cpu->host_features = true;
1501
1502 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1503 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1504 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1505
1506 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1507 }
1508
1509 static const TypeInfo host_x86_cpu_type_info = {
1510 .name = X86_CPU_TYPE_NAME("host"),
1511 .parent = TYPE_X86_CPU,
1512 .instance_init = host_x86_cpu_initfn,
1513 .class_init = host_x86_cpu_class_init,
1514 };
1515
1516 #endif
1517
1518 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1519 {
1520 FeatureWordInfo *f = &feature_word_info[w];
1521 int i;
1522
1523 for (i = 0; i < 32; ++i) {
1524 if ((1UL << i) & mask) {
1525 const char *reg = get_register_name_32(f->cpuid_reg);
1526 assert(reg);
1527 fprintf(stderr, "warning: %s doesn't support requested feature: "
1528 "CPUID.%02XH:%s%s%s [bit %d]\n",
1529 kvm_enabled() ? "host" : "TCG",
1530 f->cpuid_eax, reg,
1531 f->feat_names[i] ? "." : "",
1532 f->feat_names[i] ? f->feat_names[i] : "", i);
1533 }
1534 }
1535 }
1536
1537 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1538 const char *name, void *opaque,
1539 Error **errp)
1540 {
1541 X86CPU *cpu = X86_CPU(obj);
1542 CPUX86State *env = &cpu->env;
1543 int64_t value;
1544
1545 value = (env->cpuid_version >> 8) & 0xf;
1546 if (value == 0xf) {
1547 value += (env->cpuid_version >> 20) & 0xff;
1548 }
1549 visit_type_int(v, name, &value, errp);
1550 }
1551
1552 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1553 const char *name, void *opaque,
1554 Error **errp)
1555 {
1556 X86CPU *cpu = X86_CPU(obj);
1557 CPUX86State *env = &cpu->env;
1558 const int64_t min = 0;
1559 const int64_t max = 0xff + 0xf;
1560 Error *local_err = NULL;
1561 int64_t value;
1562
1563 visit_type_int(v, name, &value, &local_err);
1564 if (local_err) {
1565 error_propagate(errp, local_err);
1566 return;
1567 }
1568 if (value < min || value > max) {
1569 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1570 name ? name : "null", value, min, max);
1571 return;
1572 }
1573
1574 env->cpuid_version &= ~0xff00f00;
1575 if (value > 0x0f) {
1576 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1577 } else {
1578 env->cpuid_version |= value << 8;
1579 }
1580 }
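/*
 * Worked example of the encoding above: setting "family" to 21 (0x15) stores
 * base family 0xf plus extended family 0x6, i.e. cpuid_version gains
 * (0xf << 8) | (0x6 << 20); values up to 15 use the base field alone.
 */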
1581
1582 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1583 const char *name, void *opaque,
1584 Error **errp)
1585 {
1586 X86CPU *cpu = X86_CPU(obj);
1587 CPUX86State *env = &cpu->env;
1588 int64_t value;
1589
1590 value = (env->cpuid_version >> 4) & 0xf;
1591 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1592 visit_type_int(v, name, &value, errp);
1593 }
1594
1595 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1596 const char *name, void *opaque,
1597 Error **errp)
1598 {
1599 X86CPU *cpu = X86_CPU(obj);
1600 CPUX86State *env = &cpu->env;
1601 const int64_t min = 0;
1602 const int64_t max = 0xff;
1603 Error *local_err = NULL;
1604 int64_t value;
1605
1606 visit_type_int(v, name, &value, &local_err);
1607 if (local_err) {
1608 error_propagate(errp, local_err);
1609 return;
1610 }
1611 if (value < min || value > max) {
1612 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1613 name ? name : "null", value, min, max);
1614 return;
1615 }
1616
1617 env->cpuid_version &= ~0xf00f0;
1618 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1619 }
1620
1621 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1622 const char *name, void *opaque,
1623 Error **errp)
1624 {
1625 X86CPU *cpu = X86_CPU(obj);
1626 CPUX86State *env = &cpu->env;
1627 int64_t value;
1628
1629 value = env->cpuid_version & 0xf;
1630 visit_type_int(v, name, &value, errp);
1631 }
1632
1633 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1634 const char *name, void *opaque,
1635 Error **errp)
1636 {
1637 X86CPU *cpu = X86_CPU(obj);
1638 CPUX86State *env = &cpu->env;
1639 const int64_t min = 0;
1640 const int64_t max = 0xf;
1641 Error *local_err = NULL;
1642 int64_t value;
1643
1644 visit_type_int(v, name, &value, &local_err);
1645 if (local_err) {
1646 error_propagate(errp, local_err);
1647 return;
1648 }
1649 if (value < min || value > max) {
1650 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1651 name ? name : "null", value, min, max);
1652 return;
1653 }
1654
1655 env->cpuid_version &= ~0xf;
1656 env->cpuid_version |= value & 0xf;
1657 }
1658
1659 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1660 {
1661 X86CPU *cpu = X86_CPU(obj);
1662 CPUX86State *env = &cpu->env;
1663 char *value;
1664
1665 value = g_malloc(CPUID_VENDOR_SZ + 1);
1666 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1667 env->cpuid_vendor3);
1668 return value;
1669 }
1670
1671 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1672 Error **errp)
1673 {
1674 X86CPU *cpu = X86_CPU(obj);
1675 CPUX86State *env = &cpu->env;
1676 int i;
1677
1678 if (strlen(value) != CPUID_VENDOR_SZ) {
1679 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1680 return;
1681 }
1682
1683 env->cpuid_vendor1 = 0;
1684 env->cpuid_vendor2 = 0;
1685 env->cpuid_vendor3 = 0;
1686 for (i = 0; i < 4; i++) {
1687 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1688 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1689 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1690 }
1691 }
1692
1693 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1694 {
1695 X86CPU *cpu = X86_CPU(obj);
1696 CPUX86State *env = &cpu->env;
1697 char *value;
1698 int i;
1699
1700 value = g_malloc(48 + 1);
1701 for (i = 0; i < 48; i++) {
1702 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1703 }
1704 value[48] = '\0';
1705 return value;
1706 }
1707
1708 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1709 Error **errp)
1710 {
1711 X86CPU *cpu = X86_CPU(obj);
1712 CPUX86State *env = &cpu->env;
1713 int c, len, i;
1714
1715 if (model_id == NULL) {
1716 model_id = "";
1717 }
1718 len = strlen(model_id);
1719 memset(env->cpuid_model, 0, 48);
1720 for (i = 0; i < 48; i++) {
1721 if (i >= len) {
1722 c = '\0';
1723 } else {
1724 c = (uint8_t)model_id[i];
1725 }
1726 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1727 }
1728 }
1729
1730 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1731 void *opaque, Error **errp)
1732 {
1733 X86CPU *cpu = X86_CPU(obj);
1734 int64_t value;
1735
1736 value = cpu->env.tsc_khz * 1000;
1737 visit_type_int(v, name, &value, errp);
1738 }
1739
1740 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1741 void *opaque, Error **errp)
1742 {
1743 X86CPU *cpu = X86_CPU(obj);
1744 const int64_t min = 0;
1745 const int64_t max = INT64_MAX;
1746 Error *local_err = NULL;
1747 int64_t value;
1748
1749 visit_type_int(v, name, &value, &local_err);
1750 if (local_err) {
1751 error_propagate(errp, local_err);
1752 return;
1753 }
1754 if (value < min || value > max) {
1755 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1756 name ? name : "null", value, min, max);
1757 return;
1758 }
1759
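/* The property value is expressed in Hz, while the CPU state stores kHz. */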
1760 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1761 }
1762
1763 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1764 void *opaque, Error **errp)
1765 {
1766 X86CPU *cpu = X86_CPU(obj);
1767 int64_t value = cpu->apic_id;
1768
1769 visit_type_int(v, name, &value, errp);
1770 }
1771
1772 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1773 void *opaque, Error **errp)
1774 {
1775 X86CPU *cpu = X86_CPU(obj);
1776 DeviceState *dev = DEVICE(obj);
1777 const int64_t min = 0;
1778 const int64_t max = UINT32_MAX;
1779 Error *error = NULL;
1780 int64_t value;
1781
1782 if (dev->realized) {
1783 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1784 "it was realized", name, object_get_typename(obj));
1785 return;
1786 }
1787
1788 visit_type_int(v, name, &value, &error);
1789 if (error) {
1790 error_propagate(errp, error);
1791 return;
1792 }
1793 if (value < min || value > max) {
1794 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1795 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1796 object_get_typename(obj), name, value, min, max);
1797 return;
1798 }
1799
1800 if ((value != cpu->apic_id) && cpu_exists(value)) {
1801 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1802 return;
1803 }
1804 cpu->apic_id = value;
1805 }
1806
1807 /* Generic getter for "feature-words" and "filtered-features" properties */
1808 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1809 const char *name, void *opaque,
1810 Error **errp)
1811 {
1812 uint32_t *array = (uint32_t *)opaque;
1813 FeatureWord w;
1814 Error *err = NULL;
1815 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1816 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1817 X86CPUFeatureWordInfoList *list = NULL;
1818
1819 for (w = 0; w < FEATURE_WORDS; w++) {
1820 FeatureWordInfo *wi = &feature_word_info[w];
1821 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1822 qwi->cpuid_input_eax = wi->cpuid_eax;
1823 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1824 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1825 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1826 qwi->features = array[w];
1827
1828 /* List will be in reverse order, but order shouldn't matter */
1829 list_entries[w].next = list;
1830 list_entries[w].value = &word_infos[w];
1831 list = &list_entries[w];
1832 }
1833
1834 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1835 error_propagate(errp, err);
1836 }
1837
1838 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1839 void *opaque, Error **errp)
1840 {
1841 X86CPU *cpu = X86_CPU(obj);
1842 int64_t value = cpu->hyperv_spinlock_attempts;
1843
1844 visit_type_int(v, name, &value, errp);
1845 }
1846
1847 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1848 void *opaque, Error **errp)
1849 {
1850 const int64_t min = 0xFFF;
1851 const int64_t max = UINT_MAX;
1852 X86CPU *cpu = X86_CPU(obj);
1853 Error *err = NULL;
1854 int64_t value;
1855
1856 visit_type_int(v, name, &value, &err);
1857 if (err) {
1858 error_propagate(errp, err);
1859 return;
1860 }
1861
1862 if (value < min || value > max) {
1863 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1864 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1865 object_get_typename(obj), name ? name : "null",
1866 value, min, max);
1867 return;
1868 }
1869 cpu->hyperv_spinlock_attempts = value;
1870 }
1871
1872 static PropertyInfo qdev_prop_spinlocks = {
1873 .name = "int",
1874 .get = x86_get_hv_spinlocks,
1875 .set = x86_set_hv_spinlocks,
1876 };
1877
1878 /* Convert all '_' in a feature string option name to '-', so the feature
1879 * name conforms to the QOM property naming rule, which uses '-' not '_'.
1880 */
1881 static inline void feat2prop(char *s)
1882 {
1883 while ((s = strchr(s, '_'))) {
1884 *s = '-';
1885 }
1886 }
1887
1888 /* Parse "+feature,-feature,feature=foo" CPU feature string
1889 */
1890 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1891 Error **errp)
1892 {
1893 X86CPU *cpu = X86_CPU(cs);
1894 char *featurestr; /* Single "key=value" string being parsed */
1895 FeatureWord w;
1896 /* Features to be added */
1897 FeatureWordArray plus_features = { 0 };
1898 /* Features to be removed */
1899 FeatureWordArray minus_features = { 0 };
1900 uint32_t numvalue;
1901 CPUX86State *env = &cpu->env;
1902 Error *local_err = NULL;
1903
1904 featurestr = features ? strtok(features, ",") : NULL;
1905
1906 while (featurestr) {
1907 char *val;
1908 if (featurestr[0] == '+') {
1909 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1910 } else if (featurestr[0] == '-') {
1911 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1912 } else if ((val = strchr(featurestr, '='))) {
1913 *val = 0; val++;
1914 feat2prop(featurestr);
1915 if (!strcmp(featurestr, "xlevel")) {
1916 char *err;
1917 char num[32];
1918
1919 numvalue = strtoul(val, &err, 0);
1920 if (!*val || *err) {
1921 error_setg(errp, "bad numerical value %s", val);
1922 return;
1923 }
1924 if (numvalue < 0x80000000) {
1925 error_report("xlevel value shall always be >= 0x80000000"
1926 ", fixup will be removed in future versions");
1927 numvalue += 0x80000000;
1928 }
1929 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1930 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1931 } else if (!strcmp(featurestr, "tsc-freq")) {
1932 int64_t tsc_freq;
1933 char *err;
1934 char num[32];
1935
1936 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1937 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1938 if (tsc_freq < 0 || *err) {
1939 error_setg(errp, "bad numerical value %s", val);
1940 return;
1941 }
1942 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1943 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1944 &local_err);
1945 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1946 char *err;
1947 const int min = 0xFFF;
1948 char num[32];
1949 numvalue = strtoul(val, &err, 0);
1950 if (!*val || *err) {
1951 error_setg(errp, "bad numerical value %s", val);
1952 return;
1953 }
1954 if (numvalue < min) {
1955 error_report("hv-spinlocks value shall always be >= 0x%x"
1956 ", fixup will be removed in future versions",
1957 min);
1958 numvalue = min;
1959 }
1960 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1961 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1962 } else {
1963 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1964 }
1965 } else {
1966 feat2prop(featurestr);
1967 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1968 }
1969 if (local_err) {
1970 error_propagate(errp, local_err);
1971 return;
1972 }
1973 featurestr = strtok(NULL, ",");
1974 }
1975
1976 if (cpu->host_features) {
1977 for (w = 0; w < FEATURE_WORDS; w++) {
1978 env->features[w] =
1979 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1980 }
1981 }
1982
1983 for (w = 0; w < FEATURE_WORDS; w++) {
1984 env->features[w] |= plus_features[w];
1985 env->features[w] &= ~minus_features[w];
1986 }
1987 }
1988
1989 /* Print all cpuid feature names in featureset
1990 */
1991 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1992 {
1993 int bit;
1994 bool first = true;
1995
1996 for (bit = 0; bit < 32; bit++) {
1997 if (featureset[bit]) {
1998 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1999 first = false;
2000 }
2001 }
2002 }
2003
2004 /* generate CPU information. */
2005 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2006 {
2007 X86CPUDefinition *def;
2008 char buf[256];
2009 int i;
2010
2011 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2012 def = &builtin_x86_defs[i];
2013 snprintf(buf, sizeof(buf), "%s", def->name);
2014 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2015 }
2016 #ifdef CONFIG_KVM
2017 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2018 "KVM processor with all supported host features "
2019 "(only available in KVM mode)");
2020 #endif
2021
2022 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2023 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2024 FeatureWordInfo *fw = &feature_word_info[i];
2025
2026 (*cpu_fprintf)(f, " ");
2027 listflags(f, cpu_fprintf, fw->feat_names);
2028 (*cpu_fprintf)(f, "\n");
2029 }
2030 }
2031
2032 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2033 {
2034 CpuDefinitionInfoList *cpu_list = NULL;
2035 X86CPUDefinition *def;
2036 int i;
2037
2038 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2039 CpuDefinitionInfoList *entry;
2040 CpuDefinitionInfo *info;
2041
2042 def = &builtin_x86_defs[i];
2043 info = g_malloc0(sizeof(*info));
2044 info->name = g_strdup(def->name);
2045
2046 entry = g_malloc0(sizeof(*entry));
2047 entry->value = info;
2048 entry->next = cpu_list;
2049 cpu_list = entry;
2050 }
2051
2052 return cpu_list;
2053 }
2054
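/* Return the feature bits in word 'w' that the current accelerator can
 * provide: the KVM capability bits when running under KVM, the static
 * tcg_features mask under TCG, and all bits otherwise. If migratable_only
 * is set, the result is further restricted to flags that are safe to
 * migrate.
 */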
2055 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2056 bool migratable_only)
2057 {
2058 FeatureWordInfo *wi = &feature_word_info[w];
2059 uint32_t r;
2060
2061 if (kvm_enabled()) {
2062 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2063 wi->cpuid_ecx,
2064 wi->cpuid_reg);
2065 } else if (tcg_enabled()) {
2066 r = wi->tcg_features;
2067 } else {
2068 return ~0;
2069 }
2070 if (migratable_only) {
2071 r &= x86_cpu_get_migratable_flags(w);
2072 }
2073 return r;
2074 }
2075
2076 /*
2077 * Filters CPU feature words based on host availability of each feature.
2078 *
2079 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2080 */
2081 static int x86_cpu_filter_features(X86CPU *cpu)
2082 {
2083 CPUX86State *env = &cpu->env;
2084 FeatureWord w;
2085 int rv = 0;
2086
2087 for (w = 0; w < FEATURE_WORDS; w++) {
2088 uint32_t host_feat =
2089 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2090 uint32_t requested_features = env->features[w];
2091 env->features[w] &= host_feat;
2092 cpu->filtered_features[w] = requested_features & ~env->features[w];
2093 if (cpu->filtered_features[w]) {
2094 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2095 report_unavailable_features(w, cpu->filtered_features[w]);
2096 }
2097 rv = 1;
2098 }
2099 }
2100
2101 return rv;
2102 }
2103
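/* Apply a list of property/value pairs (terminated by a NULL property name)
 * to the CPU object, skipping entries whose value is NULL.
 */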
2104 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2105 {
2106 PropValue *pv;
2107 for (pv = props; pv->prop; pv++) {
2108 if (!pv->value) {
2109 continue;
2110 }
2111 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2112 &error_abort);
2113 }
2114 }
2115
2116 /* Load data from X86CPUDefinition
2117 */
2118 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2119 {
2120 CPUX86State *env = &cpu->env;
2121 const char *vendor;
2122 char host_vendor[CPUID_VENDOR_SZ + 1];
2123 FeatureWord w;
2124
2125 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2126 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2127 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2128 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2129 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2130 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2131 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2132 for (w = 0; w < FEATURE_WORDS; w++) {
2133 env->features[w] = def->features[w];
2134 }
2135
2136 /* Special cases not set in the X86CPUDefinition structs: */
2137 if (kvm_enabled()) {
2138 x86_cpu_apply_props(cpu, kvm_default_props);
2139 }
2140
2141 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2142
2143 /* sysenter isn't supported in compatibility mode on AMD,
2144 * syscall isn't supported in compatibility mode on Intel.
2145 * Normally we advertise the actual CPU vendor, but you can
2146 * override this using the 'vendor' property if you want to use
2147 * KVM's sysenter/syscall emulation in compatibility mode and
2148 * when doing cross-vendor migration.
2149 */
2150 vendor = def->vendor;
2151 if (kvm_enabled()) {
2152 uint32_t ebx = 0, ecx = 0, edx = 0;
2153 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2154 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2155 vendor = host_vendor;
2156 }
2157
2158 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2159
2160 }
2161
2162 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2163 {
2164 X86CPU *cpu = NULL;
2165 X86CPUClass *xcc;
2166 ObjectClass *oc;
2167 gchar **model_pieces;
2168 char *name, *features;
2169 Error *error = NULL;
2170
2171 model_pieces = g_strsplit(cpu_model, ",", 2);
2172 if (!model_pieces[0]) {
2173 error_setg(&error, "Invalid/empty CPU model name");
2174 goto out;
2175 }
2176 name = model_pieces[0];
2177 features = model_pieces[1];
2178
2179 oc = x86_cpu_class_by_name(name);
2180 if (oc == NULL) {
2181 error_setg(&error, "Unable to find CPU definition: %s", name);
2182 goto out;
2183 }
2184 xcc = X86_CPU_CLASS(oc);
2185
2186 if (xcc->kvm_required && !kvm_enabled()) {
2187 error_setg(&error, "CPU model '%s' requires KVM", name);
2188 goto out;
2189 }
2190
2191 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2192
2193 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2194 if (error) {
2195 goto out;
2196 }
2197
2198 out:
2199 if (error != NULL) {
2200 error_propagate(errp, error);
2201 if (cpu) {
2202 object_unref(OBJECT(cpu));
2203 cpu = NULL;
2204 }
2205 }
2206 g_strfreev(model_pieces);
2207 return cpu;
2208 }
2209
2210 X86CPU *cpu_x86_init(const char *cpu_model)
2211 {
2212 Error *error = NULL;
2213 X86CPU *cpu;
2214
2215 cpu = cpu_x86_create(cpu_model, &error);
2216 if (error) {
2217 goto out;
2218 }
2219
2220 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2221
2222 out:
2223 if (error) {
2224 error_report_err(error);
2225 if (cpu != NULL) {
2226 object_unref(OBJECT(cpu));
2227 cpu = NULL;
2228 }
2229 }
2230 return cpu;
2231 }
2232
2233 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2234 {
2235 X86CPUDefinition *cpudef = data;
2236 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2237
2238 xcc->cpu_def = cpudef;
2239 }
2240
2241 static void x86_register_cpudef_type(X86CPUDefinition *def)
2242 {
2243 char *typename = x86_cpu_type_name(def->name);
2244 TypeInfo ti = {
2245 .name = typename,
2246 .parent = TYPE_X86_CPU,
2247 .class_init = x86_cpu_cpudef_class_init,
2248 .class_data = def,
2249 };
2250
2251 type_register(&ti);
2252 g_free(typename);
2253 }
2254
2255 #if !defined(CONFIG_USER_ONLY)
2256
2257 void cpu_clear_apic_feature(CPUX86State *env)
2258 {
2259 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2260 }
2261
2262 #endif /* !CONFIG_USER_ONLY */
2263
2264 /* Initialize list of CPU models, filling some non-static fields if necessary
2265 */
2266 void x86_cpudef_setup(void)
2267 {
2268 int i, j;
2269 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2270
2271 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2272 X86CPUDefinition *def = &builtin_x86_defs[i];
2273
2274 /* Look for specific "cpudef" models that */
2275 /* have the QEMU version in .model_id */
2276 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2277 if (strcmp(model_with_versions[j], def->name) == 0) {
2278 pstrcpy(def->model_id, sizeof(def->model_id),
2279 "QEMU Virtual CPU version ");
2280 pstrcat(def->model_id, sizeof(def->model_id),
2281 qemu_hw_version());
2282 break;
2283 }
2284 }
2285 }
2286 }
2287
2288 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2289 uint32_t *eax, uint32_t *ebx,
2290 uint32_t *ecx, uint32_t *edx)
2291 {
2292 X86CPU *cpu = x86_env_get_cpu(env);
2293 CPUState *cs = CPU(cpu);
2294
2295 /* test if maximum index reached */
2296 if (index & 0x80000000) {
2297 if (index > env->cpuid_xlevel) {
2298 if (env->cpuid_xlevel2 > 0) {
2299 /* Handle the Centaur's CPUID instruction. */
2300 if (index > env->cpuid_xlevel2) {
2301 index = env->cpuid_xlevel2;
2302 } else if (index < 0xC0000000) {
2303 index = env->cpuid_xlevel;
2304 }
2305 } else {
2306 /* Intel documentation states that invalid EAX input will
2307 * return the same information as EAX=cpuid_level
2308 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2309 */
2310 index = env->cpuid_level;
2311 }
2312 }
2313 } else {
2314 if (index > env->cpuid_level)
2315 index = env->cpuid_level;
2316 }
2317
2318 switch(index) {
2319 case 0:
2320 *eax = env->cpuid_level;
2321 *ebx = env->cpuid_vendor1;
2322 *edx = env->cpuid_vendor2;
2323 *ecx = env->cpuid_vendor3;
2324 break;
2325 case 1:
2326 *eax = env->cpuid_version;
2327 *ebx = (cpu->apic_id << 24) |
2328 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2329 *ecx = env->features[FEAT_1_ECX];
2330 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2331 *ecx |= CPUID_EXT_OSXSAVE;
2332 }
2333 *edx = env->features[FEAT_1_EDX];
2334 if (cs->nr_cores * cs->nr_threads > 1) {
2335 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2336 *edx |= CPUID_HT;
2337 }
2338 break;
2339 case 2:
2340 /* cache info: needed for Pentium Pro compatibility */
2341 if (cpu->cache_info_passthrough) {
2342 host_cpuid(index, 0, eax, ebx, ecx, edx);
2343 break;
2344 }
2345 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2346 *ebx = 0;
2347 *ecx = 0;
2348 *edx = (L1D_DESCRIPTOR << 16) | \
2349 (L1I_DESCRIPTOR << 8) | \
2350 (L2_DESCRIPTOR);
2351 break;
2352 case 4:
2353 /* cache info: needed for Core compatibility */
2354 if (cpu->cache_info_passthrough) {
2355 host_cpuid(index, count, eax, ebx, ecx, edx);
2356 *eax &= ~0xFC000000;
2357 } else {
2358 *eax = 0;
2359 switch (count) {
2360 case 0: /* L1 dcache info */
2361 *eax |= CPUID_4_TYPE_DCACHE | \
2362 CPUID_4_LEVEL(1) | \
2363 CPUID_4_SELF_INIT_LEVEL;
2364 *ebx = (L1D_LINE_SIZE - 1) | \
2365 ((L1D_PARTITIONS - 1) << 12) | \
2366 ((L1D_ASSOCIATIVITY - 1) << 22);
2367 *ecx = L1D_SETS - 1;
2368 *edx = CPUID_4_NO_INVD_SHARING;
2369 break;
2370 case 1: /* L1 icache info */
2371 *eax |= CPUID_4_TYPE_ICACHE | \
2372 CPUID_4_LEVEL(1) | \
2373 CPUID_4_SELF_INIT_LEVEL;
2374 *ebx = (L1I_LINE_SIZE - 1) | \
2375 ((L1I_PARTITIONS - 1) << 12) | \
2376 ((L1I_ASSOCIATIVITY - 1) << 22);
2377 *ecx = L1I_SETS - 1;
2378 *edx = CPUID_4_NO_INVD_SHARING;
2379 break;
2380 case 2: /* L2 cache info */
2381 *eax |= CPUID_4_TYPE_UNIFIED | \
2382 CPUID_4_LEVEL(2) | \
2383 CPUID_4_SELF_INIT_LEVEL;
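/* EAX bits 25..14 hold the maximum number of logical processors sharing
 * this cache, minus one; the L2 is modeled as shared by all threads of
 * a core.
 */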
2384 if (cs->nr_threads > 1) {
2385 *eax |= (cs->nr_threads - 1) << 14;
2386 }
2387 *ebx = (L2_LINE_SIZE - 1) | \
2388 ((L2_PARTITIONS - 1) << 12) | \
2389 ((L2_ASSOCIATIVITY - 1) << 22);
2390 *ecx = L2_SETS - 1;
2391 *edx = CPUID_4_NO_INVD_SHARING;
2392 break;
2393 default: /* end of info */
2394 *eax = 0;
2395 *ebx = 0;
2396 *ecx = 0;
2397 *edx = 0;
2398 break;
2399 }
2400 }
2401
2402 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2403 if ((*eax & 31) && cs->nr_cores > 1) {
2404 *eax |= (cs->nr_cores - 1) << 26;
2405 }
2406 break;
2407 case 5:
2408 /* mwait info: needed for Core compatibility */
2409 *eax = 0; /* Smallest monitor-line size in bytes */
2410 *ebx = 0; /* Largest monitor-line size in bytes */
2411 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2412 *edx = 0;
2413 break;
2414 case 6:
2415 /* Thermal and Power Leaf */
2416 *eax = env->features[FEAT_6_EAX];
2417 *ebx = 0;
2418 *ecx = 0;
2419 *edx = 0;
2420 break;
2421 case 7:
2422 /* Structured Extended Feature Flags Enumeration Leaf */
2423 if (count == 0) {
2424 *eax = 0; /* Maximum ECX value for sub-leaves */
2425 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2426 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2427 *edx = 0; /* Reserved */
2428 } else {
2429 *eax = 0;
2430 *ebx = 0;
2431 *ecx = 0;
2432 *edx = 0;
2433 }
2434 break;
2435 case 9:
2436 /* Direct Cache Access Information Leaf */
2437 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2438 *ebx = 0;
2439 *ecx = 0;
2440 *edx = 0;
2441 break;
2442 case 0xA:
2443 /* Architectural Performance Monitoring Leaf */
2444 if (kvm_enabled() && cpu->enable_pmu) {
2445 KVMState *s = cs->kvm_state;
2446
2447 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2448 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2449 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2450 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2451 } else {
2452 *eax = 0;
2453 *ebx = 0;
2454 *ecx = 0;
2455 *edx = 0;
2456 }
2457 break;
2458 case 0xD: {
2459 KVMState *s = cs->kvm_state;
2460 uint64_t ena_mask;
2461 int i;
2462
2463 /* Processor Extended State */
2464 *eax = 0;
2465 *ebx = 0;
2466 *ecx = 0;
2467 *edx = 0;
2468 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2469 break;
2470 }
2471 if (kvm_enabled()) {
2472 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2473 ena_mask <<= 32;
2474 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2475 } else {
2476 ena_mask = -1;
2477 }
2478
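/* Sub-leaf 0 reports the supported XSAVE state components: EDX:EAX form a
 * 64-bit bitmap with bit i set if component i (0 = x87, 1 = SSE, 2 = AVX,
 * ...) can be enabled in XCR0, while ECX/EBX report the size of the save
 * area needed for those components.
 */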
2479 if (count == 0) {
2480 *ecx = 0x240;
2481 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2482 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2483 if ((env->features[esa->feature] & esa->bits) == esa->bits
2484 && ((ena_mask >> i) & 1) != 0) {
2485 if (i < 32) {
2486 *eax |= 1u << i;
2487 } else {
2488 *edx |= 1u << (i - 32);
2489 }
2490 *ecx = MAX(*ecx, esa->offset + esa->size);
2491 }
2492 }
2493 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2494 *ebx = *ecx;
2495 } else if (count == 1) {
2496 *eax = env->features[FEAT_XSAVE];
2497 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2498 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2499 if ((env->features[esa->feature] & esa->bits) == esa->bits
2500 && ((ena_mask >> count) & 1) != 0) {
2501 *eax = esa->size;
2502 *ebx = esa->offset;
2503 }
2504 }
2505 break;
2506 }
2507 case 0x80000000:
2508 *eax = env->cpuid_xlevel;
2509 *ebx = env->cpuid_vendor1;
2510 *edx = env->cpuid_vendor2;
2511 *ecx = env->cpuid_vendor3;
2512 break;
2513 case 0x80000001:
2514 *eax = env->cpuid_version;
2515 *ebx = 0;
2516 *ecx = env->features[FEAT_8000_0001_ECX];
2517 *edx = env->features[FEAT_8000_0001_EDX];
2518
2519 /* The Linux kernel checks for the CMPLegacy bit and
2520 * discards multiple thread information if it is set.
2521 * So don't set it here for Intel to make Linux guests happy.
2522 */
2523 if (cs->nr_cores * cs->nr_threads > 1) {
2524 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2525 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2526 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2527 *ecx |= 1 << 1; /* CmpLegacy bit */
2528 }
2529 }
2530 break;
2531 case 0x80000002:
2532 case 0x80000003:
2533 case 0x80000004:
2534 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2535 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2536 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2537 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2538 break;
2539 case 0x80000005:
2540 /* cache info (L1 cache) */
2541 if (cpu->cache_info_passthrough) {
2542 host_cpuid(index, 0, eax, ebx, ecx, edx);
2543 break;
2544 }
2545 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2546 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2547 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2548 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2549 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2550 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2551 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2552 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2553 break;
2554 case 0x80000006:
2555 /* cache info (L2 cache) */
2556 if (cpu->cache_info_passthrough) {
2557 host_cpuid(index, 0, eax, ebx, ecx, edx);
2558 break;
2559 }
2560 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2561 (L2_DTLB_2M_ENTRIES << 16) | \
2562 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2563 (L2_ITLB_2M_ENTRIES);
2564 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2565 (L2_DTLB_4K_ENTRIES << 16) | \
2566 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2567 (L2_ITLB_4K_ENTRIES);
2568 *ecx = (L2_SIZE_KB_AMD << 16) | \
2569 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2570 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2571 *edx = ((L3_SIZE_KB/512) << 18) | \
2572 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2573 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2574 break;
2575 case 0x80000007:
2576 *eax = 0;
2577 *ebx = 0;
2578 *ecx = 0;
2579 *edx = env->features[FEAT_8000_0007_EDX];
2580 break;
2581 case 0x80000008:
2582 /* virtual & phys address size in low 2 bytes. */
2583 /* XXX: This value must match the one used in the MMU code. */
2584 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2585 /* 64 bit processor */
2586 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2587 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2588 } else {
2589 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2590 *eax = 0x00000024; /* 36 bits physical */
2591 } else {
2592 *eax = 0x00000020; /* 32 bits physical */
2593 }
2594 }
2595 *ebx = 0;
2596 *ecx = 0;
2597 *edx = 0;
2598 if (cs->nr_cores * cs->nr_threads > 1) {
2599 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2600 }
2601 break;
2602 case 0x8000000A:
2603 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2604 *eax = 0x00000001; /* SVM Revision */
2605 *ebx = 0x00000010; /* nr of ASIDs */
2606 *ecx = 0;
2607 *edx = env->features[FEAT_SVM]; /* optional features */
2608 } else {
2609 *eax = 0;
2610 *ebx = 0;
2611 *ecx = 0;
2612 *edx = 0;
2613 }
2614 break;
2615 case 0xC0000000:
2616 *eax = env->cpuid_xlevel2;
2617 *ebx = 0;
2618 *ecx = 0;
2619 *edx = 0;
2620 break;
2621 case 0xC0000001:
2622 /* Support for VIA CPU's CPUID instruction */
2623 *eax = env->cpuid_version;
2624 *ebx = 0;
2625 *ecx = 0;
2626 *edx = env->features[FEAT_C000_0001_EDX];
2627 break;
2628 case 0xC0000002:
2629 case 0xC0000003:
2630 case 0xC0000004:
2631 /* Reserved for future use, currently filled with zero */
2632 *eax = 0;
2633 *ebx = 0;
2634 *ecx = 0;
2635 *edx = 0;
2636 break;
2637 default:
2638 /* reserved values: zero */
2639 *eax = 0;
2640 *ebx = 0;
2641 *ecx = 0;
2642 *edx = 0;
2643 break;
2644 }
2645 }
2646
2647 /* CPUClass::reset() */
2648 static void x86_cpu_reset(CPUState *s)
2649 {
2650 X86CPU *cpu = X86_CPU(s);
2651 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2652 CPUX86State *env = &cpu->env;
2653 target_ulong cr4;
2654 uint64_t xcr0;
2655 int i;
2656
2657 xcc->parent_reset(s);
2658
2659 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2660
2661 tlb_flush(s, 1);
2662
2663 env->old_exception = -1;
2664
2665 /* init to reset state */
2666
2667 #ifdef CONFIG_SOFTMMU
2668 env->hflags |= HF_SOFTMMU_MASK;
2669 #endif
2670 env->hflags2 |= HF2_GIF_MASK;
2671
2672 cpu_x86_update_cr0(env, 0x60000010);
2673 env->a20_mask = ~0x0;
2674 env->smbase = 0x30000;
2675
2676 env->idt.limit = 0xffff;
2677 env->gdt.limit = 0xffff;
2678 env->ldt.limit = 0xffff;
2679 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2680 env->tr.limit = 0xffff;
2681 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2682
2683 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2684 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2685 DESC_R_MASK | DESC_A_MASK);
2686 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2687 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2688 DESC_A_MASK);
2689 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2690 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2691 DESC_A_MASK);
2692 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2693 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2694 DESC_A_MASK);
2695 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2696 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2697 DESC_A_MASK);
2698 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2699 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2700 DESC_A_MASK);
2701
2702 env->eip = 0xfff0;
2703 env->regs[R_EDX] = env->cpuid_version;
2704
2705 env->eflags = 0x2;
2706
2707 /* FPU init */
2708 for (i = 0; i < 8; i++) {
2709 env->fptags[i] = 1;
2710 }
2711 cpu_set_fpuc(env, 0x37f);
2712
2713 env->mxcsr = 0x1f80;
2714 /* All units are in INIT state. */
2715 env->xstate_bv = 0;
2716
2717 env->pat = 0x0007040600070406ULL;
2718 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2719
2720 memset(env->dr, 0, sizeof(env->dr));
2721 env->dr[6] = DR6_FIXED_1;
2722 env->dr[7] = DR7_FIXED_1;
2723 cpu_breakpoint_remove_all(s, BP_CPU);
2724 cpu_watchpoint_remove_all(s, BP_CPU);
2725
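/* At reset only the x87 state component is enabled in XCR0; note that
 * XSTATE_FP_MASK is a bit mask (bit 0 of XCR0, which architecturally must
 * always be set), not a bit position.
 */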
2726 cr4 = 0;
2727 xcr0 = XSTATE_FP_MASK;
2728
2729 #ifdef CONFIG_USER_ONLY
2730 /* Enable all the features for user-mode. */
2731 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2732 xcr0 |= XSTATE_SSE_MASK;
2733 }
2734 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_MPX) {
2735 xcr0 |= XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK;
2736 }
2737 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2738 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2739 }
2740 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2741 cr4 |= CR4_FSGSBASE_MASK;
2742 }
2743 #endif
2744
2745 env->xcr0 = xcr0;
2746 cpu_x86_update_cr4(env, cr4);
2747
2748 /*
2749 * SDM 11.11.5 requires:
2750 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2751 * - IA32_MTRR_PHYSMASKn.V = 0
2752 * All other bits are undefined. For simplification, zero it all.
2753 */
2754 env->mtrr_deftype = 0;
2755 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2756 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2757
2758 #if !defined(CONFIG_USER_ONLY)
2759 /* We hard-wire the BSP to the first CPU. */
2760 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2761
2762 s->halted = !cpu_is_bsp(cpu);
2763
2764 if (kvm_enabled()) {
2765 kvm_arch_reset_vcpu(cpu);
2766 }
2767 #endif
2768 }
2769
2770 #ifndef CONFIG_USER_ONLY
2771 bool cpu_is_bsp(X86CPU *cpu)
2772 {
2773 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2774 }
2775
2776 /* TODO: remove me, when reset over QOM tree is implemented */
2777 static void x86_cpu_machine_reset_cb(void *opaque)
2778 {
2779 X86CPU *cpu = opaque;
2780 cpu_reset(CPU(cpu));
2781 }
2782 #endif
2783
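/* Enable the machine-check architecture when the CPU reports family >= 6
 * and both the MCE and MCA CPUID bits: advertise the default number of
 * banks and mark every bank's control register as fully enabled.
 */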
2784 static void mce_init(X86CPU *cpu)
2785 {
2786 CPUX86State *cenv = &cpu->env;
2787 unsigned int bank;
2788
2789 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2790 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2791 (CPUID_MCE | CPUID_MCA)) {
2792 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2793 cenv->mcg_ctl = ~(uint64_t)0;
2794 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2795 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2796 }
2797 }
2798 }
2799
2800 #ifndef CONFIG_USER_ONLY
2801 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2802 {
2803 APICCommonState *apic;
2804 const char *apic_type = "apic";
2805
2806 if (kvm_apic_in_kernel()) {
2807 apic_type = "kvm-apic";
2808 } else if (xen_enabled()) {
2809 apic_type = "xen-apic";
2810 }
2811
2812 cpu->apic_state = DEVICE(object_new(apic_type));
2813
2814 object_property_add_child(OBJECT(cpu), "apic",
2815 OBJECT(cpu->apic_state), NULL);
2816 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2817 /* TODO: convert to link<> */
2818 apic = APIC_COMMON(cpu->apic_state);
2819 apic->cpu = cpu;
2820 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2821 }
2822
2823 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2824 {
2825 APICCommonState *apic;
2826 static bool apic_mmio_map_once;
2827
2828 if (cpu->apic_state == NULL) {
2829 return;
2830 }
2831 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2832 errp);
2833
2834 /* Map APIC MMIO area */
2835 apic = APIC_COMMON(cpu->apic_state);
2836 if (!apic_mmio_map_once) {
2837 memory_region_add_subregion_overlap(get_system_memory(),
2838 apic->apicbase &
2839 MSR_IA32_APICBASE_BASE,
2840 &apic->io_memory,
2841 0x1000);
2842 apic_mmio_map_once = true;
2843 }
2844 }
2845
2846 static void x86_cpu_machine_done(Notifier *n, void *unused)
2847 {
2848 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2849 MemoryRegion *smram =
2850 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2851
2852 if (smram) {
2853 cpu->smram = g_new(MemoryRegion, 1);
2854 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2855 smram, 0, 1ull << 32);
2856 memory_region_set_enabled(cpu->smram, false);
2857 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2858 }
2859 }
2860 #else
2861 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2862 {
2863 }
2864 #endif
2865
2866
2867 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2868 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2869 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2870 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2871 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2872 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2873 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2874 {
2875 CPUState *cs = CPU(dev);
2876 X86CPU *cpu = X86_CPU(dev);
2877 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2878 CPUX86State *env = &cpu->env;
2879 Error *local_err = NULL;
2880 static bool ht_warned;
2881
2882 if (cpu->apic_id < 0) {
2883 error_setg(errp, "apic-id property was not initialized properly");
2884 return;
2885 }
2886
2887 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2888 env->cpuid_level = 7;
2889 }
2890
2891 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2892 * CPUID[1].EDX.
2893 */
2894 if (IS_AMD_CPU(env)) {
2895 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2896 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2897 & CPUID_EXT2_AMD_ALIASES);
2898 }
2899
2900
2901 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2902 error_setg(&local_err,
2903 kvm_enabled() ?
2904 "Host doesn't support requested features" :
2905 "TCG doesn't support requested features");
2906 goto out;
2907 }
2908
2909 #ifndef CONFIG_USER_ONLY
2910 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2911
2912 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2913 x86_cpu_apic_create(cpu, &local_err);
2914 if (local_err != NULL) {
2915 goto out;
2916 }
2917 }
2918 #endif
2919
2920 mce_init(cpu);
2921
2922 #ifndef CONFIG_USER_ONLY
2923 if (tcg_enabled()) {
2924 AddressSpace *newas = g_new(AddressSpace, 1);
2925
2926 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2927 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2928
2929 /* Outer container... */
2930 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2931 memory_region_set_enabled(cpu->cpu_as_root, true);
2932
2933 /* ... with two regions inside: normal system memory with low
2934 * priority, and...
2935 */
2936 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2937 get_system_memory(), 0, ~0ull);
2938 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2939 memory_region_set_enabled(cpu->cpu_as_mem, true);
2940 address_space_init(newas, cpu->cpu_as_root, "CPU");
2941 cs->num_ases = 1;
2942 cpu_address_space_init(cs, newas, 0);
2943
2944 /* ... SMRAM with higher priority, linked from /machine/smram. */
2945 cpu->machine_done.notify = x86_cpu_machine_done;
2946 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2947 }
2948 #endif
2949
2950 qemu_init_vcpu(cs);
2951
2952 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2953 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2954 * based on inputs (sockets, cores, threads), it is still better to give
2955 * users a warning.
2956 *
2957 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2958 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
2959 */
2960 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2961 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2962 " -smp options properly.");
2963 ht_warned = true;
2964 }
2965
2966 x86_cpu_apic_realize(cpu, &local_err);
2967 if (local_err != NULL) {
2968 goto out;
2969 }
2970 cpu_reset(cs);
2971
2972 xcc->parent_realize(dev, &local_err);
2973
2974 out:
2975 if (local_err != NULL) {
2976 error_propagate(errp, local_err);
2977 return;
2978 }
2979 }
2980
2981 typedef struct BitProperty {
2982 uint32_t *ptr;
2983 uint32_t mask;
2984 } BitProperty;
2985
2986 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
2987 void *opaque, Error **errp)
2988 {
2989 BitProperty *fp = opaque;
2990 bool value = (*fp->ptr & fp->mask) == fp->mask;
2991 visit_type_bool(v, name, &value, errp);
2992 }
2993
2994 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
2995 void *opaque, Error **errp)
2996 {
2997 DeviceState *dev = DEVICE(obj);
2998 BitProperty *fp = opaque;
2999 Error *local_err = NULL;
3000 bool value;
3001
3002 if (dev->realized) {
3003 qdev_prop_set_after_realize(dev, name, errp);
3004 return;
3005 }
3006
3007 visit_type_bool(v, name, &value, &local_err);
3008 if (local_err) {
3009 error_propagate(errp, local_err);
3010 return;
3011 }
3012
3013 if (value) {
3014 *fp->ptr |= fp->mask;
3015 } else {
3016 *fp->ptr &= ~fp->mask;
3017 }
3018 }
3019
3020 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3021 void *opaque)
3022 {
3023 BitProperty *prop = opaque;
3024 g_free(prop);
3025 }
3026
3027 /* Register a boolean property to get/set a single bit in a uint32_t field.
3028 *
3029 * The same property name can be registered multiple times to make it affect
3030 * multiple bits in the same FeatureWord. In that case, the getter will return
3031 * true only if all bits are set.
3032 */
3033 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3034 const char *prop_name,
3035 uint32_t *field,
3036 int bitnr)
3037 {
3038 BitProperty *fp;
3039 ObjectProperty *op;
3040 uint32_t mask = (1UL << bitnr);
3041
3042 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3043 if (op) {
3044 fp = op->opaque;
3045 assert(fp->ptr == field);
3046 fp->mask |= mask;
3047 } else {
3048 fp = g_new0(BitProperty, 1);
3049 fp->ptr = field;
3050 fp->mask = mask;
3051 object_property_add(OBJECT(cpu), prop_name, "bool",
3052 x86_cpu_get_bit_prop,
3053 x86_cpu_set_bit_prop,
3054 x86_cpu_release_bit_prop, fp, &error_abort);
3055 }
3056 }
3057
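/* Register QOM properties for the named bit 'bitnr' of feature word 'w'.
 * A feat_names entry may list several names separated by '|'; the first
 * becomes the canonical bool property and the rest are added as aliases.
 */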
3058 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3059 FeatureWord w,
3060 int bitnr)
3061 {
3062 Object *obj = OBJECT(cpu);
3063 int i;
3064 char **names;
3065 FeatureWordInfo *fi = &feature_word_info[w];
3066
3067 if (!fi->feat_names) {
3068 return;
3069 }
3070 if (!fi->feat_names[bitnr]) {
3071 return;
3072 }
3073
3074 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3075
3076 feat2prop(names[0]);
3077 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3078
3079 for (i = 1; names[i]; i++) {
3080 feat2prop(names[i]);
3081 object_property_add_alias(obj, names[i], obj, names[0],
3082 &error_abort);
3083 }
3084
3085 g_strfreev(names);
3086 }
3087
3088 static void x86_cpu_initfn(Object *obj)
3089 {
3090 CPUState *cs = CPU(obj);
3091 X86CPU *cpu = X86_CPU(obj);
3092 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3093 CPUX86State *env = &cpu->env;
3094 FeatureWord w;
3095 static int inited;
3096
3097 cs->env_ptr = env;
3098 cpu_exec_init(cs, &error_abort);
3099
3100 object_property_add(obj, "family", "int",
3101 x86_cpuid_version_get_family,
3102 x86_cpuid_version_set_family, NULL, NULL, NULL);
3103 object_property_add(obj, "model", "int",
3104 x86_cpuid_version_get_model,
3105 x86_cpuid_version_set_model, NULL, NULL, NULL);
3106 object_property_add(obj, "stepping", "int",
3107 x86_cpuid_version_get_stepping,
3108 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3109 object_property_add_str(obj, "vendor",
3110 x86_cpuid_get_vendor,
3111 x86_cpuid_set_vendor, NULL);
3112 object_property_add_str(obj, "model-id",
3113 x86_cpuid_get_model_id,
3114 x86_cpuid_set_model_id, NULL);
3115 object_property_add(obj, "tsc-frequency", "int",
3116 x86_cpuid_get_tsc_freq,
3117 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3118 object_property_add(obj, "apic-id", "int",
3119 x86_cpuid_get_apic_id,
3120 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3121 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3122 x86_cpu_get_feature_words,
3123 NULL, NULL, (void *)env->features, NULL);
3124 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3125 x86_cpu_get_feature_words,
3126 NULL, NULL, (void *)cpu->filtered_features, NULL);
3127
3128 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3129
3130 #ifndef CONFIG_USER_ONLY
3131 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3132 cpu->apic_id = -1;
3133 #endif
3134
3135 for (w = 0; w < FEATURE_WORDS; w++) {
3136 int bitnr;
3137
3138 for (bitnr = 0; bitnr < 32; bitnr++) {
3139 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3140 }
3141 }
3142
3143 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3144
3145 /* init various static tables used in TCG mode */
3146 if (tcg_enabled() && !inited) {
3147 inited = 1;
3148 tcg_x86_init();
3149 }
3150 }
3151
3152 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3153 {
3154 X86CPU *cpu = X86_CPU(cs);
3155
3156 return cpu->apic_id;
3157 }
3158
3159 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3160 {
3161 X86CPU *cpu = X86_CPU(cs);
3162
3163 return cpu->env.cr[0] & CR0_PG_MASK;
3164 }
3165
3166 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3167 {
3168 X86CPU *cpu = X86_CPU(cs);
3169
3170 cpu->env.eip = value;
3171 }
3172
3173 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3174 {
3175 X86CPU *cpu = X86_CPU(cs);
3176
3177 cpu->env.eip = tb->pc - tb->cs_base;
3178 }
3179
3180 static bool x86_cpu_has_work(CPUState *cs)
3181 {
3182 X86CPU *cpu = X86_CPU(cs);
3183 CPUX86State *env = &cpu->env;
3184
3185 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3186 CPU_INTERRUPT_POLL)) &&
3187 (env->eflags & IF_MASK)) ||
3188 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3189 CPU_INTERRUPT_INIT |
3190 CPU_INTERRUPT_SIPI |
3191 CPU_INTERRUPT_MCE)) ||
3192 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3193 !(env->hflags & HF_SMM_MASK));
3194 }
3195
3196 static Property x86_cpu_properties[] = {
3197 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3198 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3199 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3200 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3201 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3202 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3203 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3204 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3205 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3206 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3207 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3208 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3209 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3210 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3211 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3212 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3213 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3214 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3215 DEFINE_PROP_END_OF_LIST()
3216 };
3217
3218 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3219 {
3220 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3221 CPUClass *cc = CPU_CLASS(oc);
3222 DeviceClass *dc = DEVICE_CLASS(oc);
3223
3224 xcc->parent_realize = dc->realize;
3225 dc->realize = x86_cpu_realizefn;
3226 dc->props = x86_cpu_properties;
3227
3228 xcc->parent_reset = cc->reset;
3229 cc->reset = x86_cpu_reset;
3230 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3231
3232 cc->class_by_name = x86_cpu_class_by_name;
3233 cc->parse_features = x86_cpu_parse_featurestr;
3234 cc->has_work = x86_cpu_has_work;
3235 cc->do_interrupt = x86_cpu_do_interrupt;
3236 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3237 cc->dump_state = x86_cpu_dump_state;
3238 cc->set_pc = x86_cpu_set_pc;
3239 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3240 cc->gdb_read_register = x86_cpu_gdb_read_register;
3241 cc->gdb_write_register = x86_cpu_gdb_write_register;
3242 cc->get_arch_id = x86_cpu_get_arch_id;
3243 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3244 #ifdef CONFIG_USER_ONLY
3245 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3246 #else
3247 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3248 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3249 cc->write_elf64_note = x86_cpu_write_elf64_note;
3250 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3251 cc->write_elf32_note = x86_cpu_write_elf32_note;
3252 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3253 cc->vmsd = &vmstate_x86_cpu;
3254 #endif
3255 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3256 #ifndef CONFIG_USER_ONLY
3257 cc->debug_excp_handler = breakpoint_handler;
3258 #endif
3259 cc->cpu_exec_enter = x86_cpu_exec_enter;
3260 cc->cpu_exec_exit = x86_cpu_exec_exit;
3261
3262 /*
3263 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3264 * object in cpus -> dangling pointer after final object_unref().
3265 */
3266 dc->cannot_destroy_with_object_finalize_yet = true;
3267 }
3268
3269 static const TypeInfo x86_cpu_type_info = {
3270 .name = TYPE_X86_CPU,
3271 .parent = TYPE_CPU,
3272 .instance_size = sizeof(X86CPU),
3273 .instance_init = x86_cpu_initfn,
3274 .abstract = true,
3275 .class_size = sizeof(X86CPUClass),
3276 .class_init = x86_cpu_common_class_init,
3277 };
3278
3279 static void x86_cpu_register_types(void)
3280 {
3281 int i;
3282
3283 type_register_static(&x86_cpu_type_info);
3284 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3285 x86_register_cpudef_type(&builtin_x86_defs[i]);
3286 }
3287 #ifdef CONFIG_KVM
3288 type_register_static(&host_x86_cpu_type_info);
3289 #endif
3290 }
3291
3292 type_init(x86_cpu_register_types)