target-i386: Enable control registers for MPX
[mirror_qemu.git] / target-i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
23
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
33
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
38
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
43
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
53 /* Cache topology CPUID constants: */
54
55 /* CPUID Leaf 2 Descriptors */
56
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60
61
62 /* CPUID Leaf 4 constants: */
63
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
68
69 #define CPUID_4_LEVEL(l) ((l) << 5)
70
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
73
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
78
79 #define ASSOC_FULL 0xFF
80
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
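/* Example derived from the macro above: AMD_ENC_ASSOC(8) encodes as 0x6 and
 * AMD_ENC_ASSOC(ASSOC_FULL) as 0xF; associativity values not listed encode
 * as 0 (invalid).
 */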
94
95
96 /* Definitions of the hardcoded cache entries we expose: */
97
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
109
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
121
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
133
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
139
140 /* TLB definitions: */
141
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
146
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
151
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
156
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
161
162
163
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
166 {
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
172 }
173 dst[CPUID_VENDOR_SZ] = '\0';
174 }
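/* Example: CPUID leaf 0 returns the vendor string in EBX/EDX/ECX order, so on
 * an Intel host the words "Genu" (EBX), "ineI" (EDX) and "ntel" (ECX) are
 * reassembled here into the 12-character string "GenuineIntel".
 */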
175
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
179 */
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
189 };
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
199 };
200 /* Feature names that are already defined in feature_name[] but are set in
201  * CPUID[8000_0001].EDX on AMD CPUs don't have their names in
202  * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203  * if and only if the CPU vendor is AMD.
204  */
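/* For example, "mmx" is named only in feature_name[] above; for AMD-vendor
 * CPU models the corresponding CPUID[8000_0001].EDX bit is derived from
 * CPUID[1].EDX (see the CPUID_EXT2_AMD_ALIASES handling) rather than being
 * named again here.
 */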
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
214 };
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
224 };
225
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
235 };
236
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
246 };
247
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257 };
258
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
263 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
264 };
265
266 static const char *cpuid_7_0_ecx_feature_name[] = {
267 NULL, NULL, NULL, "pku",
268 "ospke", NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
275 };
276
277 static const char *cpuid_apm_edx_feature_name[] = {
278 NULL, NULL, NULL, NULL,
279 NULL, NULL, NULL, NULL,
280 "invtsc", NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286 };
287
288 static const char *cpuid_xsave_feature_name[] = {
289 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
297 };
298
299 static const char *cpuid_6_feature_name[] = {
300 NULL, NULL, "arat", NULL,
301 NULL, NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
305 NULL, NULL, NULL, NULL,
306 NULL, NULL, NULL, NULL,
307 NULL, NULL, NULL, NULL,
308 };
309
310 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
311 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
312 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
313 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
314 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
315 CPUID_PSE36 | CPUID_FXSR)
316 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
317 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
318 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
319 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
320 CPUID_PAE | CPUID_SEP | CPUID_APIC)
321
322 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
323 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
324 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
325 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
326 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
327 /* partly implemented:
328 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
329 /* missing:
330 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
331 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
332 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
333 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
334 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
335 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
336 /* missing:
337 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
338 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
339 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
340 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
341 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
342
343 #ifdef TARGET_X86_64
344 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
345 #else
346 #define TCG_EXT2_X86_64_FEATURES 0
347 #endif
348
349 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
350 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
351 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
352 TCG_EXT2_X86_64_FEATURES)
353 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
354 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
355 #define TCG_EXT4_FEATURES 0
356 #define TCG_SVM_FEATURES 0
357 #define TCG_KVM_FEATURES 0
358 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
359 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
360 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
361 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX)
362 /* missing:
363 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
364 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
365 CPUID_7_0_EBX_RDSEED */
366 #define TCG_7_0_ECX_FEATURES 0
367 #define TCG_APM_FEATURES 0
368 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
369 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
370 /* missing:
371 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
372
373 typedef struct FeatureWordInfo {
374 const char **feat_names;
375 uint32_t cpuid_eax; /* Input EAX for CPUID */
376 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
377 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
378 int cpuid_reg; /* output register (R_* constant) */
379 uint32_t tcg_features; /* Feature flags supported by TCG */
380 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
381 } FeatureWordInfo;
382
383 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
384 [FEAT_1_EDX] = {
385 .feat_names = feature_name,
386 .cpuid_eax = 1, .cpuid_reg = R_EDX,
387 .tcg_features = TCG_FEATURES,
388 },
389 [FEAT_1_ECX] = {
390 .feat_names = ext_feature_name,
391 .cpuid_eax = 1, .cpuid_reg = R_ECX,
392 .tcg_features = TCG_EXT_FEATURES,
393 },
394 [FEAT_8000_0001_EDX] = {
395 .feat_names = ext2_feature_name,
396 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
397 .tcg_features = TCG_EXT2_FEATURES,
398 },
399 [FEAT_8000_0001_ECX] = {
400 .feat_names = ext3_feature_name,
401 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
402 .tcg_features = TCG_EXT3_FEATURES,
403 },
404 [FEAT_C000_0001_EDX] = {
405 .feat_names = ext4_feature_name,
406 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
407 .tcg_features = TCG_EXT4_FEATURES,
408 },
409 [FEAT_KVM] = {
410 .feat_names = kvm_feature_name,
411 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
412 .tcg_features = TCG_KVM_FEATURES,
413 },
414 [FEAT_SVM] = {
415 .feat_names = svm_feature_name,
416 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
417 .tcg_features = TCG_SVM_FEATURES,
418 },
419 [FEAT_7_0_EBX] = {
420 .feat_names = cpuid_7_0_ebx_feature_name,
421 .cpuid_eax = 7,
422 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
423 .cpuid_reg = R_EBX,
424 .tcg_features = TCG_7_0_EBX_FEATURES,
425 },
426 [FEAT_7_0_ECX] = {
427 .feat_names = cpuid_7_0_ecx_feature_name,
428 .cpuid_eax = 7,
429 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
430 .cpuid_reg = R_ECX,
431 .tcg_features = TCG_7_0_ECX_FEATURES,
432 },
433 [FEAT_8000_0007_EDX] = {
434 .feat_names = cpuid_apm_edx_feature_name,
435 .cpuid_eax = 0x80000007,
436 .cpuid_reg = R_EDX,
437 .tcg_features = TCG_APM_FEATURES,
438 .unmigratable_flags = CPUID_APM_INVTSC,
439 },
440 [FEAT_XSAVE] = {
441 .feat_names = cpuid_xsave_feature_name,
442 .cpuid_eax = 0xd,
443 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
444 .cpuid_reg = R_EAX,
445 .tcg_features = TCG_XSAVE_FEATURES,
446 },
447 [FEAT_6_EAX] = {
448 .feat_names = cpuid_6_feature_name,
449 .cpuid_eax = 6, .cpuid_reg = R_EAX,
450 .tcg_features = TCG_6_EAX_FEATURES,
451 },
452 };
453
454 typedef struct X86RegisterInfo32 {
455 /* Name of register */
456 const char *name;
457     /* QAPI enum value for this register */
458 X86CPURegister32 qapi_enum;
459 } X86RegisterInfo32;
460
461 #define REGISTER(reg) \
462 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
463 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
464 REGISTER(EAX),
465 REGISTER(ECX),
466 REGISTER(EDX),
467 REGISTER(EBX),
468 REGISTER(ESP),
469 REGISTER(EBP),
470 REGISTER(ESI),
471 REGISTER(EDI),
472 };
473 #undef REGISTER
474
475 const ExtSaveArea x86_ext_save_areas[] = {
476 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
477 .offset = 0x240, .size = 0x100 },
478 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
479 .offset = 0x3c0, .size = 0x40 },
480 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
481 .offset = 0x400, .size = 0x40 },
482 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
483 .offset = 0x440, .size = 0x40 },
484 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
485 .offset = 0x480, .size = 0x200 },
486 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
487 .offset = 0x680, .size = 0x400 },
488 [9] = { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
489 .offset = 0xA80, .size = 0x8 },
490 };
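/* The array index above is the XSAVE state component number (2 = AVX,
 * 3/4 = MPX BNDREGS/BNDCSR, 5-7 = AVX-512 state, 9 = PKRU), and offset/size
 * describe where each component lives in the standard-format XSAVE area.
 */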
491
492 const char *get_register_name_32(unsigned int reg)
493 {
494 if (reg >= CPU_NB_REGS32) {
495 return NULL;
496 }
497 return x86_reg_info_32[reg].name;
498 }
499
500 /*
501 * Returns the set of feature flags that are supported and migratable by
502 * QEMU, for a given FeatureWord.
503 */
504 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
505 {
506 FeatureWordInfo *wi = &feature_word_info[w];
507 uint32_t r = 0;
508 int i;
509
510 for (i = 0; i < 32; i++) {
511 uint32_t f = 1U << i;
512 /* If the feature name is unknown, it is not supported by QEMU yet */
513 if (!wi->feat_names[i]) {
514 continue;
515 }
516 /* Skip features known to QEMU, but explicitly marked as unmigratable */
517 if (wi->unmigratable_flags & f) {
518 continue;
519 }
520 r |= f;
521 }
522 return r;
523 }
524
525 void host_cpuid(uint32_t function, uint32_t count,
526 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
527 {
528 uint32_t vec[4];
529
530 #ifdef __x86_64__
531 asm volatile("cpuid"
532 : "=a"(vec[0]), "=b"(vec[1]),
533 "=c"(vec[2]), "=d"(vec[3])
534 : "0"(function), "c"(count) : "cc");
535 #elif defined(__i386__)
536 asm volatile("pusha \n\t"
537 "cpuid \n\t"
538 "mov %%eax, 0(%2) \n\t"
539 "mov %%ebx, 4(%2) \n\t"
540 "mov %%ecx, 8(%2) \n\t"
541 "mov %%edx, 12(%2) \n\t"
542 "popa"
543 : : "a"(function), "c"(count), "S"(vec)
544 : "memory", "cc");
545 #else
546 abort();
547 #endif
548
549 if (eax)
550 *eax = vec[0];
551 if (ebx)
552 *ebx = vec[1];
553 if (ecx)
554 *ecx = vec[2];
555 if (edx)
556 *edx = vec[3];
557 }
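/* Example use: host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL) reads the
 * host's highest extended CPUID leaf into eax; output pointers that are NULL
 * are simply ignored.
 */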
558
559 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
560
561 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
562 * a substring. ex if !NULL points to the first char after a substring,
563  * otherwise the string is assumed to be sized by a terminating nul.
564 * Return lexical ordering of *s1:*s2.
565 */
566 static int sstrcmp(const char *s1, const char *e1,
567 const char *s2, const char *e2)
568 {
569 for (;;) {
570 if (!*s1 || !*s2 || *s1 != *s2)
571 return (*s1 - *s2);
572 ++s1, ++s2;
573 if (s1 == e1 && s2 == e2)
574 return (0);
575 else if (s1 == e1)
576 return (*s2);
577 else if (s2 == e2)
578 return (*s1);
579 }
580 }
581
582 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
583 * '|' delimited (possibly empty) strings in which case search for a match
584 * within the alternatives proceeds left to right. Return 0 for success,
585 * non-zero otherwise.
586 */
587 static int altcmp(const char *s, const char *e, const char *altstr)
588 {
589 const char *p, *q;
590
591 for (q = p = altstr; ; ) {
592 while (*p && *p != '|')
593 ++p;
594 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
595 return (0);
596 if (!*p)
597 return (1);
598 else
599 q = ++p;
600 }
601 }
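/* For example, altcmp("sse4_1", NULL, "sse4.1|sse4_1") returns 0 (a match on
 * the second alternative), while altcmp("sse5", NULL, "sse4.1|sse4_1")
 * returns non-zero.
 */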
602
603 /* search featureset for flag *[s..e), if found set corresponding bit in
604 * *pval and return true, otherwise return false
605 */
606 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
607 const char **featureset)
608 {
609 uint32_t mask;
610 const char **ppc;
611 bool found = false;
612
613 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
614 if (*ppc && !altcmp(s, e, *ppc)) {
615 *pval |= mask;
616 found = true;
617 }
618 }
619 return found;
620 }
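/* For example, lookup_feature(&val, "sse4.2", NULL, ext_feature_name) sets
 * bit 20 of val (CPUID_EXT_SSE42), because "sse4.2|sse4_2" is the entry at
 * index 20 of ext_feature_name[].
 */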
621
622 static void add_flagname_to_bitmaps(const char *flagname,
623 FeatureWordArray words,
624 Error **errp)
625 {
626 FeatureWord w;
627 for (w = 0; w < FEATURE_WORDS; w++) {
628 FeatureWordInfo *wi = &feature_word_info[w];
629 if (wi->feat_names &&
630 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
631 break;
632 }
633 }
634 if (w == FEATURE_WORDS) {
635 error_setg(errp, "CPU feature %s not found", flagname);
636 }
637 }
638
639 /* CPU class name definitions: */
640
641 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
642 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
643
644 /* Return type name for a given CPU model name
645 * Caller is responsible for freeing the returned string.
646 */
647 static char *x86_cpu_type_name(const char *model_name)
648 {
649 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
650 }
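/* For example, x86_cpu_type_name("qemu64") returns "qemu64" followed by
 * X86_CPU_TYPE_SUFFIX (e.g. "qemu64-x86_64-cpu" on a 64-bit target); the
 * caller must g_free() the result.
 */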
651
652 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
653 {
654 ObjectClass *oc;
655 char *typename;
656
657 if (cpu_model == NULL) {
658 return NULL;
659 }
660
661 typename = x86_cpu_type_name(cpu_model);
662 oc = object_class_by_name(typename);
663 g_free(typename);
664 return oc;
665 }
666
667 struct X86CPUDefinition {
668 const char *name;
669 uint32_t level;
670 uint32_t xlevel;
671 uint32_t xlevel2;
672 /* vendor is zero-terminated, 12 character ASCII string */
673 char vendor[CPUID_VENDOR_SZ + 1];
674 int family;
675 int model;
676 int stepping;
677 FeatureWordArray features;
678 char model_id[48];
679 };
680
681 static X86CPUDefinition builtin_x86_defs[] = {
682 {
683 .name = "qemu64",
684 .level = 0xd,
685 .vendor = CPUID_VENDOR_AMD,
686 .family = 6,
687 .model = 6,
688 .stepping = 3,
689 .features[FEAT_1_EDX] =
690 PPRO_FEATURES |
691 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
692 CPUID_PSE36,
693 .features[FEAT_1_ECX] =
694 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
695 .features[FEAT_8000_0001_EDX] =
696 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
697 .features[FEAT_8000_0001_ECX] =
698 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
699 .xlevel = 0x8000000A,
700 },
701 {
702 .name = "phenom",
703 .level = 5,
704 .vendor = CPUID_VENDOR_AMD,
705 .family = 16,
706 .model = 2,
707 .stepping = 3,
708 /* Missing: CPUID_HT */
709 .features[FEAT_1_EDX] =
710 PPRO_FEATURES |
711 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
712 CPUID_PSE36 | CPUID_VME,
713 .features[FEAT_1_ECX] =
714 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
715 CPUID_EXT_POPCNT,
716 .features[FEAT_8000_0001_EDX] =
717 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
718 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
719 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
720 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
721 CPUID_EXT3_CR8LEG,
722 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
723 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
724 .features[FEAT_8000_0001_ECX] =
725 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
726 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
727 /* Missing: CPUID_SVM_LBRV */
728 .features[FEAT_SVM] =
729 CPUID_SVM_NPT,
730 .xlevel = 0x8000001A,
731 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
732 },
733 {
734 .name = "core2duo",
735 .level = 10,
736 .vendor = CPUID_VENDOR_INTEL,
737 .family = 6,
738 .model = 15,
739 .stepping = 11,
740 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
741 .features[FEAT_1_EDX] =
742 PPRO_FEATURES |
743 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
744 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
745 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
746 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
747 .features[FEAT_1_ECX] =
748 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
749 CPUID_EXT_CX16,
750 .features[FEAT_8000_0001_EDX] =
751 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
752 .features[FEAT_8000_0001_ECX] =
753 CPUID_EXT3_LAHF_LM,
754 .xlevel = 0x80000008,
755 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
756 },
757 {
758 .name = "kvm64",
759 .level = 0xd,
760 .vendor = CPUID_VENDOR_INTEL,
761 .family = 15,
762 .model = 6,
763 .stepping = 1,
764 /* Missing: CPUID_HT */
765 .features[FEAT_1_EDX] =
766 PPRO_FEATURES | CPUID_VME |
767 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
768 CPUID_PSE36,
769 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
770 .features[FEAT_1_ECX] =
771 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
772 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
773 .features[FEAT_8000_0001_EDX] =
774 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
775 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
776 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
777 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
778 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
779 .features[FEAT_8000_0001_ECX] =
780 0,
781 .xlevel = 0x80000008,
782 .model_id = "Common KVM processor"
783 },
784 {
785 .name = "qemu32",
786 .level = 4,
787 .vendor = CPUID_VENDOR_INTEL,
788 .family = 6,
789 .model = 6,
790 .stepping = 3,
791 .features[FEAT_1_EDX] =
792 PPRO_FEATURES,
793 .features[FEAT_1_ECX] =
794 CPUID_EXT_SSE3,
795 .xlevel = 0x80000004,
796 },
797 {
798 .name = "kvm32",
799 .level = 5,
800 .vendor = CPUID_VENDOR_INTEL,
801 .family = 15,
802 .model = 6,
803 .stepping = 1,
804 .features[FEAT_1_EDX] =
805 PPRO_FEATURES | CPUID_VME |
806 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
807 .features[FEAT_1_ECX] =
808 CPUID_EXT_SSE3,
809 .features[FEAT_8000_0001_ECX] =
810 0,
811 .xlevel = 0x80000008,
812 .model_id = "Common 32-bit KVM processor"
813 },
814 {
815 .name = "coreduo",
816 .level = 10,
817 .vendor = CPUID_VENDOR_INTEL,
818 .family = 6,
819 .model = 14,
820 .stepping = 8,
821 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
822 .features[FEAT_1_EDX] =
823 PPRO_FEATURES | CPUID_VME |
824 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
825 CPUID_SS,
826 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
827 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
828 .features[FEAT_1_ECX] =
829 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
830 .features[FEAT_8000_0001_EDX] =
831 CPUID_EXT2_NX,
832 .xlevel = 0x80000008,
833 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
834 },
835 {
836 .name = "486",
837 .level = 1,
838 .vendor = CPUID_VENDOR_INTEL,
839 .family = 4,
840 .model = 8,
841 .stepping = 0,
842 .features[FEAT_1_EDX] =
843 I486_FEATURES,
844 .xlevel = 0,
845 },
846 {
847 .name = "pentium",
848 .level = 1,
849 .vendor = CPUID_VENDOR_INTEL,
850 .family = 5,
851 .model = 4,
852 .stepping = 3,
853 .features[FEAT_1_EDX] =
854 PENTIUM_FEATURES,
855 .xlevel = 0,
856 },
857 {
858 .name = "pentium2",
859 .level = 2,
860 .vendor = CPUID_VENDOR_INTEL,
861 .family = 6,
862 .model = 5,
863 .stepping = 2,
864 .features[FEAT_1_EDX] =
865 PENTIUM2_FEATURES,
866 .xlevel = 0,
867 },
868 {
869 .name = "pentium3",
870 .level = 3,
871 .vendor = CPUID_VENDOR_INTEL,
872 .family = 6,
873 .model = 7,
874 .stepping = 3,
875 .features[FEAT_1_EDX] =
876 PENTIUM3_FEATURES,
877 .xlevel = 0,
878 },
879 {
880 .name = "athlon",
881 .level = 2,
882 .vendor = CPUID_VENDOR_AMD,
883 .family = 6,
884 .model = 2,
885 .stepping = 3,
886 .features[FEAT_1_EDX] =
887 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
888 CPUID_MCA,
889 .features[FEAT_8000_0001_EDX] =
890 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
891 .xlevel = 0x80000008,
892 },
893 {
894 .name = "n270",
895 .level = 10,
896 .vendor = CPUID_VENDOR_INTEL,
897 .family = 6,
898 .model = 28,
899 .stepping = 2,
900 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
901 .features[FEAT_1_EDX] =
902 PPRO_FEATURES |
903 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
904 CPUID_ACPI | CPUID_SS,
905         /* Some CPUs of this model lack CPUID_SEP */
906 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
907 * CPUID_EXT_XTPR */
908 .features[FEAT_1_ECX] =
909 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
910 CPUID_EXT_MOVBE,
911 .features[FEAT_8000_0001_EDX] =
912 CPUID_EXT2_NX,
913 .features[FEAT_8000_0001_ECX] =
914 CPUID_EXT3_LAHF_LM,
915 .xlevel = 0x80000008,
916 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
917 },
918 {
919 .name = "Conroe",
920 .level = 10,
921 .vendor = CPUID_VENDOR_INTEL,
922 .family = 6,
923 .model = 15,
924 .stepping = 3,
925 .features[FEAT_1_EDX] =
926 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
927 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
928 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
929 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
930 CPUID_DE | CPUID_FP87,
931 .features[FEAT_1_ECX] =
932 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
933 .features[FEAT_8000_0001_EDX] =
934 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
935 .features[FEAT_8000_0001_ECX] =
936 CPUID_EXT3_LAHF_LM,
937 .xlevel = 0x80000008,
938 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
939 },
940 {
941 .name = "Penryn",
942 .level = 10,
943 .vendor = CPUID_VENDOR_INTEL,
944 .family = 6,
945 .model = 23,
946 .stepping = 3,
947 .features[FEAT_1_EDX] =
948 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
949 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
950 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
951 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
952 CPUID_DE | CPUID_FP87,
953 .features[FEAT_1_ECX] =
954 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
955 CPUID_EXT_SSE3,
956 .features[FEAT_8000_0001_EDX] =
957 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
958 .features[FEAT_8000_0001_ECX] =
959 CPUID_EXT3_LAHF_LM,
960 .xlevel = 0x80000008,
961 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
962 },
963 {
964 .name = "Nehalem",
965 .level = 11,
966 .vendor = CPUID_VENDOR_INTEL,
967 .family = 6,
968 .model = 26,
969 .stepping = 3,
970 .features[FEAT_1_EDX] =
971 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
972 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
973 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
974 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
975 CPUID_DE | CPUID_FP87,
976 .features[FEAT_1_ECX] =
977 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
978 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
979 .features[FEAT_8000_0001_EDX] =
980 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
981 .features[FEAT_8000_0001_ECX] =
982 CPUID_EXT3_LAHF_LM,
983 .xlevel = 0x80000008,
984 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
985 },
986 {
987 .name = "Westmere",
988 .level = 11,
989 .vendor = CPUID_VENDOR_INTEL,
990 .family = 6,
991 .model = 44,
992 .stepping = 1,
993 .features[FEAT_1_EDX] =
994 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
995 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
996 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
997 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
998 CPUID_DE | CPUID_FP87,
999 .features[FEAT_1_ECX] =
1000 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1001 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1002 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1003 .features[FEAT_8000_0001_EDX] =
1004 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1005 .features[FEAT_8000_0001_ECX] =
1006 CPUID_EXT3_LAHF_LM,
1007 .features[FEAT_6_EAX] =
1008 CPUID_6_EAX_ARAT,
1009 .xlevel = 0x80000008,
1010 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1011 },
1012 {
1013 .name = "SandyBridge",
1014 .level = 0xd,
1015 .vendor = CPUID_VENDOR_INTEL,
1016 .family = 6,
1017 .model = 42,
1018 .stepping = 1,
1019 .features[FEAT_1_EDX] =
1020 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1021 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1022 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1023 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1024 CPUID_DE | CPUID_FP87,
1025 .features[FEAT_1_ECX] =
1026 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1027 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1028 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1029 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1030 CPUID_EXT_SSE3,
1031 .features[FEAT_8000_0001_EDX] =
1032 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1033 CPUID_EXT2_SYSCALL,
1034 .features[FEAT_8000_0001_ECX] =
1035 CPUID_EXT3_LAHF_LM,
1036 .features[FEAT_XSAVE] =
1037 CPUID_XSAVE_XSAVEOPT,
1038 .features[FEAT_6_EAX] =
1039 CPUID_6_EAX_ARAT,
1040 .xlevel = 0x80000008,
1041 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1042 },
1043 {
1044 .name = "IvyBridge",
1045 .level = 0xd,
1046 .vendor = CPUID_VENDOR_INTEL,
1047 .family = 6,
1048 .model = 58,
1049 .stepping = 9,
1050 .features[FEAT_1_EDX] =
1051 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1052 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1053 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1054 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1055 CPUID_DE | CPUID_FP87,
1056 .features[FEAT_1_ECX] =
1057 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1058 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1059 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1060 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1061 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1062 .features[FEAT_7_0_EBX] =
1063 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1064 CPUID_7_0_EBX_ERMS,
1065 .features[FEAT_8000_0001_EDX] =
1066 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1067 CPUID_EXT2_SYSCALL,
1068 .features[FEAT_8000_0001_ECX] =
1069 CPUID_EXT3_LAHF_LM,
1070 .features[FEAT_XSAVE] =
1071 CPUID_XSAVE_XSAVEOPT,
1072 .features[FEAT_6_EAX] =
1073 CPUID_6_EAX_ARAT,
1074 .xlevel = 0x80000008,
1075 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1076 },
1077 {
1078 .name = "Haswell-noTSX",
1079 .level = 0xd,
1080 .vendor = CPUID_VENDOR_INTEL,
1081 .family = 6,
1082 .model = 60,
1083 .stepping = 1,
1084 .features[FEAT_1_EDX] =
1085 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1086 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1087 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1088 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1089 CPUID_DE | CPUID_FP87,
1090 .features[FEAT_1_ECX] =
1091 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1092 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1093 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1094 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1095 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1096 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1097 .features[FEAT_8000_0001_EDX] =
1098 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1099 CPUID_EXT2_SYSCALL,
1100 .features[FEAT_8000_0001_ECX] =
1101 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1102 .features[FEAT_7_0_EBX] =
1103 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1104 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1105 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1106 .features[FEAT_XSAVE] =
1107 CPUID_XSAVE_XSAVEOPT,
1108 .features[FEAT_6_EAX] =
1109 CPUID_6_EAX_ARAT,
1110 .xlevel = 0x80000008,
1111 .model_id = "Intel Core Processor (Haswell, no TSX)",
1112 }, {
1113 .name = "Haswell",
1114 .level = 0xd,
1115 .vendor = CPUID_VENDOR_INTEL,
1116 .family = 6,
1117 .model = 60,
1118 .stepping = 1,
1119 .features[FEAT_1_EDX] =
1120 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1121 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1122 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1123 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1124 CPUID_DE | CPUID_FP87,
1125 .features[FEAT_1_ECX] =
1126 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1127 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1128 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1129 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1130 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1131 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1132 .features[FEAT_8000_0001_EDX] =
1133 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1134 CPUID_EXT2_SYSCALL,
1135 .features[FEAT_8000_0001_ECX] =
1136 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1137 .features[FEAT_7_0_EBX] =
1138 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1139 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1140 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1141 CPUID_7_0_EBX_RTM,
1142 .features[FEAT_XSAVE] =
1143 CPUID_XSAVE_XSAVEOPT,
1144 .features[FEAT_6_EAX] =
1145 CPUID_6_EAX_ARAT,
1146 .xlevel = 0x80000008,
1147 .model_id = "Intel Core Processor (Haswell)",
1148 },
1149 {
1150 .name = "Broadwell-noTSX",
1151 .level = 0xd,
1152 .vendor = CPUID_VENDOR_INTEL,
1153 .family = 6,
1154 .model = 61,
1155 .stepping = 2,
1156 .features[FEAT_1_EDX] =
1157 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1158 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1159 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1160 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1161 CPUID_DE | CPUID_FP87,
1162 .features[FEAT_1_ECX] =
1163 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1164 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1165 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1166 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1167 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1168 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1169 .features[FEAT_8000_0001_EDX] =
1170 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1171 CPUID_EXT2_SYSCALL,
1172 .features[FEAT_8000_0001_ECX] =
1173 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1174 .features[FEAT_7_0_EBX] =
1175 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1176 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1177 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1178 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1179 CPUID_7_0_EBX_SMAP,
1180 .features[FEAT_XSAVE] =
1181 CPUID_XSAVE_XSAVEOPT,
1182 .features[FEAT_6_EAX] =
1183 CPUID_6_EAX_ARAT,
1184 .xlevel = 0x80000008,
1185 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1186 },
1187 {
1188 .name = "Broadwell",
1189 .level = 0xd,
1190 .vendor = CPUID_VENDOR_INTEL,
1191 .family = 6,
1192 .model = 61,
1193 .stepping = 2,
1194 .features[FEAT_1_EDX] =
1195 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1196 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1197 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1198 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1199 CPUID_DE | CPUID_FP87,
1200 .features[FEAT_1_ECX] =
1201 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1202 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1203 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1204 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1205 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1206 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1207 .features[FEAT_8000_0001_EDX] =
1208 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1209 CPUID_EXT2_SYSCALL,
1210 .features[FEAT_8000_0001_ECX] =
1211 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1212 .features[FEAT_7_0_EBX] =
1213 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1214 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1215 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1216 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1217 CPUID_7_0_EBX_SMAP,
1218 .features[FEAT_XSAVE] =
1219 CPUID_XSAVE_XSAVEOPT,
1220 .features[FEAT_6_EAX] =
1221 CPUID_6_EAX_ARAT,
1222 .xlevel = 0x80000008,
1223 .model_id = "Intel Core Processor (Broadwell)",
1224 },
1225 {
1226 .name = "Opteron_G1",
1227 .level = 5,
1228 .vendor = CPUID_VENDOR_AMD,
1229 .family = 15,
1230 .model = 6,
1231 .stepping = 1,
1232 .features[FEAT_1_EDX] =
1233 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1234 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1235 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1236 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1237 CPUID_DE | CPUID_FP87,
1238 .features[FEAT_1_ECX] =
1239 CPUID_EXT_SSE3,
1240 .features[FEAT_8000_0001_EDX] =
1241 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1242 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1243 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1244 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1245 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1246 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1247 .xlevel = 0x80000008,
1248 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1249 },
1250 {
1251 .name = "Opteron_G2",
1252 .level = 5,
1253 .vendor = CPUID_VENDOR_AMD,
1254 .family = 15,
1255 .model = 6,
1256 .stepping = 1,
1257 .features[FEAT_1_EDX] =
1258 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1259 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1260 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1261 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1262 CPUID_DE | CPUID_FP87,
1263 .features[FEAT_1_ECX] =
1264 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1265 /* Missing: CPUID_EXT2_RDTSCP */
1266 .features[FEAT_8000_0001_EDX] =
1267 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1268 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1269 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1270 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1271 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1272 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1273 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1274 .features[FEAT_8000_0001_ECX] =
1275 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1276 .xlevel = 0x80000008,
1277 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1278 },
1279 {
1280 .name = "Opteron_G3",
1281 .level = 5,
1282 .vendor = CPUID_VENDOR_AMD,
1283 .family = 15,
1284 .model = 6,
1285 .stepping = 1,
1286 .features[FEAT_1_EDX] =
1287 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1288 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1289 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1290 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1291 CPUID_DE | CPUID_FP87,
1292 .features[FEAT_1_ECX] =
1293 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1294 CPUID_EXT_SSE3,
1295 /* Missing: CPUID_EXT2_RDTSCP */
1296 .features[FEAT_8000_0001_EDX] =
1297 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1298 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1299 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1300 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1301 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1302 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1303 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1304 .features[FEAT_8000_0001_ECX] =
1305 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1306 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1307 .xlevel = 0x80000008,
1308 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1309 },
1310 {
1311 .name = "Opteron_G4",
1312 .level = 0xd,
1313 .vendor = CPUID_VENDOR_AMD,
1314 .family = 21,
1315 .model = 1,
1316 .stepping = 2,
1317 .features[FEAT_1_EDX] =
1318 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1319 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1320 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1321 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1322 CPUID_DE | CPUID_FP87,
1323 .features[FEAT_1_ECX] =
1324 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1325 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1326 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1327 CPUID_EXT_SSE3,
1328 /* Missing: CPUID_EXT2_RDTSCP */
1329 .features[FEAT_8000_0001_EDX] =
1330 CPUID_EXT2_LM |
1331 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1332 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1333 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1334 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1335 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1336 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1337 .features[FEAT_8000_0001_ECX] =
1338 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1339 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1340 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1341 CPUID_EXT3_LAHF_LM,
1342 /* no xsaveopt! */
1343 .xlevel = 0x8000001A,
1344 .model_id = "AMD Opteron 62xx class CPU",
1345 },
1346 {
1347 .name = "Opteron_G5",
1348 .level = 0xd,
1349 .vendor = CPUID_VENDOR_AMD,
1350 .family = 21,
1351 .model = 2,
1352 .stepping = 0,
1353 .features[FEAT_1_EDX] =
1354 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1355 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1356 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1357 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1358 CPUID_DE | CPUID_FP87,
1359 .features[FEAT_1_ECX] =
1360 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1361 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1362 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1363 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1364 /* Missing: CPUID_EXT2_RDTSCP */
1365 .features[FEAT_8000_0001_EDX] =
1366 CPUID_EXT2_LM |
1367 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1368 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1369 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1370 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1371 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1372 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1373 .features[FEAT_8000_0001_ECX] =
1374 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1375 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1376 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1377 CPUID_EXT3_LAHF_LM,
1378 /* no xsaveopt! */
1379 .xlevel = 0x8000001A,
1380 .model_id = "AMD Opteron 63xx class CPU",
1381 },
1382 };
1383
1384 typedef struct PropValue {
1385 const char *prop, *value;
1386 } PropValue;
1387
1388 /* KVM-specific features that are automatically added/removed
1389 * from all CPU models when KVM is enabled.
1390 */
1391 static PropValue kvm_default_props[] = {
1392 { "kvmclock", "on" },
1393 { "kvm-nopiodelay", "on" },
1394 { "kvm-asyncpf", "on" },
1395 { "kvm-steal-time", "on" },
1396 { "kvm-pv-eoi", "on" },
1397 { "kvmclock-stable-bit", "on" },
1398 { "x2apic", "on" },
1399 { "acpi", "off" },
1400 { "monitor", "off" },
1401 { "svm", "off" },
1402 { NULL, NULL },
1403 };
1404
1405 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1406 {
1407 PropValue *pv;
1408 for (pv = kvm_default_props; pv->prop; pv++) {
1409 if (!strcmp(pv->prop, prop)) {
1410 pv->value = value;
1411 break;
1412 }
1413 }
1414
1415 /* It is valid to call this function only for properties that
1416 * are already present in the kvm_default_props table.
1417 */
1418 assert(pv->prop);
1419 }
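/* Illustrative call (not an actual upstream caller): a machine-type compat
 * hook could use x86_cpu_change_kvm_default("acpi", "on") to override the
 * "off" default above; passing a property name that is not listed in
 * kvm_default_props trips the assertion.
 */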
1420
1421 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1422 bool migratable_only);
1423
1424 #ifdef CONFIG_KVM
1425
1426 static int cpu_x86_fill_model_id(char *str)
1427 {
1428 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1429 int i;
1430
1431 for (i = 0; i < 3; i++) {
1432 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1433 memcpy(str + i * 16 + 0, &eax, 4);
1434 memcpy(str + i * 16 + 4, &ebx, 4);
1435 memcpy(str + i * 16 + 8, &ecx, 4);
1436 memcpy(str + i * 16 + 12, &edx, 4);
1437 }
1438 return 0;
1439 }
1440
1441 static X86CPUDefinition host_cpudef;
1442
1443 static Property host_x86_cpu_properties[] = {
1444 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1445 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1446 DEFINE_PROP_END_OF_LIST()
1447 };
1448
1449 /* class_init for the "host" CPU model
1450 *
1451 * This function may be called before KVM is initialized.
1452 */
1453 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1454 {
1455 DeviceClass *dc = DEVICE_CLASS(oc);
1456 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1457 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1458
1459 xcc->kvm_required = true;
1460
1461 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1462 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1463
1464 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1465 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1466 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1467 host_cpudef.stepping = eax & 0x0F;
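/* Worked example: if the host reports CPUID.1 EAX = 0x000306c3, the fields
 * above decode to family 6 (base 6 + extended 0), model 60 (0xc | 0x3 << 4)
 * and stepping 3.
 */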
1468
1469 cpu_x86_fill_model_id(host_cpudef.model_id);
1470
1471 xcc->cpu_def = &host_cpudef;
1472
1473 /* level, xlevel, xlevel2, and the feature words are initialized on
1474 * instance_init, because they require KVM to be initialized.
1475 */
1476
1477 dc->props = host_x86_cpu_properties;
1478 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1479 dc->cannot_destroy_with_object_finalize_yet = true;
1480 }
1481
1482 static void host_x86_cpu_initfn(Object *obj)
1483 {
1484 X86CPU *cpu = X86_CPU(obj);
1485 CPUX86State *env = &cpu->env;
1486 KVMState *s = kvm_state;
1487
1488 assert(kvm_enabled());
1489
1490 /* We can't fill the features array here because we don't know yet if
1491 * "migratable" is true or false.
1492 */
1493 cpu->host_features = true;
1494
1495 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1496 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1497 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1498
1499 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1500 }
1501
1502 static const TypeInfo host_x86_cpu_type_info = {
1503 .name = X86_CPU_TYPE_NAME("host"),
1504 .parent = TYPE_X86_CPU,
1505 .instance_init = host_x86_cpu_initfn,
1506 .class_init = host_x86_cpu_class_init,
1507 };
1508
1509 #endif
1510
1511 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1512 {
1513 FeatureWordInfo *f = &feature_word_info[w];
1514 int i;
1515
1516 for (i = 0; i < 32; ++i) {
1517 if ((1UL << i) & mask) {
1518 const char *reg = get_register_name_32(f->cpuid_reg);
1519 assert(reg);
1520 fprintf(stderr, "warning: %s doesn't support requested feature: "
1521 "CPUID.%02XH:%s%s%s [bit %d]\n",
1522 kvm_enabled() ? "host" : "TCG",
1523 f->cpuid_eax, reg,
1524 f->feat_names[i] ? "." : "",
1525 f->feat_names[i] ? f->feat_names[i] : "", i);
1526 }
1527 }
1528 }
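/* Illustrative output: requesting MPX on a host without it would print
 * "warning: host doesn't support requested feature: CPUID.07H:EBX.mpx [bit 14]".
 */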
1529
1530 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1531 const char *name, void *opaque,
1532 Error **errp)
1533 {
1534 X86CPU *cpu = X86_CPU(obj);
1535 CPUX86State *env = &cpu->env;
1536 int64_t value;
1537
1538 value = (env->cpuid_version >> 8) & 0xf;
1539 if (value == 0xf) {
1540 value += (env->cpuid_version >> 20) & 0xff;
1541 }
1542 visit_type_int(v, name, &value, errp);
1543 }
1544
1545 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1546 const char *name, void *opaque,
1547 Error **errp)
1548 {
1549 X86CPU *cpu = X86_CPU(obj);
1550 CPUX86State *env = &cpu->env;
1551 const int64_t min = 0;
1552 const int64_t max = 0xff + 0xf;
1553 Error *local_err = NULL;
1554 int64_t value;
1555
1556 visit_type_int(v, name, &value, &local_err);
1557 if (local_err) {
1558 error_propagate(errp, local_err);
1559 return;
1560 }
1561 if (value < min || value > max) {
1562 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1563 name ? name : "null", value, min, max);
1564 return;
1565 }
1566
1567 env->cpuid_version &= ~0xff00f00;
1568 if (value > 0x0f) {
1569 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1570 } else {
1571 env->cpuid_version |= value << 8;
1572 }
1573 }
1574
1575 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1576 const char *name, void *opaque,
1577 Error **errp)
1578 {
1579 X86CPU *cpu = X86_CPU(obj);
1580 CPUX86State *env = &cpu->env;
1581 int64_t value;
1582
1583 value = (env->cpuid_version >> 4) & 0xf;
1584 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1585 visit_type_int(v, name, &value, errp);
1586 }
1587
1588 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1589 const char *name, void *opaque,
1590 Error **errp)
1591 {
1592 X86CPU *cpu = X86_CPU(obj);
1593 CPUX86State *env = &cpu->env;
1594 const int64_t min = 0;
1595 const int64_t max = 0xff;
1596 Error *local_err = NULL;
1597 int64_t value;
1598
1599 visit_type_int(v, name, &value, &local_err);
1600 if (local_err) {
1601 error_propagate(errp, local_err);
1602 return;
1603 }
1604 if (value < min || value > max) {
1605 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1606 name ? name : "null", value, min, max);
1607 return;
1608 }
1609
1610 env->cpuid_version &= ~0xf00f0;
1611 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1612 }
1613
1614 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1615 const char *name, void *opaque,
1616 Error **errp)
1617 {
1618 X86CPU *cpu = X86_CPU(obj);
1619 CPUX86State *env = &cpu->env;
1620 int64_t value;
1621
1622 value = env->cpuid_version & 0xf;
1623 visit_type_int(v, name, &value, errp);
1624 }
1625
1626 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1627 const char *name, void *opaque,
1628 Error **errp)
1629 {
1630 X86CPU *cpu = X86_CPU(obj);
1631 CPUX86State *env = &cpu->env;
1632 const int64_t min = 0;
1633 const int64_t max = 0xf;
1634 Error *local_err = NULL;
1635 int64_t value;
1636
1637 visit_type_int(v, name, &value, &local_err);
1638 if (local_err) {
1639 error_propagate(errp, local_err);
1640 return;
1641 }
1642 if (value < min || value > max) {
1643 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1644 name ? name : "null", value, min, max);
1645 return;
1646 }
1647
1648 env->cpuid_version &= ~0xf;
1649 env->cpuid_version |= value & 0xf;
1650 }
1651
1652 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1653 {
1654 X86CPU *cpu = X86_CPU(obj);
1655 CPUX86State *env = &cpu->env;
1656 char *value;
1657
1658 value = g_malloc(CPUID_VENDOR_SZ + 1);
1659 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1660 env->cpuid_vendor3);
1661 return value;
1662 }
1663
1664 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1665 Error **errp)
1666 {
1667 X86CPU *cpu = X86_CPU(obj);
1668 CPUX86State *env = &cpu->env;
1669 int i;
1670
1671 if (strlen(value) != CPUID_VENDOR_SZ) {
1672 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1673 return;
1674 }
1675
1676 env->cpuid_vendor1 = 0;
1677 env->cpuid_vendor2 = 0;
1678 env->cpuid_vendor3 = 0;
1679 for (i = 0; i < 4; i++) {
1680 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1681 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1682 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1683 }
1684 }
1685
1686 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1687 {
1688 X86CPU *cpu = X86_CPU(obj);
1689 CPUX86State *env = &cpu->env;
1690 char *value;
1691 int i;
1692
1693 value = g_malloc(48 + 1);
1694 for (i = 0; i < 48; i++) {
1695 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1696 }
1697 value[48] = '\0';
1698 return value;
1699 }
1700
1701 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1702 Error **errp)
1703 {
1704 X86CPU *cpu = X86_CPU(obj);
1705 CPUX86State *env = &cpu->env;
1706 int c, len, i;
1707
1708 if (model_id == NULL) {
1709 model_id = "";
1710 }
1711 len = strlen(model_id);
1712 memset(env->cpuid_model, 0, 48);
1713 for (i = 0; i < 48; i++) {
1714 if (i >= len) {
1715 c = '\0';
1716 } else {
1717 c = (uint8_t)model_id[i];
1718 }
1719 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1720 }
1721 }
1722
1723 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1724 void *opaque, Error **errp)
1725 {
1726 X86CPU *cpu = X86_CPU(obj);
1727 int64_t value;
1728
1729 value = cpu->env.tsc_khz * 1000;
1730 visit_type_int(v, name, &value, errp);
1731 }
1732
1733 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1734 void *opaque, Error **errp)
1735 {
1736 X86CPU *cpu = X86_CPU(obj);
1737 const int64_t min = 0;
1738 const int64_t max = INT64_MAX;
1739 Error *local_err = NULL;
1740 int64_t value;
1741
1742 visit_type_int(v, name, &value, &local_err);
1743 if (local_err) {
1744 error_propagate(errp, local_err);
1745 return;
1746 }
1747 if (value < min || value > max) {
1748 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1749 name ? name : "null", value, min, max);
1750 return;
1751 }
1752
1753 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1754 }
1755
1756 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1757 void *opaque, Error **errp)
1758 {
1759 X86CPU *cpu = X86_CPU(obj);
1760 int64_t value = cpu->apic_id;
1761
1762 visit_type_int(v, name, &value, errp);
1763 }
1764
1765 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1766 void *opaque, Error **errp)
1767 {
1768 X86CPU *cpu = X86_CPU(obj);
1769 DeviceState *dev = DEVICE(obj);
1770 const int64_t min = 0;
1771 const int64_t max = UINT32_MAX;
1772 Error *error = NULL;
1773 int64_t value;
1774
1775 if (dev->realized) {
1776 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1777 "it was realized", name, object_get_typename(obj));
1778 return;
1779 }
1780
1781 visit_type_int(v, name, &value, &error);
1782 if (error) {
1783 error_propagate(errp, error);
1784 return;
1785 }
1786 if (value < min || value > max) {
1787 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1788 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1789 object_get_typename(obj), name, value, min, max);
1790 return;
1791 }
1792
1793 if ((value != cpu->apic_id) && cpu_exists(value)) {
1794 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1795 return;
1796 }
1797 cpu->apic_id = value;
1798 }
1799
1800 /* Generic getter for "feature-words" and "filtered-features" properties */
1801 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1802 const char *name, void *opaque,
1803 Error **errp)
1804 {
1805 uint32_t *array = (uint32_t *)opaque;
1806 FeatureWord w;
1807 Error *err = NULL;
1808 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1809 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1810 X86CPUFeatureWordInfoList *list = NULL;
1811
1812 for (w = 0; w < FEATURE_WORDS; w++) {
1813 FeatureWordInfo *wi = &feature_word_info[w];
1814 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1815 qwi->cpuid_input_eax = wi->cpuid_eax;
1816 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1817 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1818 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1819 qwi->features = array[w];
1820
1821 /* List will be in reverse order, but order shouldn't matter */
1822 list_entries[w].next = list;
1823 list_entries[w].value = &word_infos[w];
1824 list = &list_entries[w];
1825 }
1826
1827 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1828 error_propagate(errp, err);
1829 }
1830
1831 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1832 void *opaque, Error **errp)
1833 {
1834 X86CPU *cpu = X86_CPU(obj);
1835 int64_t value = cpu->hyperv_spinlock_attempts;
1836
1837 visit_type_int(v, name, &value, errp);
1838 }
1839
1840 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1841 void *opaque, Error **errp)
1842 {
1843 const int64_t min = 0xFFF;
1844 const int64_t max = UINT_MAX;
1845 X86CPU *cpu = X86_CPU(obj);
1846 Error *err = NULL;
1847 int64_t value;
1848
1849 visit_type_int(v, name, &value, &err);
1850 if (err) {
1851 error_propagate(errp, err);
1852 return;
1853 }
1854
1855 if (value < min || value > max) {
1856 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1857 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1858 object_get_typename(obj), name ? name : "null",
1859 value, min, max);
1860 return;
1861 }
1862 cpu->hyperv_spinlock_attempts = value;
1863 }
1864
1865 static PropertyInfo qdev_prop_spinlocks = {
1866 .name = "int",
1867 .get = x86_get_hv_spinlocks,
1868 .set = x86_set_hv_spinlocks,
1869 };
1870
1871 /* Convert all '_' in a feature string option name to '-', to make the feature
1872 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
1873 */
1874 static inline void feat2prop(char *s)
1875 {
1876 while ((s = strchr(s, '_'))) {
1877 *s = '-';
1878 }
1879 }
1880
1881 /* Parse "+feature,-feature,feature=foo" CPU feature string
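 * For example, a string such as
 *   "+avx,-sse4.2,xlevel=0x8000000a,tsc-freq=2000000000"
 * (exact flag names depend on this build's feature-name tables) is split on
 * ',' and handled one "+/-flag" or "key=value" entry at a time.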
1882 */
1883 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1884 Error **errp)
1885 {
1886 X86CPU *cpu = X86_CPU(cs);
1887 char *featurestr; /* Single "key=value" string being parsed */
1888 FeatureWord w;
1889 /* Features to be added */
1890 FeatureWordArray plus_features = { 0 };
1891 /* Features to be removed */
1892 FeatureWordArray minus_features = { 0 };
1893 uint32_t numvalue;
1894 CPUX86State *env = &cpu->env;
1895 Error *local_err = NULL;
1896
1897 featurestr = features ? strtok(features, ",") : NULL;
1898
1899 while (featurestr) {
1900 char *val;
1901 if (featurestr[0] == '+') {
1902 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1903 } else if (featurestr[0] == '-') {
1904 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1905 } else if ((val = strchr(featurestr, '='))) {
1906 *val = 0; val++;
1907 feat2prop(featurestr);
1908 if (!strcmp(featurestr, "xlevel")) {
1909 char *err;
1910 char num[32];
1911
1912 numvalue = strtoul(val, &err, 0);
1913 if (!*val || *err) {
1914 error_setg(errp, "bad numerical value %s", val);
1915 return;
1916 }
1917 if (numvalue < 0x80000000) {
1918 error_report("xlevel value shall always be >= 0x80000000"
1919 ", fixup will be removed in future versions");
1920 numvalue += 0x80000000;
1921 }
1922 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1923 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1924 } else if (!strcmp(featurestr, "tsc-freq")) {
1925 int64_t tsc_freq;
1926 char *err;
1927 char num[32];
1928
1929 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1930 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1931 if (tsc_freq < 0 || *err) {
1932 error_setg(errp, "bad numerical value %s", val);
1933 return;
1934 }
1935 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1936 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1937 &local_err);
1938 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1939 char *err;
1940 const int min = 0xFFF;
1941 char num[32];
1942 numvalue = strtoul(val, &err, 0);
1943 if (!*val || *err) {
1944 error_setg(errp, "bad numerical value %s", val);
1945 return;
1946 }
1947 if (numvalue < min) {
1948 error_report("hv-spinlocks value shall always be >= 0x%x"
1949 ", fixup will be removed in future versions",
1950 min);
1951 numvalue = min;
1952 }
1953 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1954 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1955 } else {
1956 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1957 }
1958 } else {
1959 feat2prop(featurestr);
1960 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1961 }
1962 if (local_err) {
1963 error_propagate(errp, local_err);
1964 return;
1965 }
1966 featurestr = strtok(NULL, ",");
1967 }
1968
1969 if (cpu->host_features) {
1970 for (w = 0; w < FEATURE_WORDS; w++) {
1971 env->features[w] =
1972 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1973 }
1974 }
1975
1976 for (w = 0; w < FEATURE_WORDS; w++) {
1977 env->features[w] |= plus_features[w];
1978 env->features[w] &= ~minus_features[w];
1979 }
1980 }
1981
1982 /* Print all cpuid feature names in featureset
1983 */
1984 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1985 {
1986 int bit;
1987 bool first = true;
1988
1989 for (bit = 0; bit < 32; bit++) {
1990 if (featureset[bit]) {
1991 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1992 first = false;
1993 }
1994 }
1995 }
1996
1997 /* generate CPU information. */
1998 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1999 {
2000 X86CPUDefinition *def;
2001 char buf[256];
2002 int i;
2003
2004 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2005 def = &builtin_x86_defs[i];
2006 snprintf(buf, sizeof(buf), "%s", def->name);
2007 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2008 }
2009 #ifdef CONFIG_KVM
2010 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2011 "KVM processor with all supported host features "
2012 "(only available in KVM mode)");
2013 #endif
2014
2015 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2016 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2017 FeatureWordInfo *fw = &feature_word_info[i];
2018
2019 (*cpu_fprintf)(f, " ");
2020 listflags(f, cpu_fprintf, fw->feat_names);
2021 (*cpu_fprintf)(f, "\n");
2022 }
2023 }
2024
2025 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2026 {
2027 CpuDefinitionInfoList *cpu_list = NULL;
2028 X86CPUDefinition *def;
2029 int i;
2030
2031 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2032 CpuDefinitionInfoList *entry;
2033 CpuDefinitionInfo *info;
2034
2035 def = &builtin_x86_defs[i];
2036 info = g_malloc0(sizeof(*info));
2037 info->name = g_strdup(def->name);
2038
2039 entry = g_malloc0(sizeof(*entry));
2040 entry->value = info;
2041 entry->next = cpu_list;
2042 cpu_list = entry;
2043 }
2044
2045 return cpu_list;
2046 }
2047
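/* Return the bits of feature word w that the active accelerator can provide:
 * the KVM-supported CPUID bits under KVM, the TCG feature mask under TCG,
 * and all-ones otherwise; optionally restricted to migratable flags.
 */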
2048 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2049 bool migratable_only)
2050 {
2051 FeatureWordInfo *wi = &feature_word_info[w];
2052 uint32_t r;
2053
2054 if (kvm_enabled()) {
2055 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2056 wi->cpuid_ecx,
2057 wi->cpuid_reg);
2058 } else if (tcg_enabled()) {
2059 r = wi->tcg_features;
2060 } else {
2061 return ~0;
2062 }
2063 if (migratable_only) {
2064 r &= x86_cpu_get_migratable_flags(w);
2065 }
2066 return r;
2067 }
2068
2069 /*
2070 * Filters CPU feature words based on host availability of each feature.
2071 *
2072 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2073 */
2074 static int x86_cpu_filter_features(X86CPU *cpu)
2075 {
2076 CPUX86State *env = &cpu->env;
2077 FeatureWord w;
2078 int rv = 0;
2079
2080 for (w = 0; w < FEATURE_WORDS; w++) {
2081 uint32_t host_feat =
2082 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2083 uint32_t requested_features = env->features[w];
2084 env->features[w] &= host_feat;
2085 cpu->filtered_features[w] = requested_features & ~env->features[w];
2086 if (cpu->filtered_features[w]) {
2087 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2088 report_unavailable_features(w, cpu->filtered_features[w]);
2089 }
2090 rv = 1;
2091 }
2092 }
2093
2094 return rv;
2095 }
2096
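/* Apply an array of (property, value) pairs to the CPU object, skipping
 * entries whose value is NULL; parse errors are fatal (&error_abort).
 */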
2097 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2098 {
2099 PropValue *pv;
2100 for (pv = props; pv->prop; pv++) {
2101 if (!pv->value) {
2102 continue;
2103 }
2104 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2105 &error_abort);
2106 }
2107 }
2108
2109 /* Load data from X86CPUDefinition
2110 */
2111 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2112 {
2113 CPUX86State *env = &cpu->env;
2114 const char *vendor;
2115 char host_vendor[CPUID_VENDOR_SZ + 1];
2116 FeatureWord w;
2117
2118 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2119 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2120 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2121 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2122 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2123 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2124 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2125 for (w = 0; w < FEATURE_WORDS; w++) {
2126 env->features[w] = def->features[w];
2127 }
2128
2129 /* Special cases not set in the X86CPUDefinition structs: */
2130 if (kvm_enabled()) {
2131 x86_cpu_apply_props(cpu, kvm_default_props);
2132 }
2133
2134 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2135
2136 /* sysenter isn't supported in compatibility mode on AMD,
2137 * syscall isn't supported in compatibility mode on Intel.
2138 * Normally we advertise the actual CPU vendor, but you can
2139 * override this using the 'vendor' property if you want to use
2140 * KVM's sysenter/syscall emulation in compatibility mode and
2141 * when doing cross-vendor migration.
2142 */
2143 vendor = def->vendor;
2144 if (kvm_enabled()) {
2145 uint32_t ebx = 0, ecx = 0, edx = 0;
2146 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2147 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2148 vendor = host_vendor;
2149 }
2150
2151 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2152
2153 }
2154
2155 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2156 {
2157 X86CPU *cpu = NULL;
2158 X86CPUClass *xcc;
2159 ObjectClass *oc;
2160 gchar **model_pieces;
2161 char *name, *features;
2162 Error *error = NULL;
2163
2164 model_pieces = g_strsplit(cpu_model, ",", 2);
2165 if (!model_pieces[0]) {
2166 error_setg(&error, "Invalid/empty CPU model name");
2167 goto out;
2168 }
2169 name = model_pieces[0];
2170 features = model_pieces[1];
2171
2172 oc = x86_cpu_class_by_name(name);
2173 if (oc == NULL) {
2174 error_setg(&error, "Unable to find CPU definition: %s", name);
2175 goto out;
2176 }
2177 xcc = X86_CPU_CLASS(oc);
2178
2179 if (xcc->kvm_required && !kvm_enabled()) {
2180 error_setg(&error, "CPU model '%s' requires KVM", name);
2181 goto out;
2182 }
2183
2184 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2185
2186 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2187 if (error) {
2188 goto out;
2189 }
2190
2191 out:
2192 if (error != NULL) {
2193 error_propagate(errp, error);
2194 if (cpu) {
2195 object_unref(OBJECT(cpu));
2196 cpu = NULL;
2197 }
2198 }
2199 g_strfreev(model_pieces);
2200 return cpu;
2201 }
2202
2203 X86CPU *cpu_x86_init(const char *cpu_model)
2204 {
2205 Error *error = NULL;
2206 X86CPU *cpu;
2207
2208 cpu = cpu_x86_create(cpu_model, &error);
2209 if (error) {
2210 goto out;
2211 }
2212
2213 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2214
2215 out:
2216 if (error) {
2217 error_report_err(error);
2218 if (cpu != NULL) {
2219 object_unref(OBJECT(cpu));
2220 cpu = NULL;
2221 }
2222 }
2223 return cpu;
2224 }
2225
2226 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2227 {
2228 X86CPUDefinition *cpudef = data;
2229 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2230
2231 xcc->cpu_def = cpudef;
2232 }
2233
2234 static void x86_register_cpudef_type(X86CPUDefinition *def)
2235 {
2236 char *typename = x86_cpu_type_name(def->name);
2237 TypeInfo ti = {
2238 .name = typename,
2239 .parent = TYPE_X86_CPU,
2240 .class_init = x86_cpu_cpudef_class_init,
2241 .class_data = def,
2242 };
2243
2244 type_register(&ti);
2245 g_free(typename);
2246 }
2247
2248 #if !defined(CONFIG_USER_ONLY)
2249
2250 void cpu_clear_apic_feature(CPUX86State *env)
2251 {
2252 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2253 }
2254
2255 #endif /* !CONFIG_USER_ONLY */
2256
2257 /* Initialize list of CPU models, filling some non-static fields if necessary
2258 */
2259 void x86_cpudef_setup(void)
2260 {
2261 int i, j;
2262 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2263
2264 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2265 X86CPUDefinition *def = &builtin_x86_defs[i];
2266
2267 /* Look for specific "cpudef" models that
2268 * have the QEMU version in .model_id */
2269 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2270 if (strcmp(model_with_versions[j], def->name) == 0) {
2271 pstrcpy(def->model_id, sizeof(def->model_id),
2272 "QEMU Virtual CPU version ");
2273 pstrcat(def->model_id, sizeof(def->model_id),
2274 qemu_hw_version());
2275 break;
2276 }
2277 }
2278 }
2279 }
2280
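/* Fill *eax..*edx with the guest-visible CPUID result for leaf 'index' and
 * sub-leaf 'count'; out-of-range leaves are first clamped to the relevant
 * maximum level (cpuid_level, cpuid_xlevel or cpuid_xlevel2).
 */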
2281 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2282 uint32_t *eax, uint32_t *ebx,
2283 uint32_t *ecx, uint32_t *edx)
2284 {
2285 X86CPU *cpu = x86_env_get_cpu(env);
2286 CPUState *cs = CPU(cpu);
2287
2288 /* test if maximum index reached */
2289 if (index & 0x80000000) {
2290 if (index > env->cpuid_xlevel) {
2291 if (env->cpuid_xlevel2 > 0) {
2292 /* Handle the Centaur's CPUID instruction. */
2293 if (index > env->cpuid_xlevel2) {
2294 index = env->cpuid_xlevel2;
2295 } else if (index < 0xC0000000) {
2296 index = env->cpuid_xlevel;
2297 }
2298 } else {
2299 /* Intel documentation states that invalid EAX input will
2300 * return the same information as EAX=cpuid_level
2301 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2302 */
2303 index = env->cpuid_level;
2304 }
2305 }
2306 } else {
2307 if (index > env->cpuid_level)
2308 index = env->cpuid_level;
2309 }
2310
2311 switch(index) {
2312 case 0:
2313 *eax = env->cpuid_level;
2314 *ebx = env->cpuid_vendor1;
2315 *edx = env->cpuid_vendor2;
2316 *ecx = env->cpuid_vendor3;
2317 break;
2318 case 1:
2319 *eax = env->cpuid_version;
2320 *ebx = (cpu->apic_id << 24) |
2321 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2322 *ecx = env->features[FEAT_1_ECX];
2323 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2324 *ecx |= CPUID_EXT_OSXSAVE;
2325 }
2326 *edx = env->features[FEAT_1_EDX];
2327 if (cs->nr_cores * cs->nr_threads > 1) {
2328 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2329 *edx |= CPUID_HT;
2330 }
2331 break;
2332 case 2:
2333 /* cache info: needed for Pentium Pro compatibility */
2334 if (cpu->cache_info_passthrough) {
2335 host_cpuid(index, 0, eax, ebx, ecx, edx);
2336 break;
2337 }
2338 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2339 *ebx = 0;
2340 *ecx = 0;
2341 *edx = (L1D_DESCRIPTOR << 16) | \
2342 (L1I_DESCRIPTOR << 8) | \
2343 (L2_DESCRIPTOR);
2344 break;
2345 case 4:
2346 /* cache info: needed for Core compatibility */
2347 if (cpu->cache_info_passthrough) {
2348 host_cpuid(index, count, eax, ebx, ecx, edx);
2349 *eax &= ~0xFC000000;
2350 } else {
2351 *eax = 0;
2352 switch (count) {
2353 case 0: /* L1 dcache info */
2354 *eax |= CPUID_4_TYPE_DCACHE | \
2355 CPUID_4_LEVEL(1) | \
2356 CPUID_4_SELF_INIT_LEVEL;
2357 *ebx = (L1D_LINE_SIZE - 1) | \
2358 ((L1D_PARTITIONS - 1) << 12) | \
2359 ((L1D_ASSOCIATIVITY - 1) << 22);
2360 *ecx = L1D_SETS - 1;
2361 *edx = CPUID_4_NO_INVD_SHARING;
2362 break;
2363 case 1: /* L1 icache info */
2364 *eax |= CPUID_4_TYPE_ICACHE | \
2365 CPUID_4_LEVEL(1) | \
2366 CPUID_4_SELF_INIT_LEVEL;
2367 *ebx = (L1I_LINE_SIZE - 1) | \
2368 ((L1I_PARTITIONS - 1) << 12) | \
2369 ((L1I_ASSOCIATIVITY - 1) << 22);
2370 *ecx = L1I_SETS - 1;
2371 *edx = CPUID_4_NO_INVD_SHARING;
2372 break;
2373 case 2: /* L2 cache info */
2374 *eax |= CPUID_4_TYPE_UNIFIED | \
2375 CPUID_4_LEVEL(2) | \
2376 CPUID_4_SELF_INIT_LEVEL;
2377 if (cs->nr_threads > 1) {
2378 *eax |= (cs->nr_threads - 1) << 14;
2379 }
2380 *ebx = (L2_LINE_SIZE - 1) | \
2381 ((L2_PARTITIONS - 1) << 12) | \
2382 ((L2_ASSOCIATIVITY - 1) << 22);
2383 *ecx = L2_SETS - 1;
2384 *edx = CPUID_4_NO_INVD_SHARING;
2385 break;
2386 default: /* end of info */
2387 *eax = 0;
2388 *ebx = 0;
2389 *ecx = 0;
2390 *edx = 0;
2391 break;
2392 }
2393 }
2394
2395 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2396 if ((*eax & 31) && cs->nr_cores > 1) {
2397 *eax |= (cs->nr_cores - 1) << 26;
2398 }
2399 break;
2400 case 5:
2401 /* mwait info: needed for Core compatibility */
2402 *eax = 0; /* Smallest monitor-line size in bytes */
2403 *ebx = 0; /* Largest monitor-line size in bytes */
2404 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2405 *edx = 0;
2406 break;
2407 case 6:
2408 /* Thermal and Power Leaf */
2409 *eax = env->features[FEAT_6_EAX];
2410 *ebx = 0;
2411 *ecx = 0;
2412 *edx = 0;
2413 break;
2414 case 7:
2415 /* Structured Extended Feature Flags Enumeration Leaf */
2416 if (count == 0) {
2417 *eax = 0; /* Maximum ECX value for sub-leaves */
2418 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2419 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2420 *edx = 0; /* Reserved */
2421 } else {
2422 *eax = 0;
2423 *ebx = 0;
2424 *ecx = 0;
2425 *edx = 0;
2426 }
2427 break;
2428 case 9:
2429 /* Direct Cache Access Information Leaf */
2430 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2431 *ebx = 0;
2432 *ecx = 0;
2433 *edx = 0;
2434 break;
2435 case 0xA:
2436 /* Architectural Performance Monitoring Leaf */
2437 if (kvm_enabled() && cpu->enable_pmu) {
2438 KVMState *s = cs->kvm_state;
2439
2440 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2441 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2442 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2443 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2444 } else {
2445 *eax = 0;
2446 *ebx = 0;
2447 *ecx = 0;
2448 *edx = 0;
2449 }
2450 break;
2451 case 0xD: {
2452 KVMState *s = cs->kvm_state;
2453 uint64_t ena_mask;
2454 int i;
2455
2456 /* Processor Extended State */
2457 *eax = 0;
2458 *ebx = 0;
2459 *ecx = 0;
2460 *edx = 0;
2461 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2462 break;
2463 }
2464 if (kvm_enabled()) {
2465 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2466 ena_mask <<= 32;
2467 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2468 } else {
2469 ena_mask = -1;
2470 }
2471
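/* ena_mask holds the XSAVE state components the accelerator can expose
 * (CPUID.0xD.0 EDX:EAX under KVM, everything under TCG); a component is
 * advertised only if it is both requested by the configured features and
 * present in this mask.
 */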
2472 if (count == 0) {
2473 *ecx = 0x240;
2474 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2475 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2476 if ((env->features[esa->feature] & esa->bits) == esa->bits
2477 && ((ena_mask >> i) & 1) != 0) {
2478 if (i < 32) {
2479 *eax |= 1u << i;
2480 } else {
2481 *edx |= 1u << (i - 32);
2482 }
2483 *ecx = MAX(*ecx, esa->offset + esa->size);
2484 }
2485 }
2486 *eax |= ena_mask & (XSTATE_FP | XSTATE_SSE);
2487 *ebx = *ecx;
2488 } else if (count == 1) {
2489 *eax = env->features[FEAT_XSAVE];
2490 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2491 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2492 if ((env->features[esa->feature] & esa->bits) == esa->bits
2493 && ((ena_mask >> count) & 1) != 0) {
2494 *eax = esa->size;
2495 *ebx = esa->offset;
2496 }
2497 }
2498 break;
2499 }
2500 case 0x80000000:
2501 *eax = env->cpuid_xlevel;
2502 *ebx = env->cpuid_vendor1;
2503 *edx = env->cpuid_vendor2;
2504 *ecx = env->cpuid_vendor3;
2505 break;
2506 case 0x80000001:
2507 *eax = env->cpuid_version;
2508 *ebx = 0;
2509 *ecx = env->features[FEAT_8000_0001_ECX];
2510 *edx = env->features[FEAT_8000_0001_EDX];
2511
2512 /* The Linux kernel checks for the CMPLegacy bit and
2513 * discards multiple thread information if it is set.
2514 * So don't set it here for Intel to make Linux guests happy.
2515 */
2516 if (cs->nr_cores * cs->nr_threads > 1) {
2517 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2518 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2519 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2520 *ecx |= 1 << 1; /* CmpLegacy bit */
2521 }
2522 }
2523 break;
2524 case 0x80000002:
2525 case 0x80000003:
2526 case 0x80000004:
2527 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2528 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2529 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2530 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2531 break;
2532 case 0x80000005:
2533 /* cache info (L1 cache) */
2534 if (cpu->cache_info_passthrough) {
2535 host_cpuid(index, 0, eax, ebx, ecx, edx);
2536 break;
2537 }
2538 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2539 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2540 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2541 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2542 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2543 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2544 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2545 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2546 break;
2547 case 0x80000006:
2548 /* cache info (L2 cache) */
2549 if (cpu->cache_info_passthrough) {
2550 host_cpuid(index, 0, eax, ebx, ecx, edx);
2551 break;
2552 }
2553 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2554 (L2_DTLB_2M_ENTRIES << 16) | \
2555 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2556 (L2_ITLB_2M_ENTRIES);
2557 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2558 (L2_DTLB_4K_ENTRIES << 16) | \
2559 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2560 (L2_ITLB_4K_ENTRIES);
2561 *ecx = (L2_SIZE_KB_AMD << 16) | \
2562 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2563 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2564 *edx = ((L3_SIZE_KB/512) << 18) | \
2565 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2566 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2567 break;
2568 case 0x80000007:
2569 *eax = 0;
2570 *ebx = 0;
2571 *ecx = 0;
2572 *edx = env->features[FEAT_8000_0007_EDX];
2573 break;
2574 case 0x80000008:
2575 /* virtual & phys address size in low 2 bytes. */
2576 /* XXX: This value must match the one used in the MMU code. */
2577 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2578 /* 64 bit processor */
2579 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2580 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2581 } else {
2582 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2583 *eax = 0x00000024; /* 36 bits physical */
2584 } else {
2585 *eax = 0x00000020; /* 32 bits physical */
2586 }
2587 }
2588 *ebx = 0;
2589 *ecx = 0;
2590 *edx = 0;
2591 if (cs->nr_cores * cs->nr_threads > 1) {
2592 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2593 }
2594 break;
2595 case 0x8000000A:
2596 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2597 *eax = 0x00000001; /* SVM Revision */
2598 *ebx = 0x00000010; /* nr of ASIDs */
2599 *ecx = 0;
2600 *edx = env->features[FEAT_SVM]; /* optional features */
2601 } else {
2602 *eax = 0;
2603 *ebx = 0;
2604 *ecx = 0;
2605 *edx = 0;
2606 }
2607 break;
2608 case 0xC0000000:
2609 *eax = env->cpuid_xlevel2;
2610 *ebx = 0;
2611 *ecx = 0;
2612 *edx = 0;
2613 break;
2614 case 0xC0000001:
2615 /* Support for VIA CPU's CPUID instruction */
2616 *eax = env->cpuid_version;
2617 *ebx = 0;
2618 *ecx = 0;
2619 *edx = env->features[FEAT_C000_0001_EDX];
2620 break;
2621 case 0xC0000002:
2622 case 0xC0000003:
2623 case 0xC0000004:
2624 /* Reserved for the future, and now filled with zero */
2625 *eax = 0;
2626 *ebx = 0;
2627 *ecx = 0;
2628 *edx = 0;
2629 break;
2630 default:
2631 /* reserved values: zero */
2632 *eax = 0;
2633 *ebx = 0;
2634 *ecx = 0;
2635 *edx = 0;
2636 break;
2637 }
2638 }
2639
2640 /* CPUClass::reset() */
2641 static void x86_cpu_reset(CPUState *s)
2642 {
2643 X86CPU *cpu = X86_CPU(s);
2644 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2645 CPUX86State *env = &cpu->env;
2646 int i;
2647
2648 xcc->parent_reset(s);
2649
2650 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2651
2652 tlb_flush(s, 1);
2653
2654 env->old_exception = -1;
2655
2656 /* init to reset state */
2657
2658 #ifdef CONFIG_SOFTMMU
2659 env->hflags |= HF_SOFTMMU_MASK;
2660 #endif
2661 env->hflags2 |= HF2_GIF_MASK;
2662
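/* 0x60000010 is the architectural CR0 reset value: CD, NW and ET set,
 * paging and protected mode disabled.
 */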
2663 cpu_x86_update_cr0(env, 0x60000010);
2664 env->a20_mask = ~0x0;
2665 env->smbase = 0x30000;
2666
2667 env->idt.limit = 0xffff;
2668 env->gdt.limit = 0xffff;
2669 env->ldt.limit = 0xffff;
2670 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2671 env->tr.limit = 0xffff;
2672 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2673
2674 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2675 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2676 DESC_R_MASK | DESC_A_MASK);
2677 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2678 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2679 DESC_A_MASK);
2680 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2681 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2682 DESC_A_MASK);
2683 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2684 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2685 DESC_A_MASK);
2686 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2687 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2688 DESC_A_MASK);
2689 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2690 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2691 DESC_A_MASK);
2692
2693 env->eip = 0xfff0;
2694 env->regs[R_EDX] = env->cpuid_version;
2695
2696 env->eflags = 0x2;
2697
2698 /* FPU init */
2699 for (i = 0; i < 8; i++) {
2700 env->fptags[i] = 1;
2701 }
2702 cpu_set_fpuc(env, 0x37f);
2703
2704 env->mxcsr = 0x1f80;
2705 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2706
2707 env->pat = 0x0007040600070406ULL;
2708 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2709
2710 memset(env->dr, 0, sizeof(env->dr));
2711 env->dr[6] = DR6_FIXED_1;
2712 env->dr[7] = DR7_FIXED_1;
2713 cpu_breakpoint_remove_all(s, BP_CPU);
2714 cpu_watchpoint_remove_all(s, BP_CPU);
2715
2716 env->xcr0 = 1;
2717
2718 /*
2719 * SDM 11.11.5 requires:
2720 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2721 * - IA32_MTRR_PHYSMASKn.V = 0
2722 * All other bits are undefined. For simplification, zero it all.
2723 */
2724 env->mtrr_deftype = 0;
2725 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2726 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2727
2728 #if !defined(CONFIG_USER_ONLY)
2729 /* We hard-wire the BSP to the first CPU. */
2730 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2731
2732 s->halted = !cpu_is_bsp(cpu);
2733
2734 if (kvm_enabled()) {
2735 kvm_arch_reset_vcpu(cpu);
2736 }
2737 #endif
2738 }
2739
2740 #ifndef CONFIG_USER_ONLY
2741 bool cpu_is_bsp(X86CPU *cpu)
2742 {
2743 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2744 }
2745
2746 /* TODO: remove me, when reset over QOM tree is implemented */
2747 static void x86_cpu_machine_reset_cb(void *opaque)
2748 {
2749 X86CPU *cpu = opaque;
2750 cpu_reset(CPU(cpu));
2751 }
2752 #endif
2753
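/* Advertise baseline machine-check state (MCG_CAP, MCG_CTL and the per-bank
 * control MSRs) when the model reports family >= 6 with both MCE and MCA.
 */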
2754 static void mce_init(X86CPU *cpu)
2755 {
2756 CPUX86State *cenv = &cpu->env;
2757 unsigned int bank;
2758
2759 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2760 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2761 (CPUID_MCE | CPUID_MCA)) {
2762 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2763 cenv->mcg_ctl = ~(uint64_t)0;
2764 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2765 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2766 }
2767 }
2768 }
2769
2770 #ifndef CONFIG_USER_ONLY
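/* Instantiate the per-CPU local APIC, preferring the in-kernel KVM or Xen
 * implementation when one of those accelerators is in use.
 */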
2771 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2772 {
2773 APICCommonState *apic;
2774 const char *apic_type = "apic";
2775
2776 if (kvm_apic_in_kernel()) {
2777 apic_type = "kvm-apic";
2778 } else if (xen_enabled()) {
2779 apic_type = "xen-apic";
2780 }
2781
2782 cpu->apic_state = DEVICE(object_new(apic_type));
2783
2784 object_property_add_child(OBJECT(cpu), "apic",
2785 OBJECT(cpu->apic_state), NULL);
2786 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2787 /* TODO: convert to link<> */
2788 apic = APIC_COMMON(cpu->apic_state);
2789 apic->cpu = cpu;
2790 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2791 }
2792
2793 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2794 {
2795 APICCommonState *apic;
2796 static bool apic_mmio_map_once;
2797
2798 if (cpu->apic_state == NULL) {
2799 return;
2800 }
2801 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2802 errp);
2803
2804 /* Map APIC MMIO area */
2805 apic = APIC_COMMON(cpu->apic_state);
2806 if (!apic_mmio_map_once) {
2807 memory_region_add_subregion_overlap(get_system_memory(),
2808 apic->apicbase &
2809 MSR_IA32_APICBASE_BASE,
2810 &apic->io_memory,
2811 0x1000);
2812 apic_mmio_map_once = true;
2813 }
2814 }
2815
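/* Once machine init is done, alias the global /machine/smram region into this
 * CPU's address space; the alias is created disabled here and is only meant
 * to become visible while the CPU is in SMM.
 */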
2816 static void x86_cpu_machine_done(Notifier *n, void *unused)
2817 {
2818 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2819 MemoryRegion *smram =
2820 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2821
2822 if (smram) {
2823 cpu->smram = g_new(MemoryRegion, 1);
2824 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2825 smram, 0, 1ull << 32);
2826 memory_region_set_enabled(cpu->smram, false);
2827 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2828 }
2829 }
2830 #else
2831 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2832 {
2833 }
2834 #endif
2835
2836
2837 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2838 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2839 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2840 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2841 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2842 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2843 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2844 {
2845 CPUState *cs = CPU(dev);
2846 X86CPU *cpu = X86_CPU(dev);
2847 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2848 CPUX86State *env = &cpu->env;
2849 Error *local_err = NULL;
2850 static bool ht_warned;
2851
2852 if (cpu->apic_id < 0) {
2853 error_setg(errp, "apic-id property was not initialized properly");
2854 return;
2855 }
2856
2857 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2858 env->cpuid_level = 7;
2859 }
2860
2861 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2862 * CPUID[1].EDX.
2863 */
2864 if (IS_AMD_CPU(env)) {
2865 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2866 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2867 & CPUID_EXT2_AMD_ALIASES);
2868 }
2869
2870
2871 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2872 error_setg(&local_err,
2873 kvm_enabled() ?
2874 "Host doesn't support requested features" :
2875 "TCG doesn't support requested features");
2876 goto out;
2877 }
2878
2879 #ifndef CONFIG_USER_ONLY
2880 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2881
2882 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2883 x86_cpu_apic_create(cpu, &local_err);
2884 if (local_err != NULL) {
2885 goto out;
2886 }
2887 }
2888 #endif
2889
2890 mce_init(cpu);
2891
2892 #ifndef CONFIG_USER_ONLY
2893 if (tcg_enabled()) {
2894 AddressSpace *newas = g_new(AddressSpace, 1);
2895
2896 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2897 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2898
2899 /* Outer container... */
2900 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2901 memory_region_set_enabled(cpu->cpu_as_root, true);
2902
2903 /* ... with two regions inside: normal system memory with low
2904 * priority, and...
2905 */
2906 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2907 get_system_memory(), 0, ~0ull);
2908 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2909 memory_region_set_enabled(cpu->cpu_as_mem, true);
2910 address_space_init(newas, cpu->cpu_as_root, "CPU");
2911 cs->num_ases = 1;
2912 cpu_address_space_init(cs, newas, 0);
2913
2914 /* ... SMRAM with higher priority, linked from /machine/smram. */
2915 cpu->machine_done.notify = x86_cpu_machine_done;
2916 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2917 }
2918 #endif
2919
2920 qemu_init_vcpu(cs);
2921
2922 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2923 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2924 * based on inputs (sockets, cores, threads), it is still better to give
2925 * users a warning.
2926 *
2927 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2928 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
2929 */
2930 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2931 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2932 " -smp options properly.");
2933 ht_warned = true;
2934 }
2935
2936 x86_cpu_apic_realize(cpu, &local_err);
2937 if (local_err != NULL) {
2938 goto out;
2939 }
2940 cpu_reset(cs);
2941
2942 xcc->parent_realize(dev, &local_err);
2943
2944 out:
2945 if (local_err != NULL) {
2946 error_propagate(errp, local_err);
2947 return;
2948 }
2949 }
2950
2951 typedef struct BitProperty {
2952 uint32_t *ptr;
2953 uint32_t mask;
2954 } BitProperty;
2955
2956 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
2957 void *opaque, Error **errp)
2958 {
2959 BitProperty *fp = opaque;
2960 bool value = (*fp->ptr & fp->mask) == fp->mask;
2961 visit_type_bool(v, name, &value, errp);
2962 }
2963
2964 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
2965 void *opaque, Error **errp)
2966 {
2967 DeviceState *dev = DEVICE(obj);
2968 BitProperty *fp = opaque;
2969 Error *local_err = NULL;
2970 bool value;
2971
2972 if (dev->realized) {
2973 qdev_prop_set_after_realize(dev, name, errp);
2974 return;
2975 }
2976
2977 visit_type_bool(v, name, &value, &local_err);
2978 if (local_err) {
2979 error_propagate(errp, local_err);
2980 return;
2981 }
2982
2983 if (value) {
2984 *fp->ptr |= fp->mask;
2985 } else {
2986 *fp->ptr &= ~fp->mask;
2987 }
2988 }
2989
2990 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2991 void *opaque)
2992 {
2993 BitProperty *prop = opaque;
2994 g_free(prop);
2995 }
2996
2997 /* Register a boolean property to get/set a single bit in a uint32_t field.
2998 *
2999 * The same property name can be registered multiple times to make it affect
3000 * multiple bits in the same FeatureWord. In that case, the getter will return
3001 * true only if all bits are set.
3002 */
3003 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3004 const char *prop_name,
3005 uint32_t *field,
3006 int bitnr)
3007 {
3008 BitProperty *fp;
3009 ObjectProperty *op;
3010 uint32_t mask = (1UL << bitnr);
3011
3012 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3013 if (op) {
3014 fp = op->opaque;
3015 assert(fp->ptr == field);
3016 fp->mask |= mask;
3017 } else {
3018 fp = g_new0(BitProperty, 1);
3019 fp->ptr = field;
3020 fp->mask = mask;
3021 object_property_add(OBJECT(cpu), prop_name, "bool",
3022 x86_cpu_get_bit_prop,
3023 x86_cpu_set_bit_prop,
3024 x86_cpu_release_bit_prop, fp, &error_abort);
3025 }
3026 }
3027
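/* Expose feature word w, bit bitnr as QOM bit properties; names containing
 * '|' register the first spelling as the property and the remaining
 * spellings as aliases.
 */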
3028 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3029 FeatureWord w,
3030 int bitnr)
3031 {
3032 Object *obj = OBJECT(cpu);
3033 int i;
3034 char **names;
3035 FeatureWordInfo *fi = &feature_word_info[w];
3036
3037 if (!fi->feat_names) {
3038 return;
3039 }
3040 if (!fi->feat_names[bitnr]) {
3041 return;
3042 }
3043
3044 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3045
3046 feat2prop(names[0]);
3047 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3048
3049 for (i = 1; names[i]; i++) {
3050 feat2prop(names[i]);
3051 object_property_add_alias(obj, names[i], obj, names[0],
3052 &error_abort);
3053 }
3054
3055 g_strfreev(names);
3056 }
3057
3058 static void x86_cpu_initfn(Object *obj)
3059 {
3060 CPUState *cs = CPU(obj);
3061 X86CPU *cpu = X86_CPU(obj);
3062 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3063 CPUX86State *env = &cpu->env;
3064 FeatureWord w;
3065 static int inited;
3066
3067 cs->env_ptr = env;
3068 cpu_exec_init(cs, &error_abort);
3069
3070 object_property_add(obj, "family", "int",
3071 x86_cpuid_version_get_family,
3072 x86_cpuid_version_set_family, NULL, NULL, NULL);
3073 object_property_add(obj, "model", "int",
3074 x86_cpuid_version_get_model,
3075 x86_cpuid_version_set_model, NULL, NULL, NULL);
3076 object_property_add(obj, "stepping", "int",
3077 x86_cpuid_version_get_stepping,
3078 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3079 object_property_add_str(obj, "vendor",
3080 x86_cpuid_get_vendor,
3081 x86_cpuid_set_vendor, NULL);
3082 object_property_add_str(obj, "model-id",
3083 x86_cpuid_get_model_id,
3084 x86_cpuid_set_model_id, NULL);
3085 object_property_add(obj, "tsc-frequency", "int",
3086 x86_cpuid_get_tsc_freq,
3087 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3088 object_property_add(obj, "apic-id", "int",
3089 x86_cpuid_get_apic_id,
3090 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3091 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3092 x86_cpu_get_feature_words,
3093 NULL, NULL, (void *)env->features, NULL);
3094 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3095 x86_cpu_get_feature_words,
3096 NULL, NULL, (void *)cpu->filtered_features, NULL);
3097
3098 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3099
3100 #ifndef CONFIG_USER_ONLY
3101 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3102 cpu->apic_id = -1;
3103 #endif
3104
3105 for (w = 0; w < FEATURE_WORDS; w++) {
3106 int bitnr;
3107
3108 for (bitnr = 0; bitnr < 32; bitnr++) {
3109 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3110 }
3111 }
3112
3113 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3114
3115 /* init various static tables used in TCG mode */
3116 if (tcg_enabled() && !inited) {
3117 inited = 1;
3118 tcg_x86_init();
3119 }
3120 }
3121
3122 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3123 {
3124 X86CPU *cpu = X86_CPU(cs);
3125
3126 return cpu->apic_id;
3127 }
3128
3129 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3130 {
3131 X86CPU *cpu = X86_CPU(cs);
3132
3133 return cpu->env.cr[0] & CR0_PG_MASK;
3134 }
3135
3136 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3137 {
3138 X86CPU *cpu = X86_CPU(cs);
3139
3140 cpu->env.eip = value;
3141 }
3142
3143 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3144 {
3145 X86CPU *cpu = X86_CPU(cs);
3146
3147 cpu->env.eip = tb->pc - tb->cs_base;
3148 }
3149
3150 static bool x86_cpu_has_work(CPUState *cs)
3151 {
3152 X86CPU *cpu = X86_CPU(cs);
3153 CPUX86State *env = &cpu->env;
3154
3155 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3156 CPU_INTERRUPT_POLL)) &&
3157 (env->eflags & IF_MASK)) ||
3158 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3159 CPU_INTERRUPT_INIT |
3160 CPU_INTERRUPT_SIPI |
3161 CPU_INTERRUPT_MCE)) ||
3162 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3163 !(env->hflags & HF_SMM_MASK));
3164 }
3165
3166 static Property x86_cpu_properties[] = {
3167 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3168 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3169 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3170 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3171 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3172 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3173 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3174 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3175 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3176 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3177 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3178 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3179 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3180 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3181 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3182 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3183 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3184 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3185 DEFINE_PROP_END_OF_LIST()
3186 };
3187
3188 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3189 {
3190 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3191 CPUClass *cc = CPU_CLASS(oc);
3192 DeviceClass *dc = DEVICE_CLASS(oc);
3193
3194 xcc->parent_realize = dc->realize;
3195 dc->realize = x86_cpu_realizefn;
3196 dc->props = x86_cpu_properties;
3197
3198 xcc->parent_reset = cc->reset;
3199 cc->reset = x86_cpu_reset;
3200 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3201
3202 cc->class_by_name = x86_cpu_class_by_name;
3203 cc->parse_features = x86_cpu_parse_featurestr;
3204 cc->has_work = x86_cpu_has_work;
3205 cc->do_interrupt = x86_cpu_do_interrupt;
3206 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3207 cc->dump_state = x86_cpu_dump_state;
3208 cc->set_pc = x86_cpu_set_pc;
3209 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3210 cc->gdb_read_register = x86_cpu_gdb_read_register;
3211 cc->gdb_write_register = x86_cpu_gdb_write_register;
3212 cc->get_arch_id = x86_cpu_get_arch_id;
3213 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3214 #ifdef CONFIG_USER_ONLY
3215 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3216 #else
3217 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3218 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3219 cc->write_elf64_note = x86_cpu_write_elf64_note;
3220 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3221 cc->write_elf32_note = x86_cpu_write_elf32_note;
3222 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3223 cc->vmsd = &vmstate_x86_cpu;
3224 #endif
3225 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3226 #ifndef CONFIG_USER_ONLY
3227 cc->debug_excp_handler = breakpoint_handler;
3228 #endif
3229 cc->cpu_exec_enter = x86_cpu_exec_enter;
3230 cc->cpu_exec_exit = x86_cpu_exec_exit;
3231
3232 /*
3233 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3234 * object in the global cpus list -> dangling pointer after the final object_unref().
3235 */
3236 dc->cannot_destroy_with_object_finalize_yet = true;
3237 }
3238
3239 static const TypeInfo x86_cpu_type_info = {
3240 .name = TYPE_X86_CPU,
3241 .parent = TYPE_CPU,
3242 .instance_size = sizeof(X86CPU),
3243 .instance_init = x86_cpu_initfn,
3244 .abstract = true,
3245 .class_size = sizeof(X86CPUClass),
3246 .class_init = x86_cpu_common_class_init,
3247 };
3248
3249 static void x86_cpu_register_types(void)
3250 {
3251 int i;
3252
3253 type_register_static(&x86_cpu_type_info);
3254 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3255 x86_register_cpudef_type(&builtin_x86_defs[i]);
3256 }
3257 #ifdef CONFIG_KVM
3258 type_register_static(&host_x86_cpu_type_info);
3259 #endif
3260 }
3261
3262 type_init(x86_cpu_register_types)