target-i386: Add PKU and OSPKE support
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
23
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
33
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
38
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
43
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
53 /* Cache topology CPUID constants: */
54
55 /* CPUID Leaf 2 Descriptors */
56
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60
61
62 /* CPUID Leaf 4 constants: */
63
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
68
69 #define CPUID_4_LEVEL(l) ((l) << 5)
70
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
73
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
78
79 #define ASSOC_FULL 0xFF
80
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
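/* For illustration, the mapping above gives:
 *   AMD_ENC_ASSOC(1)          -> 0x1 (direct mapped)
 *   AMD_ENC_ASSOC(16)         -> 0x8
 *   AMD_ENC_ASSOC(ASSOC_FULL) -> 0xF (fully associative)
 * and any associativity not listed in the table encodes as 0 (invalid).
 */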
94
95
96 /* Definitions of the hardcoded cache entries we expose: */
97
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
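/* Leaf 4 arithmetic: 64 B line * 8 ways * 64 sets * 1 partition = 32 KiB,
 * versus the 64 KB, 2-way geometry reported through the AMD values above. */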
109
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
121
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /* FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /* FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
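/* Worked example of the inconsistencies noted above: leaf 4 describes
 * 64 B * 16 ways * 4096 sets * 1 partition = 4 MiB, while the leaf 2
 * descriptor claims 2 MB and the AMD leaf 0x80000006 size is 512 KB. */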
133
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
139
140 /* TLB definitions: */
141
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
146
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
151
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
156
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
161
162
163
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
166 {
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
172 }
173 dst[CPUID_VENDOR_SZ] = '\0';
174 }
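/* Example: for the "GenuineIntel" vendor string, CPUID.0 returns
 * EBX=0x756e6547 ("Genu"), EDX=0x49656e69 ("ineI"), ECX=0x6c65746e ("ntel");
 * each register holds four ASCII characters in little-endian byte order,
 * which is exactly the layout the loop above unpacks.
 */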
175
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
179 */
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
189 };
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
199 };
200 /* Feature names that are already defined in feature_name[] but are also set in
201 * CPUID[8000_0001].EDX on AMD CPUs don't have their names duplicated in
202 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203 * if and only if the CPU vendor is AMD.
204 */
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
214 };
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
224 };
225
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
235 };
236
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
246 };
247
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257 };
258
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
263 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
264 };
265
266 static const char *cpuid_7_0_ecx_feature_name[] = {
267 NULL, NULL, NULL, "pku",
268 "ospke", NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
275 };
276
277 static const char *cpuid_apm_edx_feature_name[] = {
278 NULL, NULL, NULL, NULL,
279 NULL, NULL, NULL, NULL,
280 "invtsc", NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286 };
287
288 static const char *cpuid_xsave_feature_name[] = {
289 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
297 };
298
299 static const char *cpuid_6_feature_name[] = {
300 NULL, NULL, "arat", NULL,
301 NULL, NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
305 NULL, NULL, NULL, NULL,
306 NULL, NULL, NULL, NULL,
307 NULL, NULL, NULL, NULL,
308 };
309
310 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
311 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
312 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
313 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
314 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
315 CPUID_PSE36 | CPUID_FXSR)
316 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
317 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
318 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
319 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
320 CPUID_PAE | CPUID_SEP | CPUID_APIC)
321
322 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
323 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
324 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
325 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
326 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
327 /* partly implemented:
328 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
329 /* missing:
330 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
331 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
332 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
333 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
334 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
335 /* missing:
336 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
337 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
338 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
339 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
340 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
341 CPUID_EXT_RDRAND */
342
343 #ifdef TARGET_X86_64
344 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
345 #else
346 #define TCG_EXT2_X86_64_FEATURES 0
347 #endif
348
349 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
350 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
351 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
352 TCG_EXT2_X86_64_FEATURES)
353 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
354 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
355 #define TCG_EXT4_FEATURES 0
356 #define TCG_SVM_FEATURES 0
357 #define TCG_KVM_FEATURES 0
358 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
359 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
360 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
361 CPUID_7_0_EBX_CLWB)
362 /* missing:
363 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
364 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
365 CPUID_7_0_EBX_RDSEED */
366 #define TCG_7_0_ECX_FEATURES 0
367 #define TCG_APM_FEATURES 0
368 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
369
370
371 typedef struct FeatureWordInfo {
372 const char **feat_names;
373 uint32_t cpuid_eax; /* Input EAX for CPUID */
374 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
375 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
376 int cpuid_reg; /* output register (R_* constant) */
377 uint32_t tcg_features; /* Feature flags supported by TCG */
378 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
379 } FeatureWordInfo;
380
381 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
382 [FEAT_1_EDX] = {
383 .feat_names = feature_name,
384 .cpuid_eax = 1, .cpuid_reg = R_EDX,
385 .tcg_features = TCG_FEATURES,
386 },
387 [FEAT_1_ECX] = {
388 .feat_names = ext_feature_name,
389 .cpuid_eax = 1, .cpuid_reg = R_ECX,
390 .tcg_features = TCG_EXT_FEATURES,
391 },
392 [FEAT_8000_0001_EDX] = {
393 .feat_names = ext2_feature_name,
394 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
395 .tcg_features = TCG_EXT2_FEATURES,
396 },
397 [FEAT_8000_0001_ECX] = {
398 .feat_names = ext3_feature_name,
399 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
400 .tcg_features = TCG_EXT3_FEATURES,
401 },
402 [FEAT_C000_0001_EDX] = {
403 .feat_names = ext4_feature_name,
404 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
405 .tcg_features = TCG_EXT4_FEATURES,
406 },
407 [FEAT_KVM] = {
408 .feat_names = kvm_feature_name,
409 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
410 .tcg_features = TCG_KVM_FEATURES,
411 },
412 [FEAT_SVM] = {
413 .feat_names = svm_feature_name,
414 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
415 .tcg_features = TCG_SVM_FEATURES,
416 },
417 [FEAT_7_0_EBX] = {
418 .feat_names = cpuid_7_0_ebx_feature_name,
419 .cpuid_eax = 7,
420 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
421 .cpuid_reg = R_EBX,
422 .tcg_features = TCG_7_0_EBX_FEATURES,
423 },
424 [FEAT_7_0_ECX] = {
425 .feat_names = cpuid_7_0_ecx_feature_name,
426 .cpuid_eax = 7,
427 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
428 .cpuid_reg = R_ECX,
429 .tcg_features = TCG_7_0_ECX_FEATURES,
430 },
431 [FEAT_8000_0007_EDX] = {
432 .feat_names = cpuid_apm_edx_feature_name,
433 .cpuid_eax = 0x80000007,
434 .cpuid_reg = R_EDX,
435 .tcg_features = TCG_APM_FEATURES,
436 .unmigratable_flags = CPUID_APM_INVTSC,
437 },
438 [FEAT_XSAVE] = {
439 .feat_names = cpuid_xsave_feature_name,
440 .cpuid_eax = 0xd,
441 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
442 .cpuid_reg = R_EAX,
443 .tcg_features = 0,
444 },
445 [FEAT_6_EAX] = {
446 .feat_names = cpuid_6_feature_name,
447 .cpuid_eax = 6, .cpuid_reg = R_EAX,
448 .tcg_features = TCG_6_EAX_FEATURES,
449 },
450 };
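/* For instance, the new FEAT_7_0_ECX word above maps to CPUID.(EAX=07H,ECX=0):ECX,
 * the sub-leaf that reports the "pku" (bit 3) and "ospke" (bit 4) flags added
 * by this change. */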
451
452 typedef struct X86RegisterInfo32 {
453 /* Name of register */
454 const char *name;
455 /* QAPI enum value register */
456 X86CPURegister32 qapi_enum;
457 } X86RegisterInfo32;
458
459 #define REGISTER(reg) \
460 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
461 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
462 REGISTER(EAX),
463 REGISTER(ECX),
464 REGISTER(EDX),
465 REGISTER(EBX),
466 REGISTER(ESP),
467 REGISTER(EBP),
468 REGISTER(ESI),
469 REGISTER(EDI),
470 };
471 #undef REGISTER
472
473 typedef struct ExtSaveArea {
474 uint32_t feature, bits;
475 uint32_t offset, size;
476 } ExtSaveArea;
477
478 static const ExtSaveArea ext_save_areas[] = {
479 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
480 .offset = 0x240, .size = 0x100 },
481 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
482 .offset = 0x3c0, .size = 0x40 },
483 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
484 .offset = 0x400, .size = 0x40 },
485 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
486 .offset = 0x440, .size = 0x40 },
487 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
488 .offset = 0x480, .size = 0x200 },
489 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
490 .offset = 0x680, .size = 0x400 },
491 [9] = { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
492 .offset = 0xA80, .size = 0x8 },
493 };
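/* Component 9 above (offset 0xA80, size 8) is the XSAVE state area for the
 * protection-key rights register (PKRU), which is why it is gated on the new
 * CPUID_7_0_ECX_PKU bit. */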
494
495 const char *get_register_name_32(unsigned int reg)
496 {
497 if (reg >= CPU_NB_REGS32) {
498 return NULL;
499 }
500 return x86_reg_info_32[reg].name;
501 }
502
503 /*
504 * Returns the set of feature flags that are supported and migratable by
505 * QEMU, for a given FeatureWord.
506 */
507 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
508 {
509 FeatureWordInfo *wi = &feature_word_info[w];
510 uint32_t r = 0;
511 int i;
512
513 for (i = 0; i < 32; i++) {
514 uint32_t f = 1U << i;
515 /* If the feature name is unknown, it is not supported by QEMU yet */
516 if (!wi->feat_names[i]) {
517 continue;
518 }
519 /* Skip features known to QEMU, but explicitly marked as unmigratable */
520 if (wi->unmigratable_flags & f) {
521 continue;
522 }
523 r |= f;
524 }
525 return r;
526 }
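/* E.g. for FEAT_8000_0007_EDX this returns the named bits minus "invtsc",
 * because that word lists CPUID_APM_INVTSC in unmigratable_flags. */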
527
528 void host_cpuid(uint32_t function, uint32_t count,
529 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
530 {
531 uint32_t vec[4];
532
533 #ifdef __x86_64__
534 asm volatile("cpuid"
535 : "=a"(vec[0]), "=b"(vec[1]),
536 "=c"(vec[2]), "=d"(vec[3])
537 : "0"(function), "c"(count) : "cc");
538 #elif defined(__i386__)
539 asm volatile("pusha \n\t"
540 "cpuid \n\t"
541 "mov %%eax, 0(%2) \n\t"
542 "mov %%ebx, 4(%2) \n\t"
543 "mov %%ecx, 8(%2) \n\t"
544 "mov %%edx, 12(%2) \n\t"
545 "popa"
546 : : "a"(function), "c"(count), "S"(vec)
547 : "memory", "cc");
548 #else
549 abort();
550 #endif
551
552 if (eax)
553 *eax = vec[0];
554 if (ebx)
555 *ebx = vec[1];
556 if (ecx)
557 *ecx = vec[2];
558 if (edx)
559 *edx = vec[3];
560 }
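/* Typical use (mirroring host_x86_cpu_class_init() below) is to read the
 * host vendor string from leaf 0:
 *
 *   uint32_t eax, ebx, ecx, edx;
 *   char vendor[CPUID_VENDOR_SZ + 1];
 *   host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
 *   x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
 *
 * Output pointers that are not needed may simply be passed as NULL.
 */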
561
562 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
563
564 /* general substring compare of *[s1..e1) and *[s2..e2). sx is the start of
565 * a substring; ex, if not NULL, points to the first char after the substring,
566 * otherwise the string is assumed to be terminated by a nul.
567 * Return lexical ordering of *s1:*s2.
568 */
569 static int sstrcmp(const char *s1, const char *e1,
570 const char *s2, const char *e2)
571 {
572 for (;;) {
573 if (!*s1 || !*s2 || *s1 != *s2)
574 return (*s1 - *s2);
575 ++s1, ++s2;
576 if (s1 == e1 && s2 == e2)
577 return (0);
578 else if (s1 == e1)
579 return (*s2);
580 else if (s2 == e2)
581 return (*s1);
582 }
583 }
584
585 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
586 * '|'-delimited (possibly empty) strings, in which case the search for a match
587 * proceeds left to right through the alternatives. Return 0 for success,
588 * non-zero otherwise.
589 */
590 static int altcmp(const char *s, const char *e, const char *altstr)
591 {
592 const char *p, *q;
593
594 for (q = p = altstr; ; ) {
595 while (*p && *p != '|')
596 ++p;
597 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
598 return (0);
599 if (!*p)
600 return (1);
601 else
602 q = ++p;
603 }
604 }
605
606 /* search featureset for the flag *[s..e); if found, set the corresponding bit
607 * in *pval and return true, otherwise return false
608 */
609 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
610 const char **featureset)
611 {
612 uint32_t mask;
613 const char **ppc;
614 bool found = false;
615
616 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
617 if (*ppc && !altcmp(s, e, *ppc)) {
618 *pval |= mask;
619 found = true;
620 }
621 }
622 return found;
623 }
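/* For example, the ext_feature_name[] entry "sse4.1|sse4_1" is an alternation:
 * altcmp() accepts either spelling, so lookup_feature() sets the same bit
 * (CPUID.1:ECX bit 19) whichever name is supplied.
 */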
624
625 static void add_flagname_to_bitmaps(const char *flagname,
626 FeatureWordArray words,
627 Error **errp)
628 {
629 FeatureWord w;
630 for (w = 0; w < FEATURE_WORDS; w++) {
631 FeatureWordInfo *wi = &feature_word_info[w];
632 if (wi->feat_names &&
633 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
634 break;
635 }
636 }
637 if (w == FEATURE_WORDS) {
638 error_setg(errp, "CPU feature %s not found", flagname);
639 }
640 }
641
642 /* CPU class name definitions: */
643
644 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
645 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
646
647 /* Return type name for a given CPU model name
648 * Caller is responsible for freeing the returned string.
649 */
650 static char *x86_cpu_type_name(const char *model_name)
651 {
652 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
653 }
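/* TYPE_X86_CPU comes from cpu-qom.h; on a 64-bit target it is "x86_64-cpu",
 * so e.g. x86_cpu_type_name("qemu64") returns "qemu64-x86_64-cpu"
 * (or "qemu64-i386-cpu" for the 32-bit target). */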
654
655 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
656 {
657 ObjectClass *oc;
658 char *typename;
659
660 if (cpu_model == NULL) {
661 return NULL;
662 }
663
664 typename = x86_cpu_type_name(cpu_model);
665 oc = object_class_by_name(typename);
666 g_free(typename);
667 return oc;
668 }
669
670 struct X86CPUDefinition {
671 const char *name;
672 uint32_t level;
673 uint32_t xlevel;
674 uint32_t xlevel2;
675 /* vendor is a zero-terminated, 12-character ASCII string */
676 char vendor[CPUID_VENDOR_SZ + 1];
677 int family;
678 int model;
679 int stepping;
680 FeatureWordArray features;
681 char model_id[48];
682 };
683
684 static X86CPUDefinition builtin_x86_defs[] = {
685 {
686 .name = "qemu64",
687 .level = 0xd,
688 .vendor = CPUID_VENDOR_AMD,
689 .family = 6,
690 .model = 6,
691 .stepping = 3,
692 .features[FEAT_1_EDX] =
693 PPRO_FEATURES |
694 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
695 CPUID_PSE36,
696 .features[FEAT_1_ECX] =
697 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
698 .features[FEAT_8000_0001_EDX] =
699 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
700 .features[FEAT_8000_0001_ECX] =
701 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
702 .xlevel = 0x8000000A,
703 },
704 {
705 .name = "phenom",
706 .level = 5,
707 .vendor = CPUID_VENDOR_AMD,
708 .family = 16,
709 .model = 2,
710 .stepping = 3,
711 /* Missing: CPUID_HT */
712 .features[FEAT_1_EDX] =
713 PPRO_FEATURES |
714 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
715 CPUID_PSE36 | CPUID_VME,
716 .features[FEAT_1_ECX] =
717 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
718 CPUID_EXT_POPCNT,
719 .features[FEAT_8000_0001_EDX] =
720 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
721 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
722 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
723 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
724 CPUID_EXT3_CR8LEG,
725 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
726 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
727 .features[FEAT_8000_0001_ECX] =
728 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
729 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
730 /* Missing: CPUID_SVM_LBRV */
731 .features[FEAT_SVM] =
732 CPUID_SVM_NPT,
733 .xlevel = 0x8000001A,
734 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
735 },
736 {
737 .name = "core2duo",
738 .level = 10,
739 .vendor = CPUID_VENDOR_INTEL,
740 .family = 6,
741 .model = 15,
742 .stepping = 11,
743 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
744 .features[FEAT_1_EDX] =
745 PPRO_FEATURES |
746 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
747 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
748 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
749 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
750 .features[FEAT_1_ECX] =
751 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
752 CPUID_EXT_CX16,
753 .features[FEAT_8000_0001_EDX] =
754 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
755 .features[FEAT_8000_0001_ECX] =
756 CPUID_EXT3_LAHF_LM,
757 .xlevel = 0x80000008,
758 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
759 },
760 {
761 .name = "kvm64",
762 .level = 0xd,
763 .vendor = CPUID_VENDOR_INTEL,
764 .family = 15,
765 .model = 6,
766 .stepping = 1,
767 /* Missing: CPUID_HT */
768 .features[FEAT_1_EDX] =
769 PPRO_FEATURES | CPUID_VME |
770 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
771 CPUID_PSE36,
772 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
773 .features[FEAT_1_ECX] =
774 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
775 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
776 .features[FEAT_8000_0001_EDX] =
777 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
778 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
779 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
780 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
781 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
782 .features[FEAT_8000_0001_ECX] =
783 0,
784 .xlevel = 0x80000008,
785 .model_id = "Common KVM processor"
786 },
787 {
788 .name = "qemu32",
789 .level = 4,
790 .vendor = CPUID_VENDOR_INTEL,
791 .family = 6,
792 .model = 6,
793 .stepping = 3,
794 .features[FEAT_1_EDX] =
795 PPRO_FEATURES,
796 .features[FEAT_1_ECX] =
797 CPUID_EXT_SSE3,
798 .xlevel = 0x80000004,
799 },
800 {
801 .name = "kvm32",
802 .level = 5,
803 .vendor = CPUID_VENDOR_INTEL,
804 .family = 15,
805 .model = 6,
806 .stepping = 1,
807 .features[FEAT_1_EDX] =
808 PPRO_FEATURES | CPUID_VME |
809 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
810 .features[FEAT_1_ECX] =
811 CPUID_EXT_SSE3,
812 .features[FEAT_8000_0001_ECX] =
813 0,
814 .xlevel = 0x80000008,
815 .model_id = "Common 32-bit KVM processor"
816 },
817 {
818 .name = "coreduo",
819 .level = 10,
820 .vendor = CPUID_VENDOR_INTEL,
821 .family = 6,
822 .model = 14,
823 .stepping = 8,
824 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
825 .features[FEAT_1_EDX] =
826 PPRO_FEATURES | CPUID_VME |
827 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
828 CPUID_SS,
829 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
830 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
831 .features[FEAT_1_ECX] =
832 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
833 .features[FEAT_8000_0001_EDX] =
834 CPUID_EXT2_NX,
835 .xlevel = 0x80000008,
836 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
837 },
838 {
839 .name = "486",
840 .level = 1,
841 .vendor = CPUID_VENDOR_INTEL,
842 .family = 4,
843 .model = 8,
844 .stepping = 0,
845 .features[FEAT_1_EDX] =
846 I486_FEATURES,
847 .xlevel = 0,
848 },
849 {
850 .name = "pentium",
851 .level = 1,
852 .vendor = CPUID_VENDOR_INTEL,
853 .family = 5,
854 .model = 4,
855 .stepping = 3,
856 .features[FEAT_1_EDX] =
857 PENTIUM_FEATURES,
858 .xlevel = 0,
859 },
860 {
861 .name = "pentium2",
862 .level = 2,
863 .vendor = CPUID_VENDOR_INTEL,
864 .family = 6,
865 .model = 5,
866 .stepping = 2,
867 .features[FEAT_1_EDX] =
868 PENTIUM2_FEATURES,
869 .xlevel = 0,
870 },
871 {
872 .name = "pentium3",
873 .level = 3,
874 .vendor = CPUID_VENDOR_INTEL,
875 .family = 6,
876 .model = 7,
877 .stepping = 3,
878 .features[FEAT_1_EDX] =
879 PENTIUM3_FEATURES,
880 .xlevel = 0,
881 },
882 {
883 .name = "athlon",
884 .level = 2,
885 .vendor = CPUID_VENDOR_AMD,
886 .family = 6,
887 .model = 2,
888 .stepping = 3,
889 .features[FEAT_1_EDX] =
890 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
891 CPUID_MCA,
892 .features[FEAT_8000_0001_EDX] =
893 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
894 .xlevel = 0x80000008,
895 },
896 {
897 .name = "n270",
898 .level = 10,
899 .vendor = CPUID_VENDOR_INTEL,
900 .family = 6,
901 .model = 28,
902 .stepping = 2,
903 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
904 .features[FEAT_1_EDX] =
905 PPRO_FEATURES |
906 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
907 CPUID_ACPI | CPUID_SS,
908 /* Some CPUs have no CPUID_SEP */
909 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
910 * CPUID_EXT_XTPR */
911 .features[FEAT_1_ECX] =
912 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
913 CPUID_EXT_MOVBE,
914 .features[FEAT_8000_0001_EDX] =
915 CPUID_EXT2_NX,
916 .features[FEAT_8000_0001_ECX] =
917 CPUID_EXT3_LAHF_LM,
918 .xlevel = 0x80000008,
919 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
920 },
921 {
922 .name = "Conroe",
923 .level = 10,
924 .vendor = CPUID_VENDOR_INTEL,
925 .family = 6,
926 .model = 15,
927 .stepping = 3,
928 .features[FEAT_1_EDX] =
929 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
930 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
931 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
932 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
933 CPUID_DE | CPUID_FP87,
934 .features[FEAT_1_ECX] =
935 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
936 .features[FEAT_8000_0001_EDX] =
937 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
938 .features[FEAT_8000_0001_ECX] =
939 CPUID_EXT3_LAHF_LM,
940 .xlevel = 0x80000008,
941 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
942 },
943 {
944 .name = "Penryn",
945 .level = 10,
946 .vendor = CPUID_VENDOR_INTEL,
947 .family = 6,
948 .model = 23,
949 .stepping = 3,
950 .features[FEAT_1_EDX] =
951 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
952 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
953 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
954 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
955 CPUID_DE | CPUID_FP87,
956 .features[FEAT_1_ECX] =
957 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
958 CPUID_EXT_SSE3,
959 .features[FEAT_8000_0001_EDX] =
960 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
961 .features[FEAT_8000_0001_ECX] =
962 CPUID_EXT3_LAHF_LM,
963 .xlevel = 0x80000008,
964 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
965 },
966 {
967 .name = "Nehalem",
968 .level = 11,
969 .vendor = CPUID_VENDOR_INTEL,
970 .family = 6,
971 .model = 26,
972 .stepping = 3,
973 .features[FEAT_1_EDX] =
974 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
975 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
976 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
977 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
978 CPUID_DE | CPUID_FP87,
979 .features[FEAT_1_ECX] =
980 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
981 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
982 .features[FEAT_8000_0001_EDX] =
983 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
984 .features[FEAT_8000_0001_ECX] =
985 CPUID_EXT3_LAHF_LM,
986 .xlevel = 0x80000008,
987 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
988 },
989 {
990 .name = "Westmere",
991 .level = 11,
992 .vendor = CPUID_VENDOR_INTEL,
993 .family = 6,
994 .model = 44,
995 .stepping = 1,
996 .features[FEAT_1_EDX] =
997 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
998 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
999 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1000 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1001 CPUID_DE | CPUID_FP87,
1002 .features[FEAT_1_ECX] =
1003 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1004 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1005 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1006 .features[FEAT_8000_0001_EDX] =
1007 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1008 .features[FEAT_8000_0001_ECX] =
1009 CPUID_EXT3_LAHF_LM,
1010 .features[FEAT_6_EAX] =
1011 CPUID_6_EAX_ARAT,
1012 .xlevel = 0x80000008,
1013 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1014 },
1015 {
1016 .name = "SandyBridge",
1017 .level = 0xd,
1018 .vendor = CPUID_VENDOR_INTEL,
1019 .family = 6,
1020 .model = 42,
1021 .stepping = 1,
1022 .features[FEAT_1_EDX] =
1023 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1024 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1025 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1026 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1027 CPUID_DE | CPUID_FP87,
1028 .features[FEAT_1_ECX] =
1029 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1030 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1031 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1032 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1033 CPUID_EXT_SSE3,
1034 .features[FEAT_8000_0001_EDX] =
1035 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1036 CPUID_EXT2_SYSCALL,
1037 .features[FEAT_8000_0001_ECX] =
1038 CPUID_EXT3_LAHF_LM,
1039 .features[FEAT_XSAVE] =
1040 CPUID_XSAVE_XSAVEOPT,
1041 .features[FEAT_6_EAX] =
1042 CPUID_6_EAX_ARAT,
1043 .xlevel = 0x80000008,
1044 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1045 },
1046 {
1047 .name = "IvyBridge",
1048 .level = 0xd,
1049 .vendor = CPUID_VENDOR_INTEL,
1050 .family = 6,
1051 .model = 58,
1052 .stepping = 9,
1053 .features[FEAT_1_EDX] =
1054 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1055 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1056 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1057 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1058 CPUID_DE | CPUID_FP87,
1059 .features[FEAT_1_ECX] =
1060 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1061 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1062 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1063 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1064 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1065 .features[FEAT_7_0_EBX] =
1066 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1067 CPUID_7_0_EBX_ERMS,
1068 .features[FEAT_8000_0001_EDX] =
1069 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1070 CPUID_EXT2_SYSCALL,
1071 .features[FEAT_8000_0001_ECX] =
1072 CPUID_EXT3_LAHF_LM,
1073 .features[FEAT_XSAVE] =
1074 CPUID_XSAVE_XSAVEOPT,
1075 .features[FEAT_6_EAX] =
1076 CPUID_6_EAX_ARAT,
1077 .xlevel = 0x80000008,
1078 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1079 },
1080 {
1081 .name = "Haswell-noTSX",
1082 .level = 0xd,
1083 .vendor = CPUID_VENDOR_INTEL,
1084 .family = 6,
1085 .model = 60,
1086 .stepping = 1,
1087 .features[FEAT_1_EDX] =
1088 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1089 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1090 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1091 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1092 CPUID_DE | CPUID_FP87,
1093 .features[FEAT_1_ECX] =
1094 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1095 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1096 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1097 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1098 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1099 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1100 .features[FEAT_8000_0001_EDX] =
1101 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1102 CPUID_EXT2_SYSCALL,
1103 .features[FEAT_8000_0001_ECX] =
1104 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1105 .features[FEAT_7_0_EBX] =
1106 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1107 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1108 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1109 .features[FEAT_XSAVE] =
1110 CPUID_XSAVE_XSAVEOPT,
1111 .features[FEAT_6_EAX] =
1112 CPUID_6_EAX_ARAT,
1113 .xlevel = 0x80000008,
1114 .model_id = "Intel Core Processor (Haswell, no TSX)",
1115 }, {
1116 .name = "Haswell",
1117 .level = 0xd,
1118 .vendor = CPUID_VENDOR_INTEL,
1119 .family = 6,
1120 .model = 60,
1121 .stepping = 1,
1122 .features[FEAT_1_EDX] =
1123 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1124 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1125 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1126 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1127 CPUID_DE | CPUID_FP87,
1128 .features[FEAT_1_ECX] =
1129 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1130 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1131 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1132 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1133 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1134 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1135 .features[FEAT_8000_0001_EDX] =
1136 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1137 CPUID_EXT2_SYSCALL,
1138 .features[FEAT_8000_0001_ECX] =
1139 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1140 .features[FEAT_7_0_EBX] =
1141 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1142 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1143 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1144 CPUID_7_0_EBX_RTM,
1145 .features[FEAT_XSAVE] =
1146 CPUID_XSAVE_XSAVEOPT,
1147 .features[FEAT_6_EAX] =
1148 CPUID_6_EAX_ARAT,
1149 .xlevel = 0x80000008,
1150 .model_id = "Intel Core Processor (Haswell)",
1151 },
1152 {
1153 .name = "Broadwell-noTSX",
1154 .level = 0xd,
1155 .vendor = CPUID_VENDOR_INTEL,
1156 .family = 6,
1157 .model = 61,
1158 .stepping = 2,
1159 .features[FEAT_1_EDX] =
1160 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1161 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1162 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1163 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1164 CPUID_DE | CPUID_FP87,
1165 .features[FEAT_1_ECX] =
1166 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1167 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1168 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1169 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1170 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1171 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1172 .features[FEAT_8000_0001_EDX] =
1173 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1174 CPUID_EXT2_SYSCALL,
1175 .features[FEAT_8000_0001_ECX] =
1176 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1177 .features[FEAT_7_0_EBX] =
1178 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1179 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1180 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1181 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1182 CPUID_7_0_EBX_SMAP,
1183 .features[FEAT_XSAVE] =
1184 CPUID_XSAVE_XSAVEOPT,
1185 .features[FEAT_6_EAX] =
1186 CPUID_6_EAX_ARAT,
1187 .xlevel = 0x80000008,
1188 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1189 },
1190 {
1191 .name = "Broadwell",
1192 .level = 0xd,
1193 .vendor = CPUID_VENDOR_INTEL,
1194 .family = 6,
1195 .model = 61,
1196 .stepping = 2,
1197 .features[FEAT_1_EDX] =
1198 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1199 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1200 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1201 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1202 CPUID_DE | CPUID_FP87,
1203 .features[FEAT_1_ECX] =
1204 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1205 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1206 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1207 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1208 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1209 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1210 .features[FEAT_8000_0001_EDX] =
1211 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1212 CPUID_EXT2_SYSCALL,
1213 .features[FEAT_8000_0001_ECX] =
1214 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1215 .features[FEAT_7_0_EBX] =
1216 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1217 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1218 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1219 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1220 CPUID_7_0_EBX_SMAP,
1221 .features[FEAT_XSAVE] =
1222 CPUID_XSAVE_XSAVEOPT,
1223 .features[FEAT_6_EAX] =
1224 CPUID_6_EAX_ARAT,
1225 .xlevel = 0x80000008,
1226 .model_id = "Intel Core Processor (Broadwell)",
1227 },
1228 {
1229 .name = "Opteron_G1",
1230 .level = 5,
1231 .vendor = CPUID_VENDOR_AMD,
1232 .family = 15,
1233 .model = 6,
1234 .stepping = 1,
1235 .features[FEAT_1_EDX] =
1236 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1237 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1238 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1239 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1240 CPUID_DE | CPUID_FP87,
1241 .features[FEAT_1_ECX] =
1242 CPUID_EXT_SSE3,
1243 .features[FEAT_8000_0001_EDX] =
1244 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1245 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1246 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1247 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1248 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1249 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1250 .xlevel = 0x80000008,
1251 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1252 },
1253 {
1254 .name = "Opteron_G2",
1255 .level = 5,
1256 .vendor = CPUID_VENDOR_AMD,
1257 .family = 15,
1258 .model = 6,
1259 .stepping = 1,
1260 .features[FEAT_1_EDX] =
1261 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1262 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1263 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1264 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1265 CPUID_DE | CPUID_FP87,
1266 .features[FEAT_1_ECX] =
1267 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1268 /* Missing: CPUID_EXT2_RDTSCP */
1269 .features[FEAT_8000_0001_EDX] =
1270 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1271 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1272 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1273 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1274 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1275 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1276 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1277 .features[FEAT_8000_0001_ECX] =
1278 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1279 .xlevel = 0x80000008,
1280 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1281 },
1282 {
1283 .name = "Opteron_G3",
1284 .level = 5,
1285 .vendor = CPUID_VENDOR_AMD,
1286 .family = 15,
1287 .model = 6,
1288 .stepping = 1,
1289 .features[FEAT_1_EDX] =
1290 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1291 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1292 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1293 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1294 CPUID_DE | CPUID_FP87,
1295 .features[FEAT_1_ECX] =
1296 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1297 CPUID_EXT_SSE3,
1298 /* Missing: CPUID_EXT2_RDTSCP */
1299 .features[FEAT_8000_0001_EDX] =
1300 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1301 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1302 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1303 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1304 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1305 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1306 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1307 .features[FEAT_8000_0001_ECX] =
1308 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1309 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1310 .xlevel = 0x80000008,
1311 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1312 },
1313 {
1314 .name = "Opteron_G4",
1315 .level = 0xd,
1316 .vendor = CPUID_VENDOR_AMD,
1317 .family = 21,
1318 .model = 1,
1319 .stepping = 2,
1320 .features[FEAT_1_EDX] =
1321 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1322 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1323 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1324 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1325 CPUID_DE | CPUID_FP87,
1326 .features[FEAT_1_ECX] =
1327 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1328 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1329 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1330 CPUID_EXT_SSE3,
1331 /* Missing: CPUID_EXT2_RDTSCP */
1332 .features[FEAT_8000_0001_EDX] =
1333 CPUID_EXT2_LM |
1334 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1335 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1336 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1337 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1338 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1339 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1340 .features[FEAT_8000_0001_ECX] =
1341 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1342 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1343 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1344 CPUID_EXT3_LAHF_LM,
1345 /* no xsaveopt! */
1346 .xlevel = 0x8000001A,
1347 .model_id = "AMD Opteron 62xx class CPU",
1348 },
1349 {
1350 .name = "Opteron_G5",
1351 .level = 0xd,
1352 .vendor = CPUID_VENDOR_AMD,
1353 .family = 21,
1354 .model = 2,
1355 .stepping = 0,
1356 .features[FEAT_1_EDX] =
1357 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1358 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1359 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1360 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1361 CPUID_DE | CPUID_FP87,
1362 .features[FEAT_1_ECX] =
1363 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1364 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1365 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1366 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1367 /* Missing: CPUID_EXT2_RDTSCP */
1368 .features[FEAT_8000_0001_EDX] =
1369 CPUID_EXT2_LM |
1370 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1371 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1372 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1373 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1374 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1375 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1376 .features[FEAT_8000_0001_ECX] =
1377 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1378 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1379 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1380 CPUID_EXT3_LAHF_LM,
1381 /* no xsaveopt! */
1382 .xlevel = 0x8000001A,
1383 .model_id = "AMD Opteron 63xx class CPU",
1384 },
1385 };
1386
1387 typedef struct PropValue {
1388 const char *prop, *value;
1389 } PropValue;
1390
1391 /* KVM-specific features that are automatically added/removed
1392 * from all CPU models when KVM is enabled.
1393 */
1394 static PropValue kvm_default_props[] = {
1395 { "kvmclock", "on" },
1396 { "kvm-nopiodelay", "on" },
1397 { "kvm-asyncpf", "on" },
1398 { "kvm-steal-time", "on" },
1399 { "kvm-pv-eoi", "on" },
1400 { "kvmclock-stable-bit", "on" },
1401 { "x2apic", "on" },
1402 { "acpi", "off" },
1403 { "monitor", "off" },
1404 { "svm", "off" },
1405 { NULL, NULL },
1406 };
1407
1408 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1409 {
1410 PropValue *pv;
1411 for (pv = kvm_default_props; pv->prop; pv++) {
1412 if (!strcmp(pv->prop, prop)) {
1413 pv->value = value;
1414 break;
1415 }
1416 }
1417
1418 /* It is valid to call this function only for properties that
1419 * are already present in the kvm_default_props table.
1420 */
1421 assert(pv->prop);
1422 }
1423
1424 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1425 bool migratable_only);
1426
1427 #ifdef CONFIG_KVM
1428
1429 static int cpu_x86_fill_model_id(char *str)
1430 {
1431 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1432 int i;
1433
1434 for (i = 0; i < 3; i++) {
1435 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1436 memcpy(str + i * 16 + 0, &eax, 4);
1437 memcpy(str + i * 16 + 4, &ebx, 4);
1438 memcpy(str + i * 16 + 8, &ecx, 4);
1439 memcpy(str + i * 16 + 12, &edx, 4);
1440 }
1441 return 0;
1442 }
1443
1444 static X86CPUDefinition host_cpudef;
1445
1446 static Property host_x86_cpu_properties[] = {
1447 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1448 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1449 DEFINE_PROP_END_OF_LIST()
1450 };
1451
1452 /* class_init for the "host" CPU model
1453 *
1454 * This function may be called before KVM is initialized.
1455 */
1456 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1457 {
1458 DeviceClass *dc = DEVICE_CLASS(oc);
1459 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1460 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1461
1462 xcc->kvm_required = true;
1463
1464 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1465 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1466
1467 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1468 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1469 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1470 host_cpudef.stepping = eax & 0x0F;
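/* Example decode: EAX=0x000306C3 yields family (6 + 0) = 6,
 * model (0xC | 0x30) = 0x3C (60), stepping 3. */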
1471
1472 cpu_x86_fill_model_id(host_cpudef.model_id);
1473
1474 xcc->cpu_def = &host_cpudef;
1475
1476 /* level, xlevel, xlevel2, and the feature words are initialized in
1477 * instance_init, because they require KVM to be initialized.
1478 */
1479
1480 dc->props = host_x86_cpu_properties;
1481 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1482 dc->cannot_destroy_with_object_finalize_yet = true;
1483 }
1484
1485 static void host_x86_cpu_initfn(Object *obj)
1486 {
1487 X86CPU *cpu = X86_CPU(obj);
1488 CPUX86State *env = &cpu->env;
1489 KVMState *s = kvm_state;
1490
1491 assert(kvm_enabled());
1492
1493 /* We can't fill the features array here because we don't know yet if
1494 * "migratable" is true or false.
1495 */
1496 cpu->host_features = true;
1497
1498 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1499 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1500 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1501
1502 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1503 }
1504
1505 static const TypeInfo host_x86_cpu_type_info = {
1506 .name = X86_CPU_TYPE_NAME("host"),
1507 .parent = TYPE_X86_CPU,
1508 .instance_init = host_x86_cpu_initfn,
1509 .class_init = host_x86_cpu_class_init,
1510 };
1511
1512 #endif
1513
1514 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1515 {
1516 FeatureWordInfo *f = &feature_word_info[w];
1517 int i;
1518
1519 for (i = 0; i < 32; ++i) {
1520 if ((1UL << i) & mask) {
1521 const char *reg = get_register_name_32(f->cpuid_reg);
1522 assert(reg);
1523 fprintf(stderr, "warning: %s doesn't support requested feature: "
1524 "CPUID.%02XH:%s%s%s [bit %d]\n",
1525 kvm_enabled() ? "host" : "TCG",
1526 f->cpuid_eax, reg,
1527 f->feat_names[i] ? "." : "",
1528 f->feat_names[i] ? f->feat_names[i] : "", i);
1529 }
1530 }
1531 }
1532
1533 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1534 const char *name, Error **errp)
1535 {
1536 X86CPU *cpu = X86_CPU(obj);
1537 CPUX86State *env = &cpu->env;
1538 int64_t value;
1539
1540 value = (env->cpuid_version >> 8) & 0xf;
1541 if (value == 0xf) {
1542 value += (env->cpuid_version >> 20) & 0xff;
1543 }
1544 visit_type_int(v, &value, name, errp);
1545 }
1546
1547 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1548 const char *name, Error **errp)
1549 {
1550 X86CPU *cpu = X86_CPU(obj);
1551 CPUX86State *env = &cpu->env;
1552 const int64_t min = 0;
1553 const int64_t max = 0xff + 0xf;
1554 Error *local_err = NULL;
1555 int64_t value;
1556
1557 visit_type_int(v, &value, name, &local_err);
1558 if (local_err) {
1559 error_propagate(errp, local_err);
1560 return;
1561 }
1562 if (value < min || value > max) {
1563 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1564 name ? name : "null", value, min, max);
1565 return;
1566 }
1567
1568 env->cpuid_version &= ~0xff00f00;
1569 if (value > 0x0f) {
1570 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1571 } else {
1572 env->cpuid_version |= value << 8;
1573 }
1574 }
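/* Example: setting "family" to 21 (0x15) stores base family 0xF plus
 * extended family (21 - 15) << 20, i.e. cpuid_version |= 0x600f00. */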
1575
1576 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1577 const char *name, Error **errp)
1578 {
1579 X86CPU *cpu = X86_CPU(obj);
1580 CPUX86State *env = &cpu->env;
1581 int64_t value;
1582
1583 value = (env->cpuid_version >> 4) & 0xf;
1584 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1585 visit_type_int(v, &value, name, errp);
1586 }
1587
1588 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1589 const char *name, Error **errp)
1590 {
1591 X86CPU *cpu = X86_CPU(obj);
1592 CPUX86State *env = &cpu->env;
1593 const int64_t min = 0;
1594 const int64_t max = 0xff;
1595 Error *local_err = NULL;
1596 int64_t value;
1597
1598 visit_type_int(v, &value, name, &local_err);
1599 if (local_err) {
1600 error_propagate(errp, local_err);
1601 return;
1602 }
1603 if (value < min || value > max) {
1604 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1605 name ? name : "null", value, min, max);
1606 return;
1607 }
1608
1609 env->cpuid_version &= ~0xf00f0;
1610 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1611 }
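/* Example: setting "model" to 58 (0x3A) stores the low nibble at bits 4-7 and
 * the high nibble at bits 16-19, i.e. cpuid_version |= (0xA << 4) | (0x3 << 16). */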
1612
1613 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1614 void *opaque, const char *name,
1615 Error **errp)
1616 {
1617 X86CPU *cpu = X86_CPU(obj);
1618 CPUX86State *env = &cpu->env;
1619 int64_t value;
1620
1621 value = env->cpuid_version & 0xf;
1622 visit_type_int(v, &value, name, errp);
1623 }
1624
1625 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1626 void *opaque, const char *name,
1627 Error **errp)
1628 {
1629 X86CPU *cpu = X86_CPU(obj);
1630 CPUX86State *env = &cpu->env;
1631 const int64_t min = 0;
1632 const int64_t max = 0xf;
1633 Error *local_err = NULL;
1634 int64_t value;
1635
1636 visit_type_int(v, &value, name, &local_err);
1637 if (local_err) {
1638 error_propagate(errp, local_err);
1639 return;
1640 }
1641 if (value < min || value > max) {
1642 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1643 name ? name : "null", value, min, max);
1644 return;
1645 }
1646
1647 env->cpuid_version &= ~0xf;
1648 env->cpuid_version |= value & 0xf;
1649 }
1650
1651 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1652 {
1653 X86CPU *cpu = X86_CPU(obj);
1654 CPUX86State *env = &cpu->env;
1655 char *value;
1656
1657 value = g_malloc(CPUID_VENDOR_SZ + 1);
1658 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1659 env->cpuid_vendor3);
1660 return value;
1661 }
1662
1663 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1664 Error **errp)
1665 {
1666 X86CPU *cpu = X86_CPU(obj);
1667 CPUX86State *env = &cpu->env;
1668 int i;
1669
1670 if (strlen(value) != CPUID_VENDOR_SZ) {
1671 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1672 return;
1673 }
1674
1675 env->cpuid_vendor1 = 0;
1676 env->cpuid_vendor2 = 0;
1677 env->cpuid_vendor3 = 0;
1678 for (i = 0; i < 4; i++) {
1679 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1680 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1681 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1682 }
1683 }
1684
1685 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1686 {
1687 X86CPU *cpu = X86_CPU(obj);
1688 CPUX86State *env = &cpu->env;
1689 char *value;
1690 int i;
1691
1692 value = g_malloc(48 + 1);
1693 for (i = 0; i < 48; i++) {
1694 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1695 }
1696 value[48] = '\0';
1697 return value;
1698 }
1699
1700 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1701 Error **errp)
1702 {
1703 X86CPU *cpu = X86_CPU(obj);
1704 CPUX86State *env = &cpu->env;
1705 int c, len, i;
1706
1707 if (model_id == NULL) {
1708 model_id = "";
1709 }
1710 len = strlen(model_id);
1711 memset(env->cpuid_model, 0, 48);
1712 for (i = 0; i < 48; i++) {
1713 if (i >= len) {
1714 c = '\0';
1715 } else {
1716 c = (uint8_t)model_id[i];
1717 }
1718 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1719 }
1720 }
1721
1722 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1723 const char *name, Error **errp)
1724 {
1725 X86CPU *cpu = X86_CPU(obj);
1726 int64_t value;
1727
1728 value = cpu->env.tsc_khz * 1000;
1729 visit_type_int(v, &value, name, errp);
1730 }
1731
1732 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1733 const char *name, Error **errp)
1734 {
1735 X86CPU *cpu = X86_CPU(obj);
1736 const int64_t min = 0;
1737 const int64_t max = INT64_MAX;
1738 Error *local_err = NULL;
1739 int64_t value;
1740
1741 visit_type_int(v, &value, name, &local_err);
1742 if (local_err) {
1743 error_propagate(errp, local_err);
1744 return;
1745 }
1746 if (value < min || value > max) {
1747 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1748 name ? name : "null", value, min, max);
1749 return;
1750 }
1751
1752 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1753 }
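/* A worked example of the Hz -> kHz conversion above (hypothetical value):
 * setting the "tsc-frequency" property to 2200000000 (2.2 GHz) stores
 * tsc_khz = user_tsc_khz = 2200000; any sub-kHz remainder is truncated by
 * the integer division.
 */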
1754
1755 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1756 const char *name, Error **errp)
1757 {
1758 X86CPU *cpu = X86_CPU(obj);
1759 int64_t value = cpu->apic_id;
1760
1761 visit_type_int(v, &value, name, errp);
1762 }
1763
1764 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1765 const char *name, Error **errp)
1766 {
1767 X86CPU *cpu = X86_CPU(obj);
1768 DeviceState *dev = DEVICE(obj);
1769 const int64_t min = 0;
1770 const int64_t max = UINT32_MAX;
1771 Error *error = NULL;
1772 int64_t value;
1773
1774 if (dev->realized) {
1775 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1776 "it was realized", name, object_get_typename(obj));
1777 return;
1778 }
1779
1780 visit_type_int(v, &value, name, &error);
1781 if (error) {
1782 error_propagate(errp, error);
1783 return;
1784 }
1785 if (value < min || value > max) {
1786 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1787 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1788 object_get_typename(obj), name, value, min, max);
1789 return;
1790 }
1791
1792 if ((value != cpu->apic_id) && cpu_exists(value)) {
1793 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1794 return;
1795 }
1796 cpu->apic_id = value;
1797 }
1798
1799 /* Generic getter for "feature-words" and "filtered-features" properties */
1800 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1801 const char *name, Error **errp)
1802 {
1803 uint32_t *array = (uint32_t *)opaque;
1804 FeatureWord w;
1805 Error *err = NULL;
1806 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1807 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1808 X86CPUFeatureWordInfoList *list = NULL;
1809
1810 for (w = 0; w < FEATURE_WORDS; w++) {
1811 FeatureWordInfo *wi = &feature_word_info[w];
1812 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1813 qwi->cpuid_input_eax = wi->cpuid_eax;
1814 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1815 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1816 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1817 qwi->features = array[w];
1818
1819 /* List will be in reverse order, but order shouldn't matter */
1820 list_entries[w].next = list;
1821 list_entries[w].value = &word_infos[w];
1822 list = &list_entries[w];
1823 }
1824
1825 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1826 error_propagate(errp, err);
1827 }
1828
1829 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1830 const char *name, Error **errp)
1831 {
1832 X86CPU *cpu = X86_CPU(obj);
1833 int64_t value = cpu->hyperv_spinlock_attempts;
1834
1835 visit_type_int(v, &value, name, errp);
1836 }
1837
1838 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1839 const char *name, Error **errp)
1840 {
1841 const int64_t min = 0xFFF;
1842 const int64_t max = UINT_MAX;
1843 X86CPU *cpu = X86_CPU(obj);
1844 Error *err = NULL;
1845 int64_t value;
1846
1847 visit_type_int(v, &value, name, &err);
1848 if (err) {
1849 error_propagate(errp, err);
1850 return;
1851 }
1852
1853 if (value < min || value > max) {
1854 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1855 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1856 object_get_typename(obj), name ? name : "null",
1857 value, min, max);
1858 return;
1859 }
1860 cpu->hyperv_spinlock_attempts = value;
1861 }
1862
1863 static PropertyInfo qdev_prop_spinlocks = {
1864 .name = "int",
1865 .get = x86_get_hv_spinlocks,
1866 .set = x86_set_hv_spinlocks,
1867 };
1868
1869 /* Convert all '_' in a feature string option name to '-', to make feature
1870 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
1871 */
1872 static inline void feat2prop(char *s)
1873 {
1874 while ((s = strchr(s, '_'))) {
1875 *s = '-';
1876 }
1877 }
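/* A minimal usage sketch of feat2prop() (hypothetical buffer, shown for
 * illustration only); the conversion is done in place, so the argument
 * must be writable:
 *
 *     char name[] = "hv_relaxed";
 *     feat2prop(name);              (name now reads "hv-relaxed")
 */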
1878
1879 /* Parse "+feature,-feature,feature=foo" CPU feature string
1880 */
1881 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1882 Error **errp)
1883 {
1884 X86CPU *cpu = X86_CPU(cs);
1885 char *featurestr; /* Single "key=value" string being parsed */
1886 FeatureWord w;
1887 /* Features to be added */
1888 FeatureWordArray plus_features = { 0 };
1889 /* Features to be removed */
1890 FeatureWordArray minus_features = { 0 };
1891 uint32_t numvalue;
1892 CPUX86State *env = &cpu->env;
1893 Error *local_err = NULL;
1894
1895 featurestr = features ? strtok(features, ",") : NULL;
1896
1897 while (featurestr) {
1898 char *val;
1899 if (featurestr[0] == '+') {
1900 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1901 } else if (featurestr[0] == '-') {
1902 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1903 } else if ((val = strchr(featurestr, '='))) {
1904 *val = 0; val++;
1905 feat2prop(featurestr);
1906 if (!strcmp(featurestr, "xlevel")) {
1907 char *err;
1908 char num[32];
1909
1910 numvalue = strtoul(val, &err, 0);
1911 if (!*val || *err) {
1912 error_setg(errp, "bad numerical value %s", val);
1913 return;
1914 }
1915 if (numvalue < 0x80000000) {
1916 error_report("xlevel value shall always be >= 0x80000000"
1917 ", fixup will be removed in future versions");
1918 numvalue += 0x80000000;
1919 }
1920 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1921 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1922 } else if (!strcmp(featurestr, "tsc-freq")) {
1923 int64_t tsc_freq;
1924 char *err;
1925 char num[32];
1926
1927 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1928 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1929 if (tsc_freq < 0 || *err) {
1930 error_setg(errp, "bad numerical value %s", val);
1931 return;
1932 }
1933 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1934 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1935 &local_err);
1936 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1937 char *err;
1938 const int min = 0xFFF;
1939 char num[32];
1940 numvalue = strtoul(val, &err, 0);
1941 if (!*val || *err) {
1942 error_setg(errp, "bad numerical value %s", val);
1943 return;
1944 }
1945 if (numvalue < min) {
1946 error_report("hv-spinlocks value shall always be >= 0x%x"
1947 ", fixup will be removed in future versions",
1948 min);
1949 numvalue = min;
1950 }
1951 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1952 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1953 } else {
1954 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1955 }
1956 } else {
1957 feat2prop(featurestr);
1958 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1959 }
1960 if (local_err) {
1961 error_propagate(errp, local_err);
1962 return;
1963 }
1964 featurestr = strtok(NULL, ",");
1965 }
1966
1967 if (cpu->host_features) {
1968 for (w = 0; w < FEATURE_WORDS; w++) {
1969 env->features[w] =
1970 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1971 }
1972 }
1973
1974 for (w = 0; w < FEATURE_WORDS; w++) {
1975 env->features[w] |= plus_features[w];
1976 env->features[w] &= ~minus_features[w];
1977 }
1978 }
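/* For illustration, a hypothetical "-cpu" feature string and how the loop
 * above handles it:
 *
 *     "qemu64,+avx,-mmx,xlevel=0x8000000A"
 *
 * "+avx" sets the bit in plus_features, "-mmx" sets it in minus_features,
 * "xlevel=..." is converted to a number and applied as the "xlevel" QOM
 * property, and any other "key=value" pair is parsed as a property after
 * '_' -> '-' conversion. The model name itself ("qemu64") is split off in
 * cpu_x86_create() before this function is called.
 */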
1979
1980 /* Print all cpuid feature names in featureset
1981 */
1982 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1983 {
1984 int bit;
1985 bool first = true;
1986
1987 for (bit = 0; bit < 32; bit++) {
1988 if (featureset[bit]) {
1989 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1990 first = false;
1991 }
1992 }
1993 }
1994
1995 /* Generate CPU information: list the built-in CPU models and recognized CPUID flags. */
1996 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1997 {
1998 X86CPUDefinition *def;
1999 char buf[256];
2000 int i;
2001
2002 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2003 def = &builtin_x86_defs[i];
2004 snprintf(buf, sizeof(buf), "%s", def->name);
2005 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2006 }
2007 #ifdef CONFIG_KVM
2008 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2009 "KVM processor with all supported host features "
2010 "(only available in KVM mode)");
2011 #endif
2012
2013 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2014 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2015 FeatureWordInfo *fw = &feature_word_info[i];
2016
2017 (*cpu_fprintf)(f, " ");
2018 listflags(f, cpu_fprintf, fw->feat_names);
2019 (*cpu_fprintf)(f, "\n");
2020 }
2021 }
2022
2023 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2024 {
2025 CpuDefinitionInfoList *cpu_list = NULL;
2026 X86CPUDefinition *def;
2027 int i;
2028
2029 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2030 CpuDefinitionInfoList *entry;
2031 CpuDefinitionInfo *info;
2032
2033 def = &builtin_x86_defs[i];
2034 info = g_malloc0(sizeof(*info));
2035 info->name = g_strdup(def->name);
2036
2037 entry = g_malloc0(sizeof(*entry));
2038 entry->value = info;
2039 entry->next = cpu_list;
2040 cpu_list = entry;
2041 }
2042
2043 return cpu_list;
2044 }
2045
2046 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2047 bool migratable_only)
2048 {
2049 FeatureWordInfo *wi = &feature_word_info[w];
2050 uint32_t r;
2051
2052 if (kvm_enabled()) {
2053 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2054 wi->cpuid_ecx,
2055 wi->cpuid_reg);
2056 } else if (tcg_enabled()) {
2057 r = wi->tcg_features;
2058 } else {
2059 return ~0;
2060 }
2061 if (migratable_only) {
2062 r &= x86_cpu_get_migratable_flags(w);
2063 }
2064 return r;
2065 }
2066
2067 /*
2068 * Filters CPU feature words based on host availability of each feature.
2069 *
2070 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2071 */
2072 static int x86_cpu_filter_features(X86CPU *cpu)
2073 {
2074 CPUX86State *env = &cpu->env;
2075 FeatureWord w;
2076 int rv = 0;
2077
2078 for (w = 0; w < FEATURE_WORDS; w++) {
2079 uint32_t host_feat =
2080 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2081 uint32_t requested_features = env->features[w];
2082 env->features[w] &= host_feat;
2083 cpu->filtered_features[w] = requested_features & ~env->features[w];
2084 if (cpu->filtered_features[w]) {
2085 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2086 report_unavailable_features(w, cpu->filtered_features[w]);
2087 }
2088 rv = 1;
2089 }
2090 }
2091
2092 return rv;
2093 }
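/* A worked bitwise example of the filtering above (hypothetical masks):
 * if a feature word requests 0b1011 but the host/TCG only supports 0b1001,
 * then env->features[w] becomes 0b1001, filtered_features[w] becomes
 * 0b0010, and the function returns 1 so callers can warn or refuse.
 */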
2094
2095 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2096 {
2097 PropValue *pv;
2098 for (pv = props; pv->prop; pv++) {
2099 if (!pv->value) {
2100 continue;
2101 }
2102 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2103 &error_abort);
2104 }
2105 }
2106
2107 /* Load data from X86CPUDefinition
2108 */
2109 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2110 {
2111 CPUX86State *env = &cpu->env;
2112 const char *vendor;
2113 char host_vendor[CPUID_VENDOR_SZ + 1];
2114 FeatureWord w;
2115
2116 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2117 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2118 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2119 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2120 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2121 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2122 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2123 for (w = 0; w < FEATURE_WORDS; w++) {
2124 env->features[w] = def->features[w];
2125 }
2126
2127 /* Special cases not set in the X86CPUDefinition structs: */
2128 if (kvm_enabled()) {
2129 x86_cpu_apply_props(cpu, kvm_default_props);
2130 }
2131
2132 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2133
2134 /* sysenter isn't supported in compatibility mode on AMD,
2135 * syscall isn't supported in compatibility mode on Intel.
2136 * Normally we advertise the actual CPU vendor, but you can
2137 * override this using the 'vendor' property if you want to use
2138 * KVM's sysenter/syscall emulation in compatibility mode and
2139 * when doing cross-vendor migration.
2140 */
2141 vendor = def->vendor;
2142 if (kvm_enabled()) {
2143 uint32_t ebx = 0, ecx = 0, edx = 0;
2144 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2145 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2146 vendor = host_vendor;
2147 }
2148
2149 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2150
2151 }
2152
2153 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2154 {
2155 X86CPU *cpu = NULL;
2156 X86CPUClass *xcc;
2157 ObjectClass *oc;
2158 gchar **model_pieces;
2159 char *name, *features;
2160 Error *error = NULL;
2161
2162 model_pieces = g_strsplit(cpu_model, ",", 2);
2163 if (!model_pieces[0]) {
2164 error_setg(&error, "Invalid/empty CPU model name");
2165 goto out;
2166 }
2167 name = model_pieces[0];
2168 features = model_pieces[1];
2169
2170 oc = x86_cpu_class_by_name(name);
2171 if (oc == NULL) {
2172 error_setg(&error, "Unable to find CPU definition: %s", name);
2173 goto out;
2174 }
2175 xcc = X86_CPU_CLASS(oc);
2176
2177 if (xcc->kvm_required && !kvm_enabled()) {
2178 error_setg(&error, "CPU model '%s' requires KVM", name);
2179 goto out;
2180 }
2181
2182 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2183
2184 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2185 if (error) {
2186 goto out;
2187 }
2188
2189 out:
2190 if (error != NULL) {
2191 error_propagate(errp, error);
2192 if (cpu) {
2193 object_unref(OBJECT(cpu));
2194 cpu = NULL;
2195 }
2196 }
2197 g_strfreev(model_pieces);
2198 return cpu;
2199 }
2200
2201 X86CPU *cpu_x86_init(const char *cpu_model)
2202 {
2203 Error *error = NULL;
2204 X86CPU *cpu;
2205
2206 cpu = cpu_x86_create(cpu_model, &error);
2207 if (error) {
2208 goto out;
2209 }
2210
2211 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2212
2213 out:
2214 if (error) {
2215 error_report_err(error);
2216 if (cpu != NULL) {
2217 object_unref(OBJECT(cpu));
2218 cpu = NULL;
2219 }
2220 }
2221 return cpu;
2222 }
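/* A minimal usage sketch (hypothetical model string, error handling
 * abbreviated): board code can create and realize a CPU in one call, e.g.
 *
 *     X86CPU *cpu = cpu_x86_init("qemu64,+avx");
 *     if (!cpu) {
 *         (creation, feature parsing or realization failed)
 *     }
 */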
2223
2224 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2225 {
2226 X86CPUDefinition *cpudef = data;
2227 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2228
2229 xcc->cpu_def = cpudef;
2230 }
2231
2232 static void x86_register_cpudef_type(X86CPUDefinition *def)
2233 {
2234 char *typename = x86_cpu_type_name(def->name);
2235 TypeInfo ti = {
2236 .name = typename,
2237 .parent = TYPE_X86_CPU,
2238 .class_init = x86_cpu_cpudef_class_init,
2239 .class_data = def,
2240 };
2241
2242 type_register(&ti);
2243 g_free(typename);
2244 }
2245
2246 #if !defined(CONFIG_USER_ONLY)
2247
2248 void cpu_clear_apic_feature(CPUX86State *env)
2249 {
2250 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2251 }
2252
2253 #endif /* !CONFIG_USER_ONLY */
2254
2255 /* Initialize list of CPU models, filling some non-static fields if necessary
2256 */
2257 void x86_cpudef_setup(void)
2258 {
2259 int i, j;
2260 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2261
2262 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2263 X86CPUDefinition *def = &builtin_x86_defs[i];
2264
2265 /* Look for specific "cpudef" models that have
2266 * the QEMU version in .model_id */
2267 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2268 if (strcmp(model_with_versions[j], def->name) == 0) {
2269 pstrcpy(def->model_id, sizeof(def->model_id),
2270 "QEMU Virtual CPU version ");
2271 pstrcat(def->model_id, sizeof(def->model_id),
2272 qemu_hw_version());
2273 break;
2274 }
2275 }
2276 }
2277 }
2278
2279 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2280 uint32_t *eax, uint32_t *ebx,
2281 uint32_t *ecx, uint32_t *edx)
2282 {
2283 X86CPU *cpu = x86_env_get_cpu(env);
2284 CPUState *cs = CPU(cpu);
2285
2286 /* test if maximum index reached */
2287 if (index & 0x80000000) {
2288 if (index > env->cpuid_xlevel) {
2289 if (env->cpuid_xlevel2 > 0) {
2290 /* Handle the Centaur's CPUID instruction. */
2291 if (index > env->cpuid_xlevel2) {
2292 index = env->cpuid_xlevel2;
2293 } else if (index < 0xC0000000) {
2294 index = env->cpuid_xlevel;
2295 }
2296 } else {
2297 /* Intel documentation states that invalid EAX input will
2298 * return the same information as EAX=cpuid_level
2299 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2300 */
2301 index = env->cpuid_level;
2302 }
2303 }
2304 } else {
2305 if (index > env->cpuid_level)
2306 index = env->cpuid_level;
2307 }
2308
2309 switch(index) {
2310 case 0:
2311 *eax = env->cpuid_level;
2312 *ebx = env->cpuid_vendor1;
2313 *edx = env->cpuid_vendor2;
2314 *ecx = env->cpuid_vendor3;
2315 break;
2316 case 1:
2317 *eax = env->cpuid_version;
2318 *ebx = (cpu->apic_id << 24) |
2319 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2320 *ecx = env->features[FEAT_1_ECX];
2321 *edx = env->features[FEAT_1_EDX];
2322 if (cs->nr_cores * cs->nr_threads > 1) {
2323 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2324 *edx |= 1 << 28; /* HTT bit */
2325 }
2326 break;
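/* A worked example of the leaf-1 EBX encoding above (hypothetical
 * topology): apic_id = 1 with 2 cores x 2 threads gives
 *     EBX = (1 << 24) | (4 << 16) | (8 << 8) = 0x01040800
 * i.e. initial APIC ID in bits 31..24, logical processor count in
 * bits 23..16, and CLFLUSH line size (in 8-byte units) in bits 15..8.
 */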
2327 case 2:
2328 /* cache info: needed for Pentium Pro compatibility */
2329 if (cpu->cache_info_passthrough) {
2330 host_cpuid(index, 0, eax, ebx, ecx, edx);
2331 break;
2332 }
2333 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2334 *ebx = 0;
2335 *ecx = 0;
2336 *edx = (L1D_DESCRIPTOR << 16) | \
2337 (L1I_DESCRIPTOR << 8) | \
2338 (L2_DESCRIPTOR);
2339 break;
2340 case 4:
2341 /* cache info: needed for Core compatibility */
2342 if (cpu->cache_info_passthrough) {
2343 host_cpuid(index, count, eax, ebx, ecx, edx);
2344 *eax &= ~0xFC000000;
2345 } else {
2346 *eax = 0;
2347 switch (count) {
2348 case 0: /* L1 dcache info */
2349 *eax |= CPUID_4_TYPE_DCACHE | \
2350 CPUID_4_LEVEL(1) | \
2351 CPUID_4_SELF_INIT_LEVEL;
2352 *ebx = (L1D_LINE_SIZE - 1) | \
2353 ((L1D_PARTITIONS - 1) << 12) | \
2354 ((L1D_ASSOCIATIVITY - 1) << 22);
2355 *ecx = L1D_SETS - 1;
2356 *edx = CPUID_4_NO_INVD_SHARING;
2357 break;
2358 case 1: /* L1 icache info */
2359 *eax |= CPUID_4_TYPE_ICACHE | \
2360 CPUID_4_LEVEL(1) | \
2361 CPUID_4_SELF_INIT_LEVEL;
2362 *ebx = (L1I_LINE_SIZE - 1) | \
2363 ((L1I_PARTITIONS - 1) << 12) | \
2364 ((L1I_ASSOCIATIVITY - 1) << 22);
2365 *ecx = L1I_SETS - 1;
2366 *edx = CPUID_4_NO_INVD_SHARING;
2367 break;
2368 case 2: /* L2 cache info */
2369 *eax |= CPUID_4_TYPE_UNIFIED | \
2370 CPUID_4_LEVEL(2) | \
2371 CPUID_4_SELF_INIT_LEVEL;
2372 if (cs->nr_threads > 1) {
2373 *eax |= (cs->nr_threads - 1) << 14;
2374 }
2375 *ebx = (L2_LINE_SIZE - 1) | \
2376 ((L2_PARTITIONS - 1) << 12) | \
2377 ((L2_ASSOCIATIVITY - 1) << 22);
2378 *ecx = L2_SETS - 1;
2379 *edx = CPUID_4_NO_INVD_SHARING;
2380 break;
2381 default: /* end of info */
2382 *eax = 0;
2383 *ebx = 0;
2384 *ecx = 0;
2385 *edx = 0;
2386 break;
2387 }
2388 }
2389
2390 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2391 if ((*eax & 31) && cs->nr_cores > 1) {
2392 *eax |= (cs->nr_cores - 1) << 26;
2393 }
2394 break;
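/* A worked example of the deterministic-cache encoding above, using the
 * hardcoded L1D values: EBX = (64-1) | ((1-1) << 12) | ((8-1) << 22) and
 * ECX = 64 - 1, so a guest decodes the size as
 *     line size * partitions * ways * sets = 64 * 1 * 8 * 64 = 32 KiB,
 * matching the L1D_* constants used above.
 */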
2395 case 5:
2396 /* mwait info: needed for Core compatibility */
2397 *eax = 0; /* Smallest monitor-line size in bytes */
2398 *ebx = 0; /* Largest monitor-line size in bytes */
2399 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2400 *edx = 0;
2401 break;
2402 case 6:
2403 /* Thermal and Power Leaf */
2404 *eax = env->features[FEAT_6_EAX];
2405 *ebx = 0;
2406 *ecx = 0;
2407 *edx = 0;
2408 break;
2409 case 7:
2410 /* Structured Extended Feature Flags Enumeration Leaf */
2411 if (count == 0) {
2412 *eax = 0; /* Maximum ECX value for sub-leaves */
2413 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2414 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2415 *edx = 0; /* Reserved */
2416 } else {
2417 *eax = 0;
2418 *ebx = 0;
2419 *ecx = 0;
2420 *edx = 0;
2421 }
2422 break;
2423 case 9:
2424 /* Direct Cache Access Information Leaf */
2425 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2426 *ebx = 0;
2427 *ecx = 0;
2428 *edx = 0;
2429 break;
2430 case 0xA:
2431 /* Architectural Performance Monitoring Leaf */
2432 if (kvm_enabled() && cpu->enable_pmu) {
2433 KVMState *s = cs->kvm_state;
2434
2435 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2436 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2437 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2438 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2439 } else {
2440 *eax = 0;
2441 *ebx = 0;
2442 *ecx = 0;
2443 *edx = 0;
2444 }
2445 break;
2446 case 0xD: {
2447 KVMState *s = cs->kvm_state;
2448 uint64_t kvm_mask;
2449 int i;
2450
2451 /* Processor Extended State */
2452 *eax = 0;
2453 *ebx = 0;
2454 *ecx = 0;
2455 *edx = 0;
2456 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2457 break;
2458 }
2459 kvm_mask =
2460 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2461 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2462
2463 if (count == 0) {
2464 *ecx = 0x240;
2465 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2466 const ExtSaveArea *esa = &ext_save_areas[i];
2467 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2468 (kvm_mask & (1 << i)) != 0) {
2469 if (i < 32) {
2470 *eax |= 1 << i;
2471 } else {
2472 *edx |= 1 << (i - 32);
2473 }
2474 *ecx = MAX(*ecx, esa->offset + esa->size);
2475 }
2476 }
2477 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2478 *ebx = *ecx;
2479 } else if (count == 1) {
2480 *eax = env->features[FEAT_XSAVE];
2481 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2482 const ExtSaveArea *esa = &ext_save_areas[count];
2483 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2484 (kvm_mask & (1 << count)) != 0) {
2485 *eax = esa->size;
2486 *ebx = esa->offset;
2487 }
2488 }
2489 break;
2490 }
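/* For sub-leaf 0 above, ECX starts at 0x240 (576 bytes: the 512-byte
 * legacy FXSAVE region plus the 64-byte XSAVE header) and grows to cover
 * the highest enabled extended save area, while EAX/EDX report the XSTATE
 * component bitmap filtered by both the guest features and the
 * KVM-supported mask.
 */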
2491 case 0x80000000:
2492 *eax = env->cpuid_xlevel;
2493 *ebx = env->cpuid_vendor1;
2494 *edx = env->cpuid_vendor2;
2495 *ecx = env->cpuid_vendor3;
2496 break;
2497 case 0x80000001:
2498 *eax = env->cpuid_version;
2499 *ebx = 0;
2500 *ecx = env->features[FEAT_8000_0001_ECX];
2501 *edx = env->features[FEAT_8000_0001_EDX];
2502
2503 /* The Linux kernel checks for the CMPLegacy bit and
2504 * discards multiple thread information if it is set.
2505 * So don't set it here for Intel to make Linux guests happy.
2506 */
2507 if (cs->nr_cores * cs->nr_threads > 1) {
2508 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2509 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2510 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2511 *ecx |= 1 << 1; /* CmpLegacy bit */
2512 }
2513 }
2514 break;
2515 case 0x80000002:
2516 case 0x80000003:
2517 case 0x80000004:
2518 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2519 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2520 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2521 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2522 break;
2523 case 0x80000005:
2524 /* cache info (L1 cache) */
2525 if (cpu->cache_info_passthrough) {
2526 host_cpuid(index, 0, eax, ebx, ecx, edx);
2527 break;
2528 }
2529 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2530 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2531 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2532 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2533 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2534 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2535 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2536 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2537 break;
2538 case 0x80000006:
2539 /* cache info (L2 cache) */
2540 if (cpu->cache_info_passthrough) {
2541 host_cpuid(index, 0, eax, ebx, ecx, edx);
2542 break;
2543 }
2544 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2545 (L2_DTLB_2M_ENTRIES << 16) | \
2546 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2547 (L2_ITLB_2M_ENTRIES);
2548 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2549 (L2_DTLB_4K_ENTRIES << 16) | \
2550 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2551 (L2_ITLB_4K_ENTRIES);
2552 *ecx = (L2_SIZE_KB_AMD << 16) | \
2553 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2554 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2555 *edx = ((L3_SIZE_KB/512) << 18) | \
2556 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2557 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2558 break;
2559 case 0x80000007:
2560 *eax = 0;
2561 *ebx = 0;
2562 *ecx = 0;
2563 *edx = env->features[FEAT_8000_0007_EDX];
2564 break;
2565 case 0x80000008:
2566 /* virtual & phys address size in low 2 bytes. */
2567 /* XXX: This value must match the one used in the MMU code. */
2568 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2569 /* 64 bit processor */
2570 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2571 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2572 } else {
2573 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2574 *eax = 0x00000024; /* 36 bits physical */
2575 } else {
2576 *eax = 0x00000020; /* 32 bits physical */
2577 }
2578 }
2579 *ebx = 0;
2580 *ecx = 0;
2581 *edx = 0;
2582 if (cs->nr_cores * cs->nr_threads > 1) {
2583 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2584 }
2585 break;
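/* A worked decoding of the 64-bit case above: EAX = 0x00003028 splits into
 * bits 7..0 = 0x28 = 40 physical address bits and bits 15..8 = 0x30 = 48
 * linear (virtual) address bits, matching the "48 bits virtual, 40 bits
 * physical" comment.
 */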
2586 case 0x8000000A:
2587 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2588 *eax = 0x00000001; /* SVM Revision */
2589 *ebx = 0x00000010; /* nr of ASIDs */
2590 *ecx = 0;
2591 *edx = env->features[FEAT_SVM]; /* optional features */
2592 } else {
2593 *eax = 0;
2594 *ebx = 0;
2595 *ecx = 0;
2596 *edx = 0;
2597 }
2598 break;
2599 case 0xC0000000:
2600 *eax = env->cpuid_xlevel2;
2601 *ebx = 0;
2602 *ecx = 0;
2603 *edx = 0;
2604 break;
2605 case 0xC0000001:
2606 /* Support for VIA CPU's CPUID instruction */
2607 *eax = env->cpuid_version;
2608 *ebx = 0;
2609 *ecx = 0;
2610 *edx = env->features[FEAT_C000_0001_EDX];
2611 break;
2612 case 0xC0000002:
2613 case 0xC0000003:
2614 case 0xC0000004:
2615 /* Reserved for future use; currently filled with zero */
2616 *eax = 0;
2617 *ebx = 0;
2618 *ecx = 0;
2619 *edx = 0;
2620 break;
2621 default:
2622 /* reserved values: zero */
2623 *eax = 0;
2624 *ebx = 0;
2625 *ecx = 0;
2626 *edx = 0;
2627 break;
2628 }
2629 }
2630
2631 /* CPUClass::reset() */
2632 static void x86_cpu_reset(CPUState *s)
2633 {
2634 X86CPU *cpu = X86_CPU(s);
2635 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2636 CPUX86State *env = &cpu->env;
2637 int i;
2638
2639 xcc->parent_reset(s);
2640
2641 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2642
2643 tlb_flush(s, 1);
2644
2645 env->old_exception = -1;
2646
2647 /* init to reset state */
2648
2649 #ifdef CONFIG_SOFTMMU
2650 env->hflags |= HF_SOFTMMU_MASK;
2651 #endif
2652 env->hflags2 |= HF2_GIF_MASK;
2653
2654 cpu_x86_update_cr0(env, 0x60000010);
2655 env->a20_mask = ~0x0;
2656 env->smbase = 0x30000;
2657
2658 env->idt.limit = 0xffff;
2659 env->gdt.limit = 0xffff;
2660 env->ldt.limit = 0xffff;
2661 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2662 env->tr.limit = 0xffff;
2663 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2664
2665 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2666 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2667 DESC_R_MASK | DESC_A_MASK);
2668 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2669 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2670 DESC_A_MASK);
2671 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2672 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2673 DESC_A_MASK);
2674 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2675 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2676 DESC_A_MASK);
2677 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2678 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2679 DESC_A_MASK);
2680 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2681 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2682 DESC_A_MASK);
2683
2684 env->eip = 0xfff0;
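/* With the CS base of 0xffff0000 loaded above, EIP = 0xfff0 places the
 * first fetched instruction at the architectural reset vector
 * 0xffff0000 + 0xfff0 = 0xfffffff0. */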
2685 env->regs[R_EDX] = env->cpuid_version;
2686
2687 env->eflags = 0x2;
2688
2689 /* FPU init */
2690 for (i = 0; i < 8; i++) {
2691 env->fptags[i] = 1;
2692 }
2693 cpu_set_fpuc(env, 0x37f);
2694
2695 env->mxcsr = 0x1f80;
2696 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2697
2698 env->pat = 0x0007040600070406ULL;
2699 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2700
2701 memset(env->dr, 0, sizeof(env->dr));
2702 env->dr[6] = DR6_FIXED_1;
2703 env->dr[7] = DR7_FIXED_1;
2704 cpu_breakpoint_remove_all(s, BP_CPU);
2705 cpu_watchpoint_remove_all(s, BP_CPU);
2706
2707 env->xcr0 = 1;
2708
2709 /*
2710 * SDM 11.11.5 requires:
2711 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2712 * - IA32_MTRR_PHYSMASKn.V = 0
2713 * All other bits are undefined. For simplification, zero it all.
2714 */
2715 env->mtrr_deftype = 0;
2716 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2717 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2718
2719 #if !defined(CONFIG_USER_ONLY)
2720 /* We hard-wire the BSP to the first CPU. */
2721 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2722
2723 s->halted = !cpu_is_bsp(cpu);
2724
2725 if (kvm_enabled()) {
2726 kvm_arch_reset_vcpu(cpu);
2727 }
2728 #endif
2729 }
2730
2731 #ifndef CONFIG_USER_ONLY
2732 bool cpu_is_bsp(X86CPU *cpu)
2733 {
2734 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2735 }
2736
2737 /* TODO: remove me when reset over the QOM tree is implemented */
2738 static void x86_cpu_machine_reset_cb(void *opaque)
2739 {
2740 X86CPU *cpu = opaque;
2741 cpu_reset(CPU(cpu));
2742 }
2743 #endif
2744
2745 static void mce_init(X86CPU *cpu)
2746 {
2747 CPUX86State *cenv = &cpu->env;
2748 unsigned int bank;
2749
2750 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2751 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2752 (CPUID_MCE | CPUID_MCA)) {
2753 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2754 cenv->mcg_ctl = ~(uint64_t)0;
2755 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2756 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2757 }
2758 }
2759 }
2760
2761 #ifndef CONFIG_USER_ONLY
2762 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2763 {
2764 APICCommonState *apic;
2765 const char *apic_type = "apic";
2766
2767 if (kvm_apic_in_kernel()) {
2768 apic_type = "kvm-apic";
2769 } else if (xen_enabled()) {
2770 apic_type = "xen-apic";
2771 }
2772
2773 cpu->apic_state = DEVICE(object_new(apic_type));
2774
2775 object_property_add_child(OBJECT(cpu), "apic",
2776 OBJECT(cpu->apic_state), NULL);
2777 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2778 /* TODO: convert to link<> */
2779 apic = APIC_COMMON(cpu->apic_state);
2780 apic->cpu = cpu;
2781 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2782 }
2783
2784 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2785 {
2786 APICCommonState *apic;
2787 static bool apic_mmio_map_once;
2788
2789 if (cpu->apic_state == NULL) {
2790 return;
2791 }
2792 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2793 errp);
2794
2795 /* Map APIC MMIO area */
2796 apic = APIC_COMMON(cpu->apic_state);
2797 if (!apic_mmio_map_once) {
2798 memory_region_add_subregion_overlap(get_system_memory(),
2799 apic->apicbase &
2800 MSR_IA32_APICBASE_BASE,
2801 &apic->io_memory,
2802 0x1000);
2803 apic_mmio_map_once = true;
2804 }
2805 }
2806
2807 static void x86_cpu_machine_done(Notifier *n, void *unused)
2808 {
2809 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2810 MemoryRegion *smram =
2811 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2812
2813 if (smram) {
2814 cpu->smram = g_new(MemoryRegion, 1);
2815 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2816 smram, 0, 1ull << 32);
2817 memory_region_set_enabled(cpu->smram, false);
2818 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2819 }
2820 }
2821 #else
2822 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2823 {
2824 }
2825 #endif
2826
2827
2828 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2829 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2830 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2831 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2832 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2833 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2834 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2835 {
2836 CPUState *cs = CPU(dev);
2837 X86CPU *cpu = X86_CPU(dev);
2838 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2839 CPUX86State *env = &cpu->env;
2840 Error *local_err = NULL;
2841 static bool ht_warned;
2842
2843 if (cpu->apic_id < 0) {
2844 error_setg(errp, "apic-id property was not initialized properly");
2845 return;
2846 }
2847
2848 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2849 env->cpuid_level = 7;
2850 }
2851
2852 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2853 * CPUID[1].EDX.
2854 */
2855 if (IS_AMD_CPU(env)) {
2856 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2857 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2858 & CPUID_EXT2_AMD_ALIASES);
2859 }
2860
2861
2862 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2863 error_setg(&local_err,
2864 kvm_enabled() ?
2865 "Host doesn't support requested features" :
2866 "TCG doesn't support requested features");
2867 goto out;
2868 }
2869
2870 #ifndef CONFIG_USER_ONLY
2871 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2872
2873 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2874 x86_cpu_apic_create(cpu, &local_err);
2875 if (local_err != NULL) {
2876 goto out;
2877 }
2878 }
2879 #endif
2880
2881 mce_init(cpu);
2882
2883 #ifndef CONFIG_USER_ONLY
2884 if (tcg_enabled()) {
2885 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2886 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2887 cs->as = g_new(AddressSpace, 1);
2888
2889 /* Outer container... */
2890 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2891 memory_region_set_enabled(cpu->cpu_as_root, true);
2892
2893 /* ... with two regions inside: normal system memory with low
2894 * priority, and...
2895 */
2896 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2897 get_system_memory(), 0, ~0ull);
2898 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2899 memory_region_set_enabled(cpu->cpu_as_mem, true);
2900 address_space_init(cs->as, cpu->cpu_as_root, "CPU");
2901
2902 /* ... SMRAM with higher priority, linked from /machine/smram. */
2903 cpu->machine_done.notify = x86_cpu_machine_done;
2904 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2905 }
2906 #endif
2907
2908 qemu_init_vcpu(cs);
2909
2910 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2911 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2912 * based on inputs (sockets, cores, threads), it is still better to give
2913 * users a warning.
2914 *
2915 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2916 * cs->nr_threads hasn't been populated yet and the check is incorrect.
2917 */
2918 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2919 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2920 " -smp options properly.");
2921 ht_warned = true;
2922 }
2923
2924 x86_cpu_apic_realize(cpu, &local_err);
2925 if (local_err != NULL) {
2926 goto out;
2927 }
2928 cpu_reset(cs);
2929
2930 xcc->parent_realize(dev, &local_err);
2931
2932 out:
2933 if (local_err != NULL) {
2934 error_propagate(errp, local_err);
2935 return;
2936 }
2937 }
2938
2939 typedef struct BitProperty {
2940 uint32_t *ptr;
2941 uint32_t mask;
2942 } BitProperty;
2943
2944 static void x86_cpu_get_bit_prop(Object *obj,
2945 struct Visitor *v,
2946 void *opaque,
2947 const char *name,
2948 Error **errp)
2949 {
2950 BitProperty *fp = opaque;
2951 bool value = (*fp->ptr & fp->mask) == fp->mask;
2952 visit_type_bool(v, &value, name, errp);
2953 }
2954
2955 static void x86_cpu_set_bit_prop(Object *obj,
2956 struct Visitor *v,
2957 void *opaque,
2958 const char *name,
2959 Error **errp)
2960 {
2961 DeviceState *dev = DEVICE(obj);
2962 BitProperty *fp = opaque;
2963 Error *local_err = NULL;
2964 bool value;
2965
2966 if (dev->realized) {
2967 qdev_prop_set_after_realize(dev, name, errp);
2968 return;
2969 }
2970
2971 visit_type_bool(v, &value, name, &local_err);
2972 if (local_err) {
2973 error_propagate(errp, local_err);
2974 return;
2975 }
2976
2977 if (value) {
2978 *fp->ptr |= fp->mask;
2979 } else {
2980 *fp->ptr &= ~fp->mask;
2981 }
2982 }
2983
2984 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2985 void *opaque)
2986 {
2987 BitProperty *prop = opaque;
2988 g_free(prop);
2989 }
2990
2991 /* Register a boolean property to get/set a single bit in a uint32_t field.
2992 *
2993 * The same property name can be registered multiple times to make it affect
2994 * multiple bits in the same FeatureWord. In that case, the getter will return
2995 * true only if all bits are set.
2996 */
2997 static void x86_cpu_register_bit_prop(X86CPU *cpu,
2998 const char *prop_name,
2999 uint32_t *field,
3000 int bitnr)
3001 {
3002 BitProperty *fp;
3003 ObjectProperty *op;
3004 uint32_t mask = (1UL << bitnr);
3005
3006 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3007 if (op) {
3008 fp = op->opaque;
3009 assert(fp->ptr == field);
3010 fp->mask |= mask;
3011 } else {
3012 fp = g_new0(BitProperty, 1);
3013 fp->ptr = field;
3014 fp->mask = mask;
3015 object_property_add(OBJECT(cpu), prop_name, "bool",
3016 x86_cpu_get_bit_prop,
3017 x86_cpu_set_bit_prop,
3018 x86_cpu_release_bit_prop, fp, &error_abort);
3019 }
3020 }
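/* A usage sketch of the registration above (hypothetical feature): if the
 * "sse" flag lives in FEAT_1_EDX bit 25, registering it creates a boolean
 * QOM property, so a later
 *
 *     object_property_set_bool(OBJECT(cpu), true, "sse", &err);
 *
 * simply ORs (1UL << 25) into env->features[FEAT_1_EDX], and reading the
 * property reports whether all registered mask bits are currently set.
 */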
3021
3022 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3023 FeatureWord w,
3024 int bitnr)
3025 {
3026 Object *obj = OBJECT(cpu);
3027 int i;
3028 char **names;
3029 FeatureWordInfo *fi = &feature_word_info[w];
3030
3031 if (!fi->feat_names) {
3032 return;
3033 }
3034 if (!fi->feat_names[bitnr]) {
3035 return;
3036 }
3037
3038 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3039
3040 feat2prop(names[0]);
3041 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3042
3043 for (i = 1; names[i]; i++) {
3044 feat2prop(names[i]);
3045 object_property_add_alias(obj, names[i], obj, names[0],
3046 &error_abort);
3047 }
3048
3049 g_strfreev(names);
3050 }
3051
3052 static void x86_cpu_initfn(Object *obj)
3053 {
3054 CPUState *cs = CPU(obj);
3055 X86CPU *cpu = X86_CPU(obj);
3056 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3057 CPUX86State *env = &cpu->env;
3058 FeatureWord w;
3059 static int inited;
3060
3061 cs->env_ptr = env;
3062 cpu_exec_init(cs, &error_abort);
3063
3064 object_property_add(obj, "family", "int",
3065 x86_cpuid_version_get_family,
3066 x86_cpuid_version_set_family, NULL, NULL, NULL);
3067 object_property_add(obj, "model", "int",
3068 x86_cpuid_version_get_model,
3069 x86_cpuid_version_set_model, NULL, NULL, NULL);
3070 object_property_add(obj, "stepping", "int",
3071 x86_cpuid_version_get_stepping,
3072 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3073 object_property_add_str(obj, "vendor",
3074 x86_cpuid_get_vendor,
3075 x86_cpuid_set_vendor, NULL);
3076 object_property_add_str(obj, "model-id",
3077 x86_cpuid_get_model_id,
3078 x86_cpuid_set_model_id, NULL);
3079 object_property_add(obj, "tsc-frequency", "int",
3080 x86_cpuid_get_tsc_freq,
3081 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3082 object_property_add(obj, "apic-id", "int",
3083 x86_cpuid_get_apic_id,
3084 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3085 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3086 x86_cpu_get_feature_words,
3087 NULL, NULL, (void *)env->features, NULL);
3088 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3089 x86_cpu_get_feature_words,
3090 NULL, NULL, (void *)cpu->filtered_features, NULL);
3091
3092 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3093
3094 #ifndef CONFIG_USER_ONLY
3095 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3096 cpu->apic_id = -1;
3097 #endif
3098
3099 for (w = 0; w < FEATURE_WORDS; w++) {
3100 int bitnr;
3101
3102 for (bitnr = 0; bitnr < 32; bitnr++) {
3103 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3104 }
3105 }
3106
3107 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3108
3109 /* init various static tables used in TCG mode */
3110 if (tcg_enabled() && !inited) {
3111 inited = 1;
3112 tcg_x86_init();
3113 }
3114 }
3115
3116 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3117 {
3118 X86CPU *cpu = X86_CPU(cs);
3119
3120 return cpu->apic_id;
3121 }
3122
3123 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3124 {
3125 X86CPU *cpu = X86_CPU(cs);
3126
3127 return cpu->env.cr[0] & CR0_PG_MASK;
3128 }
3129
3130 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3131 {
3132 X86CPU *cpu = X86_CPU(cs);
3133
3134 cpu->env.eip = value;
3135 }
3136
3137 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3138 {
3139 X86CPU *cpu = X86_CPU(cs);
3140
3141 cpu->env.eip = tb->pc - tb->cs_base;
3142 }
3143
3144 static bool x86_cpu_has_work(CPUState *cs)
3145 {
3146 X86CPU *cpu = X86_CPU(cs);
3147 CPUX86State *env = &cpu->env;
3148
3149 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3150 CPU_INTERRUPT_POLL)) &&
3151 (env->eflags & IF_MASK)) ||
3152 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3153 CPU_INTERRUPT_INIT |
3154 CPU_INTERRUPT_SIPI |
3155 CPU_INTERRUPT_MCE)) ||
3156 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3157 !(env->hflags & HF_SMM_MASK));
3158 }
3159
3160 static Property x86_cpu_properties[] = {
3161 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3162 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3163 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3164 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3165 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3166 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3167 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3168 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3169 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3170 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3171 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3172 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3173 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3174 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3175 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3176 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3177 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3178 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3179 DEFINE_PROP_END_OF_LIST()
3180 };
3181
3182 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3183 {
3184 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3185 CPUClass *cc = CPU_CLASS(oc);
3186 DeviceClass *dc = DEVICE_CLASS(oc);
3187
3188 xcc->parent_realize = dc->realize;
3189 dc->realize = x86_cpu_realizefn;
3190 dc->props = x86_cpu_properties;
3191
3192 xcc->parent_reset = cc->reset;
3193 cc->reset = x86_cpu_reset;
3194 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3195
3196 cc->class_by_name = x86_cpu_class_by_name;
3197 cc->parse_features = x86_cpu_parse_featurestr;
3198 cc->has_work = x86_cpu_has_work;
3199 cc->do_interrupt = x86_cpu_do_interrupt;
3200 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3201 cc->dump_state = x86_cpu_dump_state;
3202 cc->set_pc = x86_cpu_set_pc;
3203 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3204 cc->gdb_read_register = x86_cpu_gdb_read_register;
3205 cc->gdb_write_register = x86_cpu_gdb_write_register;
3206 cc->get_arch_id = x86_cpu_get_arch_id;
3207 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3208 #ifdef CONFIG_USER_ONLY
3209 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3210 #else
3211 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3212 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3213 cc->write_elf64_note = x86_cpu_write_elf64_note;
3214 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3215 cc->write_elf32_note = x86_cpu_write_elf32_note;
3216 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3217 cc->vmsd = &vmstate_x86_cpu;
3218 #endif
3219 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3220 #ifndef CONFIG_USER_ONLY
3221 cc->debug_excp_handler = breakpoint_handler;
3222 #endif
3223 cc->cpu_exec_enter = x86_cpu_exec_enter;
3224 cc->cpu_exec_exit = x86_cpu_exec_exit;
3225
3226 /*
3227 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3228 * object in cpus -> dangling pointer after final object_unref().
3229 */
3230 dc->cannot_destroy_with_object_finalize_yet = true;
3231 }
3232
3233 static const TypeInfo x86_cpu_type_info = {
3234 .name = TYPE_X86_CPU,
3235 .parent = TYPE_CPU,
3236 .instance_size = sizeof(X86CPU),
3237 .instance_init = x86_cpu_initfn,
3238 .abstract = true,
3239 .class_size = sizeof(X86CPUClass),
3240 .class_init = x86_cpu_common_class_init,
3241 };
3242
3243 static void x86_cpu_register_types(void)
3244 {
3245 int i;
3246
3247 type_register_static(&x86_cpu_type_info);
3248 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3249 x86_register_cpudef_type(&builtin_x86_defs[i]);
3250 }
3251 #ifdef CONFIG_KVM
3252 type_register_static(&host_x86_cpu_type_info);
3253 #endif
3254 }
3255
3256 type_init(x86_cpu_register_types)