1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
23
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
33
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
38
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
43
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #ifndef CONFIG_USER_ONLY
47 #include "exec/address-spaces.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
53 /* Cache topology CPUID constants: */
54
55 /* CPUID Leaf 2 Descriptors */
56
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60
61
62 /* CPUID Leaf 4 constants: */
63
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
68
69 #define CPUID_4_LEVEL(l) ((l) << 5)
70
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
73
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
78
79 #define ASSOC_FULL 0xFF
80
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
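/* Worked example (illustrative, derived from the table above): an 8-way
 * cache encodes as AMD_ENC_ASSOC(8) == 0x6, a 16-way cache as 0x8, and a
 * fully associative cache (ASSOC_FULL) as 0xF; any count not listed above
 * falls through to the 0 "invalid value" case.
 */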
94
95
96 /* Definitions of the hardcoded cache entries we expose: */
97
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
109
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
121
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
133
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
139
140 /* TLB definitions: */
141
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
146
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
151
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
156
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
161
162
163
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
166 {
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
172 }
173 dst[CPUID_VENDOR_SZ] = '\0';
174 }
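/* Worked example (illustrative): with the CPUID.0 register values of an
 * Intel CPU -- EBX=0x756e6547, EDX=0x49656e69, ECX=0x6c65746e -- calling
 * x86_cpu_vendor_words2str(dst, ebx, edx, ecx) emits the little-endian
 * bytes of each word in turn, so dst becomes "GenuineIntel" (the same
 * register order is used by host_x86_cpu_class_init() below).
 */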
175
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
179 */
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
189 };
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
199 };
200 /* Feature names that are already defined in feature_name[] but are set on
201  * CPUID[8000_0001].EDX on AMD CPUs don't have their names repeated in
202  * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203  * if and only if the CPU vendor is AMD.
204 */
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
214 };
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
224 };
225
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
235 };
236
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
246 };
247
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257 };
258
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
263 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
264 };
265
266 static const char *cpuid_apm_edx_feature_name[] = {
267 NULL, NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 "invtsc", NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
275 };
276
277 static const char *cpuid_xsave_feature_name[] = {
278 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
279 NULL, NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286 };
287
288 static const char *cpuid_6_feature_name[] = {
289 NULL, NULL, "arat", NULL,
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
297 };
298
299 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
300 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
301 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
302 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
303 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
304 CPUID_PSE36 | CPUID_FXSR)
305 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
306 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
307 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
308 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
309 CPUID_PAE | CPUID_SEP | CPUID_APIC)
310
311 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
312 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
313 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
314 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
315 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
316 /* partly implemented:
317 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
318 /* missing:
319 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
320 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
321 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
322 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
323 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
324 /* missing:
325 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
326 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
327 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
328 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
329 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
330 CPUID_EXT_RDRAND */
331
332 #ifdef TARGET_X86_64
333 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
334 #else
335 #define TCG_EXT2_X86_64_FEATURES 0
336 #endif
337
338 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
339 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
340 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
341 TCG_EXT2_X86_64_FEATURES)
342 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
343 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
344 #define TCG_EXT4_FEATURES 0
345 #define TCG_SVM_FEATURES 0
346 #define TCG_KVM_FEATURES 0
347 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
348 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
349 /* missing:
350 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
351 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
352 CPUID_7_0_EBX_RDSEED */
353 #define TCG_APM_FEATURES 0
354 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
355
356
357 typedef struct FeatureWordInfo {
358 const char **feat_names;
359 uint32_t cpuid_eax; /* Input EAX for CPUID */
360 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
361 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
362 int cpuid_reg; /* output register (R_* constant) */
363 uint32_t tcg_features; /* Feature flags supported by TCG */
364 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
365 } FeatureWordInfo;
366
367 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
368 [FEAT_1_EDX] = {
369 .feat_names = feature_name,
370 .cpuid_eax = 1, .cpuid_reg = R_EDX,
371 .tcg_features = TCG_FEATURES,
372 },
373 [FEAT_1_ECX] = {
374 .feat_names = ext_feature_name,
375 .cpuid_eax = 1, .cpuid_reg = R_ECX,
376 .tcg_features = TCG_EXT_FEATURES,
377 },
378 [FEAT_8000_0001_EDX] = {
379 .feat_names = ext2_feature_name,
380 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
381 .tcg_features = TCG_EXT2_FEATURES,
382 },
383 [FEAT_8000_0001_ECX] = {
384 .feat_names = ext3_feature_name,
385 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
386 .tcg_features = TCG_EXT3_FEATURES,
387 },
388 [FEAT_C000_0001_EDX] = {
389 .feat_names = ext4_feature_name,
390 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
391 .tcg_features = TCG_EXT4_FEATURES,
392 },
393 [FEAT_KVM] = {
394 .feat_names = kvm_feature_name,
395 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
396 .tcg_features = TCG_KVM_FEATURES,
397 },
398 [FEAT_SVM] = {
399 .feat_names = svm_feature_name,
400 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
401 .tcg_features = TCG_SVM_FEATURES,
402 },
403 [FEAT_7_0_EBX] = {
404 .feat_names = cpuid_7_0_ebx_feature_name,
405 .cpuid_eax = 7,
406 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
407 .cpuid_reg = R_EBX,
408 .tcg_features = TCG_7_0_EBX_FEATURES,
409 },
410 [FEAT_8000_0007_EDX] = {
411 .feat_names = cpuid_apm_edx_feature_name,
412 .cpuid_eax = 0x80000007,
413 .cpuid_reg = R_EDX,
414 .tcg_features = TCG_APM_FEATURES,
415 .unmigratable_flags = CPUID_APM_INVTSC,
416 },
417 [FEAT_XSAVE] = {
418 .feat_names = cpuid_xsave_feature_name,
419 .cpuid_eax = 0xd,
420 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
421 .cpuid_reg = R_EAX,
422 .tcg_features = 0,
423 },
424 [FEAT_6_EAX] = {
425 .feat_names = cpuid_6_feature_name,
426 .cpuid_eax = 6, .cpuid_reg = R_EAX,
427 .tcg_features = TCG_6_EAX_FEATURES,
428 },
429 };
430
431 typedef struct X86RegisterInfo32 {
432 /* Name of register */
433 const char *name;
434 /* QAPI enum value register */
435 X86CPURegister32 qapi_enum;
436 } X86RegisterInfo32;
437
438 #define REGISTER(reg) \
439 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
440 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
441 REGISTER(EAX),
442 REGISTER(ECX),
443 REGISTER(EDX),
444 REGISTER(EBX),
445 REGISTER(ESP),
446 REGISTER(EBP),
447 REGISTER(ESI),
448 REGISTER(EDI),
449 };
450 #undef REGISTER
451
452 typedef struct ExtSaveArea {
453 uint32_t feature, bits;
454 uint32_t offset, size;
455 } ExtSaveArea;
456
457 static const ExtSaveArea ext_save_areas[] = {
458 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
459 .offset = 0x240, .size = 0x100 },
460 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
461 .offset = 0x3c0, .size = 0x40 },
462 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
463 .offset = 0x400, .size = 0x40 },
464 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
465 .offset = 0x440, .size = 0x40 },
466 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
467 .offset = 0x480, .size = 0x200 },
468 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
469 .offset = 0x680, .size = 0x400 },
470 };
471
472 const char *get_register_name_32(unsigned int reg)
473 {
474 if (reg >= CPU_NB_REGS32) {
475 return NULL;
476 }
477 return x86_reg_info_32[reg].name;
478 }
479
480 /*
481 * Returns the set of feature flags that are supported and migratable by
482 * QEMU, for a given FeatureWord.
483 */
484 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
485 {
486 FeatureWordInfo *wi = &feature_word_info[w];
487 uint32_t r = 0;
488 int i;
489
490 for (i = 0; i < 32; i++) {
491 uint32_t f = 1U << i;
492 /* If the feature name is unknown, it is not supported by QEMU yet */
493 if (!wi->feat_names[i]) {
494 continue;
495 }
496 /* Skip features known to QEMU, but explicitly marked as unmigratable */
497 if (wi->unmigratable_flags & f) {
498 continue;
499 }
500 r |= f;
501 }
502 return r;
503 }
504
505 void host_cpuid(uint32_t function, uint32_t count,
506 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
507 {
508 uint32_t vec[4];
509
510 #ifdef __x86_64__
511 asm volatile("cpuid"
512 : "=a"(vec[0]), "=b"(vec[1]),
513 "=c"(vec[2]), "=d"(vec[3])
514 : "0"(function), "c"(count) : "cc");
515 #elif defined(__i386__)
516 asm volatile("pusha \n\t"
517 "cpuid \n\t"
518 "mov %%eax, 0(%2) \n\t"
519 "mov %%ebx, 4(%2) \n\t"
520 "mov %%ecx, 8(%2) \n\t"
521 "mov %%edx, 12(%2) \n\t"
522 "popa"
523 : : "a"(function), "c"(count), "S"(vec)
524 : "memory", "cc");
525 #else
526 abort();
527 #endif
528
529 if (eax)
530 *eax = vec[0];
531 if (ebx)
532 *ebx = vec[1];
533 if (ecx)
534 *ecx = vec[2];
535 if (edx)
536 *edx = vec[3];
537 }
538
539 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
540
541 /* general substring compare of *[s1..e1) and *[s2..e2). sx is the start of
542  * a substring; ex, if not NULL, points to the first char after the substring,
543  * otherwise the string is assumed to be terminated by a nul.
544 * Return lexical ordering of *s1:*s2.
545 */
546 static int sstrcmp(const char *s1, const char *e1,
547 const char *s2, const char *e2)
548 {
549 for (;;) {
550 if (!*s1 || !*s2 || *s1 != *s2)
551 return (*s1 - *s2);
552 ++s1, ++s2;
553 if (s1 == e1 && s2 == e2)
554 return (0);
555 else if (s1 == e1)
556 return (*s2);
557 else if (s2 == e2)
558 return (*s1);
559 }
560 }
561
562 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
563 * '|' delimited (possibly empty) strings in which case search for a match
564 * within the alternatives proceeds left to right. Return 0 for success,
565 * non-zero otherwise.
566 */
567 static int altcmp(const char *s, const char *e, const char *altstr)
568 {
569 const char *p, *q;
570
571 for (q = p = altstr; ; ) {
572 while (*p && *p != '|')
573 ++p;
574 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
575 return (0);
576 if (!*p)
577 return (1);
578 else
579 q = ++p;
580 }
581 }
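/* Illustrative example, using the alias syntax from the feature name tables
 * above: altcmp("sse4_1", NULL, "sse4.1|sse4_1") returns 0 because the
 * second '|'-delimited alternative matches, whereas altcmp("sse5", NULL,
 * "sse4.1|sse4_1") returns 1 since no alternative matches.
 */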
582
583 /* search featureset for flag *[s..e), if found set corresponding bit in
584 * *pval and return true, otherwise return false
585 */
586 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
587 const char **featureset)
588 {
589 uint32_t mask;
590 const char **ppc;
591 bool found = false;
592
593 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
594 if (*ppc && !altcmp(s, e, *ppc)) {
595 *pval |= mask;
596 found = true;
597 }
598 }
599 return found;
600 }
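/* For illustration: lookup_feature(&val, "popcnt", NULL, ext_feature_name)
 * sets bit 23 of val (CPUID_EXT_POPCNT), because "popcnt" is entry 23 of
 * ext_feature_name[] and the mask is shifted once per table entry.
 */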
601
602 static void add_flagname_to_bitmaps(const char *flagname,
603 FeatureWordArray words,
604 Error **errp)
605 {
606 FeatureWord w;
607 for (w = 0; w < FEATURE_WORDS; w++) {
608 FeatureWordInfo *wi = &feature_word_info[w];
609 if (wi->feat_names &&
610 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
611 break;
612 }
613 }
614 if (w == FEATURE_WORDS) {
615 error_setg(errp, "CPU feature %s not found", flagname);
616 }
617 }
618
619 /* CPU class name definitions: */
620
621 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
622 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
623
624 /* Return type name for a given CPU model name
625 * Caller is responsible for freeing the returned string.
626 */
627 static char *x86_cpu_type_name(const char *model_name)
628 {
629 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
630 }
631
632 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
633 {
634 ObjectClass *oc;
635 char *typename;
636
637 if (cpu_model == NULL) {
638 return NULL;
639 }
640
641 typename = x86_cpu_type_name(cpu_model);
642 oc = object_class_by_name(typename);
643 g_free(typename);
644 return oc;
645 }
646
647 struct X86CPUDefinition {
648 const char *name;
649 uint32_t level;
650 uint32_t xlevel;
651 uint32_t xlevel2;
652     /* vendor is a zero-terminated, 12-character ASCII string */
653 char vendor[CPUID_VENDOR_SZ + 1];
654 int family;
655 int model;
656 int stepping;
657 FeatureWordArray features;
658 char model_id[48];
659 bool cache_info_passthrough;
660 };
661
662 static X86CPUDefinition builtin_x86_defs[] = {
663 {
664 .name = "qemu64",
665 .level = 0xd,
666 .vendor = CPUID_VENDOR_AMD,
667 .family = 6,
668 .model = 6,
669 .stepping = 3,
670 .features[FEAT_1_EDX] =
671 PPRO_FEATURES |
672 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
673 CPUID_PSE36,
674 .features[FEAT_1_ECX] =
675 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
676 .features[FEAT_8000_0001_EDX] =
677 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
678 .features[FEAT_8000_0001_ECX] =
679 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
680 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
681 .xlevel = 0x8000000A,
682 },
683 {
684 .name = "phenom",
685 .level = 5,
686 .vendor = CPUID_VENDOR_AMD,
687 .family = 16,
688 .model = 2,
689 .stepping = 3,
690 /* Missing: CPUID_HT */
691 .features[FEAT_1_EDX] =
692 PPRO_FEATURES |
693 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
694 CPUID_PSE36 | CPUID_VME,
695 .features[FEAT_1_ECX] =
696 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
697 CPUID_EXT_POPCNT,
698 .features[FEAT_8000_0001_EDX] =
699 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
700 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
701 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
702 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
703 CPUID_EXT3_CR8LEG,
704 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
705 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
706 .features[FEAT_8000_0001_ECX] =
707 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
708 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
709 /* Missing: CPUID_SVM_LBRV */
710 .features[FEAT_SVM] =
711 CPUID_SVM_NPT,
712 .xlevel = 0x8000001A,
713 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
714 },
715 {
716 .name = "core2duo",
717 .level = 10,
718 .vendor = CPUID_VENDOR_INTEL,
719 .family = 6,
720 .model = 15,
721 .stepping = 11,
722 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
723 .features[FEAT_1_EDX] =
724 PPRO_FEATURES |
725 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
726 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
727 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
728 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
729 .features[FEAT_1_ECX] =
730 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
731 CPUID_EXT_CX16,
732 .features[FEAT_8000_0001_EDX] =
733 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
734 .features[FEAT_8000_0001_ECX] =
735 CPUID_EXT3_LAHF_LM,
736 .xlevel = 0x80000008,
737 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
738 },
739 {
740 .name = "kvm64",
741 .level = 0xd,
742 .vendor = CPUID_VENDOR_INTEL,
743 .family = 15,
744 .model = 6,
745 .stepping = 1,
746 /* Missing: CPUID_HT */
747 .features[FEAT_1_EDX] =
748 PPRO_FEATURES | CPUID_VME |
749 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
750 CPUID_PSE36,
751 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
752 .features[FEAT_1_ECX] =
753 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
754 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
755 .features[FEAT_8000_0001_EDX] =
756 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
757 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
758 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
759 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
760 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
761 .features[FEAT_8000_0001_ECX] =
762 0,
763 .xlevel = 0x80000008,
764 .model_id = "Common KVM processor"
765 },
766 {
767 .name = "qemu32",
768 .level = 4,
769 .vendor = CPUID_VENDOR_INTEL,
770 .family = 6,
771 .model = 6,
772 .stepping = 3,
773 .features[FEAT_1_EDX] =
774 PPRO_FEATURES,
775 .features[FEAT_1_ECX] =
776 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
777 .xlevel = 0x80000004,
778 },
779 {
780 .name = "kvm32",
781 .level = 5,
782 .vendor = CPUID_VENDOR_INTEL,
783 .family = 15,
784 .model = 6,
785 .stepping = 1,
786 .features[FEAT_1_EDX] =
787 PPRO_FEATURES | CPUID_VME |
788 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
789 .features[FEAT_1_ECX] =
790 CPUID_EXT_SSE3,
791 .features[FEAT_8000_0001_ECX] =
792 0,
793 .xlevel = 0x80000008,
794 .model_id = "Common 32-bit KVM processor"
795 },
796 {
797 .name = "coreduo",
798 .level = 10,
799 .vendor = CPUID_VENDOR_INTEL,
800 .family = 6,
801 .model = 14,
802 .stepping = 8,
803 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
804 .features[FEAT_1_EDX] =
805 PPRO_FEATURES | CPUID_VME |
806 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
807 CPUID_SS,
808 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
809 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
810 .features[FEAT_1_ECX] =
811 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
812 .features[FEAT_8000_0001_EDX] =
813 CPUID_EXT2_NX,
814 .xlevel = 0x80000008,
815 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
816 },
817 {
818 .name = "486",
819 .level = 1,
820 .vendor = CPUID_VENDOR_INTEL,
821 .family = 4,
822 .model = 8,
823 .stepping = 0,
824 .features[FEAT_1_EDX] =
825 I486_FEATURES,
826 .xlevel = 0,
827 },
828 {
829 .name = "pentium",
830 .level = 1,
831 .vendor = CPUID_VENDOR_INTEL,
832 .family = 5,
833 .model = 4,
834 .stepping = 3,
835 .features[FEAT_1_EDX] =
836 PENTIUM_FEATURES,
837 .xlevel = 0,
838 },
839 {
840 .name = "pentium2",
841 .level = 2,
842 .vendor = CPUID_VENDOR_INTEL,
843 .family = 6,
844 .model = 5,
845 .stepping = 2,
846 .features[FEAT_1_EDX] =
847 PENTIUM2_FEATURES,
848 .xlevel = 0,
849 },
850 {
851 .name = "pentium3",
852 .level = 3,
853 .vendor = CPUID_VENDOR_INTEL,
854 .family = 6,
855 .model = 7,
856 .stepping = 3,
857 .features[FEAT_1_EDX] =
858 PENTIUM3_FEATURES,
859 .xlevel = 0,
860 },
861 {
862 .name = "athlon",
863 .level = 2,
864 .vendor = CPUID_VENDOR_AMD,
865 .family = 6,
866 .model = 2,
867 .stepping = 3,
868 .features[FEAT_1_EDX] =
869 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
870 CPUID_MCA,
871 .features[FEAT_8000_0001_EDX] =
872 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
873 .xlevel = 0x80000008,
874 },
875 {
876 .name = "n270",
877 .level = 10,
878 .vendor = CPUID_VENDOR_INTEL,
879 .family = 6,
880 .model = 28,
881 .stepping = 2,
882 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
883 .features[FEAT_1_EDX] =
884 PPRO_FEATURES |
885 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
886 CPUID_ACPI | CPUID_SS,
887         /* Some CPUs have no CPUID_SEP */
888 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
889 * CPUID_EXT_XTPR */
890 .features[FEAT_1_ECX] =
891 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
892 CPUID_EXT_MOVBE,
893 .features[FEAT_8000_0001_EDX] =
894 CPUID_EXT2_NX,
895 .features[FEAT_8000_0001_ECX] =
896 CPUID_EXT3_LAHF_LM,
897 .xlevel = 0x80000008,
898 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
899 },
900 {
901 .name = "Conroe",
902 .level = 10,
903 .vendor = CPUID_VENDOR_INTEL,
904 .family = 6,
905 .model = 15,
906 .stepping = 3,
907 .features[FEAT_1_EDX] =
908 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
909 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
910 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
911 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
912 CPUID_DE | CPUID_FP87,
913 .features[FEAT_1_ECX] =
914 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
915 .features[FEAT_8000_0001_EDX] =
916 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
917 .features[FEAT_8000_0001_ECX] =
918 CPUID_EXT3_LAHF_LM,
919 .xlevel = 0x80000008,
920 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
921 },
922 {
923 .name = "Penryn",
924 .level = 10,
925 .vendor = CPUID_VENDOR_INTEL,
926 .family = 6,
927 .model = 23,
928 .stepping = 3,
929 .features[FEAT_1_EDX] =
930 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
931 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
932 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
933 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
934 CPUID_DE | CPUID_FP87,
935 .features[FEAT_1_ECX] =
936 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
937 CPUID_EXT_SSE3,
938 .features[FEAT_8000_0001_EDX] =
939 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
940 .features[FEAT_8000_0001_ECX] =
941 CPUID_EXT3_LAHF_LM,
942 .xlevel = 0x80000008,
943 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
944 },
945 {
946 .name = "Nehalem",
947 .level = 11,
948 .vendor = CPUID_VENDOR_INTEL,
949 .family = 6,
950 .model = 26,
951 .stepping = 3,
952 .features[FEAT_1_EDX] =
953 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
954 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
955 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
956 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
957 CPUID_DE | CPUID_FP87,
958 .features[FEAT_1_ECX] =
959 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
960 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
961 .features[FEAT_8000_0001_EDX] =
962 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
963 .features[FEAT_8000_0001_ECX] =
964 CPUID_EXT3_LAHF_LM,
965 .xlevel = 0x80000008,
966 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
967 },
968 {
969 .name = "Westmere",
970 .level = 11,
971 .vendor = CPUID_VENDOR_INTEL,
972 .family = 6,
973 .model = 44,
974 .stepping = 1,
975 .features[FEAT_1_EDX] =
976 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
977 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
978 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
979 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
980 CPUID_DE | CPUID_FP87,
981 .features[FEAT_1_ECX] =
982 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
983 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
984 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
985 .features[FEAT_8000_0001_EDX] =
986 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
987 .features[FEAT_8000_0001_ECX] =
988 CPUID_EXT3_LAHF_LM,
989 .features[FEAT_6_EAX] =
990 CPUID_6_EAX_ARAT,
991 .xlevel = 0x80000008,
992 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
993 },
994 {
995 .name = "SandyBridge",
996 .level = 0xd,
997 .vendor = CPUID_VENDOR_INTEL,
998 .family = 6,
999 .model = 42,
1000 .stepping = 1,
1001 .features[FEAT_1_EDX] =
1002 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1003 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1004 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1005 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1006 CPUID_DE | CPUID_FP87,
1007 .features[FEAT_1_ECX] =
1008 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1009 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1010 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1011 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1012 CPUID_EXT_SSE3,
1013 .features[FEAT_8000_0001_EDX] =
1014 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1015 CPUID_EXT2_SYSCALL,
1016 .features[FEAT_8000_0001_ECX] =
1017 CPUID_EXT3_LAHF_LM,
1018 .features[FEAT_XSAVE] =
1019 CPUID_XSAVE_XSAVEOPT,
1020 .features[FEAT_6_EAX] =
1021 CPUID_6_EAX_ARAT,
1022 .xlevel = 0x80000008,
1023 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1024 },
1025 {
1026 .name = "IvyBridge",
1027 .level = 0xd,
1028 .vendor = CPUID_VENDOR_INTEL,
1029 .family = 6,
1030 .model = 58,
1031 .stepping = 9,
1032 .features[FEAT_1_EDX] =
1033 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1034 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1035 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1036 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1037 CPUID_DE | CPUID_FP87,
1038 .features[FEAT_1_ECX] =
1039 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1040 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1041 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1042 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1043 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1044 .features[FEAT_7_0_EBX] =
1045 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1046 CPUID_7_0_EBX_ERMS,
1047 .features[FEAT_8000_0001_EDX] =
1048 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1049 CPUID_EXT2_SYSCALL,
1050 .features[FEAT_8000_0001_ECX] =
1051 CPUID_EXT3_LAHF_LM,
1052 .features[FEAT_XSAVE] =
1053 CPUID_XSAVE_XSAVEOPT,
1054 .features[FEAT_6_EAX] =
1055 CPUID_6_EAX_ARAT,
1056 .xlevel = 0x80000008,
1057 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1058 },
1059 {
1060 .name = "Haswell-noTSX",
1061 .level = 0xd,
1062 .vendor = CPUID_VENDOR_INTEL,
1063 .family = 6,
1064 .model = 60,
1065 .stepping = 1,
1066 .features[FEAT_1_EDX] =
1067 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1068 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1069 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1070 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1071 CPUID_DE | CPUID_FP87,
1072 .features[FEAT_1_ECX] =
1073 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1074 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1075 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1076 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1077 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1078 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1079 .features[FEAT_8000_0001_EDX] =
1080 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1081 CPUID_EXT2_SYSCALL,
1082 .features[FEAT_8000_0001_ECX] =
1083 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1084 .features[FEAT_7_0_EBX] =
1085 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1086 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1087 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1088 .features[FEAT_XSAVE] =
1089 CPUID_XSAVE_XSAVEOPT,
1090 .features[FEAT_6_EAX] =
1091 CPUID_6_EAX_ARAT,
1092 .xlevel = 0x80000008,
1093 .model_id = "Intel Core Processor (Haswell, no TSX)",
1094 }, {
1095 .name = "Haswell",
1096 .level = 0xd,
1097 .vendor = CPUID_VENDOR_INTEL,
1098 .family = 6,
1099 .model = 60,
1100 .stepping = 1,
1101 .features[FEAT_1_EDX] =
1102 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1103 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1104 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1105 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1106 CPUID_DE | CPUID_FP87,
1107 .features[FEAT_1_ECX] =
1108 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1109 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1110 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1111 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1112 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1113 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1114 .features[FEAT_8000_0001_EDX] =
1115 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1116 CPUID_EXT2_SYSCALL,
1117 .features[FEAT_8000_0001_ECX] =
1118 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1119 .features[FEAT_7_0_EBX] =
1120 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1121 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1122 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1123 CPUID_7_0_EBX_RTM,
1124 .features[FEAT_XSAVE] =
1125 CPUID_XSAVE_XSAVEOPT,
1126 .features[FEAT_6_EAX] =
1127 CPUID_6_EAX_ARAT,
1128 .xlevel = 0x80000008,
1129 .model_id = "Intel Core Processor (Haswell)",
1130 },
1131 {
1132 .name = "Broadwell-noTSX",
1133 .level = 0xd,
1134 .vendor = CPUID_VENDOR_INTEL,
1135 .family = 6,
1136 .model = 61,
1137 .stepping = 2,
1138 .features[FEAT_1_EDX] =
1139 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1140 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1141 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1142 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1143 CPUID_DE | CPUID_FP87,
1144 .features[FEAT_1_ECX] =
1145 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1146 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1147 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1148 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1149 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1150 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1151 .features[FEAT_8000_0001_EDX] =
1152 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1153 CPUID_EXT2_SYSCALL,
1154 .features[FEAT_8000_0001_ECX] =
1155 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1156 .features[FEAT_7_0_EBX] =
1157 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1158 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1159 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1160 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1161 CPUID_7_0_EBX_SMAP,
1162 .features[FEAT_XSAVE] =
1163 CPUID_XSAVE_XSAVEOPT,
1164 .features[FEAT_6_EAX] =
1165 CPUID_6_EAX_ARAT,
1166 .xlevel = 0x80000008,
1167 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1168 },
1169 {
1170 .name = "Broadwell",
1171 .level = 0xd,
1172 .vendor = CPUID_VENDOR_INTEL,
1173 .family = 6,
1174 .model = 61,
1175 .stepping = 2,
1176 .features[FEAT_1_EDX] =
1177 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1178 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1179 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1180 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1181 CPUID_DE | CPUID_FP87,
1182 .features[FEAT_1_ECX] =
1183 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1184 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1185 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1186 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1187 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1188 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1189 .features[FEAT_8000_0001_EDX] =
1190 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1191 CPUID_EXT2_SYSCALL,
1192 .features[FEAT_8000_0001_ECX] =
1193 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1194 .features[FEAT_7_0_EBX] =
1195 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1196 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1197 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1198 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1199 CPUID_7_0_EBX_SMAP,
1200 .features[FEAT_XSAVE] =
1201 CPUID_XSAVE_XSAVEOPT,
1202 .features[FEAT_6_EAX] =
1203 CPUID_6_EAX_ARAT,
1204 .xlevel = 0x80000008,
1205 .model_id = "Intel Core Processor (Broadwell)",
1206 },
1207 {
1208 .name = "Opteron_G1",
1209 .level = 5,
1210 .vendor = CPUID_VENDOR_AMD,
1211 .family = 15,
1212 .model = 6,
1213 .stepping = 1,
1214 .features[FEAT_1_EDX] =
1215 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1216 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1217 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1218 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1219 CPUID_DE | CPUID_FP87,
1220 .features[FEAT_1_ECX] =
1221 CPUID_EXT_SSE3,
1222 .features[FEAT_8000_0001_EDX] =
1223 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1224 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1225 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1226 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1227 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1228 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1229 .xlevel = 0x80000008,
1230 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1231 },
1232 {
1233 .name = "Opteron_G2",
1234 .level = 5,
1235 .vendor = CPUID_VENDOR_AMD,
1236 .family = 15,
1237 .model = 6,
1238 .stepping = 1,
1239 .features[FEAT_1_EDX] =
1240 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1241 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1242 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1243 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1244 CPUID_DE | CPUID_FP87,
1245 .features[FEAT_1_ECX] =
1246 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1247 .features[FEAT_8000_0001_EDX] =
1248 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1249 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1250 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1251 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1252 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1253 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1254 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1255 .features[FEAT_8000_0001_ECX] =
1256 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1257 .xlevel = 0x80000008,
1258 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1259 },
1260 {
1261 .name = "Opteron_G3",
1262 .level = 5,
1263 .vendor = CPUID_VENDOR_AMD,
1264 .family = 15,
1265 .model = 6,
1266 .stepping = 1,
1267 .features[FEAT_1_EDX] =
1268 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1269 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1270 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1271 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1272 CPUID_DE | CPUID_FP87,
1273 .features[FEAT_1_ECX] =
1274 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1275 CPUID_EXT_SSE3,
1276 .features[FEAT_8000_0001_EDX] =
1277 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1278 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1279 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1280 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1281 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1282 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1283 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1284 .features[FEAT_8000_0001_ECX] =
1285 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1286 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1287 .xlevel = 0x80000008,
1288 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1289 },
1290 {
1291 .name = "Opteron_G4",
1292 .level = 0xd,
1293 .vendor = CPUID_VENDOR_AMD,
1294 .family = 21,
1295 .model = 1,
1296 .stepping = 2,
1297 .features[FEAT_1_EDX] =
1298 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1299 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1300 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1301 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1302 CPUID_DE | CPUID_FP87,
1303 .features[FEAT_1_ECX] =
1304 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1305 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1306 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1307 CPUID_EXT_SSE3,
1308 .features[FEAT_8000_0001_EDX] =
1309 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1310 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1311 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1312 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1313 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1314 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1315 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1316 .features[FEAT_8000_0001_ECX] =
1317 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1318 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1319 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1320 CPUID_EXT3_LAHF_LM,
1321 /* no xsaveopt! */
1322 .xlevel = 0x8000001A,
1323 .model_id = "AMD Opteron 62xx class CPU",
1324 },
1325 {
1326 .name = "Opteron_G5",
1327 .level = 0xd,
1328 .vendor = CPUID_VENDOR_AMD,
1329 .family = 21,
1330 .model = 2,
1331 .stepping = 0,
1332 .features[FEAT_1_EDX] =
1333 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1334 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1335 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1336 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1337 CPUID_DE | CPUID_FP87,
1338 .features[FEAT_1_ECX] =
1339 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1340 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1341 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1342 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1343 .features[FEAT_8000_0001_EDX] =
1344 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1345 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1346 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1347 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1348 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1349 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1350 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1351 .features[FEAT_8000_0001_ECX] =
1352 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1353 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1354 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1355 CPUID_EXT3_LAHF_LM,
1356 /* no xsaveopt! */
1357 .xlevel = 0x8000001A,
1358 .model_id = "AMD Opteron 63xx class CPU",
1359 },
1360 };
1361
1362 typedef struct PropValue {
1363 const char *prop, *value;
1364 } PropValue;
1365
1366 /* KVM-specific features that are automatically added/removed
1367 * from all CPU models when KVM is enabled.
1368 */
1369 static PropValue kvm_default_props[] = {
1370 { "kvmclock", "on" },
1371 { "kvm-nopiodelay", "on" },
1372 { "kvm-asyncpf", "on" },
1373 { "kvm-steal-time", "on" },
1374 { "kvm-pv-eoi", "on" },
1375 { "kvmclock-stable-bit", "on" },
1376 { "x2apic", "on" },
1377 { "acpi", "off" },
1378 { "monitor", "off" },
1379 { "svm", "off" },
1380 { NULL, NULL },
1381 };
1382
1383 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1384 {
1385 PropValue *pv;
1386 for (pv = kvm_default_props; pv->prop; pv++) {
1387 if (!strcmp(pv->prop, prop)) {
1388 pv->value = value;
1389 break;
1390 }
1391 }
1392
1393 /* It is valid to call this function only for properties that
1394 * are already present in the kvm_default_props table.
1395 */
1396 assert(pv->prop);
1397 }
1398
1399 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1400 bool migratable_only);
1401
1402 #ifdef CONFIG_KVM
1403
1404 static int cpu_x86_fill_model_id(char *str)
1405 {
1406 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1407 int i;
1408
1409 for (i = 0; i < 3; i++) {
1410 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1411 memcpy(str + i * 16 + 0, &eax, 4);
1412 memcpy(str + i * 16 + 4, &ebx, 4);
1413 memcpy(str + i * 16 + 8, &ecx, 4);
1414 memcpy(str + i * 16 + 12, &edx, 4);
1415 }
1416 return 0;
1417 }
1418
1419 static X86CPUDefinition host_cpudef;
1420
1421 static Property host_x86_cpu_properties[] = {
1422 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1423 DEFINE_PROP_END_OF_LIST()
1424 };
1425
1426 /* class_init for the "host" CPU model
1427 *
1428 * This function may be called before KVM is initialized.
1429 */
1430 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1431 {
1432 DeviceClass *dc = DEVICE_CLASS(oc);
1433 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1434 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1435
1436 xcc->kvm_required = true;
1437
1438 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1439 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1440
1441 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1442 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1443 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1444 host_cpudef.stepping = eax & 0x0F;
1445
1446 cpu_x86_fill_model_id(host_cpudef.model_id);
1447
1448 xcc->cpu_def = &host_cpudef;
1449 host_cpudef.cache_info_passthrough = true;
1450
1451 /* level, xlevel, xlevel2, and the feature words are initialized on
1452 * instance_init, because they require KVM to be initialized.
1453 */
1454
1455 dc->props = host_x86_cpu_properties;
1456 }
1457
1458 static void host_x86_cpu_initfn(Object *obj)
1459 {
1460 X86CPU *cpu = X86_CPU(obj);
1461 CPUX86State *env = &cpu->env;
1462 KVMState *s = kvm_state;
1463
1464 assert(kvm_enabled());
1465
1466 /* We can't fill the features array here because we don't know yet if
1467 * "migratable" is true or false.
1468 */
1469 cpu->host_features = true;
1470
1471 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1472 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1473 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1474
1475 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1476 }
1477
1478 static const TypeInfo host_x86_cpu_type_info = {
1479 .name = X86_CPU_TYPE_NAME("host"),
1480 .parent = TYPE_X86_CPU,
1481 .instance_init = host_x86_cpu_initfn,
1482 .class_init = host_x86_cpu_class_init,
1483 };
1484
1485 #endif
1486
1487 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1488 {
1489 FeatureWordInfo *f = &feature_word_info[w];
1490 int i;
1491
1492 for (i = 0; i < 32; ++i) {
1493 if (1 << i & mask) {
1494 const char *reg = get_register_name_32(f->cpuid_reg);
1495 assert(reg);
1496 fprintf(stderr, "warning: %s doesn't support requested feature: "
1497 "CPUID.%02XH:%s%s%s [bit %d]\n",
1498 kvm_enabled() ? "host" : "TCG",
1499 f->cpuid_eax, reg,
1500 f->feat_names[i] ? "." : "",
1501 f->feat_names[i] ? f->feat_names[i] : "", i);
1502 }
1503 }
1504 }
1505
1506 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1507 const char *name, Error **errp)
1508 {
1509 X86CPU *cpu = X86_CPU(obj);
1510 CPUX86State *env = &cpu->env;
1511 int64_t value;
1512
1513 value = (env->cpuid_version >> 8) & 0xf;
1514 if (value == 0xf) {
1515 value += (env->cpuid_version >> 20) & 0xff;
1516 }
1517 visit_type_int(v, &value, name, errp);
1518 }
1519
1520 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1521 const char *name, Error **errp)
1522 {
1523 X86CPU *cpu = X86_CPU(obj);
1524 CPUX86State *env = &cpu->env;
1525 const int64_t min = 0;
1526 const int64_t max = 0xff + 0xf;
1527 Error *local_err = NULL;
1528 int64_t value;
1529
1530 visit_type_int(v, &value, name, &local_err);
1531 if (local_err) {
1532 error_propagate(errp, local_err);
1533 return;
1534 }
1535 if (value < min || value > max) {
1536 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1537 name ? name : "null", value, min, max);
1538 return;
1539 }
1540
1541 env->cpuid_version &= ~0xff00f00;
1542 if (value > 0x0f) {
1543 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1544 } else {
1545 env->cpuid_version |= value << 8;
1546 }
1547 }
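/* Encoding note (illustrative): setting "family" to 21 (0x15) exceeds 0x0f,
 * so the setter stores base family 0xf plus extended family 21 - 15 = 6,
 * i.e. cpuid_version receives 0xf00 | (6 << 20); the getter above reverses
 * this by adding the extended family back onto the base family.
 */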
1548
1549 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1550 const char *name, Error **errp)
1551 {
1552 X86CPU *cpu = X86_CPU(obj);
1553 CPUX86State *env = &cpu->env;
1554 int64_t value;
1555
1556 value = (env->cpuid_version >> 4) & 0xf;
1557 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1558 visit_type_int(v, &value, name, errp);
1559 }
1560
1561 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1562 const char *name, Error **errp)
1563 {
1564 X86CPU *cpu = X86_CPU(obj);
1565 CPUX86State *env = &cpu->env;
1566 const int64_t min = 0;
1567 const int64_t max = 0xff;
1568 Error *local_err = NULL;
1569 int64_t value;
1570
1571 visit_type_int(v, &value, name, &local_err);
1572 if (local_err) {
1573 error_propagate(errp, local_err);
1574 return;
1575 }
1576 if (value < min || value > max) {
1577 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1578 name ? name : "null", value, min, max);
1579 return;
1580 }
1581
1582 env->cpuid_version &= ~0xf00f0;
1583 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1584 }
1585
1586 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1587 void *opaque, const char *name,
1588 Error **errp)
1589 {
1590 X86CPU *cpu = X86_CPU(obj);
1591 CPUX86State *env = &cpu->env;
1592 int64_t value;
1593
1594 value = env->cpuid_version & 0xf;
1595 visit_type_int(v, &value, name, errp);
1596 }
1597
1598 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1599 void *opaque, const char *name,
1600 Error **errp)
1601 {
1602 X86CPU *cpu = X86_CPU(obj);
1603 CPUX86State *env = &cpu->env;
1604 const int64_t min = 0;
1605 const int64_t max = 0xf;
1606 Error *local_err = NULL;
1607 int64_t value;
1608
1609 visit_type_int(v, &value, name, &local_err);
1610 if (local_err) {
1611 error_propagate(errp, local_err);
1612 return;
1613 }
1614 if (value < min || value > max) {
1615 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1616 name ? name : "null", value, min, max);
1617 return;
1618 }
1619
1620 env->cpuid_version &= ~0xf;
1621 env->cpuid_version |= value & 0xf;
1622 }
1623
1624 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1625 {
1626 X86CPU *cpu = X86_CPU(obj);
1627 CPUX86State *env = &cpu->env;
1628 char *value;
1629
1630 value = g_malloc(CPUID_VENDOR_SZ + 1);
1631 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1632 env->cpuid_vendor3);
1633 return value;
1634 }
1635
1636 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1637 Error **errp)
1638 {
1639 X86CPU *cpu = X86_CPU(obj);
1640 CPUX86State *env = &cpu->env;
1641 int i;
1642
1643 if (strlen(value) != CPUID_VENDOR_SZ) {
1644 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1645 return;
1646 }
1647
1648 env->cpuid_vendor1 = 0;
1649 env->cpuid_vendor2 = 0;
1650 env->cpuid_vendor3 = 0;
1651 for (i = 0; i < 4; i++) {
1652 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1653 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1654 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1655 }
1656 }
1657
1658 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1659 {
1660 X86CPU *cpu = X86_CPU(obj);
1661 CPUX86State *env = &cpu->env;
1662 char *value;
1663 int i;
1664
1665 value = g_malloc(48 + 1);
1666 for (i = 0; i < 48; i++) {
1667 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1668 }
1669 value[48] = '\0';
1670 return value;
1671 }
1672
1673 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1674 Error **errp)
1675 {
1676 X86CPU *cpu = X86_CPU(obj);
1677 CPUX86State *env = &cpu->env;
1678 int c, len, i;
1679
1680 if (model_id == NULL) {
1681 model_id = "";
1682 }
1683 len = strlen(model_id);
1684 memset(env->cpuid_model, 0, 48);
1685 for (i = 0; i < 48; i++) {
1686 if (i >= len) {
1687 c = '\0';
1688 } else {
1689 c = (uint8_t)model_id[i];
1690 }
1691 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1692 }
1693 }
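/* Layout note (illustrative): the 48-byte model ID is packed four characters
 * per 32-bit word, least-significant byte first, matching the register
 * layout that CPUID leaves 0x80000002..0x80000004 use for the brand string;
 * x86_cpuid_get_model_id() above applies the inverse shifts to unpack it.
 */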
1694
1695 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1696 const char *name, Error **errp)
1697 {
1698 X86CPU *cpu = X86_CPU(obj);
1699 int64_t value;
1700
1701 value = cpu->env.tsc_khz * 1000;
1702 visit_type_int(v, &value, name, errp);
1703 }
1704
1705 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1706 const char *name, Error **errp)
1707 {
1708 X86CPU *cpu = X86_CPU(obj);
1709 const int64_t min = 0;
1710 const int64_t max = INT64_MAX;
1711 Error *local_err = NULL;
1712 int64_t value;
1713
1714 visit_type_int(v, &value, name, &local_err);
1715 if (local_err) {
1716 error_propagate(errp, local_err);
1717 return;
1718 }
1719 if (value < min || value > max) {
1720 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1721 name ? name : "null", value, min, max);
1722 return;
1723 }
1724
1725 cpu->env.tsc_khz = value / 1000;
1726 }
1727
1728 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1729 const char *name, Error **errp)
1730 {
1731 X86CPU *cpu = X86_CPU(obj);
1732 int64_t value = cpu->apic_id;
1733
1734 visit_type_int(v, &value, name, errp);
1735 }
1736
1737 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1738 const char *name, Error **errp)
1739 {
1740 X86CPU *cpu = X86_CPU(obj);
1741 DeviceState *dev = DEVICE(obj);
1742 const int64_t min = 0;
1743 const int64_t max = UINT32_MAX;
1744 Error *error = NULL;
1745 int64_t value;
1746
1747 if (dev->realized) {
1748 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1749 "it was realized", name, object_get_typename(obj));
1750 return;
1751 }
1752
1753 visit_type_int(v, &value, name, &error);
1754 if (error) {
1755 error_propagate(errp, error);
1756 return;
1757 }
1758 if (value < min || value > max) {
1759 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1760 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1761 object_get_typename(obj), name, value, min, max);
1762 return;
1763 }
1764
1765 if ((value != cpu->apic_id) && cpu_exists(value)) {
1766 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1767 return;
1768 }
1769 cpu->apic_id = value;
1770 }
1771
1772 /* Generic getter for "feature-words" and "filtered-features" properties */
1773 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1774 const char *name, Error **errp)
1775 {
1776 uint32_t *array = (uint32_t *)opaque;
1777 FeatureWord w;
1778 Error *err = NULL;
1779 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1780 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1781 X86CPUFeatureWordInfoList *list = NULL;
1782
1783 for (w = 0; w < FEATURE_WORDS; w++) {
1784 FeatureWordInfo *wi = &feature_word_info[w];
1785 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1786 qwi->cpuid_input_eax = wi->cpuid_eax;
1787 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1788 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1789 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1790 qwi->features = array[w];
1791
1792 /* List will be in reverse order, but order shouldn't matter */
1793 list_entries[w].next = list;
1794 list_entries[w].value = &word_infos[w];
1795 list = &list_entries[w];
1796 }
1797
1798 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1799 error_propagate(errp, err);
1800 }
1801
1802 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1803 const char *name, Error **errp)
1804 {
1805 X86CPU *cpu = X86_CPU(obj);
1806 int64_t value = cpu->hyperv_spinlock_attempts;
1807
1808 visit_type_int(v, &value, name, errp);
1809 }
1810
1811 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1812 const char *name, Error **errp)
1813 {
1814 const int64_t min = 0xFFF;
1815 const int64_t max = UINT_MAX;
1816 X86CPU *cpu = X86_CPU(obj);
1817 Error *err = NULL;
1818 int64_t value;
1819
1820 visit_type_int(v, &value, name, &err);
1821 if (err) {
1822 error_propagate(errp, err);
1823 return;
1824 }
1825
1826 if (value < min || value > max) {
1827 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1828 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1829 object_get_typename(obj), name ? name : "null",
1830 value, min, max);
1831 return;
1832 }
1833 cpu->hyperv_spinlock_attempts = value;
1834 }
1835
1836 static PropertyInfo qdev_prop_spinlocks = {
1837 .name = "int",
1838 .get = x86_get_hv_spinlocks,
1839 .set = x86_set_hv_spinlocks,
1840 };
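/* Illustrative sketch (not part of the original source): like any other QOM
 * property, "hv-spinlocks" can be set from a string, which is what the
 * feature-string parser below does; x86_set_hv_spinlocks() rejects values
 * outside [0xFFF, UINT_MAX]:
 *
 *     Error *err = NULL;
 *     object_property_parse(OBJECT(cpu), "8191", "hv-spinlocks", &err);
 */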
1841
1842 /* Convert all '_' in a feature string option name to '-', to make feature
1843 * name conform to QOM property naming rule, which uses '-' instead of '_'.
1844 */
1845 static inline void feat2prop(char *s)
1846 {
1847 while ((s = strchr(s, '_'))) {
1848 *s = '-';
1849 }
1850 }
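/* Illustrative sketch (not part of the original source): feat2prop() rewrites
 * the string in place, so it needs a writable buffer:
 *
 *     char name[] = "hv_spinlocks";
 *     feat2prop(name);              // name is now "hv-spinlocks"
 */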
1851
1852 /* Parse "+feature,-feature,feature=foo" CPU feature string
1853 */
1854 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1855 Error **errp)
1856 {
1857 X86CPU *cpu = X86_CPU(cs);
1858 char *featurestr; /* Single "key=value" string being parsed */
1859 FeatureWord w;
1860 /* Features to be added */
1861 FeatureWordArray plus_features = { 0 };
1862 /* Features to be removed */
1863 FeatureWordArray minus_features = { 0 };
1864 uint32_t numvalue;
1865 CPUX86State *env = &cpu->env;
1866 Error *local_err = NULL;
1867
1868 featurestr = features ? strtok(features, ",") : NULL;
1869
1870 while (featurestr) {
1871 char *val;
1872 if (featurestr[0] == '+') {
1873 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1874 } else if (featurestr[0] == '-') {
1875 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1876 } else if ((val = strchr(featurestr, '='))) {
1877 *val = 0; val++;
1878 feat2prop(featurestr);
1879 if (!strcmp(featurestr, "xlevel")) {
1880 char *err;
1881 char num[32];
1882
1883 numvalue = strtoul(val, &err, 0);
1884 if (!*val || *err) {
1885 error_setg(errp, "bad numerical value %s", val);
1886 return;
1887 }
1888 if (numvalue < 0x80000000) {
1889 error_report("xlevel value shall always be >= 0x80000000"
1890 ", fixup will be removed in future versions");
1891 numvalue += 0x80000000;
1892 }
1893 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1894 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1895 } else if (!strcmp(featurestr, "tsc-freq")) {
1896 int64_t tsc_freq;
1897 char *err;
1898 char num[32];
1899
1900 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1901 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1902 if (tsc_freq < 0 || *err) {
1903 error_setg(errp, "bad numerical value %s", val);
1904 return;
1905 }
1906 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1907 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1908 &local_err);
1909 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1910 char *err;
1911 const int min = 0xFFF;
1912 char num[32];
1913 numvalue = strtoul(val, &err, 0);
1914 if (!*val || *err) {
1915 error_setg(errp, "bad numerical value %s", val);
1916 return;
1917 }
1918 if (numvalue < min) {
1919 error_report("hv-spinlocks value shall always be >= 0x%x"
1920 ", fixup will be removed in future versions",
1921 min);
1922 numvalue = min;
1923 }
1924 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1925 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1926 } else {
1927 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1928 }
1929 } else {
1930 feat2prop(featurestr);
1931 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1932 }
1933 if (local_err) {
1934 error_propagate(errp, local_err);
1935 return;
1936 }
1937 featurestr = strtok(NULL, ",");
1938 }
1939
1940 if (cpu->host_features) {
1941 for (w = 0; w < FEATURE_WORDS; w++) {
1942 env->features[w] =
1943 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1944 }
1945 }
1946
1947 for (w = 0; w < FEATURE_WORDS; w++) {
1948 env->features[w] |= plus_features[w];
1949 env->features[w] &= ~minus_features[w];
1950 }
1951 }
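/* Illustrative sketch (not part of the original source): for a command line
 * such as "-cpu qemu64,+avx,-mce,xlevel=0x80000008", cpu_x86_create() below
 * splits off the model name "qemu64" and passes the rest here; "+avx" and
 * "-mce" land in the plus/minus feature bitmaps, while "xlevel=0x80000008"
 * is forwarded to the matching QOM property via object_property_parse().
 */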
1952
1953 /* Print all cpuid feature names in featureset
1954 */
1955 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1956 {
1957 int bit;
1958 bool first = true;
1959
1960 for (bit = 0; bit < 32; bit++) {
1961 if (featureset[bit]) {
1962 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1963 first = false;
1964 }
1965 }
1966 }
1967
1968 /* Generate CPU information. */
1969 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1970 {
1971 X86CPUDefinition *def;
1972 char buf[256];
1973 int i;
1974
1975 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1976 def = &builtin_x86_defs[i];
1977 snprintf(buf, sizeof(buf), "%s", def->name);
1978 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1979 }
1980 #ifdef CONFIG_KVM
1981 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1982 "KVM processor with all supported host features "
1983 "(only available in KVM mode)");
1984 #endif
1985
1986 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1987 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1988 FeatureWordInfo *fw = &feature_word_info[i];
1989
1990 (*cpu_fprintf)(f, " ");
1991 listflags(f, cpu_fprintf, fw->feat_names);
1992 (*cpu_fprintf)(f, "\n");
1993 }
1994 }
1995
1996 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1997 {
1998 CpuDefinitionInfoList *cpu_list = NULL;
1999 X86CPUDefinition *def;
2000 int i;
2001
2002 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2003 CpuDefinitionInfoList *entry;
2004 CpuDefinitionInfo *info;
2005
2006 def = &builtin_x86_defs[i];
2007 info = g_malloc0(sizeof(*info));
2008 info->name = g_strdup(def->name);
2009
2010 entry = g_malloc0(sizeof(*entry));
2011 entry->value = info;
2012 entry->next = cpu_list;
2013 cpu_list = entry;
2014 }
2015
2016 return cpu_list;
2017 }
2018
2019 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2020 bool migratable_only)
2021 {
2022 FeatureWordInfo *wi = &feature_word_info[w];
2023 uint32_t r;
2024
2025 if (kvm_enabled()) {
2026 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2027 wi->cpuid_ecx,
2028 wi->cpuid_reg);
2029 } else if (tcg_enabled()) {
2030 r = wi->tcg_features;
2031 } else {
2032 return ~0;
2033 }
2034 if (migratable_only) {
2035 r &= x86_cpu_get_migratable_flags(w);
2036 }
2037 return r;
2038 }
2039
2040 /*
2041 * Filters CPU feature words based on host availability of each feature.
2042 *
2043 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2044 */
2045 static int x86_cpu_filter_features(X86CPU *cpu)
2046 {
2047 CPUX86State *env = &cpu->env;
2048 FeatureWord w;
2049 int rv = 0;
2050
2051 for (w = 0; w < FEATURE_WORDS; w++) {
2052 uint32_t host_feat =
2053 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2054 uint32_t requested_features = env->features[w];
2055 env->features[w] &= host_feat;
2056 cpu->filtered_features[w] = requested_features & ~env->features[w];
2057 if (cpu->filtered_features[w]) {
2058 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2059 report_unavailable_features(w, cpu->filtered_features[w]);
2060 }
2061 rv = 1;
2062 }
2063 }
2064
2065 return rv;
2066 }
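/* Illustrative sketch (not part of the original source): per feature word the
 * filtering above is plain bit arithmetic, e.g.
 *
 *     uint32_t requested = 0x0000000c;   // bits asked for by the model/user
 *     uint32_t host      = 0x00000004;   // bits the host can actually provide
 *     // env->features[w]          == (requested & host)  == 0x00000004
 *     // cpu->filtered_features[w] == (requested & ~host) == 0x00000008
 */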
2067
2068 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2069 {
2070 PropValue *pv;
2071 for (pv = props; pv->prop; pv++) {
2072 if (!pv->value) {
2073 continue;
2074 }
2075 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2076 &error_abort);
2077 }
2078 }
2079
2080 /* Load data from X86CPUDefinition
2081 */
2082 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2083 {
2084 CPUX86State *env = &cpu->env;
2085 const char *vendor;
2086 char host_vendor[CPUID_VENDOR_SZ + 1];
2087 FeatureWord w;
2088
2089 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2090 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2091 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2092 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2093 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2094 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2095 cpu->cache_info_passthrough = def->cache_info_passthrough;
2096 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2097 for (w = 0; w < FEATURE_WORDS; w++) {
2098 env->features[w] = def->features[w];
2099 }
2100
2101 /* Special cases not set in the X86CPUDefinition structs: */
2102 if (kvm_enabled()) {
2103 x86_cpu_apply_props(cpu, kvm_default_props);
2104 }
2105
2106 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2107
2108 /* sysenter isn't supported in compatibility mode on AMD,
2109 * syscall isn't supported in compatibility mode on Intel.
2110 * Normally we advertise the actual CPU vendor, but you can
2111 * override this using the 'vendor' property if you want to use
2112 * KVM's sysenter/syscall emulation in compatibility mode and
2113 * when doing cross-vendor migration.
2114 */
2115 vendor = def->vendor;
2116 if (kvm_enabled()) {
2117 uint32_t ebx = 0, ecx = 0, edx = 0;
2118 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2119 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2120 vendor = host_vendor;
2121 }
2122
2123 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2124
2125 }
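/* Illustrative sketch (not part of the original source): as the comment above
 * explains, the advertised vendor can still be overridden per CPU, either
 * from the feature string ("-cpu qemu64,vendor=AuthenticAMD") or directly:
 *
 *     Error *err = NULL;
 *     object_property_set_str(OBJECT(cpu), "AuthenticAMD", "vendor", &err);
 */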
2126
2127 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2128 {
2129 X86CPU *cpu = NULL;
2130 X86CPUClass *xcc;
2131 ObjectClass *oc;
2132 gchar **model_pieces;
2133 char *name, *features;
2134 Error *error = NULL;
2135
2136 model_pieces = g_strsplit(cpu_model, ",", 2);
2137 if (!model_pieces[0]) {
2138 error_setg(&error, "Invalid/empty CPU model name");
2139 goto out;
2140 }
2141 name = model_pieces[0];
2142 features = model_pieces[1];
2143
2144 oc = x86_cpu_class_by_name(name);
2145 if (oc == NULL) {
2146 error_setg(&error, "Unable to find CPU definition: %s", name);
2147 goto out;
2148 }
2149 xcc = X86_CPU_CLASS(oc);
2150
2151 if (xcc->kvm_required && !kvm_enabled()) {
2152 error_setg(&error, "CPU model '%s' requires KVM", name);
2153 goto out;
2154 }
2155
2156 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2157
2158 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2159 if (error) {
2160 goto out;
2161 }
2162
2163 out:
2164 if (error != NULL) {
2165 error_propagate(errp, error);
2166 if (cpu) {
2167 object_unref(OBJECT(cpu));
2168 cpu = NULL;
2169 }
2170 }
2171 g_strfreev(model_pieces);
2172 return cpu;
2173 }
2174
2175 X86CPU *cpu_x86_init(const char *cpu_model)
2176 {
2177 Error *error = NULL;
2178 X86CPU *cpu;
2179
2180 cpu = cpu_x86_create(cpu_model, &error);
2181 if (error) {
2182 goto out;
2183 }
2184
2185 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2186
2187 out:
2188 if (error) {
2189 error_report_err(error);
2190 if (cpu != NULL) {
2191 object_unref(OBJECT(cpu));
2192 cpu = NULL;
2193 }
2194 }
2195 return cpu;
2196 }
2197
2198 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2199 {
2200 X86CPUDefinition *cpudef = data;
2201 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2202
2203 xcc->cpu_def = cpudef;
2204 }
2205
2206 static void x86_register_cpudef_type(X86CPUDefinition *def)
2207 {
2208 char *typename = x86_cpu_type_name(def->name);
2209 TypeInfo ti = {
2210 .name = typename,
2211 .parent = TYPE_X86_CPU,
2212 .class_init = x86_cpu_cpudef_class_init,
2213 .class_data = def,
2214 };
2215
2216 type_register(&ti);
2217 g_free(typename);
2218 }
2219
2220 #if !defined(CONFIG_USER_ONLY)
2221
2222 void cpu_clear_apic_feature(CPUX86State *env)
2223 {
2224 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2225 }
2226
2227 #endif /* !CONFIG_USER_ONLY */
2228
2229 /* Initialize list of CPU models, filling some non-static fields if necessary
2230 */
2231 void x86_cpudef_setup(void)
2232 {
2233 int i, j;
2234 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2235
2236 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2237 X86CPUDefinition *def = &builtin_x86_defs[i];
2238
2239 /* Look for specific "cpudef" models that have the QEMU
2240 * version in .model_id */
2241 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2242 if (strcmp(model_with_versions[j], def->name) == 0) {
2243 pstrcpy(def->model_id, sizeof(def->model_id),
2244 "QEMU Virtual CPU version ");
2245 pstrcat(def->model_id, sizeof(def->model_id),
2246 qemu_get_version());
2247 break;
2248 }
2249 }
2250 }
2251 }
2252
2253 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2254 uint32_t *eax, uint32_t *ebx,
2255 uint32_t *ecx, uint32_t *edx)
2256 {
2257 X86CPU *cpu = x86_env_get_cpu(env);
2258 CPUState *cs = CPU(cpu);
2259
2260 /* test if maximum index reached */
2261 if (index & 0x80000000) {
2262 if (index > env->cpuid_xlevel) {
2263 if (env->cpuid_xlevel2 > 0) {
2264 /* Handle the Centaur's CPUID instruction. */
2265 if (index > env->cpuid_xlevel2) {
2266 index = env->cpuid_xlevel2;
2267 } else if (index < 0xC0000000) {
2268 index = env->cpuid_xlevel;
2269 }
2270 } else {
2271 /* Intel documentation states that invalid EAX input will
2272 * return the same information as EAX=cpuid_level
2273 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2274 */
2275 index = env->cpuid_level;
2276 }
2277 }
2278 } else {
2279 if (index > env->cpuid_level)
2280 index = env->cpuid_level;
2281 }
2282
2283 switch(index) {
2284 case 0:
2285 *eax = env->cpuid_level;
2286 *ebx = env->cpuid_vendor1;
2287 *edx = env->cpuid_vendor2;
2288 *ecx = env->cpuid_vendor3;
2289 break;
2290 case 1:
2291 *eax = env->cpuid_version;
2292 *ebx = (cpu->apic_id << 24) |
2293 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2294 *ecx = env->features[FEAT_1_ECX];
2295 *edx = env->features[FEAT_1_EDX];
2296 if (cs->nr_cores * cs->nr_threads > 1) {
2297 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2298 *edx |= 1 << 28; /* HTT bit */
2299 }
2300 break;
2301 case 2:
2302 /* cache info: needed for Pentium Pro compatibility */
2303 if (cpu->cache_info_passthrough) {
2304 host_cpuid(index, 0, eax, ebx, ecx, edx);
2305 break;
2306 }
2307 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2308 *ebx = 0;
2309 *ecx = 0;
2310 *edx = (L1D_DESCRIPTOR << 16) | \
2311 (L1I_DESCRIPTOR << 8) | \
2312 (L2_DESCRIPTOR);
2313 break;
2314 case 4:
2315 /* cache info: needed for Core compatibility */
2316 if (cpu->cache_info_passthrough) {
2317 host_cpuid(index, count, eax, ebx, ecx, edx);
2318 *eax &= ~0xFC000000;
2319 } else {
2320 *eax = 0;
2321 switch (count) {
2322 case 0: /* L1 dcache info */
2323 *eax |= CPUID_4_TYPE_DCACHE | \
2324 CPUID_4_LEVEL(1) | \
2325 CPUID_4_SELF_INIT_LEVEL;
2326 *ebx = (L1D_LINE_SIZE - 1) | \
2327 ((L1D_PARTITIONS - 1) << 12) | \
2328 ((L1D_ASSOCIATIVITY - 1) << 22);
2329 *ecx = L1D_SETS - 1;
2330 *edx = CPUID_4_NO_INVD_SHARING;
2331 break;
2332 case 1: /* L1 icache info */
2333 *eax |= CPUID_4_TYPE_ICACHE | \
2334 CPUID_4_LEVEL(1) | \
2335 CPUID_4_SELF_INIT_LEVEL;
2336 *ebx = (L1I_LINE_SIZE - 1) | \
2337 ((L1I_PARTITIONS - 1) << 12) | \
2338 ((L1I_ASSOCIATIVITY - 1) << 22);
2339 *ecx = L1I_SETS - 1;
2340 *edx = CPUID_4_NO_INVD_SHARING;
2341 break;
2342 case 2: /* L2 cache info */
2343 *eax |= CPUID_4_TYPE_UNIFIED | \
2344 CPUID_4_LEVEL(2) | \
2345 CPUID_4_SELF_INIT_LEVEL;
2346 if (cs->nr_threads > 1) {
2347 *eax |= (cs->nr_threads - 1) << 14;
2348 }
2349 *ebx = (L2_LINE_SIZE - 1) | \
2350 ((L2_PARTITIONS - 1) << 12) | \
2351 ((L2_ASSOCIATIVITY - 1) << 22);
2352 *ecx = L2_SETS - 1;
2353 *edx = CPUID_4_NO_INVD_SHARING;
2354 break;
2355 default: /* end of info */
2356 *eax = 0;
2357 *ebx = 0;
2358 *ecx = 0;
2359 *edx = 0;
2360 break;
2361 }
2362 }
2363
2364 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2365 if ((*eax & 31) && cs->nr_cores > 1) {
2366 *eax |= (cs->nr_cores - 1) << 26;
2367 }
2368 break;
2369 case 5:
2370 /* mwait info: needed for Core compatibility */
2371 *eax = 0; /* Smallest monitor-line size in bytes */
2372 *ebx = 0; /* Largest monitor-line size in bytes */
2373 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2374 *edx = 0;
2375 break;
2376 case 6:
2377 /* Thermal and Power Leaf */
2378 *eax = env->features[FEAT_6_EAX];
2379 *ebx = 0;
2380 *ecx = 0;
2381 *edx = 0;
2382 break;
2383 case 7:
2384 /* Structured Extended Feature Flags Enumeration Leaf */
2385 if (count == 0) {
2386 *eax = 0; /* Maximum ECX value for sub-leaves */
2387 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2388 *ecx = 0; /* Reserved */
2389 *edx = 0; /* Reserved */
2390 } else {
2391 *eax = 0;
2392 *ebx = 0;
2393 *ecx = 0;
2394 *edx = 0;
2395 }
2396 break;
2397 case 9:
2398 /* Direct Cache Access Information Leaf */
2399 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2400 *ebx = 0;
2401 *ecx = 0;
2402 *edx = 0;
2403 break;
2404 case 0xA:
2405 /* Architectural Performance Monitoring Leaf */
2406 if (kvm_enabled() && cpu->enable_pmu) {
2407 KVMState *s = cs->kvm_state;
2408
2409 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2410 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2411 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2412 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2413 } else {
2414 *eax = 0;
2415 *ebx = 0;
2416 *ecx = 0;
2417 *edx = 0;
2418 }
2419 break;
2420 case 0xD: {
2421 KVMState *s = cs->kvm_state;
2422 uint64_t kvm_mask;
2423 int i;
2424
2425 /* Processor Extended State */
2426 *eax = 0;
2427 *ebx = 0;
2428 *ecx = 0;
2429 *edx = 0;
2430 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2431 break;
2432 }
2433 kvm_mask =
2434 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2435 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2436
2437 if (count == 0) {
2438 *ecx = 0x240;
2439 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2440 const ExtSaveArea *esa = &ext_save_areas[i];
2441 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2442 (kvm_mask & (1 << i)) != 0) {
2443 if (i < 32) {
2444 *eax |= 1 << i;
2445 } else {
2446 *edx |= 1 << (i - 32);
2447 }
2448 *ecx = MAX(*ecx, esa->offset + esa->size);
2449 }
2450 }
2451 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2452 *ebx = *ecx;
2453 } else if (count == 1) {
2454 *eax = env->features[FEAT_XSAVE];
2455 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2456 const ExtSaveArea *esa = &ext_save_areas[count];
2457 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2458 (kvm_mask & (1 << count)) != 0) {
2459 *eax = esa->size;
2460 *ebx = esa->offset;
2461 }
2462 }
2463 break;
2464 }
2465 case 0x80000000:
2466 *eax = env->cpuid_xlevel;
2467 *ebx = env->cpuid_vendor1;
2468 *edx = env->cpuid_vendor2;
2469 *ecx = env->cpuid_vendor3;
2470 break;
2471 case 0x80000001:
2472 *eax = env->cpuid_version;
2473 *ebx = 0;
2474 *ecx = env->features[FEAT_8000_0001_ECX];
2475 *edx = env->features[FEAT_8000_0001_EDX];
2476
2477 /* The Linux kernel checks for the CMPLegacy bit and
2478 * discards multiple thread information if it is set.
2479 * So don't set it here for Intel to make Linux guests happy.
2480 */
2481 if (cs->nr_cores * cs->nr_threads > 1) {
2482 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2483 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2484 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2485 *ecx |= 1 << 1; /* CmpLegacy bit */
2486 }
2487 }
2488 break;
2489 case 0x80000002:
2490 case 0x80000003:
2491 case 0x80000004:
2492 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2493 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2494 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2495 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2496 break;
2497 case 0x80000005:
2498 /* cache info (L1 cache) */
2499 if (cpu->cache_info_passthrough) {
2500 host_cpuid(index, 0, eax, ebx, ecx, edx);
2501 break;
2502 }
2503 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2504 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2505 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2506 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2507 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2508 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2509 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2510 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2511 break;
2512 case 0x80000006:
2513 /* cache info (L2 cache) */
2514 if (cpu->cache_info_passthrough) {
2515 host_cpuid(index, 0, eax, ebx, ecx, edx);
2516 break;
2517 }
2518 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2519 (L2_DTLB_2M_ENTRIES << 16) | \
2520 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2521 (L2_ITLB_2M_ENTRIES);
2522 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2523 (L2_DTLB_4K_ENTRIES << 16) | \
2524 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2525 (L2_ITLB_4K_ENTRIES);
2526 *ecx = (L2_SIZE_KB_AMD << 16) | \
2527 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2528 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2529 *edx = ((L3_SIZE_KB/512) << 18) | \
2530 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2531 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2532 break;
2533 case 0x80000007:
2534 *eax = 0;
2535 *ebx = 0;
2536 *ecx = 0;
2537 *edx = env->features[FEAT_8000_0007_EDX];
2538 break;
2539 case 0x80000008:
2540 /* virtual & phys address size in low 2 bytes. */
2541 /* XXX: This value must match the one used in the MMU code. */
2542 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2543 /* 64 bit processor */
2544 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2545 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2546 } else {
2547 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2548 *eax = 0x00000024; /* 36 bits physical */
2549 } else {
2550 *eax = 0x00000020; /* 32 bits physical */
2551 }
2552 }
2553 *ebx = 0;
2554 *ecx = 0;
2555 *edx = 0;
2556 if (cs->nr_cores * cs->nr_threads > 1) {
2557 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2558 }
2559 break;
2560 case 0x8000000A:
2561 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2562 *eax = 0x00000001; /* SVM Revision */
2563 *ebx = 0x00000010; /* nr of ASIDs */
2564 *ecx = 0;
2565 *edx = env->features[FEAT_SVM]; /* optional features */
2566 } else {
2567 *eax = 0;
2568 *ebx = 0;
2569 *ecx = 0;
2570 *edx = 0;
2571 }
2572 break;
2573 case 0xC0000000:
2574 *eax = env->cpuid_xlevel2;
2575 *ebx = 0;
2576 *ecx = 0;
2577 *edx = 0;
2578 break;
2579 case 0xC0000001:
2580 /* Support for VIA CPU's CPUID instruction */
2581 *eax = env->cpuid_version;
2582 *ebx = 0;
2583 *ecx = 0;
2584 *edx = env->features[FEAT_C000_0001_EDX];
2585 break;
2586 case 0xC0000002:
2587 case 0xC0000003:
2588 case 0xC0000004:
2589 /* Reserved for future use; filled with zeroes for now */
2590 *eax = 0;
2591 *ebx = 0;
2592 *ecx = 0;
2593 *edx = 0;
2594 break;
2595 default:
2596 /* reserved values: zero */
2597 *eax = 0;
2598 *ebx = 0;
2599 *ecx = 0;
2600 *edx = 0;
2601 break;
2602 }
2603 }
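/* Illustrative sketch (not part of the original source): callers pass the
 * CPUID leaf in 'index' and the sub-leaf in 'count'; for example, reading
 * the version/feature leaf:
 *
 *     uint32_t eax, ebx, ecx, edx;
 *     cpu_x86_cpuid(env, 1, 0, &eax, &ebx, &ecx, &edx);
 *     // eax is env->cpuid_version; ecx/edx come from FEAT_1_ECX / FEAT_1_EDX
 */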
2604
2605 /* CPUClass::reset() */
2606 static void x86_cpu_reset(CPUState *s)
2607 {
2608 X86CPU *cpu = X86_CPU(s);
2609 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2610 CPUX86State *env = &cpu->env;
2611 int i;
2612
2613 xcc->parent_reset(s);
2614
2615 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2616
2617 tlb_flush(s, 1);
2618
2619 env->old_exception = -1;
2620
2621 /* init to reset state */
2622
2623 #ifdef CONFIG_SOFTMMU
2624 env->hflags |= HF_SOFTMMU_MASK;
2625 #endif
2626 env->hflags2 |= HF2_GIF_MASK;
2627
2628 cpu_x86_update_cr0(env, 0x60000010);
2629 env->a20_mask = ~0x0;
2630 env->smbase = 0x30000;
2631
2632 env->idt.limit = 0xffff;
2633 env->gdt.limit = 0xffff;
2634 env->ldt.limit = 0xffff;
2635 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2636 env->tr.limit = 0xffff;
2637 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2638
2639 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2640 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2641 DESC_R_MASK | DESC_A_MASK);
2642 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2643 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2644 DESC_A_MASK);
2645 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2646 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2647 DESC_A_MASK);
2648 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2649 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2650 DESC_A_MASK);
2651 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2652 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2653 DESC_A_MASK);
2654 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2655 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2656 DESC_A_MASK);
2657
2658 env->eip = 0xfff0;
2659 env->regs[R_EDX] = env->cpuid_version;
2660
2661 env->eflags = 0x2;
2662
2663 /* FPU init */
2664 for (i = 0; i < 8; i++) {
2665 env->fptags[i] = 1;
2666 }
2667 cpu_set_fpuc(env, 0x37f);
2668
2669 env->mxcsr = 0x1f80;
2670 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2671
2672 env->pat = 0x0007040600070406ULL;
2673 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2674
2675 memset(env->dr, 0, sizeof(env->dr));
2676 env->dr[6] = DR6_FIXED_1;
2677 env->dr[7] = DR7_FIXED_1;
2678 cpu_breakpoint_remove_all(s, BP_CPU);
2679 cpu_watchpoint_remove_all(s, BP_CPU);
2680
2681 env->xcr0 = 1;
2682
2683 /*
2684 * SDM 11.11.5 requires:
2685 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2686 * - IA32_MTRR_PHYSMASKn.V = 0
2687 * All other bits are undefined. For simplification, zero it all.
2688 */
2689 env->mtrr_deftype = 0;
2690 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2691 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2692
2693 #if !defined(CONFIG_USER_ONLY)
2694 /* We hard-wire the BSP to the first CPU. */
2695 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2696
2697 s->halted = !cpu_is_bsp(cpu);
2698
2699 if (kvm_enabled()) {
2700 kvm_arch_reset_vcpu(cpu);
2701 }
2702 #endif
2703 }
2704
2705 #ifndef CONFIG_USER_ONLY
2706 bool cpu_is_bsp(X86CPU *cpu)
2707 {
2708 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2709 }
2710
2711 /* TODO: remove me when reset over QOM tree is implemented */
2712 static void x86_cpu_machine_reset_cb(void *opaque)
2713 {
2714 X86CPU *cpu = opaque;
2715 cpu_reset(CPU(cpu));
2716 }
2717 #endif
2718
2719 static void mce_init(X86CPU *cpu)
2720 {
2721 CPUX86State *cenv = &cpu->env;
2722 unsigned int bank;
2723
2724 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2725 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2726 (CPUID_MCE | CPUID_MCA)) {
2727 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2728 cenv->mcg_ctl = ~(uint64_t)0;
2729 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2730 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2731 }
2732 }
2733 }
2734
2735 #ifndef CONFIG_USER_ONLY
2736 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2737 {
2738 APICCommonState *apic;
2739 const char *apic_type = "apic";
2740
2741 if (kvm_irqchip_in_kernel()) {
2742 apic_type = "kvm-apic";
2743 } else if (xen_enabled()) {
2744 apic_type = "xen-apic";
2745 }
2746
2747 cpu->apic_state = DEVICE(object_new(apic_type));
2748
2749 object_property_add_child(OBJECT(cpu), "apic",
2750 OBJECT(cpu->apic_state), NULL);
2751 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2752 /* TODO: convert to link<> */
2753 apic = APIC_COMMON(cpu->apic_state);
2754 apic->cpu = cpu;
2755 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2756 }
2757
2758 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2759 {
2760 APICCommonState *apic;
2761 static bool apic_mmio_map_once;
2762
2763 if (cpu->apic_state == NULL) {
2764 return;
2765 }
2766 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2767 errp);
2768
2769 /* Map APIC MMIO area */
2770 apic = APIC_COMMON(cpu->apic_state);
2771 if (!apic_mmio_map_once) {
2772 memory_region_add_subregion_overlap(get_system_memory(),
2773 apic->apicbase &
2774 MSR_IA32_APICBASE_BASE,
2775 &apic->io_memory,
2776 0x1000);
2777 apic_mmio_map_once = true;
2778 }
2779 }
2780
2781 static void x86_cpu_machine_done(Notifier *n, void *unused)
2782 {
2783 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2784 MemoryRegion *smram =
2785 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2786
2787 if (smram) {
2788 cpu->smram = g_new(MemoryRegion, 1);
2789 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2790 smram, 0, 1ull << 32);
2791 memory_region_set_enabled(cpu->smram, false);
2792 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2793 }
2794 }
2795 #else
2796 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2797 {
2798 }
2799 #endif
2800
2801
2802 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2803 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2804 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2805 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2806 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2807 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2808 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2809 {
2810 CPUState *cs = CPU(dev);
2811 X86CPU *cpu = X86_CPU(dev);
2812 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2813 CPUX86State *env = &cpu->env;
2814 Error *local_err = NULL;
2815 static bool ht_warned;
2816
2817 if (cpu->apic_id < 0) {
2818 error_setg(errp, "apic-id property was not initialized properly");
2819 return;
2820 }
2821
2822 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2823 env->cpuid_level = 7;
2824 }
2825
2826 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2827 * CPUID[1].EDX.
2828 */
2829 if (IS_AMD_CPU(env)) {
2830 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2831 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2832 & CPUID_EXT2_AMD_ALIASES);
2833 }
2834
2835
2836 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2837 error_setg(&local_err,
2838 kvm_enabled() ?
2839 "Host doesn't support requested features" :
2840 "TCG doesn't support requested features");
2841 goto out;
2842 }
2843
2844 #ifndef CONFIG_USER_ONLY
2845 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2846
2847 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2848 x86_cpu_apic_create(cpu, &local_err);
2849 if (local_err != NULL) {
2850 goto out;
2851 }
2852 }
2853 #endif
2854
2855 mce_init(cpu);
2856
2857 #ifndef CONFIG_USER_ONLY
2858 if (tcg_enabled()) {
2859 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2860 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2861 cs->as = g_new(AddressSpace, 1);
2862
2863 /* Outer container... */
2864 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2865 memory_region_set_enabled(cpu->cpu_as_root, true);
2866
2867 /* ... with two regions inside: normal system memory with low
2868 * priority, and...
2869 */
2870 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2871 get_system_memory(), 0, ~0ull);
2872 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2873 memory_region_set_enabled(cpu->cpu_as_mem, true);
2874 address_space_init(cs->as, cpu->cpu_as_root, "CPU");
2875
2876 /* ... SMRAM with higher priority, linked from /machine/smram. */
2877 cpu->machine_done.notify = x86_cpu_machine_done;
2878 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2879 }
2880 #endif
2881
2882 qemu_init_vcpu(cs);
2883
2884 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2885 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2886 * based on inputs (sockets,cores,threads), it is still better to give
2887 * users a warning.
2888 *
2889 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2890 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
2891 */
2892 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2893 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2894 " -smp options properly.");
2895 ht_warned = true;
2896 }
2897
2898 x86_cpu_apic_realize(cpu, &local_err);
2899 if (local_err != NULL) {
2900 goto out;
2901 }
2902 cpu_reset(cs);
2903
2904 xcc->parent_realize(dev, &local_err);
2905
2906 out:
2907 if (local_err != NULL) {
2908 error_propagate(errp, local_err);
2909 return;
2910 }
2911 }
2912
2913 typedef struct BitProperty {
2914 uint32_t *ptr;
2915 uint32_t mask;
2916 } BitProperty;
2917
2918 static void x86_cpu_get_bit_prop(Object *obj,
2919 struct Visitor *v,
2920 void *opaque,
2921 const char *name,
2922 Error **errp)
2923 {
2924 BitProperty *fp = opaque;
2925 bool value = (*fp->ptr & fp->mask) == fp->mask;
2926 visit_type_bool(v, &value, name, errp);
2927 }
2928
2929 static void x86_cpu_set_bit_prop(Object *obj,
2930 struct Visitor *v,
2931 void *opaque,
2932 const char *name,
2933 Error **errp)
2934 {
2935 DeviceState *dev = DEVICE(obj);
2936 BitProperty *fp = opaque;
2937 Error *local_err = NULL;
2938 bool value;
2939
2940 if (dev->realized) {
2941 qdev_prop_set_after_realize(dev, name, errp);
2942 return;
2943 }
2944
2945 visit_type_bool(v, &value, name, &local_err);
2946 if (local_err) {
2947 error_propagate(errp, local_err);
2948 return;
2949 }
2950
2951 if (value) {
2952 *fp->ptr |= fp->mask;
2953 } else {
2954 *fp->ptr &= ~fp->mask;
2955 }
2956 }
2957
2958 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2959 void *opaque)
2960 {
2961 BitProperty *prop = opaque;
2962 g_free(prop);
2963 }
2964
2965 /* Register a boolean property to get/set a single bit in a uint32_t field.
2966 *
2967 * The same property name can be registered multiple times to make it affect
2968 * multiple bits in the same FeatureWord. In that case, the getter will return
2969 * true only if all bits are set.
2970 */
2971 static void x86_cpu_register_bit_prop(X86CPU *cpu,
2972 const char *prop_name,
2973 uint32_t *field,
2974 int bitnr)
2975 {
2976 BitProperty *fp;
2977 ObjectProperty *op;
2978 uint32_t mask = (1UL << bitnr);
2979
2980 op = object_property_find(OBJECT(cpu), prop_name, NULL);
2981 if (op) {
2982 fp = op->opaque;
2983 assert(fp->ptr == field);
2984 fp->mask |= mask;
2985 } else {
2986 fp = g_new0(BitProperty, 1);
2987 fp->ptr = field;
2988 fp->mask = mask;
2989 object_property_add(OBJECT(cpu), prop_name, "bool",
2990 x86_cpu_get_bit_prop,
2991 x86_cpu_set_bit_prop,
2992 x86_cpu_release_bit_prop, fp, &error_abort);
2993 }
2994 }
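/* Illustrative sketch (not part of the original source): once registered,
 * each feature bit behaves like an ordinary QOM bool property, so assuming a
 * feature named "avx" in FEAT_1_ECX it can be toggled with
 *
 *     Error *err = NULL;
 *     object_property_set_bool(OBJECT(cpu), true, "avx", &err);
 *
 * which goes through x86_cpu_set_bit_prop() and flips the matching bit in
 * env->features[FEAT_1_ECX].
 */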
2995
2996 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
2997 FeatureWord w,
2998 int bitnr)
2999 {
3000 Object *obj = OBJECT(cpu);
3001 int i;
3002 char **names;
3003 FeatureWordInfo *fi = &feature_word_info[w];
3004
3005 if (!fi->feat_names) {
3006 return;
3007 }
3008 if (!fi->feat_names[bitnr]) {
3009 return;
3010 }
3011
3012 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3013
3014 feat2prop(names[0]);
3015 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3016
3017 for (i = 1; names[i]; i++) {
3018 feat2prop(names[i]);
3019 object_property_add_alias(obj, names[i], obj, names[0],
3020 &error_abort);
3021 }
3022
3023 g_strfreev(names);
3024 }
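/* Illustrative sketch (not part of the original source): assuming a
 * feat_names entry of the form "sse4.1|sse4_1", the code above registers the
 * bit property under "sse4.1" and then adds "sse4-1" (after feat2prop()) as
 * an alias pointing at the same bit.
 */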
3025
3026 static void x86_cpu_initfn(Object *obj)
3027 {
3028 CPUState *cs = CPU(obj);
3029 X86CPU *cpu = X86_CPU(obj);
3030 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3031 CPUX86State *env = &cpu->env;
3032 FeatureWord w;
3033 static int inited;
3034
3035 cs->env_ptr = env;
3036 cpu_exec_init(cs, &error_abort);
3037
3038 object_property_add(obj, "family", "int",
3039 x86_cpuid_version_get_family,
3040 x86_cpuid_version_set_family, NULL, NULL, NULL);
3041 object_property_add(obj, "model", "int",
3042 x86_cpuid_version_get_model,
3043 x86_cpuid_version_set_model, NULL, NULL, NULL);
3044 object_property_add(obj, "stepping", "int",
3045 x86_cpuid_version_get_stepping,
3046 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3047 object_property_add_str(obj, "vendor",
3048 x86_cpuid_get_vendor,
3049 x86_cpuid_set_vendor, NULL);
3050 object_property_add_str(obj, "model-id",
3051 x86_cpuid_get_model_id,
3052 x86_cpuid_set_model_id, NULL);
3053 object_property_add(obj, "tsc-frequency", "int",
3054 x86_cpuid_get_tsc_freq,
3055 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3056 object_property_add(obj, "apic-id", "int",
3057 x86_cpuid_get_apic_id,
3058 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3059 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3060 x86_cpu_get_feature_words,
3061 NULL, NULL, (void *)env->features, NULL);
3062 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3063 x86_cpu_get_feature_words,
3064 NULL, NULL, (void *)cpu->filtered_features, NULL);
3065
3066 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3067
3068 #ifndef CONFIG_USER_ONLY
3069 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3070 cpu->apic_id = -1;
3071 #endif
3072
3073 for (w = 0; w < FEATURE_WORDS; w++) {
3074 int bitnr;
3075
3076 for (bitnr = 0; bitnr < 32; bitnr++) {
3077 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3078 }
3079 }
3080
3081 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3082
3083 /* init various static tables used in TCG mode */
3084 if (tcg_enabled() && !inited) {
3085 inited = 1;
3086 optimize_flags_init();
3087 }
3088 }
3089
3090 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3091 {
3092 X86CPU *cpu = X86_CPU(cs);
3093
3094 return cpu->apic_id;
3095 }
3096
3097 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3098 {
3099 X86CPU *cpu = X86_CPU(cs);
3100
3101 return cpu->env.cr[0] & CR0_PG_MASK;
3102 }
3103
3104 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3105 {
3106 X86CPU *cpu = X86_CPU(cs);
3107
3108 cpu->env.eip = value;
3109 }
3110
3111 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3112 {
3113 X86CPU *cpu = X86_CPU(cs);
3114
3115 cpu->env.eip = tb->pc - tb->cs_base;
3116 }
3117
3118 static bool x86_cpu_has_work(CPUState *cs)
3119 {
3120 X86CPU *cpu = X86_CPU(cs);
3121 CPUX86State *env = &cpu->env;
3122
3123 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3124 CPU_INTERRUPT_POLL)) &&
3125 (env->eflags & IF_MASK)) ||
3126 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3127 CPU_INTERRUPT_INIT |
3128 CPU_INTERRUPT_SIPI |
3129 CPU_INTERRUPT_MCE)) ||
3130 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3131 !(env->hflags & HF_SMM_MASK));
3132 }
3133
3134 static Property x86_cpu_properties[] = {
3135 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3136 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3137 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3138 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3139 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3140 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3141 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
3142 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3143 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3144 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3145 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3146 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3147 DEFINE_PROP_END_OF_LIST()
3148 };
3149
3150 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3151 {
3152 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3153 CPUClass *cc = CPU_CLASS(oc);
3154 DeviceClass *dc = DEVICE_CLASS(oc);
3155
3156 xcc->parent_realize = dc->realize;
3157 dc->realize = x86_cpu_realizefn;
3158 dc->props = x86_cpu_properties;
3159
3160 xcc->parent_reset = cc->reset;
3161 cc->reset = x86_cpu_reset;
3162 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3163
3164 cc->class_by_name = x86_cpu_class_by_name;
3165 cc->parse_features = x86_cpu_parse_featurestr;
3166 cc->has_work = x86_cpu_has_work;
3167 cc->do_interrupt = x86_cpu_do_interrupt;
3168 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3169 cc->dump_state = x86_cpu_dump_state;
3170 cc->set_pc = x86_cpu_set_pc;
3171 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3172 cc->gdb_read_register = x86_cpu_gdb_read_register;
3173 cc->gdb_write_register = x86_cpu_gdb_write_register;
3174 cc->get_arch_id = x86_cpu_get_arch_id;
3175 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3176 #ifdef CONFIG_USER_ONLY
3177 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3178 #else
3179 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3180 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3181 cc->write_elf64_note = x86_cpu_write_elf64_note;
3182 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3183 cc->write_elf32_note = x86_cpu_write_elf32_note;
3184 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3185 cc->vmsd = &vmstate_x86_cpu;
3186 #endif
3187 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3188 #ifndef CONFIG_USER_ONLY
3189 cc->debug_excp_handler = breakpoint_handler;
3190 #endif
3191 cc->cpu_exec_enter = x86_cpu_exec_enter;
3192 cc->cpu_exec_exit = x86_cpu_exec_exit;
3193 }
3194
3195 static const TypeInfo x86_cpu_type_info = {
3196 .name = TYPE_X86_CPU,
3197 .parent = TYPE_CPU,
3198 .instance_size = sizeof(X86CPU),
3199 .instance_init = x86_cpu_initfn,
3200 .abstract = true,
3201 .class_size = sizeof(X86CPUClass),
3202 .class_init = x86_cpu_common_class_init,
3203 };
3204
3205 static void x86_cpu_register_types(void)
3206 {
3207 int i;
3208
3209 type_register_static(&x86_cpu_type_info);
3210 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3211 x86_register_cpudef_type(&builtin_x86_defs[i]);
3212 }
3213 #ifdef CONFIG_KVM
3214 type_register_static(&host_x86_cpu_type_info);
3215 #endif
3216 }
3217
3218 type_init(x86_cpu_register_types)