target-i386/cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
23
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
33
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
38
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
43
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/cpu/icc_bus.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "exec/address-spaces.h"
49 #include "hw/xen/xen.h"
50 #include "hw/i386/apic_internal.h"
51 #endif
52
53
54 /* Cache topology CPUID constants: */
55
56 /* CPUID Leaf 2 Descriptors */
57
58 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
59 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
60 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
61
62
63 /* CPUID Leaf 4 constants: */
64
65 /* EAX: */
66 #define CPUID_4_TYPE_DCACHE 1
67 #define CPUID_4_TYPE_ICACHE 2
68 #define CPUID_4_TYPE_UNIFIED 3
69
70 #define CPUID_4_LEVEL(l) ((l) << 5)
71
72 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
73 #define CPUID_4_FULLY_ASSOC (1 << 9)
74
75 /* EDX: */
76 #define CPUID_4_NO_INVD_SHARING (1 << 0)
77 #define CPUID_4_INCLUSIVE (1 << 1)
78 #define CPUID_4_COMPLEX_IDX (1 << 2)
79
80 #define ASSOC_FULL 0xFF
81
82 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
83 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
84 a == 2 ? 0x2 : \
85 a == 4 ? 0x4 : \
86 a == 8 ? 0x6 : \
87 a == 16 ? 0x8 : \
88 a == 32 ? 0xA : \
89 a == 48 ? 0xB : \
90 a == 64 ? 0xC : \
91 a == 96 ? 0xD : \
92 a == 128 ? 0xE : \
93 a == ASSOC_FULL ? 0xF : \
94 0 /* invalid value */)
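/* For example, AMD_ENC_ASSOC(16) yields 0x8 and AMD_ENC_ASSOC(ASSOC_FULL)
 * yields 0xF; any associativity not listed above encodes as 0 (invalid).
 */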
95
96
97 /* Definitions of the hardcoded cache entries we expose: */
98
99 /* L1 data cache: */
100 #define L1D_LINE_SIZE 64
101 #define L1D_ASSOCIATIVITY 8
102 #define L1D_SETS 64
103 #define L1D_PARTITIONS 1
104 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
105 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
106 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
107 #define L1D_LINES_PER_TAG 1
108 #define L1D_SIZE_KB_AMD 64
109 #define L1D_ASSOCIATIVITY_AMD 2
110
111 /* L1 instruction cache: */
112 #define L1I_LINE_SIZE 64
113 #define L1I_ASSOCIATIVITY 8
114 #define L1I_SETS 64
115 #define L1I_PARTITIONS 1
116 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
117 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
118 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
119 #define L1I_LINES_PER_TAG 1
120 #define L1I_SIZE_KB_AMD 64
121 #define L1I_ASSOCIATIVITY_AMD 2
122
123 /* Level 2 unified cache: */
124 #define L2_LINE_SIZE 64
125 #define L2_ASSOCIATIVITY 16
126 #define L2_SETS 4096
127 #define L2_PARTITIONS 1
128 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
129 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
130 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
131 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
132 #define L2_LINES_PER_TAG 1
133 #define L2_SIZE_KB_AMD 512
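/* Cross-check of the values above: CPUID leaf 4 describes
 * 64 B * 16 ways * 4096 sets * 1 partition = 4 MiB, while leaf 2
 * descriptor 0x7d advertises a 2 MB, 8-way cache (hence the FIXME above
 * about leaf 2 vs. leaf 4).
 */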
134
135 /* No L3 cache: */
136 #define L3_SIZE_KB 0 /* disabled */
137 #define L3_ASSOCIATIVITY 0 /* disabled */
138 #define L3_LINES_PER_TAG 0 /* disabled */
139 #define L3_LINE_SIZE 0 /* disabled */
140
141 /* TLB definitions: */
142
143 #define L1_DTLB_2M_ASSOC 1
144 #define L1_DTLB_2M_ENTRIES 255
145 #define L1_DTLB_4K_ASSOC 1
146 #define L1_DTLB_4K_ENTRIES 255
147
148 #define L1_ITLB_2M_ASSOC 1
149 #define L1_ITLB_2M_ENTRIES 255
150 #define L1_ITLB_4K_ASSOC 1
151 #define L1_ITLB_4K_ENTRIES 255
152
153 #define L2_DTLB_2M_ASSOC 0 /* disabled */
154 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
155 #define L2_DTLB_4K_ASSOC 4
156 #define L2_DTLB_4K_ENTRIES 512
157
158 #define L2_ITLB_2M_ASSOC 0 /* disabled */
159 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
160 #define L2_ITLB_4K_ASSOC 4
161 #define L2_ITLB_4K_ENTRIES 512
162
163
164
165 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
166 uint32_t vendor2, uint32_t vendor3)
167 {
168 int i;
169 for (i = 0; i < 4; i++) {
170 dst[i] = vendor1 >> (8 * i);
171 dst[i + 4] = vendor2 >> (8 * i);
172 dst[i + 8] = vendor3 >> (8 * i);
173 }
174 dst[CPUID_VENDOR_SZ] = '\0';
175 }
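/* Each 32-bit word holds four ASCII characters, least-significant byte
 * first.  For "GenuineIntel", CPUID.0 returns "Genu" in EBX, "ineI" in EDX
 * and "ntel" in ECX, which is the argument order the callers use below.
 */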
176
177 /* feature flags taken from "Intel Processor Identification and the CPUID
178 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
179 * between feature naming conventions, aliases may be added.
180 */
181 static const char *feature_name[] = {
182 "fpu", "vme", "de", "pse",
183 "tsc", "msr", "pae", "mce",
184 "cx8", "apic", NULL, "sep",
185 "mtrr", "pge", "mca", "cmov",
186 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
187 NULL, "ds" /* Intel dts */, "acpi", "mmx",
188 "fxsr", "sse", "sse2", "ss",
189 "ht" /* Intel htt */, "tm", "ia64", "pbe",
190 };
191 static const char *ext_feature_name[] = {
192 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
193 "ds_cpl", "vmx", "smx", "est",
194 "tm2", "ssse3", "cid", NULL,
195 "fma", "cx16", "xtpr", "pdcm",
196 NULL, "pcid", "dca", "sse4.1|sse4_1",
197 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
198 "tsc-deadline", "aes", "xsave", "osxsave",
199 "avx", "f16c", "rdrand", "hypervisor",
200 };
201 /* Feature names that are already defined in feature_name[], but are set in
202  * CPUID[8000_0001].EDX on AMD CPUs, don't have their names listed in
203  * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
204  * if and only if the CPU vendor is AMD.
205 */
206 static const char *ext2_feature_name[] = {
207 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
208 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
209 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
210 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
211 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
212 "nx|xd", NULL, "mmxext", NULL /* mmx */,
213 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
214 NULL, "lm|i64", "3dnowext", "3dnow",
215 };
216 static const char *ext3_feature_name[] = {
217 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
218 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
219 "3dnowprefetch", "osvw", "ibs", "xop",
220 "skinit", "wdt", NULL, "lwp",
221 "fma4", "tce", NULL, "nodeid_msr",
222 NULL, "tbm", "topoext", "perfctr_core",
223 "perfctr_nb", NULL, NULL, NULL,
224 NULL, NULL, NULL, NULL,
225 };
226
227 static const char *ext4_feature_name[] = {
228 NULL, NULL, "xstore", "xstore-en",
229 NULL, NULL, "xcrypt", "xcrypt-en",
230 "ace2", "ace2-en", "phe", "phe-en",
231 "pmm", "pmm-en", NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
235 NULL, NULL, NULL, NULL,
236 };
237
238 static const char *kvm_feature_name[] = {
239 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
240 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 NULL, NULL, NULL, NULL,
245 "kvmclock-stable-bit", NULL, NULL, NULL,
246 NULL, NULL, NULL, NULL,
247 };
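/* "kvmclock" appears twice above on purpose: bits 0 and 3 correspond to
 * KVM_FEATURE_CLOCKSOURCE and KVM_FEATURE_CLOCKSOURCE2, and the single
 * flag name controls both bits.
 */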
248
249 static const char *svm_feature_name[] = {
250 "npt", "lbrv", "svm_lock", "nrip_save",
251 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
252 NULL, NULL, "pause_filter", NULL,
253 "pfthreshold", NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257 NULL, NULL, NULL, NULL,
258 };
259
260 static const char *cpuid_7_0_ebx_feature_name[] = {
261 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
262 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
263 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
264 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
265 };
266
267 static const char *cpuid_apm_edx_feature_name[] = {
268 NULL, NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 "invtsc", NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
275 NULL, NULL, NULL, NULL,
276 };
277
278 static const char *cpuid_xsave_feature_name[] = {
279 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286 NULL, NULL, NULL, NULL,
287 };
288
289 static const char *cpuid_6_feature_name[] = {
290 NULL, NULL, "arat", NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
297 NULL, NULL, NULL, NULL,
298 };
299
300 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
301 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
302 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
303 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
304 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
305 CPUID_PSE36 | CPUID_FXSR)
306 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
307 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
308 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
309 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
310 CPUID_PAE | CPUID_SEP | CPUID_APIC)
311
312 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
313 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
314 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
315 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
316 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
317 /* partly implemented:
318 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
319 /* missing:
320 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
321 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
322 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
323 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
324 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
325 /* missing:
326 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
327 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
328 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
329 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
330 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
331 CPUID_EXT_RDRAND */
332
333 #ifdef TARGET_X86_64
334 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
335 #else
336 #define TCG_EXT2_X86_64_FEATURES 0
337 #endif
338
339 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
340 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
341 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
342 TCG_EXT2_X86_64_FEATURES)
343 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
344 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
345 #define TCG_EXT4_FEATURES 0
346 #define TCG_SVM_FEATURES 0
347 #define TCG_KVM_FEATURES 0
348 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
349 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
350 /* missing:
351 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
352 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
353 CPUID_7_0_EBX_RDSEED */
354 #define TCG_APM_FEATURES 0
355 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
356
357
358 typedef struct FeatureWordInfo {
359 const char **feat_names;
360 uint32_t cpuid_eax; /* Input EAX for CPUID */
361 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
362 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
363 int cpuid_reg; /* output register (R_* constant) */
364 uint32_t tcg_features; /* Feature flags supported by TCG */
365 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
366 } FeatureWordInfo;
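/* Each feature word is thus identified by a CPUID leaf (plus sub-leaf when
 * cpuid_needs_ecx is set) and an output register; e.g. FEAT_7_0_EBX below
 * corresponds to CPUID.(EAX=07H,ECX=0):EBX.
 */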
367
368 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
369 [FEAT_1_EDX] = {
370 .feat_names = feature_name,
371 .cpuid_eax = 1, .cpuid_reg = R_EDX,
372 .tcg_features = TCG_FEATURES,
373 },
374 [FEAT_1_ECX] = {
375 .feat_names = ext_feature_name,
376 .cpuid_eax = 1, .cpuid_reg = R_ECX,
377 .tcg_features = TCG_EXT_FEATURES,
378 },
379 [FEAT_8000_0001_EDX] = {
380 .feat_names = ext2_feature_name,
381 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
382 .tcg_features = TCG_EXT2_FEATURES,
383 },
384 [FEAT_8000_0001_ECX] = {
385 .feat_names = ext3_feature_name,
386 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
387 .tcg_features = TCG_EXT3_FEATURES,
388 },
389 [FEAT_C000_0001_EDX] = {
390 .feat_names = ext4_feature_name,
391 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
392 .tcg_features = TCG_EXT4_FEATURES,
393 },
394 [FEAT_KVM] = {
395 .feat_names = kvm_feature_name,
396 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
397 .tcg_features = TCG_KVM_FEATURES,
398 },
399 [FEAT_SVM] = {
400 .feat_names = svm_feature_name,
401 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
402 .tcg_features = TCG_SVM_FEATURES,
403 },
404 [FEAT_7_0_EBX] = {
405 .feat_names = cpuid_7_0_ebx_feature_name,
406 .cpuid_eax = 7,
407 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
408 .cpuid_reg = R_EBX,
409 .tcg_features = TCG_7_0_EBX_FEATURES,
410 },
411 [FEAT_8000_0007_EDX] = {
412 .feat_names = cpuid_apm_edx_feature_name,
413 .cpuid_eax = 0x80000007,
414 .cpuid_reg = R_EDX,
415 .tcg_features = TCG_APM_FEATURES,
416 .unmigratable_flags = CPUID_APM_INVTSC,
417 },
418 [FEAT_XSAVE] = {
419 .feat_names = cpuid_xsave_feature_name,
420 .cpuid_eax = 0xd,
421 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
422 .cpuid_reg = R_EAX,
423 .tcg_features = 0,
424 },
425 [FEAT_6_EAX] = {
426 .feat_names = cpuid_6_feature_name,
427 .cpuid_eax = 6, .cpuid_reg = R_EAX,
428 .tcg_features = TCG_6_EAX_FEATURES,
429 },
430 };
431
432 typedef struct X86RegisterInfo32 {
433 /* Name of register */
434 const char *name;
435 /* QAPI enum value register */
436 X86CPURegister32 qapi_enum;
437 } X86RegisterInfo32;
438
439 #define REGISTER(reg) \
440 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
441 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
442 REGISTER(EAX),
443 REGISTER(ECX),
444 REGISTER(EDX),
445 REGISTER(EBX),
446 REGISTER(ESP),
447 REGISTER(EBP),
448 REGISTER(ESI),
449 REGISTER(EDI),
450 };
451 #undef REGISTER
452
453 typedef struct ExtSaveArea {
454 uint32_t feature, bits;
455 uint32_t offset, size;
456 } ExtSaveArea;
457
458 static const ExtSaveArea ext_save_areas[] = {
459 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
460 .offset = 0x240, .size = 0x100 },
461 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
462 .offset = 0x3c0, .size = 0x40 },
463 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
464 .offset = 0x400, .size = 0x40 },
465 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
466 .offset = 0x440, .size = 0x40 },
467 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
468 .offset = 0x480, .size = 0x200 },
469 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
470 .offset = 0x680, .size = 0x400 },
471 };
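/* The array indices are XSAVE state-component numbers: 2 is AVX (YMM high
 * halves), 3 and 4 are the MPX BNDREGS/BNDCSR areas, and 5-7 are the
 * AVX-512 opmask, ZMM_Hi256 and Hi16_ZMM areas.  Offsets and sizes follow
 * the standard (non-compacted) XSAVE layout.
 */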
472
473 const char *get_register_name_32(unsigned int reg)
474 {
475 if (reg >= CPU_NB_REGS32) {
476 return NULL;
477 }
478 return x86_reg_info_32[reg].name;
479 }
480
481 /* KVM-specific features that are automatically added to all CPU models
482 * when KVM is enabled.
483 */
484 static uint32_t kvm_default_features[FEATURE_WORDS] = {
485 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
486 (1 << KVM_FEATURE_NOP_IO_DELAY) |
487 (1 << KVM_FEATURE_CLOCKSOURCE2) |
488 (1 << KVM_FEATURE_ASYNC_PF) |
489 (1 << KVM_FEATURE_STEAL_TIME) |
490 (1 << KVM_FEATURE_PV_EOI) |
491 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
492 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
493 };
494
495 /* Features that are not added by default to any CPU model when KVM is enabled.
496 */
497 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
498 [FEAT_1_EDX] = CPUID_ACPI,
499 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
500 [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
501 };
502
503 void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
504 {
505 kvm_default_features[w] &= ~features;
506 }
507
508 void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
509 {
510 kvm_default_unset_features[w] &= ~features;
511 }
512
513 /*
514 * Returns the set of feature flags that are supported and migratable by
515 * QEMU, for a given FeatureWord.
516 */
517 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
518 {
519 FeatureWordInfo *wi = &feature_word_info[w];
520 uint32_t r = 0;
521 int i;
522
523 for (i = 0; i < 32; i++) {
524 uint32_t f = 1U << i;
525 /* If the feature name is unknown, it is not supported by QEMU yet */
526 if (!wi->feat_names[i]) {
527 continue;
528 }
529 /* Skip features known to QEMU, but explicitly marked as unmigratable */
530 if (wi->unmigratable_flags & f) {
531 continue;
532 }
533 r |= f;
534 }
535 return r;
536 }
537
538 void host_cpuid(uint32_t function, uint32_t count,
539 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
540 {
541 uint32_t vec[4];
542
543 #ifdef __x86_64__
544 asm volatile("cpuid"
545 : "=a"(vec[0]), "=b"(vec[1]),
546 "=c"(vec[2]), "=d"(vec[3])
547 : "0"(function), "c"(count) : "cc");
548 #elif defined(__i386__)
549 asm volatile("pusha \n\t"
550 "cpuid \n\t"
551 "mov %%eax, 0(%2) \n\t"
552 "mov %%ebx, 4(%2) \n\t"
553 "mov %%ecx, 8(%2) \n\t"
554 "mov %%edx, 12(%2) \n\t"
555 "popa"
556 : : "a"(function), "c"(count), "S"(vec)
557 : "memory", "cc");
558 #else
559 abort();
560 #endif
561
562 if (eax)
563 *eax = vec[0];
564 if (ebx)
565 *ebx = vec[1];
566 if (ecx)
567 *ecx = vec[2];
568 if (edx)
569 *edx = vec[3];
570 }
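/* On i386 the asm above uses pusha/popa so that EBX (which the compiler
 * may reserve as the PIC register) survives CPUID, and stores the results
 * through the vec pointer passed in ESI rather than using register
 * outputs.
 */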
571
572 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
573
574 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
575  * a substring. ex, if not NULL, points to the first char after a substring;
576  * otherwise the string is assumed to be terminated by a nul.
577 * Return lexical ordering of *s1:*s2.
578 */
579 static int sstrcmp(const char *s1, const char *e1,
580 const char *s2, const char *e2)
581 {
582 for (;;) {
583 if (!*s1 || !*s2 || *s1 != *s2)
584 return (*s1 - *s2);
585 ++s1, ++s2;
586 if (s1 == e1 && s2 == e2)
587 return (0);
588 else if (s1 == e1)
589 return (*s2);
590 else if (s2 == e2)
591 return (*s1);
592 }
593 }
594
595 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
596 * '|' delimited (possibly empty) strings in which case search for a match
597 * within the alternatives proceeds left to right. Return 0 for success,
598 * non-zero otherwise.
599 */
600 static int altcmp(const char *s, const char *e, const char *altstr)
601 {
602 const char *p, *q;
603
604 for (q = p = altstr; ; ) {
605 while (*p && *p != '|')
606 ++p;
607 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
608 return (0);
609 if (!*p)
610 return (1);
611 else
612 q = ++p;
613 }
614 }
615
616 /* search featureset for flag *[s..e), if found set corresponding bit in
617 * *pval and return true, otherwise return false
618 */
619 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
620 const char **featureset)
621 {
622 uint32_t mask;
623 const char **ppc;
624 bool found = false;
625
626 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
627 if (*ppc && !altcmp(s, e, *ppc)) {
628 *pval |= mask;
629 found = true;
630 }
631 }
632 return found;
633 }
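/* A featureset entry may list several accepted spellings separated by '|':
 * e.g. ext_feature_name[0] is "pni|sse3", so both "pni" and "sse3" set
 * bit 0 of FEAT_1_ECX.
 */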
634
635 static void add_flagname_to_bitmaps(const char *flagname,
636 FeatureWordArray words,
637 Error **errp)
638 {
639 FeatureWord w;
640 for (w = 0; w < FEATURE_WORDS; w++) {
641 FeatureWordInfo *wi = &feature_word_info[w];
642 if (wi->feat_names &&
643 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
644 break;
645 }
646 }
647 if (w == FEATURE_WORDS) {
648 error_setg(errp, "CPU feature %s not found", flagname);
649 }
650 }
651
652 /* CPU class name definitions: */
653
654 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
655 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
656
657 /* Return type name for a given CPU model name
658 * Caller is responsible for freeing the returned string.
659 */
660 static char *x86_cpu_type_name(const char *model_name)
661 {
662 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
663 }
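/* E.g. x86_cpu_type_name("qemu64") produces "qemu64" followed by
 * X86_CPU_TYPE_SUFFIX, typically something like "qemu64-x86_64-cpu" (the
 * exact suffix depends on how TYPE_X86_CPU is defined for the target).
 */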
664
665 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
666 {
667 ObjectClass *oc;
668 char *typename;
669
670 if (cpu_model == NULL) {
671 return NULL;
672 }
673
674 typename = x86_cpu_type_name(cpu_model);
675 oc = object_class_by_name(typename);
676 g_free(typename);
677 return oc;
678 }
679
680 struct X86CPUDefinition {
681 const char *name;
682 uint32_t level;
683 uint32_t xlevel;
684 uint32_t xlevel2;
685 /* vendor is zero-terminated, 12 character ASCII string */
686 char vendor[CPUID_VENDOR_SZ + 1];
687 int family;
688 int model;
689 int stepping;
690 FeatureWordArray features;
691 char model_id[48];
692 bool cache_info_passthrough;
693 };
694
695 static X86CPUDefinition builtin_x86_defs[] = {
696 {
697 .name = "qemu64",
698 .level = 0xd,
699 .vendor = CPUID_VENDOR_AMD,
700 .family = 6,
701 .model = 6,
702 .stepping = 3,
703 .features[FEAT_1_EDX] =
704 PPRO_FEATURES |
705 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
706 CPUID_PSE36,
707 .features[FEAT_1_ECX] =
708 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
709 .features[FEAT_8000_0001_EDX] =
710 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
711 .features[FEAT_8000_0001_ECX] =
712 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
713 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
714 .xlevel = 0x8000000A,
715 },
716 {
717 .name = "phenom",
718 .level = 5,
719 .vendor = CPUID_VENDOR_AMD,
720 .family = 16,
721 .model = 2,
722 .stepping = 3,
723 /* Missing: CPUID_HT */
724 .features[FEAT_1_EDX] =
725 PPRO_FEATURES |
726 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
727 CPUID_PSE36 | CPUID_VME,
728 .features[FEAT_1_ECX] =
729 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
730 CPUID_EXT_POPCNT,
731 .features[FEAT_8000_0001_EDX] =
732 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
733 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
734 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
735 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
736 CPUID_EXT3_CR8LEG,
737 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
738 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
739 .features[FEAT_8000_0001_ECX] =
740 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
741 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
742 /* Missing: CPUID_SVM_LBRV */
743 .features[FEAT_SVM] =
744 CPUID_SVM_NPT,
745 .xlevel = 0x8000001A,
746 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
747 },
748 {
749 .name = "core2duo",
750 .level = 10,
751 .vendor = CPUID_VENDOR_INTEL,
752 .family = 6,
753 .model = 15,
754 .stepping = 11,
755 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
756 .features[FEAT_1_EDX] =
757 PPRO_FEATURES |
758 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
759 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
760 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
761 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
762 .features[FEAT_1_ECX] =
763 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
764 CPUID_EXT_CX16,
765 .features[FEAT_8000_0001_EDX] =
766 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
767 .features[FEAT_8000_0001_ECX] =
768 CPUID_EXT3_LAHF_LM,
769 .xlevel = 0x80000008,
770 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
771 },
772 {
773 .name = "kvm64",
774 .level = 0xd,
775 .vendor = CPUID_VENDOR_INTEL,
776 .family = 15,
777 .model = 6,
778 .stepping = 1,
779 /* Missing: CPUID_HT */
780 .features[FEAT_1_EDX] =
781 PPRO_FEATURES | CPUID_VME |
782 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
783 CPUID_PSE36,
784 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
785 .features[FEAT_1_ECX] =
786 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
787 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
788 .features[FEAT_8000_0001_EDX] =
789 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
790 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
791 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
792 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
793 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
794 .features[FEAT_8000_0001_ECX] =
795 0,
796 .xlevel = 0x80000008,
797 .model_id = "Common KVM processor"
798 },
799 {
800 .name = "qemu32",
801 .level = 4,
802 .vendor = CPUID_VENDOR_INTEL,
803 .family = 6,
804 .model = 6,
805 .stepping = 3,
806 .features[FEAT_1_EDX] =
807 PPRO_FEATURES,
808 .features[FEAT_1_ECX] =
809 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
810 .xlevel = 0x80000004,
811 },
812 {
813 .name = "kvm32",
814 .level = 5,
815 .vendor = CPUID_VENDOR_INTEL,
816 .family = 15,
817 .model = 6,
818 .stepping = 1,
819 .features[FEAT_1_EDX] =
820 PPRO_FEATURES | CPUID_VME |
821 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
822 .features[FEAT_1_ECX] =
823 CPUID_EXT_SSE3,
824 .features[FEAT_8000_0001_ECX] =
825 0,
826 .xlevel = 0x80000008,
827 .model_id = "Common 32-bit KVM processor"
828 },
829 {
830 .name = "coreduo",
831 .level = 10,
832 .vendor = CPUID_VENDOR_INTEL,
833 .family = 6,
834 .model = 14,
835 .stepping = 8,
836 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
837 .features[FEAT_1_EDX] =
838 PPRO_FEATURES | CPUID_VME |
839 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
840 CPUID_SS,
841 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
842 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
843 .features[FEAT_1_ECX] =
844 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
845 .features[FEAT_8000_0001_EDX] =
846 CPUID_EXT2_NX,
847 .xlevel = 0x80000008,
848 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
849 },
850 {
851 .name = "486",
852 .level = 1,
853 .vendor = CPUID_VENDOR_INTEL,
854 .family = 4,
855 .model = 8,
856 .stepping = 0,
857 .features[FEAT_1_EDX] =
858 I486_FEATURES,
859 .xlevel = 0,
860 },
861 {
862 .name = "pentium",
863 .level = 1,
864 .vendor = CPUID_VENDOR_INTEL,
865 .family = 5,
866 .model = 4,
867 .stepping = 3,
868 .features[FEAT_1_EDX] =
869 PENTIUM_FEATURES,
870 .xlevel = 0,
871 },
872 {
873 .name = "pentium2",
874 .level = 2,
875 .vendor = CPUID_VENDOR_INTEL,
876 .family = 6,
877 .model = 5,
878 .stepping = 2,
879 .features[FEAT_1_EDX] =
880 PENTIUM2_FEATURES,
881 .xlevel = 0,
882 },
883 {
884 .name = "pentium3",
885 .level = 3,
886 .vendor = CPUID_VENDOR_INTEL,
887 .family = 6,
888 .model = 7,
889 .stepping = 3,
890 .features[FEAT_1_EDX] =
891 PENTIUM3_FEATURES,
892 .xlevel = 0,
893 },
894 {
895 .name = "athlon",
896 .level = 2,
897 .vendor = CPUID_VENDOR_AMD,
898 .family = 6,
899 .model = 2,
900 .stepping = 3,
901 .features[FEAT_1_EDX] =
902 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
903 CPUID_MCA,
904 .features[FEAT_8000_0001_EDX] =
905 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
906 .xlevel = 0x80000008,
907 },
908 {
909 .name = "n270",
910 .level = 10,
911 .vendor = CPUID_VENDOR_INTEL,
912 .family = 6,
913 .model = 28,
914 .stepping = 2,
915 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
916 .features[FEAT_1_EDX] =
917 PPRO_FEATURES |
918 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
919 CPUID_ACPI | CPUID_SS,
920         /* Some CPUs lack CPUID_SEP */
921 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
922 * CPUID_EXT_XTPR */
923 .features[FEAT_1_ECX] =
924 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
925 CPUID_EXT_MOVBE,
926 .features[FEAT_8000_0001_EDX] =
927 CPUID_EXT2_NX,
928 .features[FEAT_8000_0001_ECX] =
929 CPUID_EXT3_LAHF_LM,
930 .xlevel = 0x80000008,
931 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
932 },
933 {
934 .name = "Conroe",
935 .level = 10,
936 .vendor = CPUID_VENDOR_INTEL,
937 .family = 6,
938 .model = 15,
939 .stepping = 3,
940 .features[FEAT_1_EDX] =
941 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
942 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
943 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
944 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
945 CPUID_DE | CPUID_FP87,
946 .features[FEAT_1_ECX] =
947 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
948 .features[FEAT_8000_0001_EDX] =
949 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
950 .features[FEAT_8000_0001_ECX] =
951 CPUID_EXT3_LAHF_LM,
952 .xlevel = 0x80000008,
953 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
954 },
955 {
956 .name = "Penryn",
957 .level = 10,
958 .vendor = CPUID_VENDOR_INTEL,
959 .family = 6,
960 .model = 23,
961 .stepping = 3,
962 .features[FEAT_1_EDX] =
963 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
964 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
965 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
966 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
967 CPUID_DE | CPUID_FP87,
968 .features[FEAT_1_ECX] =
969 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
970 CPUID_EXT_SSE3,
971 .features[FEAT_8000_0001_EDX] =
972 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
973 .features[FEAT_8000_0001_ECX] =
974 CPUID_EXT3_LAHF_LM,
975 .xlevel = 0x80000008,
976 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
977 },
978 {
979 .name = "Nehalem",
980 .level = 11,
981 .vendor = CPUID_VENDOR_INTEL,
982 .family = 6,
983 .model = 26,
984 .stepping = 3,
985 .features[FEAT_1_EDX] =
986 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
987 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
988 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
989 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
990 CPUID_DE | CPUID_FP87,
991 .features[FEAT_1_ECX] =
992 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
993 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
994 .features[FEAT_8000_0001_EDX] =
995 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
996 .features[FEAT_8000_0001_ECX] =
997 CPUID_EXT3_LAHF_LM,
998 .xlevel = 0x80000008,
999 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1000 },
1001 {
1002 .name = "Westmere",
1003 .level = 11,
1004 .vendor = CPUID_VENDOR_INTEL,
1005 .family = 6,
1006 .model = 44,
1007 .stepping = 1,
1008 .features[FEAT_1_EDX] =
1009 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1010 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1011 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1012 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1013 CPUID_DE | CPUID_FP87,
1014 .features[FEAT_1_ECX] =
1015 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1016 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1017 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1018 .features[FEAT_8000_0001_EDX] =
1019 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1020 .features[FEAT_8000_0001_ECX] =
1021 CPUID_EXT3_LAHF_LM,
1022 .features[FEAT_6_EAX] =
1023 CPUID_6_EAX_ARAT,
1024 .xlevel = 0x80000008,
1025 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1026 },
1027 {
1028 .name = "SandyBridge",
1029 .level = 0xd,
1030 .vendor = CPUID_VENDOR_INTEL,
1031 .family = 6,
1032 .model = 42,
1033 .stepping = 1,
1034 .features[FEAT_1_EDX] =
1035 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1036 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1037 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1038 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1039 CPUID_DE | CPUID_FP87,
1040 .features[FEAT_1_ECX] =
1041 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1042 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1043 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1044 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1045 CPUID_EXT_SSE3,
1046 .features[FEAT_8000_0001_EDX] =
1047 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1048 CPUID_EXT2_SYSCALL,
1049 .features[FEAT_8000_0001_ECX] =
1050 CPUID_EXT3_LAHF_LM,
1051 .features[FEAT_XSAVE] =
1052 CPUID_XSAVE_XSAVEOPT,
1053 .features[FEAT_6_EAX] =
1054 CPUID_6_EAX_ARAT,
1055 .xlevel = 0x80000008,
1056 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1057 },
1058 {
1059 .name = "IvyBridge",
1060 .level = 0xd,
1061 .vendor = CPUID_VENDOR_INTEL,
1062 .family = 6,
1063 .model = 58,
1064 .stepping = 9,
1065 .features[FEAT_1_EDX] =
1066 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1067 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1068 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1069 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1070 CPUID_DE | CPUID_FP87,
1071 .features[FEAT_1_ECX] =
1072 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1073 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1074 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1075 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1076 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1077 .features[FEAT_7_0_EBX] =
1078 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1079 CPUID_7_0_EBX_ERMS,
1080 .features[FEAT_8000_0001_EDX] =
1081 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1082 CPUID_EXT2_SYSCALL,
1083 .features[FEAT_8000_0001_ECX] =
1084 CPUID_EXT3_LAHF_LM,
1085 .features[FEAT_XSAVE] =
1086 CPUID_XSAVE_XSAVEOPT,
1087 .features[FEAT_6_EAX] =
1088 CPUID_6_EAX_ARAT,
1089 .xlevel = 0x80000008,
1090 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1091 },
1092 {
1093 .name = "Haswell-noTSX",
1094 .level = 0xd,
1095 .vendor = CPUID_VENDOR_INTEL,
1096 .family = 6,
1097 .model = 60,
1098 .stepping = 1,
1099 .features[FEAT_1_EDX] =
1100 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1101 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1102 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1103 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1104 CPUID_DE | CPUID_FP87,
1105 .features[FEAT_1_ECX] =
1106 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1107 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1108 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1109 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1110 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1111 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1112 .features[FEAT_8000_0001_EDX] =
1113 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1114 CPUID_EXT2_SYSCALL,
1115 .features[FEAT_8000_0001_ECX] =
1116 CPUID_EXT3_LAHF_LM,
1117 .features[FEAT_7_0_EBX] =
1118 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1119 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1120 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1121 .features[FEAT_XSAVE] =
1122 CPUID_XSAVE_XSAVEOPT,
1123 .features[FEAT_6_EAX] =
1124 CPUID_6_EAX_ARAT,
1125 .xlevel = 0x80000008,
1126 .model_id = "Intel Core Processor (Haswell, no TSX)",
1127 }, {
1128 .name = "Haswell",
1129 .level = 0xd,
1130 .vendor = CPUID_VENDOR_INTEL,
1131 .family = 6,
1132 .model = 60,
1133 .stepping = 1,
1134 .features[FEAT_1_EDX] =
1135 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1136 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1137 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1138 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1139 CPUID_DE | CPUID_FP87,
1140 .features[FEAT_1_ECX] =
1141 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1142 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1143 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1144 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1145 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1146 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1147 .features[FEAT_8000_0001_EDX] =
1148 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1149 CPUID_EXT2_SYSCALL,
1150 .features[FEAT_8000_0001_ECX] =
1151 CPUID_EXT3_LAHF_LM,
1152 .features[FEAT_7_0_EBX] =
1153 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1154 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1155 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1156 CPUID_7_0_EBX_RTM,
1157 .features[FEAT_XSAVE] =
1158 CPUID_XSAVE_XSAVEOPT,
1159 .features[FEAT_6_EAX] =
1160 CPUID_6_EAX_ARAT,
1161 .xlevel = 0x80000008,
1162 .model_id = "Intel Core Processor (Haswell)",
1163 },
1164 {
1165 .name = "Broadwell-noTSX",
1166 .level = 0xd,
1167 .vendor = CPUID_VENDOR_INTEL,
1168 .family = 6,
1169 .model = 61,
1170 .stepping = 2,
1171 .features[FEAT_1_EDX] =
1172 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1173 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1174 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1175 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1176 CPUID_DE | CPUID_FP87,
1177 .features[FEAT_1_ECX] =
1178 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1179 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1180 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1181 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1182 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1183 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1184 .features[FEAT_8000_0001_EDX] =
1185 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1186 CPUID_EXT2_SYSCALL,
1187 .features[FEAT_8000_0001_ECX] =
1188 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1189 .features[FEAT_7_0_EBX] =
1190 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1191 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1192 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1193 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1194 CPUID_7_0_EBX_SMAP,
1195 .features[FEAT_XSAVE] =
1196 CPUID_XSAVE_XSAVEOPT,
1197 .features[FEAT_6_EAX] =
1198 CPUID_6_EAX_ARAT,
1199 .xlevel = 0x80000008,
1200 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1201 },
1202 {
1203 .name = "Broadwell",
1204 .level = 0xd,
1205 .vendor = CPUID_VENDOR_INTEL,
1206 .family = 6,
1207 .model = 61,
1208 .stepping = 2,
1209 .features[FEAT_1_EDX] =
1210 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1211 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1212 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1213 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1214 CPUID_DE | CPUID_FP87,
1215 .features[FEAT_1_ECX] =
1216 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1217 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1218 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1219 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1220 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1221 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1222 .features[FEAT_8000_0001_EDX] =
1223 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1224 CPUID_EXT2_SYSCALL,
1225 .features[FEAT_8000_0001_ECX] =
1226 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1227 .features[FEAT_7_0_EBX] =
1228 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1229 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1230 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1231 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1232 CPUID_7_0_EBX_SMAP,
1233 .features[FEAT_XSAVE] =
1234 CPUID_XSAVE_XSAVEOPT,
1235 .features[FEAT_6_EAX] =
1236 CPUID_6_EAX_ARAT,
1237 .xlevel = 0x80000008,
1238 .model_id = "Intel Core Processor (Broadwell)",
1239 },
1240 {
1241 .name = "Opteron_G1",
1242 .level = 5,
1243 .vendor = CPUID_VENDOR_AMD,
1244 .family = 15,
1245 .model = 6,
1246 .stepping = 1,
1247 .features[FEAT_1_EDX] =
1248 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1249 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1250 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1251 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1252 CPUID_DE | CPUID_FP87,
1253 .features[FEAT_1_ECX] =
1254 CPUID_EXT_SSE3,
1255 .features[FEAT_8000_0001_EDX] =
1256 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1257 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1258 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1259 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1260 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1261 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1262 .xlevel = 0x80000008,
1263 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1264 },
1265 {
1266 .name = "Opteron_G2",
1267 .level = 5,
1268 .vendor = CPUID_VENDOR_AMD,
1269 .family = 15,
1270 .model = 6,
1271 .stepping = 1,
1272 .features[FEAT_1_EDX] =
1273 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1274 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1275 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1276 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1277 CPUID_DE | CPUID_FP87,
1278 .features[FEAT_1_ECX] =
1279 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1280 .features[FEAT_8000_0001_EDX] =
1281 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1282 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1283 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1284 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1285 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1286 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1287 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1288 .features[FEAT_8000_0001_ECX] =
1289 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1290 .xlevel = 0x80000008,
1291 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1292 },
1293 {
1294 .name = "Opteron_G3",
1295 .level = 5,
1296 .vendor = CPUID_VENDOR_AMD,
1297 .family = 15,
1298 .model = 6,
1299 .stepping = 1,
1300 .features[FEAT_1_EDX] =
1301 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1302 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1303 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1304 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1305 CPUID_DE | CPUID_FP87,
1306 .features[FEAT_1_ECX] =
1307 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1308 CPUID_EXT_SSE3,
1309 .features[FEAT_8000_0001_EDX] =
1310 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1311 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1312 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1313 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1314 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1315 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1316 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1317 .features[FEAT_8000_0001_ECX] =
1318 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1319 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1320 .xlevel = 0x80000008,
1321 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1322 },
1323 {
1324 .name = "Opteron_G4",
1325 .level = 0xd,
1326 .vendor = CPUID_VENDOR_AMD,
1327 .family = 21,
1328 .model = 1,
1329 .stepping = 2,
1330 .features[FEAT_1_EDX] =
1331 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1332 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1333 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1334 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1335 CPUID_DE | CPUID_FP87,
1336 .features[FEAT_1_ECX] =
1337 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1338 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1339 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1340 CPUID_EXT_SSE3,
1341 .features[FEAT_8000_0001_EDX] =
1342 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1343 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1344 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1345 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1346 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1347 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1348 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1349 .features[FEAT_8000_0001_ECX] =
1350 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1351 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1352 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1353 CPUID_EXT3_LAHF_LM,
1354 /* no xsaveopt! */
1355 .xlevel = 0x8000001A,
1356 .model_id = "AMD Opteron 62xx class CPU",
1357 },
1358 {
1359 .name = "Opteron_G5",
1360 .level = 0xd,
1361 .vendor = CPUID_VENDOR_AMD,
1362 .family = 21,
1363 .model = 2,
1364 .stepping = 0,
1365 .features[FEAT_1_EDX] =
1366 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1367 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1368 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1369 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1370 CPUID_DE | CPUID_FP87,
1371 .features[FEAT_1_ECX] =
1372 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1373 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1374 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1375 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1376 .features[FEAT_8000_0001_EDX] =
1377 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1378 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1379 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1380 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1381 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1382 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1383 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1384 .features[FEAT_8000_0001_ECX] =
1385 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1386 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1387 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1388 CPUID_EXT3_LAHF_LM,
1389 /* no xsaveopt! */
1390 .xlevel = 0x8000001A,
1391 .model_id = "AMD Opteron 63xx class CPU",
1392 },
1393 };
1394
1395 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1396 bool migratable_only);
1397
1398 #ifdef CONFIG_KVM
1399
1400 static int cpu_x86_fill_model_id(char *str)
1401 {
1402 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1403 int i;
1404
1405 for (i = 0; i < 3; i++) {
1406 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1407 memcpy(str + i * 16 + 0, &eax, 4);
1408 memcpy(str + i * 16 + 4, &ebx, 4);
1409 memcpy(str + i * 16 + 8, &ecx, 4);
1410 memcpy(str + i * 16 + 12, &edx, 4);
1411 }
1412 return 0;
1413 }
1414
1415 static X86CPUDefinition host_cpudef;
1416
1417 static Property host_x86_cpu_properties[] = {
1418 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1419 DEFINE_PROP_END_OF_LIST()
1420 };
1421
1422 /* class_init for the "host" CPU model
1423 *
1424 * This function may be called before KVM is initialized.
1425 */
1426 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1427 {
1428 DeviceClass *dc = DEVICE_CLASS(oc);
1429 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1430 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1431
1432 xcc->kvm_required = true;
1433
1434 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1435 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1436
1437 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1438 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1439 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1440 host_cpudef.stepping = eax & 0x0F;
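    /* CPUID.01H:EAX layout: stepping in bits 3:0, model in bits 7:4, family
     * in bits 11:8, extended model in bits 19:16, extended family in bits
     * 27:20; the extended fields are folded into family/model above.
     */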
1441
1442 cpu_x86_fill_model_id(host_cpudef.model_id);
1443
1444 xcc->cpu_def = &host_cpudef;
1445 host_cpudef.cache_info_passthrough = true;
1446
1447 /* level, xlevel, xlevel2, and the feature words are initialized on
1448 * instance_init, because they require KVM to be initialized.
1449 */
1450
1451 dc->props = host_x86_cpu_properties;
1452 }
1453
1454 static void host_x86_cpu_initfn(Object *obj)
1455 {
1456 X86CPU *cpu = X86_CPU(obj);
1457 CPUX86State *env = &cpu->env;
1458 KVMState *s = kvm_state;
1459
1460 assert(kvm_enabled());
1461
1462 /* We can't fill the features array here because we don't know yet if
1463 * "migratable" is true or false.
1464 */
1465 cpu->host_features = true;
1466
1467 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1468 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1469 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1470
1471 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1472 }
1473
1474 static const TypeInfo host_x86_cpu_type_info = {
1475 .name = X86_CPU_TYPE_NAME("host"),
1476 .parent = TYPE_X86_CPU,
1477 .instance_init = host_x86_cpu_initfn,
1478 .class_init = host_x86_cpu_class_init,
1479 };
1480
1481 #endif
1482
1483 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1484 {
1485 FeatureWordInfo *f = &feature_word_info[w];
1486 int i;
1487
1488 for (i = 0; i < 32; ++i) {
1489 if (1 << i & mask) {
1490 const char *reg = get_register_name_32(f->cpuid_reg);
1491 assert(reg);
1492 fprintf(stderr, "warning: %s doesn't support requested feature: "
1493 "CPUID.%02XH:%s%s%s [bit %d]\n",
1494 kvm_enabled() ? "host" : "TCG",
1495 f->cpuid_eax, reg,
1496 f->feat_names[i] ? "." : "",
1497 f->feat_names[i] ? f->feat_names[i] : "", i);
1498 }
1499 }
1500 }
1501
1502 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1503 const char *name, Error **errp)
1504 {
1505 X86CPU *cpu = X86_CPU(obj);
1506 CPUX86State *env = &cpu->env;
1507 int64_t value;
1508
1509 value = (env->cpuid_version >> 8) & 0xf;
1510 if (value == 0xf) {
1511 value += (env->cpuid_version >> 20) & 0xff;
1512 }
1513 visit_type_int(v, &value, name, errp);
1514 }
1515
1516 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1517 const char *name, Error **errp)
1518 {
1519 X86CPU *cpu = X86_CPU(obj);
1520 CPUX86State *env = &cpu->env;
1521 const int64_t min = 0;
1522 const int64_t max = 0xff + 0xf;
1523 Error *local_err = NULL;
1524 int64_t value;
1525
1526 visit_type_int(v, &value, name, &local_err);
1527 if (local_err) {
1528 error_propagate(errp, local_err);
1529 return;
1530 }
1531 if (value < min || value > max) {
1532 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1533 name ? name : "null", value, min, max);
1534 return;
1535 }
1536
1537 env->cpuid_version &= ~0xff00f00;
1538 if (value > 0x0f) {
1539 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1540 } else {
1541 env->cpuid_version |= value << 8;
1542 }
1543 }
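/* Example: family=21 (as used by the Opteron_G4/G5 models) is stored as
 * base family 0xf plus extended family 21 - 15 = 6, which the getter above
 * recombines into 21.
 */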
1544
1545 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1546 const char *name, Error **errp)
1547 {
1548 X86CPU *cpu = X86_CPU(obj);
1549 CPUX86State *env = &cpu->env;
1550 int64_t value;
1551
1552 value = (env->cpuid_version >> 4) & 0xf;
1553 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1554 visit_type_int(v, &value, name, errp);
1555 }
1556
1557 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1558 const char *name, Error **errp)
1559 {
1560 X86CPU *cpu = X86_CPU(obj);
1561 CPUX86State *env = &cpu->env;
1562 const int64_t min = 0;
1563 const int64_t max = 0xff;
1564 Error *local_err = NULL;
1565 int64_t value;
1566
1567 visit_type_int(v, &value, name, &local_err);
1568 if (local_err) {
1569 error_propagate(errp, local_err);
1570 return;
1571 }
1572 if (value < min || value > max) {
1573 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1574 name ? name : "null", value, min, max);
1575 return;
1576 }
1577
1578 env->cpuid_version &= ~0xf00f0;
1579 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1580 }
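/* Example: model=61 (0x3d, as used by the Broadwell models) is stored as
 * 0xd in bits 7:4 and 0x3 in bits 19:16, which the getter above
 * reassembles into 0x3d.
 */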
1581
1582 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1583 void *opaque, const char *name,
1584 Error **errp)
1585 {
1586 X86CPU *cpu = X86_CPU(obj);
1587 CPUX86State *env = &cpu->env;
1588 int64_t value;
1589
1590 value = env->cpuid_version & 0xf;
1591 visit_type_int(v, &value, name, errp);
1592 }
1593
1594 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1595 void *opaque, const char *name,
1596 Error **errp)
1597 {
1598 X86CPU *cpu = X86_CPU(obj);
1599 CPUX86State *env = &cpu->env;
1600 const int64_t min = 0;
1601 const int64_t max = 0xf;
1602 Error *local_err = NULL;
1603 int64_t value;
1604
1605 visit_type_int(v, &value, name, &local_err);
1606 if (local_err) {
1607 error_propagate(errp, local_err);
1608 return;
1609 }
1610 if (value < min || value > max) {
1611 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1612 name ? name : "null", value, min, max);
1613 return;
1614 }
1615
1616 env->cpuid_version &= ~0xf;
1617 env->cpuid_version |= value & 0xf;
1618 }
1619
1620 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1621 {
1622 X86CPU *cpu = X86_CPU(obj);
1623 CPUX86State *env = &cpu->env;
1624 char *value;
1625
1626 value = g_malloc(CPUID_VENDOR_SZ + 1);
1627 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1628 env->cpuid_vendor3);
1629 return value;
1630 }
1631
1632 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1633 Error **errp)
1634 {
1635 X86CPU *cpu = X86_CPU(obj);
1636 CPUX86State *env = &cpu->env;
1637 int i;
1638
1639 if (strlen(value) != CPUID_VENDOR_SZ) {
1640 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1641 return;
1642 }
1643
1644 env->cpuid_vendor1 = 0;
1645 env->cpuid_vendor2 = 0;
1646 env->cpuid_vendor3 = 0;
1647 for (i = 0; i < 4; i++) {
1648 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1649 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1650 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1651 }
1652 }
1653
1654 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1655 {
1656 X86CPU *cpu = X86_CPU(obj);
1657 CPUX86State *env = &cpu->env;
1658 char *value;
1659 int i;
1660
1661 value = g_malloc(48 + 1);
1662 for (i = 0; i < 48; i++) {
1663 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1664 }
1665 value[48] = '\0';
1666 return value;
1667 }
1668
1669 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1670 Error **errp)
1671 {
1672 X86CPU *cpu = X86_CPU(obj);
1673 CPUX86State *env = &cpu->env;
1674 int c, len, i;
1675
1676 if (model_id == NULL) {
1677 model_id = "";
1678 }
1679 len = strlen(model_id);
1680 memset(env->cpuid_model, 0, 48);
1681 for (i = 0; i < 48; i++) {
1682 if (i >= len) {
1683 c = '\0';
1684 } else {
1685 c = (uint8_t)model_id[i];
1686 }
1687 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1688 }
1689 }
1690
1691 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1692 const char *name, Error **errp)
1693 {
1694 X86CPU *cpu = X86_CPU(obj);
1695 int64_t value;
1696
1697 value = cpu->env.tsc_khz * 1000;
1698 visit_type_int(v, &value, name, errp);
1699 }
1700
1701 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1702 const char *name, Error **errp)
1703 {
1704 X86CPU *cpu = X86_CPU(obj);
1705 const int64_t min = 0;
1706 const int64_t max = INT64_MAX;
1707 Error *local_err = NULL;
1708 int64_t value;
1709
1710 visit_type_int(v, &value, name, &local_err);
1711 if (local_err) {
1712 error_propagate(errp, local_err);
1713 return;
1714 }
1715 if (value < min || value > max) {
1716 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1717 name ? name : "null", value, min, max);
1718 return;
1719 }
1720
1721 cpu->env.tsc_khz = value / 1000;
1722 }
1723
1724 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1725 const char *name, Error **errp)
1726 {
1727 X86CPU *cpu = X86_CPU(obj);
1728 int64_t value = cpu->apic_id;
1729
1730 visit_type_int(v, &value, name, errp);
1731 }
1732
1733 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1734 const char *name, Error **errp)
1735 {
1736 X86CPU *cpu = X86_CPU(obj);
1737 DeviceState *dev = DEVICE(obj);
1738 const int64_t min = 0;
1739 const int64_t max = UINT32_MAX;
1740 Error *error = NULL;
1741 int64_t value;
1742
1743 if (dev->realized) {
1744 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1745 "it was realized", name, object_get_typename(obj));
1746 return;
1747 }
1748
1749 visit_type_int(v, &value, name, &error);
1750 if (error) {
1751 error_propagate(errp, error);
1752 return;
1753 }
1754 if (value < min || value > max) {
1755 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1756 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1757 object_get_typename(obj), name, value, min, max);
1758 return;
1759 }
1760
1761 if ((value != cpu->apic_id) && cpu_exists(value)) {
1762 error_setg(errp, "CPU with APIC ID %" PRIi64 " already exists", value);
1763 return;
1764 }
1765 cpu->apic_id = value;
1766 }
1767
1768 /* Generic getter for "feature-words" and "filtered-features" properties */
1769 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1770 const char *name, Error **errp)
1771 {
1772 uint32_t *array = (uint32_t *)opaque;
1773 FeatureWord w;
1774 Error *err = NULL;
1775 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1776 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1777 X86CPUFeatureWordInfoList *list = NULL;
1778
1779 for (w = 0; w < FEATURE_WORDS; w++) {
1780 FeatureWordInfo *wi = &feature_word_info[w];
1781 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1782 qwi->cpuid_input_eax = wi->cpuid_eax;
1783 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1784 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1785 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1786 qwi->features = array[w];
1787
1788 /* List will be in reverse order, but order shouldn't matter */
1789 list_entries[w].next = list;
1790 list_entries[w].value = &word_infos[w];
1791 list = &list_entries[w];
1792 }
1793
1794 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1795 error_propagate(errp, err);
1796 }
1797
1798 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1799 const char *name, Error **errp)
1800 {
1801 X86CPU *cpu = X86_CPU(obj);
1802 int64_t value = cpu->hyperv_spinlock_attempts;
1803
1804 visit_type_int(v, &value, name, errp);
1805 }
1806
1807 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1808 const char *name, Error **errp)
1809 {
1810 const int64_t min = 0xFFF;
1811 const int64_t max = UINT_MAX;
1812 X86CPU *cpu = X86_CPU(obj);
1813 Error *err = NULL;
1814 int64_t value;
1815
1816 visit_type_int(v, &value, name, &err);
1817 if (err) {
1818 error_propagate(errp, err);
1819 return;
1820 }
1821
1822 if (value < min || value > max) {
1823 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1824 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1825 object_get_typename(obj), name ? name : "null",
1826 value, min, max);
1827 return;
1828 }
1829 cpu->hyperv_spinlock_attempts = value;
1830 }
1831
1832 static PropertyInfo qdev_prop_spinlocks = {
1833 .name = "int",
1834 .get = x86_get_hv_spinlocks,
1835 .set = x86_set_hv_spinlocks,
1836 };
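/* Usage sketch: "-cpu <model>,hv-spinlocks=0x1fff" sets the Hyper-V
 * spinlock retry count through this property; values below the 0xFFF
 * minimum are rejected by the setter above, while the legacy feature
 * string parser below fixes them up with a warning instead. */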
1837
1838 /* Convert all '_' in a feature string option name to '-', to make the feature
1839 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
1840 */
1841 static inline void feat2prop(char *s)
1842 {
1843 while ((s = strchr(s, '_'))) {
1844 *s = '-';
1845 }
1846 }
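/* Example: feat2prop() turns "tsc_freq" into "tsc-freq" and "sse4_2" into
 * "sse4-2", so the underscore spellings of legacy flags map onto the QOM
 * property names used below. */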
1847
1848 /* Parse "+feature,-feature,feature=foo" CPU feature string
1849 */
1850 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1851 Error **errp)
1852 {
1853 X86CPU *cpu = X86_CPU(cs);
1854 char *featurestr; /* Single "key=value" string being parsed */
1855 FeatureWord w;
1856 /* Features to be added */
1857 FeatureWordArray plus_features = { 0 };
1858 /* Features to be removed */
1859 FeatureWordArray minus_features = { 0 };
1860 uint32_t numvalue;
1861 CPUX86State *env = &cpu->env;
1862 Error *local_err = NULL;
1863
1864 featurestr = features ? strtok(features, ",") : NULL;
1865
1866 while (featurestr) {
1867 char *val;
1868 if (featurestr[0] == '+') {
1869 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1870 } else if (featurestr[0] == '-') {
1871 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1872 } else if ((val = strchr(featurestr, '='))) {
1873 *val = 0; val++;
1874 feat2prop(featurestr);
1875 if (!strcmp(featurestr, "xlevel")) {
1876 char *err;
1877 char num[32];
1878
1879 numvalue = strtoul(val, &err, 0);
1880 if (!*val || *err) {
1881 error_setg(errp, "bad numerical value %s", val);
1882 return;
1883 }
1884 if (numvalue < 0x80000000) {
1885 error_report("xlevel value shall always be >= 0x80000000"
1886 ", fixup will be removed in future versions");
1887 numvalue += 0x80000000;
1888 }
1889 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1890 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1891 } else if (!strcmp(featurestr, "tsc-freq")) {
1892 int64_t tsc_freq;
1893 char *err;
1894 char num[32];
1895
1896 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1897 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1898 if (tsc_freq < 0 || *err) {
1899 error_setg(errp, "bad numerical value %s", val);
1900 return;
1901 }
1902 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1903 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1904 &local_err);
1905 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1906 char *err;
1907 const int min = 0xFFF;
1908 char num[32];
1909 numvalue = strtoul(val, &err, 0);
1910 if (!*val || *err) {
1911 error_setg(errp, "bad numerical value %s", val);
1912 return;
1913 }
1914 if (numvalue < min) {
1915 error_report("hv-spinlocks value shall always be >= 0x%x"
1916 ", fixup will be removed in future versions",
1917 min);
1918 numvalue = min;
1919 }
1920 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1921 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1922 } else {
1923 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1924 }
1925 } else {
1926 feat2prop(featurestr);
1927 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1928 }
1929 if (local_err) {
1930 error_propagate(errp, local_err);
1931 return;
1932 }
1933 featurestr = strtok(NULL, ",");
1934 }
1935
1936 if (cpu->host_features) {
1937 for (w = 0; w < FEATURE_WORDS; w++) {
1938 env->features[w] =
1939 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1940 }
1941 }
1942
1943 for (w = 0; w < FEATURE_WORDS; w++) {
1944 env->features[w] |= plus_features[w];
1945 env->features[w] &= ~minus_features[w];
1946 }
1947 }
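/* Illustration (flag names chosen only as an example): a string such as
 * "+avx,-avx2,xlevel=0x8000000A" sets the AVX bit in plus_features, the
 * AVX2 bit in minus_features and routes "xlevel" through the property
 * parser above; because minus_features is applied last, "-foo" always
 * wins over "+foo" for the same flag. */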
1948
1949 /* Print all cpuid feature names in featureset
1950 */
1951 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1952 {
1953 int bit;
1954 bool first = true;
1955
1956 for (bit = 0; bit < 32; bit++) {
1957 if (featureset[bit]) {
1958 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1959 first = false;
1960 }
1961 }
1962 }
1963
1964 /* Generate CPU information. */
1965 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1966 {
1967 X86CPUDefinition *def;
1968 char buf[256];
1969 int i;
1970
1971 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1972 def = &builtin_x86_defs[i];
1973 snprintf(buf, sizeof(buf), "%s", def->name);
1974 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1975 }
1976 #ifdef CONFIG_KVM
1977 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1978 "KVM processor with all supported host features "
1979 "(only available in KVM mode)");
1980 #endif
1981
1982 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1983 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1984 FeatureWordInfo *fw = &feature_word_info[i];
1985
1986 (*cpu_fprintf)(f, " ");
1987 listflags(f, cpu_fprintf, fw->feat_names);
1988 (*cpu_fprintf)(f, "\n");
1989 }
1990 }
1991
1992 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1993 {
1994 CpuDefinitionInfoList *cpu_list = NULL;
1995 X86CPUDefinition *def;
1996 int i;
1997
1998 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1999 CpuDefinitionInfoList *entry;
2000 CpuDefinitionInfo *info;
2001
2002 def = &builtin_x86_defs[i];
2003 info = g_malloc0(sizeof(*info));
2004 info->name = g_strdup(def->name);
2005
2006 entry = g_malloc0(sizeof(*entry));
2007 entry->value = info;
2008 entry->next = cpu_list;
2009 cpu_list = entry;
2010 }
2011
2012 return cpu_list;
2013 }
2014
2015 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2016 bool migratable_only)
2017 {
2018 FeatureWordInfo *wi = &feature_word_info[w];
2019 uint32_t r;
2020
2021 if (kvm_enabled()) {
2022 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2023 wi->cpuid_ecx,
2024 wi->cpuid_reg);
2025 } else if (tcg_enabled()) {
2026 r = wi->tcg_features;
2027 } else {
2028 return ~0;
2029 }
2030 if (migratable_only) {
2031 r &= x86_cpu_get_migratable_flags(w);
2032 }
2033 return r;
2034 }
2035
2036 /*
2037 * Filters CPU feature words based on host availability of each feature.
2038 *
2039 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2040 */
2041 static int x86_cpu_filter_features(X86CPU *cpu)
2042 {
2043 CPUX86State *env = &cpu->env;
2044 FeatureWord w;
2045 int rv = 0;
2046
2047 for (w = 0; w < FEATURE_WORDS; w++) {
2048 uint32_t host_feat =
2049 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2050 uint32_t requested_features = env->features[w];
2051 env->features[w] &= host_feat;
2052 cpu->filtered_features[w] = requested_features & ~env->features[w];
2053 if (cpu->filtered_features[w]) {
2054 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2055 report_unavailable_features(w, cpu->filtered_features[w]);
2056 }
2057 rv = 1;
2058 }
2059 }
2060
2061 return rv;
2062 }
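/* For example, requesting a flag that the host (or TCG) cannot expose
 * clears the bit from env->features[] and records it in
 * filtered_features[]; with "check" this only prints a warning, while
 * "enforce" additionally makes realization fail in x86_cpu_realizefn()
 * below. */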
2063
2064 /* Load data from X86CPUDefinition
2065 */
2066 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2067 {
2068 CPUX86State *env = &cpu->env;
2069 const char *vendor;
2070 char host_vendor[CPUID_VENDOR_SZ + 1];
2071 FeatureWord w;
2072
2073 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2074 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2075 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2076 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2077 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2078 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2079 cpu->cache_info_passthrough = def->cache_info_passthrough;
2080 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2081 for (w = 0; w < FEATURE_WORDS; w++) {
2082 env->features[w] = def->features[w];
2083 }
2084
2085 /* Special cases not set in the X86CPUDefinition structs: */
2086 if (kvm_enabled()) {
2087 FeatureWord w;
2088 for (w = 0; w < FEATURE_WORDS; w++) {
2089 env->features[w] |= kvm_default_features[w];
2090 env->features[w] &= ~kvm_default_unset_features[w];
2091 }
2092 }
2093
2094 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2095
2096 /* sysenter isn't supported in compatibility mode on AMD,
2097 * syscall isn't supported in compatibility mode on Intel.
2098 * Normally we advertise the actual CPU vendor, but you can
2099 * override this using the 'vendor' property if you want to use
2100 * KVM's sysenter/syscall emulation in compatibility mode and
2101 * when doing cross-vendor migration.
2102 */
2103 vendor = def->vendor;
2104 if (kvm_enabled()) {
2105 uint32_t ebx = 0, ecx = 0, edx = 0;
2106 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2107 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2108 vendor = host_vendor;
2109 }
2110
2111 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2112
2113 }
2114
2115 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2116 {
2117 X86CPU *cpu = NULL;
2118 X86CPUClass *xcc;
2119 ObjectClass *oc;
2120 gchar **model_pieces;
2121 char *name, *features;
2122 Error *error = NULL;
2123
2124 model_pieces = g_strsplit(cpu_model, ",", 2);
2125 if (!model_pieces[0]) {
2126 error_setg(&error, "Invalid/empty CPU model name");
2127 goto out;
2128 }
2129 name = model_pieces[0];
2130 features = model_pieces[1];
2131
2132 oc = x86_cpu_class_by_name(name);
2133 if (oc == NULL) {
2134 error_setg(&error, "Unable to find CPU definition: %s", name);
2135 goto out;
2136 }
2137 xcc = X86_CPU_CLASS(oc);
2138
2139 if (xcc->kvm_required && !kvm_enabled()) {
2140 error_setg(&error, "CPU model '%s' requires KVM", name);
2141 goto out;
2142 }
2143
2144 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2145
2146 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2147 if (error) {
2148 goto out;
2149 }
2150
2151 out:
2152 if (error != NULL) {
2153 error_propagate(errp, error);
2154 if (cpu) {
2155 object_unref(OBJECT(cpu));
2156 cpu = NULL;
2157 }
2158 }
2159 g_strfreev(model_pieces);
2160 return cpu;
2161 }
2162
2163 X86CPU *cpu_x86_init(const char *cpu_model)
2164 {
2165 Error *error = NULL;
2166 X86CPU *cpu;
2167
2168 cpu = cpu_x86_create(cpu_model, &error);
2169 if (error) {
2170 goto out;
2171 }
2172
2173 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2174
2175 out:
2176 if (error) {
2177 error_report_err(error);
2178 if (cpu != NULL) {
2179 object_unref(OBJECT(cpu));
2180 cpu = NULL;
2181 }
2182 }
2183 return cpu;
2184 }
2185
2186 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2187 {
2188 X86CPUDefinition *cpudef = data;
2189 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2190
2191 xcc->cpu_def = cpudef;
2192 }
2193
2194 static void x86_register_cpudef_type(X86CPUDefinition *def)
2195 {
2196 char *typename = x86_cpu_type_name(def->name);
2197 TypeInfo ti = {
2198 .name = typename,
2199 .parent = TYPE_X86_CPU,
2200 .class_init = x86_cpu_cpudef_class_init,
2201 .class_data = def,
2202 };
2203
2204 type_register(&ti);
2205 g_free(typename);
2206 }
2207
2208 #if !defined(CONFIG_USER_ONLY)
2209
2210 void cpu_clear_apic_feature(CPUX86State *env)
2211 {
2212 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2213 }
2214
2215 #endif /* !CONFIG_USER_ONLY */
2216
2217 /* Initialize list of CPU models, filling some non-static fields if necessary
2218 */
2219 void x86_cpudef_setup(void)
2220 {
2221 int i, j;
2222 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2223
2224 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2225 X86CPUDefinition *def = &builtin_x86_defs[i];
2226
2227 /* Look for specific "cpudef" models that have
2228 * the QEMU version in .model_id */
2229 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2230 if (strcmp(model_with_versions[j], def->name) == 0) {
2231 pstrcpy(def->model_id, sizeof(def->model_id),
2232 "QEMU Virtual CPU version ");
2233 pstrcat(def->model_id, sizeof(def->model_id),
2234 qemu_get_version());
2235 break;
2236 }
2237 }
2238 }
2239 }
2240
2241 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2242 uint32_t *eax, uint32_t *ebx,
2243 uint32_t *ecx, uint32_t *edx)
2244 {
2245 X86CPU *cpu = x86_env_get_cpu(env);
2246 CPUState *cs = CPU(cpu);
2247
2248 /* test if maximum index reached */
2249 if (index & 0x80000000) {
2250 if (index > env->cpuid_xlevel) {
2251 if (env->cpuid_xlevel2 > 0) {
2252 /* Handle Centaur's CPUID instruction. */
2253 if (index > env->cpuid_xlevel2) {
2254 index = env->cpuid_xlevel2;
2255 } else if (index < 0xC0000000) {
2256 index = env->cpuid_xlevel;
2257 }
2258 } else {
2259 /* Intel documentation states that invalid EAX input will
2260 * return the same information as EAX=cpuid_level
2261 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2262 */
2263 index = env->cpuid_level;
2264 }
2265 }
2266 } else {
2267 if (index > env->cpuid_level)
2268 index = env->cpuid_level;
2269 }
2270
2271 switch(index) {
2272 case 0:
2273 *eax = env->cpuid_level;
2274 *ebx = env->cpuid_vendor1;
2275 *edx = env->cpuid_vendor2;
2276 *ecx = env->cpuid_vendor3;
2277 break;
2278 case 1:
2279 *eax = env->cpuid_version;
2280 *ebx = (cpu->apic_id << 24) |
2281 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
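        /* CPUID.01H:EBX layout, for reference: bits 31..24 hold the initial
         * APIC ID, bits 23..16 the logical processor count (filled in below
         * when more than one core/thread is configured), bits 15..8 the
         * CLFLUSH line size in 8-byte units (8 -> 64 bytes) and bits 7..0
         * the brand index (left as zero here). */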
2282 *ecx = env->features[FEAT_1_ECX];
2283 *edx = env->features[FEAT_1_EDX];
2284 if (cs->nr_cores * cs->nr_threads > 1) {
2285 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2286 *edx |= 1 << 28; /* HTT bit */
2287 }
2288 break;
2289 case 2:
2290 /* cache info: needed for Pentium Pro compatibility */
2291 if (cpu->cache_info_passthrough) {
2292 host_cpuid(index, 0, eax, ebx, ecx, edx);
2293 break;
2294 }
2295 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2296 *ebx = 0;
2297 *ecx = 0;
2298 *edx = (L1D_DESCRIPTOR << 16) | \
2299 (L1I_DESCRIPTOR << 8) | \
2300 (L2_DESCRIPTOR);
2301 break;
2302 case 4:
2303 /* cache info: needed for Core compatibility */
2304 if (cpu->cache_info_passthrough) {
2305 host_cpuid(index, count, eax, ebx, ecx, edx);
2306 *eax &= ~0xFC000000;
2307 } else {
2308 *eax = 0;
2309 switch (count) {
2310 case 0: /* L1 dcache info */
2311 *eax |= CPUID_4_TYPE_DCACHE | \
2312 CPUID_4_LEVEL(1) | \
2313 CPUID_4_SELF_INIT_LEVEL;
2314 *ebx = (L1D_LINE_SIZE - 1) | \
2315 ((L1D_PARTITIONS - 1) << 12) | \
2316 ((L1D_ASSOCIATIVITY - 1) << 22);
2317 *ecx = L1D_SETS - 1;
2318 *edx = CPUID_4_NO_INVD_SHARING;
2319 break;
2320 case 1: /* L1 icache info */
2321 *eax |= CPUID_4_TYPE_ICACHE | \
2322 CPUID_4_LEVEL(1) | \
2323 CPUID_4_SELF_INIT_LEVEL;
2324 *ebx = (L1I_LINE_SIZE - 1) | \
2325 ((L1I_PARTITIONS - 1) << 12) | \
2326 ((L1I_ASSOCIATIVITY - 1) << 22);
2327 *ecx = L1I_SETS - 1;
2328 *edx = CPUID_4_NO_INVD_SHARING;
2329 break;
2330 case 2: /* L2 cache info */
2331 *eax |= CPUID_4_TYPE_UNIFIED | \
2332 CPUID_4_LEVEL(2) | \
2333 CPUID_4_SELF_INIT_LEVEL;
2334 if (cs->nr_threads > 1) {
2335 *eax |= (cs->nr_threads - 1) << 14;
2336 }
2337 *ebx = (L2_LINE_SIZE - 1) | \
2338 ((L2_PARTITIONS - 1) << 12) | \
2339 ((L2_ASSOCIATIVITY - 1) << 22);
2340 *ecx = L2_SETS - 1;
2341 *edx = CPUID_4_NO_INVD_SHARING;
2342 break;
2343 default: /* end of info */
2344 *eax = 0;
2345 *ebx = 0;
2346 *ecx = 0;
2347 *edx = 0;
2348 break;
2349 }
2350 }
2351
2352 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2353 if ((*eax & 31) && cs->nr_cores > 1) {
2354 *eax |= (cs->nr_cores - 1) << 26;
2355 }
2356 break;
2357 case 5:
2358 /* mwait info: needed for Core compatibility */
2359 *eax = 0; /* Smallest monitor-line size in bytes */
2360 *ebx = 0; /* Largest monitor-line size in bytes */
2361 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2362 *edx = 0;
2363 break;
2364 case 6:
2365 /* Thermal and Power Leaf */
2366 *eax = env->features[FEAT_6_EAX];
2367 *ebx = 0;
2368 *ecx = 0;
2369 *edx = 0;
2370 break;
2371 case 7:
2372 /* Structured Extended Feature Flags Enumeration Leaf */
2373 if (count == 0) {
2374 *eax = 0; /* Maximum ECX value for sub-leaves */
2375 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2376 *ecx = 0; /* Reserved */
2377 *edx = 0; /* Reserved */
2378 } else {
2379 *eax = 0;
2380 *ebx = 0;
2381 *ecx = 0;
2382 *edx = 0;
2383 }
2384 break;
2385 case 9:
2386 /* Direct Cache Access Information Leaf */
2387 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2388 *ebx = 0;
2389 *ecx = 0;
2390 *edx = 0;
2391 break;
2392 case 0xA:
2393 /* Architectural Performance Monitoring Leaf */
2394 if (kvm_enabled() && cpu->enable_pmu) {
2395 KVMState *s = cs->kvm_state;
2396
2397 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2398 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2399 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2400 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2401 } else {
2402 *eax = 0;
2403 *ebx = 0;
2404 *ecx = 0;
2405 *edx = 0;
2406 }
2407 break;
2408 case 0xD: {
2409 KVMState *s = cs->kvm_state;
2410 uint64_t kvm_mask;
2411 int i;
2412
2413 /* Processor Extended State */
2414 *eax = 0;
2415 *ebx = 0;
2416 *ecx = 0;
2417 *edx = 0;
2418 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2419 break;
2420 }
2421 kvm_mask =
2422 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2423 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2424
2425 if (count == 0) {
2426 *ecx = 0x240;
2427 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2428 const ExtSaveArea *esa = &ext_save_areas[i];
2429 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2430 (kvm_mask & (1 << i)) != 0) {
2431 if (i < 32) {
2432 *eax |= 1 << i;
2433 } else {
2434 *edx |= 1 << (i - 32);
2435 }
2436 *ecx = MAX(*ecx, esa->offset + esa->size);
2437 }
2438 }
2439 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2440 *ebx = *ecx;
2441 } else if (count == 1) {
2442 *eax = env->features[FEAT_XSAVE];
2443 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2444 const ExtSaveArea *esa = &ext_save_areas[count];
2445 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2446 (kvm_mask & (1 << count)) != 0) {
2447 *eax = esa->size;
2448 *ebx = esa->offset;
2449 }
2450 }
2451 break;
2452 }
2453 case 0x80000000:
2454 *eax = env->cpuid_xlevel;
2455 *ebx = env->cpuid_vendor1;
2456 *edx = env->cpuid_vendor2;
2457 *ecx = env->cpuid_vendor3;
2458 break;
2459 case 0x80000001:
2460 *eax = env->cpuid_version;
2461 *ebx = 0;
2462 *ecx = env->features[FEAT_8000_0001_ECX];
2463 *edx = env->features[FEAT_8000_0001_EDX];
2464
2465 /* The Linux kernel checks for the CMPLegacy bit and
2466 * discards multiple thread information if it is set.
2467 * So don't set it here for Intel to make Linux guests happy.
2468 */
2469 if (cs->nr_cores * cs->nr_threads > 1) {
2470 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2471 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2472 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2473 *ecx |= 1 << 1; /* CmpLegacy bit */
2474 }
2475 }
2476 break;
2477 case 0x80000002:
2478 case 0x80000003:
2479 case 0x80000004:
2480 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2481 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2482 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2483 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2484 break;
2485 case 0x80000005:
2486 /* cache info (L1 cache) */
2487 if (cpu->cache_info_passthrough) {
2488 host_cpuid(index, 0, eax, ebx, ecx, edx);
2489 break;
2490 }
2491 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2492 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2493 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2494 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2495 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2496 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2497 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2498 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2499 break;
2500 case 0x80000006:
2501 /* cache info (L2 cache) */
2502 if (cpu->cache_info_passthrough) {
2503 host_cpuid(index, 0, eax, ebx, ecx, edx);
2504 break;
2505 }
2506 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2507 (L2_DTLB_2M_ENTRIES << 16) | \
2508 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2509 (L2_ITLB_2M_ENTRIES);
2510 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2511 (L2_DTLB_4K_ENTRIES << 16) | \
2512 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2513 (L2_ITLB_4K_ENTRIES);
2514 *ecx = (L2_SIZE_KB_AMD << 16) | \
2515 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2516 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2517 *edx = ((L3_SIZE_KB/512) << 18) | \
2518 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2519 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2520 break;
2521 case 0x80000007:
2522 *eax = 0;
2523 *ebx = 0;
2524 *ecx = 0;
2525 *edx = env->features[FEAT_8000_0007_EDX];
2526 break;
2527 case 0x80000008:
2528 /* virtual & phys address size in low 2 bytes. */
2529 /* XXX: This value must match the one used in the MMU code. */
2530 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2531 /* 64 bit processor */
2532 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2533 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2534 } else {
2535 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2536 *eax = 0x00000024; /* 36 bits physical */
2537 } else {
2538 *eax = 0x00000020; /* 32 bits physical */
2539 }
2540 }
2541 *ebx = 0;
2542 *ecx = 0;
2543 *edx = 0;
2544 if (cs->nr_cores * cs->nr_threads > 1) {
2545 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2546 }
2547 break;
2548 case 0x8000000A:
2549 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2550 *eax = 0x00000001; /* SVM Revision */
2551 *ebx = 0x00000010; /* nr of ASIDs */
2552 *ecx = 0;
2553 *edx = env->features[FEAT_SVM]; /* optional features */
2554 } else {
2555 *eax = 0;
2556 *ebx = 0;
2557 *ecx = 0;
2558 *edx = 0;
2559 }
2560 break;
2561 case 0xC0000000:
2562 *eax = env->cpuid_xlevel2;
2563 *ebx = 0;
2564 *ecx = 0;
2565 *edx = 0;
2566 break;
2567 case 0xC0000001:
2568 /* Support for VIA CPU's CPUID instruction */
2569 *eax = env->cpuid_version;
2570 *ebx = 0;
2571 *ecx = 0;
2572 *edx = env->features[FEAT_C000_0001_EDX];
2573 break;
2574 case 0xC0000002:
2575 case 0xC0000003:
2576 case 0xC0000004:
2577 /* Reserved for future use; currently filled with zeros */
2578 *eax = 0;
2579 *ebx = 0;
2580 *ecx = 0;
2581 *edx = 0;
2582 break;
2583 default:
2584 /* reserved values: zero */
2585 *eax = 0;
2586 *ebx = 0;
2587 *ecx = 0;
2588 *edx = 0;
2589 break;
2590 }
2591 }
2592
2593 /* CPUClass::reset() */
2594 static void x86_cpu_reset(CPUState *s)
2595 {
2596 X86CPU *cpu = X86_CPU(s);
2597 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2598 CPUX86State *env = &cpu->env;
2599 int i;
2600
2601 xcc->parent_reset(s);
2602
2603 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2604
2605 tlb_flush(s, 1);
2606
2607 env->old_exception = -1;
2608
2609 /* init to reset state */
2610
2611 #ifdef CONFIG_SOFTMMU
2612 env->hflags |= HF_SOFTMMU_MASK;
2613 #endif
2614 env->hflags2 |= HF2_GIF_MASK;
2615
2616 cpu_x86_update_cr0(env, 0x60000010);
2617 env->a20_mask = ~0x0;
2618 env->smbase = 0x30000;
2619
2620 env->idt.limit = 0xffff;
2621 env->gdt.limit = 0xffff;
2622 env->ldt.limit = 0xffff;
2623 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2624 env->tr.limit = 0xffff;
2625 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2626
2627 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2628 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2629 DESC_R_MASK | DESC_A_MASK);
2630 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2631 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2632 DESC_A_MASK);
2633 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2634 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2635 DESC_A_MASK);
2636 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2637 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2638 DESC_A_MASK);
2639 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2640 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2641 DESC_A_MASK);
2642 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2643 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2644 DESC_A_MASK);
2645
2646 env->eip = 0xfff0;
2647 env->regs[R_EDX] = env->cpuid_version;
2648
2649 env->eflags = 0x2;
2650
2651 /* FPU init */
2652 for (i = 0; i < 8; i++) {
2653 env->fptags[i] = 1;
2654 }
2655 cpu_set_fpuc(env, 0x37f);
2656
2657 env->mxcsr = 0x1f80;
2658 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2659
2660 env->pat = 0x0007040600070406ULL;
2661 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2662
2663 memset(env->dr, 0, sizeof(env->dr));
2664 env->dr[6] = DR6_FIXED_1;
2665 env->dr[7] = DR7_FIXED_1;
2666 cpu_breakpoint_remove_all(s, BP_CPU);
2667 cpu_watchpoint_remove_all(s, BP_CPU);
2668
2669 env->xcr0 = 1;
2670
2671 /*
2672 * SDM 11.11.5 requires:
2673 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2674 * - IA32_MTRR_PHYSMASKn.V = 0
2675 * All other bits are undefined. For simplification, zero it all.
2676 */
2677 env->mtrr_deftype = 0;
2678 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2679 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2680
2681 #if !defined(CONFIG_USER_ONLY)
2682 /* We hard-wire the BSP to the first CPU. */
2683 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2684
2685 s->halted = !cpu_is_bsp(cpu);
2686
2687 if (kvm_enabled()) {
2688 kvm_arch_reset_vcpu(cpu);
2689 }
2690 #endif
2691 }
2692
2693 #ifndef CONFIG_USER_ONLY
2694 bool cpu_is_bsp(X86CPU *cpu)
2695 {
2696 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2697 }
2698
2699 /* TODO: remove me, when reset over QOM tree is implemented */
2700 static void x86_cpu_machine_reset_cb(void *opaque)
2701 {
2702 X86CPU *cpu = opaque;
2703 cpu_reset(CPU(cpu));
2704 }
2705 #endif
2706
2707 static void mce_init(X86CPU *cpu)
2708 {
2709 CPUX86State *cenv = &cpu->env;
2710 unsigned int bank;
2711
2712 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2713 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2714 (CPUID_MCE | CPUID_MCA)) {
2715 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2716 cenv->mcg_ctl = ~(uint64_t)0;
2717 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2718 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2719 }
2720 }
2721 }
2722
2723 #ifndef CONFIG_USER_ONLY
2724 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2725 {
2726 DeviceState *dev = DEVICE(cpu);
2727 APICCommonState *apic;
2728 const char *apic_type = "apic";
2729
2730 if (kvm_irqchip_in_kernel()) {
2731 apic_type = "kvm-apic";
2732 } else if (xen_enabled()) {
2733 apic_type = "xen-apic";
2734 }
2735
2736 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2737 if (cpu->apic_state == NULL) {
2738 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2739 return;
2740 }
2741
2742 object_property_add_child(OBJECT(cpu), "apic",
2743 OBJECT(cpu->apic_state), NULL);
2744 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2745 /* TODO: convert to link<> */
2746 apic = APIC_COMMON(cpu->apic_state);
2747 apic->cpu = cpu;
2748 }
2749
2750 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2751 {
2752 if (cpu->apic_state == NULL) {
2753 return;
2754 }
2755 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2756 errp);
2757 }
2758
2759 static void x86_cpu_machine_done(Notifier *n, void *unused)
2760 {
2761 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2762 MemoryRegion *smram =
2763 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2764
2765 if (smram) {
2766 cpu->smram = g_new(MemoryRegion, 1);
2767 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2768 smram, 0, 1ull << 32);
2769 memory_region_set_enabled(cpu->smram, false);
2770 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2771 }
2772 }
2773 #else
2774 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2775 {
2776 }
2777 #endif
2778
2779
2780 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2781 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2782 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2783 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2784 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2785 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2786 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2787 {
2788 CPUState *cs = CPU(dev);
2789 X86CPU *cpu = X86_CPU(dev);
2790 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2791 CPUX86State *env = &cpu->env;
2792 Error *local_err = NULL;
2793 static bool ht_warned;
2794
2795 if (cpu->apic_id < 0) {
2796 error_setg(errp, "apic-id property was not initialized properly");
2797 return;
2798 }
2799
2800 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2801 env->cpuid_level = 7;
2802 }
2803
2804 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2805 * CPUID[1].EDX.
2806 */
2807 if (IS_AMD_CPU(env)) {
2808 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2809 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2810 & CPUID_EXT2_AMD_ALIASES);
2811 }
2812
2813
2814 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2815 error_setg(&local_err,
2816 kvm_enabled() ?
2817 "Host doesn't support requested features" :
2818 "TCG doesn't support requested features");
2819 goto out;
2820 }
2821
2822 #ifndef CONFIG_USER_ONLY
2823 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2824
2825 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2826 x86_cpu_apic_create(cpu, &local_err);
2827 if (local_err != NULL) {
2828 goto out;
2829 }
2830 }
2831 #endif
2832
2833 mce_init(cpu);
2834
2835 #ifndef CONFIG_USER_ONLY
2836 if (tcg_enabled()) {
2837 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2838 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2839 cs->as = g_new(AddressSpace, 1);
2840
2841 /* Outer container... */
2842 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2843 memory_region_set_enabled(cpu->cpu_as_root, true);
2844
2845 /* ... with two regions inside: normal system memory with low
2846 * priority, and...
2847 */
2848 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2849 get_system_memory(), 0, ~0ull);
2850 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2851 memory_region_set_enabled(cpu->cpu_as_mem, true);
2852 address_space_init(cs->as, cpu->cpu_as_root, "CPU");
2853
2854 /* ... SMRAM with higher priority, linked from /machine/smram. */
2855 cpu->machine_done.notify = x86_cpu_machine_done;
2856 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2857 }
2858 #endif
2859
2860 qemu_init_vcpu(cs);
2861
2862 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2863 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2864 * based on inputs (sockets, cores, threads), it is still better to give
2865 * users a warning.
2866 *
2867 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2868 * cs->nr_threads hasn't been populated yet and the check is incorrect.
2869 */
2870 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2871 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2872 " -smp options properly.");
2873 ht_warned = true;
2874 }
2875
2876 x86_cpu_apic_realize(cpu, &local_err);
2877 if (local_err != NULL) {
2878 goto out;
2879 }
2880 cpu_reset(cs);
2881
2882 xcc->parent_realize(dev, &local_err);
2883
2884 out:
2885 if (local_err != NULL) {
2886 error_propagate(errp, local_err);
2887 return;
2888 }
2889 }
2890
2891 typedef struct BitProperty {
2892 uint32_t *ptr;
2893 uint32_t mask;
2894 } BitProperty;
2895
2896 static void x86_cpu_get_bit_prop(Object *obj,
2897 struct Visitor *v,
2898 void *opaque,
2899 const char *name,
2900 Error **errp)
2901 {
2902 BitProperty *fp = opaque;
2903 bool value = (*fp->ptr & fp->mask) == fp->mask;
2904 visit_type_bool(v, &value, name, errp);
2905 }
2906
2907 static void x86_cpu_set_bit_prop(Object *obj,
2908 struct Visitor *v,
2909 void *opaque,
2910 const char *name,
2911 Error **errp)
2912 {
2913 DeviceState *dev = DEVICE(obj);
2914 BitProperty *fp = opaque;
2915 Error *local_err = NULL;
2916 bool value;
2917
2918 if (dev->realized) {
2919 qdev_prop_set_after_realize(dev, name, errp);
2920 return;
2921 }
2922
2923 visit_type_bool(v, &value, name, &local_err);
2924 if (local_err) {
2925 error_propagate(errp, local_err);
2926 return;
2927 }
2928
2929 if (value) {
2930 *fp->ptr |= fp->mask;
2931 } else {
2932 *fp->ptr &= ~fp->mask;
2933 }
2934 }
2935
2936 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2937 void *opaque)
2938 {
2939 BitProperty *prop = opaque;
2940 g_free(prop);
2941 }
2942
2943 /* Register a boolean property to get/set a single bit in a uint32_t field.
2944 *
2945 * The same property name can be registered multiple times to make it affect
2946 * multiple bits in the same FeatureWord. In that case, the getter will return
2947 * true only if all bits are set.
2948 */
2949 static void x86_cpu_register_bit_prop(X86CPU *cpu,
2950 const char *prop_name,
2951 uint32_t *field,
2952 int bitnr)
2953 {
2954 BitProperty *fp;
2955 ObjectProperty *op;
2956 uint32_t mask = (1UL << bitnr);
2957
2958 op = object_property_find(OBJECT(cpu), prop_name, NULL);
2959 if (op) {
2960 fp = op->opaque;
2961 assert(fp->ptr == field);
2962 fp->mask |= mask;
2963 } else {
2964 fp = g_new0(BitProperty, 1);
2965 fp->ptr = field;
2966 fp->mask = mask;
2967 object_property_add(OBJECT(cpu), prop_name, "bool",
2968 x86_cpu_get_bit_prop,
2969 x86_cpu_set_bit_prop,
2970 x86_cpu_release_bit_prop, fp, &error_abort);
2971 }
2972 }
2973
2974 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
2975 FeatureWord w,
2976 int bitnr)
2977 {
2978 Object *obj = OBJECT(cpu);
2979 int i;
2980 char **names;
2981 FeatureWordInfo *fi = &feature_word_info[w];
2982
2983 if (!fi->feat_names) {
2984 return;
2985 }
2986 if (!fi->feat_names[bitnr]) {
2987 return;
2988 }
2989
2990 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
2991
2992 feat2prop(names[0]);
2993 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
2994
2995 for (i = 1; names[i]; i++) {
2996 feat2prop(names[i]);
2997 object_property_add_alias(obj, names[i], obj, names[0],
2998 &error_abort);
2999 }
3000
3001 g_strfreev(names);
3002 }
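/* Example (using the '|' alias convention of feat_names): an entry such as
 * "lahf_lm" is registered as the boolean property "lahf-lm", while an entry
 * like "sse4.2|sse4_2" registers "sse4.2" as the property and adds "sse4-2"
 * as an alias for it. */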
3003
3004 static void x86_cpu_initfn(Object *obj)
3005 {
3006 CPUState *cs = CPU(obj);
3007 X86CPU *cpu = X86_CPU(obj);
3008 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3009 CPUX86State *env = &cpu->env;
3010 FeatureWord w;
3011 static int inited;
3012
3013 cs->env_ptr = env;
3014 cpu_exec_init(cs, &error_abort);
3015
3016 object_property_add(obj, "family", "int",
3017 x86_cpuid_version_get_family,
3018 x86_cpuid_version_set_family, NULL, NULL, NULL);
3019 object_property_add(obj, "model", "int",
3020 x86_cpuid_version_get_model,
3021 x86_cpuid_version_set_model, NULL, NULL, NULL);
3022 object_property_add(obj, "stepping", "int",
3023 x86_cpuid_version_get_stepping,
3024 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3025 object_property_add_str(obj, "vendor",
3026 x86_cpuid_get_vendor,
3027 x86_cpuid_set_vendor, NULL);
3028 object_property_add_str(obj, "model-id",
3029 x86_cpuid_get_model_id,
3030 x86_cpuid_set_model_id, NULL);
3031 object_property_add(obj, "tsc-frequency", "int",
3032 x86_cpuid_get_tsc_freq,
3033 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3034 object_property_add(obj, "apic-id", "int",
3035 x86_cpuid_get_apic_id,
3036 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3037 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3038 x86_cpu_get_feature_words,
3039 NULL, NULL, (void *)env->features, NULL);
3040 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3041 x86_cpu_get_feature_words,
3042 NULL, NULL, (void *)cpu->filtered_features, NULL);
3043
3044 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3045
3046 #ifndef CONFIG_USER_ONLY
3047 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3048 cpu->apic_id = -1;
3049 #endif
3050
3051 for (w = 0; w < FEATURE_WORDS; w++) {
3052 int bitnr;
3053
3054 for (bitnr = 0; bitnr < 32; bitnr++) {
3055 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3056 }
3057 }
3058
3059 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3060
3061 /* init various static tables used in TCG mode */
3062 if (tcg_enabled() && !inited) {
3063 inited = 1;
3064 optimize_flags_init();
3065 }
3066 }
3067
3068 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3069 {
3070 X86CPU *cpu = X86_CPU(cs);
3071
3072 return cpu->apic_id;
3073 }
3074
3075 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3076 {
3077 X86CPU *cpu = X86_CPU(cs);
3078
3079 return cpu->env.cr[0] & CR0_PG_MASK;
3080 }
3081
3082 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3083 {
3084 X86CPU *cpu = X86_CPU(cs);
3085
3086 cpu->env.eip = value;
3087 }
3088
3089 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3090 {
3091 X86CPU *cpu = X86_CPU(cs);
3092
3093 cpu->env.eip = tb->pc - tb->cs_base;
3094 }
3095
3096 static bool x86_cpu_has_work(CPUState *cs)
3097 {
3098 X86CPU *cpu = X86_CPU(cs);
3099 CPUX86State *env = &cpu->env;
3100
3101 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3102 CPU_INTERRUPT_POLL)) &&
3103 (env->eflags & IF_MASK)) ||
3104 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3105 CPU_INTERRUPT_INIT |
3106 CPU_INTERRUPT_SIPI |
3107 CPU_INTERRUPT_MCE)) ||
3108 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3109 !(env->hflags & HF_SMM_MASK));
3110 }
3111
3112 static Property x86_cpu_properties[] = {
3113 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3114 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3115 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3116 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3117 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3118 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3119 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
3120 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3121 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3122 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3123 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3124 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3125 DEFINE_PROP_END_OF_LIST()
3126 };
3127
3128 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3129 {
3130 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3131 CPUClass *cc = CPU_CLASS(oc);
3132 DeviceClass *dc = DEVICE_CLASS(oc);
3133
3134 xcc->parent_realize = dc->realize;
3135 dc->realize = x86_cpu_realizefn;
3136 dc->bus_type = TYPE_ICC_BUS;
3137 dc->props = x86_cpu_properties;
3138
3139 xcc->parent_reset = cc->reset;
3140 cc->reset = x86_cpu_reset;
3141 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3142
3143 cc->class_by_name = x86_cpu_class_by_name;
3144 cc->parse_features = x86_cpu_parse_featurestr;
3145 cc->has_work = x86_cpu_has_work;
3146 cc->do_interrupt = x86_cpu_do_interrupt;
3147 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3148 cc->dump_state = x86_cpu_dump_state;
3149 cc->set_pc = x86_cpu_set_pc;
3150 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3151 cc->gdb_read_register = x86_cpu_gdb_read_register;
3152 cc->gdb_write_register = x86_cpu_gdb_write_register;
3153 cc->get_arch_id = x86_cpu_get_arch_id;
3154 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3155 #ifdef CONFIG_USER_ONLY
3156 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3157 #else
3158 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3159 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3160 cc->write_elf64_note = x86_cpu_write_elf64_note;
3161 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3162 cc->write_elf32_note = x86_cpu_write_elf32_note;
3163 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3164 cc->vmsd = &vmstate_x86_cpu;
3165 #endif
3166 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3167 #ifndef CONFIG_USER_ONLY
3168 cc->debug_excp_handler = breakpoint_handler;
3169 #endif
3170 cc->cpu_exec_enter = x86_cpu_exec_enter;
3171 cc->cpu_exec_exit = x86_cpu_exec_exit;
3172 }
3173
3174 static const TypeInfo x86_cpu_type_info = {
3175 .name = TYPE_X86_CPU,
3176 .parent = TYPE_CPU,
3177 .instance_size = sizeof(X86CPU),
3178 .instance_init = x86_cpu_initfn,
3179 .abstract = true,
3180 .class_size = sizeof(X86CPUClass),
3181 .class_init = x86_cpu_common_class_init,
3182 };
3183
3184 static void x86_cpu_register_types(void)
3185 {
3186 int i;
3187
3188 type_register_static(&x86_cpu_type_info);
3189 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3190 x86_register_cpudef_type(&builtin_x86_defs[i]);
3191 }
3192 #ifdef CONFIG_KVM
3193 type_register_static(&host_x86_cpu_type_info);
3194 #endif
3195 }
3196
3197 type_init(x86_cpu_register_types)