target-i386/cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
23
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
33
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
38
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
43
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/cpu/icc_bus.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "exec/address-spaces.h"
49 #include "hw/xen/xen.h"
50 #include "hw/i386/apic_internal.h"
51 #endif
52
53
54 /* Cache topology CPUID constants: */
55
56 /* CPUID Leaf 2 Descriptors */
57
58 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
59 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
60 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
61
62
63 /* CPUID Leaf 4 constants: */
64
65 /* EAX: */
66 #define CPUID_4_TYPE_DCACHE 1
67 #define CPUID_4_TYPE_ICACHE 2
68 #define CPUID_4_TYPE_UNIFIED 3
69
70 #define CPUID_4_LEVEL(l) ((l) << 5)
71
72 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
73 #define CPUID_4_FULLY_ASSOC (1 << 9)
74
75 /* EDX: */
76 #define CPUID_4_NO_INVD_SHARING (1 << 0)
77 #define CPUID_4_INCLUSIVE (1 << 1)
78 #define CPUID_4_COMPLEX_IDX (1 << 2)
79
80 #define ASSOC_FULL 0xFF
81
82 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
83 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
84 a == 2 ? 0x2 : \
85 a == 4 ? 0x4 : \
86 a == 8 ? 0x6 : \
87 a == 16 ? 0x8 : \
88 a == 32 ? 0xA : \
89 a == 48 ? 0xB : \
90 a == 64 ? 0xC : \
91 a == 96 ? 0xD : \
92 a == 128 ? 0xE : \
93 a == ASSOC_FULL ? 0xF : \
94 0 /* invalid value */)
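/* Worked example: for the 16-way L2 cache defined below, AMD_ENC_ASSOC(16)
 * takes the "a == 16" arm and yields 0x8, the leaf 0x80000006 encoding for a
 * 16-way set-associative cache; an associativity not listed in the table
 * (e.g. 3) falls through to 0, the invalid marker.
 */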
95
96
97 /* Definitions of the hardcoded cache entries we expose: */
98
99 /* L1 data cache: */
100 #define L1D_LINE_SIZE 64
101 #define L1D_ASSOCIATIVITY 8
102 #define L1D_SETS 64
103 #define L1D_PARTITIONS 1
104 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
105 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
106 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
107 #define L1D_LINES_PER_TAG 1
108 #define L1D_SIZE_KB_AMD 64
109 #define L1D_ASSOCIATIVITY_AMD 2
110
111 /* L1 instruction cache: */
112 #define L1I_LINE_SIZE 64
113 #define L1I_ASSOCIATIVITY 8
114 #define L1I_SETS 64
115 #define L1I_PARTITIONS 1
116 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
117 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
118 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
119 #define L1I_LINES_PER_TAG 1
120 #define L1I_SIZE_KB_AMD 64
121 #define L1I_ASSOCIATIVITY_AMD 2
122
123 /* Level 2 unified cache: */
124 #define L2_LINE_SIZE 64
125 #define L2_ASSOCIATIVITY 16
126 #define L2_SETS 4096
127 #define L2_PARTITIONS 1
128 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
129 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
130 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
131 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
132 #define L2_LINES_PER_TAG 1
133 #define L2_SIZE_KB_AMD 512
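/* Worked sizes for the leaf 2/4 constants above
 * (size = line size x associativity x sets x partitions):
 *   L1D, L1I: 64 B x  8 ways x   64 sets x 1 = 32 KiB
 *   L2:       64 B x 16 ways x 4096 sets x 1 =  4 MiB
 * The AMD-style leaves instead advertise 64 KiB, 2-way L1 caches and a
 * 512 KiB L2, and the leaf 2 descriptor encodes a 2 MiB L2, which is what
 * the FIXME comments above flag.
 */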
134
135 /* No L3 cache: */
136 #define L3_SIZE_KB 0 /* disabled */
137 #define L3_ASSOCIATIVITY 0 /* disabled */
138 #define L3_LINES_PER_TAG 0 /* disabled */
139 #define L3_LINE_SIZE 0 /* disabled */
140
141 /* TLB definitions: */
142
143 #define L1_DTLB_2M_ASSOC 1
144 #define L1_DTLB_2M_ENTRIES 255
145 #define L1_DTLB_4K_ASSOC 1
146 #define L1_DTLB_4K_ENTRIES 255
147
148 #define L1_ITLB_2M_ASSOC 1
149 #define L1_ITLB_2M_ENTRIES 255
150 #define L1_ITLB_4K_ASSOC 1
151 #define L1_ITLB_4K_ENTRIES 255
152
153 #define L2_DTLB_2M_ASSOC 0 /* disabled */
154 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
155 #define L2_DTLB_4K_ASSOC 4
156 #define L2_DTLB_4K_ENTRIES 512
157
158 #define L2_ITLB_2M_ASSOC 0 /* disabled */
159 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
160 #define L2_ITLB_4K_ASSOC 4
161 #define L2_ITLB_4K_ENTRIES 512
162
163
164
165 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
166 uint32_t vendor2, uint32_t vendor3)
167 {
168 int i;
169 for (i = 0; i < 4; i++) {
170 dst[i] = vendor1 >> (8 * i);
171 dst[i + 4] = vendor2 >> (8 * i);
172 dst[i + 8] = vendor3 >> (8 * i);
173 }
174 dst[CPUID_VENDOR_SZ] = '\0';
175 }
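/* Example: the caller near the bottom of this file passes the CPUID.0
 * registers in EBX, EDX, ECX order, so on an Intel host (EBX=0x756e6547,
 * EDX=0x49656e69, ECX=0x6c65746e) the bytes assemble, lowest byte first,
 * into the 12-character string "GenuineIntel".
 */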
176
177 /* feature flags taken from "Intel Processor Identification and the CPUID
178 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
179 * between feature naming conventions, aliases may be added.
180 */
181 static const char *feature_name[] = {
182 "fpu", "vme", "de", "pse",
183 "tsc", "msr", "pae", "mce",
184 "cx8", "apic", NULL, "sep",
185 "mtrr", "pge", "mca", "cmov",
186 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
187 NULL, "ds" /* Intel dts */, "acpi", "mmx",
188 "fxsr", "sse", "sse2", "ss",
189 "ht" /* Intel htt */, "tm", "ia64", "pbe",
190 };
191 static const char *ext_feature_name[] = {
192 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
193 "ds_cpl", "vmx", "smx", "est",
194 "tm2", "ssse3", "cid", NULL,
195 "fma", "cx16", "xtpr", "pdcm",
196 NULL, "pcid", "dca", "sse4.1|sse4_1",
197 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
198 "tsc-deadline", "aes", "xsave", "osxsave",
199 "avx", "f16c", "rdrand", "hypervisor",
200 };
201 /* Feature names that are already defined on feature_name[] but are set on
202 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
203 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
204 * if and only if CPU vendor is AMD.
205 */
206 static const char *ext2_feature_name[] = {
207 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
208 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
209 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
210 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
211 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
212 "nx|xd", NULL, "mmxext", NULL /* mmx */,
213 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
214 NULL, "lm|i64", "3dnowext", "3dnow",
215 };
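/* A rough sketch of the automatic copy described in the comment above
 * (illustrative pseudo-code only; the vendor check is written informally),
 * using the CPUID_EXT2_AMD_ALIASES mask that TCG_EXT2_FEATURES below also
 * relies on:
 *
 *   if (CPU vendor is AMD) {
 *       env->features[FEAT_8000_0001_EDX] |=
 *           env->features[FEAT_1_EDX] & CPUID_EXT2_AMD_ALIASES;
 *   }
 */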
216 static const char *ext3_feature_name[] = {
217 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
218 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
219 "3dnowprefetch", "osvw", "ibs", "xop",
220 "skinit", "wdt", NULL, "lwp",
221 "fma4", "tce", NULL, "nodeid_msr",
222 NULL, "tbm", "topoext", "perfctr_core",
223 "perfctr_nb", NULL, NULL, NULL,
224 NULL, NULL, NULL, NULL,
225 };
226
227 static const char *ext4_feature_name[] = {
228 NULL, NULL, "xstore", "xstore-en",
229 NULL, NULL, "xcrypt", "xcrypt-en",
230 "ace2", "ace2-en", "phe", "phe-en",
231 "pmm", "pmm-en", NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
235 NULL, NULL, NULL, NULL,
236 };
237
238 static const char *kvm_feature_name[] = {
239 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
240 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 NULL, NULL, NULL, NULL,
245 "kvmclock-stable-bit", NULL, NULL, NULL,
246 NULL, NULL, NULL, NULL,
247 };
248
249 static const char *svm_feature_name[] = {
250 "npt", "lbrv", "svm_lock", "nrip_save",
251 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
252 NULL, NULL, "pause_filter", NULL,
253 "pfthreshold", NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257 NULL, NULL, NULL, NULL,
258 };
259
260 static const char *cpuid_7_0_ebx_feature_name[] = {
261 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
262 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
263 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
264 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
265 };
266
267 static const char *cpuid_apm_edx_feature_name[] = {
268 NULL, NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 "invtsc", NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
275 NULL, NULL, NULL, NULL,
276 };
277
278 static const char *cpuid_xsave_feature_name[] = {
279 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286 NULL, NULL, NULL, NULL,
287 };
288
289 static const char *cpuid_6_feature_name[] = {
290 NULL, NULL, "arat", NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
297 NULL, NULL, NULL, NULL,
298 };
299
300 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
301 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
302 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
303 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
304 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
305 CPUID_PSE36 | CPUID_FXSR)
306 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
307 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
308 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
309 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
310 CPUID_PAE | CPUID_SEP | CPUID_APIC)
311
312 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
313 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
314 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
315 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
316 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
317 /* partly implemented:
318 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
319 /* missing:
320 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
321 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
322 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
323 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
324 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
325 /* missing:
326 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
327 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
328 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
329 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
330 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
331 CPUID_EXT_RDRAND */
332
333 #ifdef TARGET_X86_64
334 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
335 #else
336 #define TCG_EXT2_X86_64_FEATURES 0
337 #endif
338
339 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
340 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
341 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
342 TCG_EXT2_X86_64_FEATURES)
343 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
344 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
345 #define TCG_EXT4_FEATURES 0
346 #define TCG_SVM_FEATURES 0
347 #define TCG_KVM_FEATURES 0
348 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
349 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
350 /* missing:
351 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
352 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
353 CPUID_7_0_EBX_RDSEED */
354 #define TCG_APM_FEATURES 0
355 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
356
357
358 typedef struct FeatureWordInfo {
359 const char **feat_names;
360 uint32_t cpuid_eax; /* Input EAX for CPUID */
361 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
362 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
363 int cpuid_reg; /* output register (R_* constant) */
364 uint32_t tcg_features; /* Feature flags supported by TCG */
365 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
366 } FeatureWordInfo;
367
368 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
369 [FEAT_1_EDX] = {
370 .feat_names = feature_name,
371 .cpuid_eax = 1, .cpuid_reg = R_EDX,
372 .tcg_features = TCG_FEATURES,
373 },
374 [FEAT_1_ECX] = {
375 .feat_names = ext_feature_name,
376 .cpuid_eax = 1, .cpuid_reg = R_ECX,
377 .tcg_features = TCG_EXT_FEATURES,
378 },
379 [FEAT_8000_0001_EDX] = {
380 .feat_names = ext2_feature_name,
381 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
382 .tcg_features = TCG_EXT2_FEATURES,
383 },
384 [FEAT_8000_0001_ECX] = {
385 .feat_names = ext3_feature_name,
386 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
387 .tcg_features = TCG_EXT3_FEATURES,
388 },
389 [FEAT_C000_0001_EDX] = {
390 .feat_names = ext4_feature_name,
391 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
392 .tcg_features = TCG_EXT4_FEATURES,
393 },
394 [FEAT_KVM] = {
395 .feat_names = kvm_feature_name,
396 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
397 .tcg_features = TCG_KVM_FEATURES,
398 },
399 [FEAT_SVM] = {
400 .feat_names = svm_feature_name,
401 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
402 .tcg_features = TCG_SVM_FEATURES,
403 },
404 [FEAT_7_0_EBX] = {
405 .feat_names = cpuid_7_0_ebx_feature_name,
406 .cpuid_eax = 7,
407 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
408 .cpuid_reg = R_EBX,
409 .tcg_features = TCG_7_0_EBX_FEATURES,
410 },
411 [FEAT_8000_0007_EDX] = {
412 .feat_names = cpuid_apm_edx_feature_name,
413 .cpuid_eax = 0x80000007,
414 .cpuid_reg = R_EDX,
415 .tcg_features = TCG_APM_FEATURES,
416 .unmigratable_flags = CPUID_APM_INVTSC,
417 },
418 [FEAT_XSAVE] = {
419 .feat_names = cpuid_xsave_feature_name,
420 .cpuid_eax = 0xd,
421 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
422 .cpuid_reg = R_EAX,
423 .tcg_features = 0,
424 },
425 [FEAT_6_EAX] = {
426 .feat_names = cpuid_6_feature_name,
427 .cpuid_eax = 6, .cpuid_reg = R_EAX,
428 .tcg_features = TCG_6_EAX_FEATURES,
429 },
430 };
431
432 typedef struct X86RegisterInfo32 {
433 /* Name of register */
434 const char *name;
435 /* QAPI enum value register */
436 X86CPURegister32 qapi_enum;
437 } X86RegisterInfo32;
438
439 #define REGISTER(reg) \
440 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
441 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
442 REGISTER(EAX),
443 REGISTER(ECX),
444 REGISTER(EDX),
445 REGISTER(EBX),
446 REGISTER(ESP),
447 REGISTER(EBP),
448 REGISTER(ESI),
449 REGISTER(EDI),
450 };
451 #undef REGISTER
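/* For reference, REGISTER(EAX) above expands to
 *   [R_EAX] = { .name = "EAX", .qapi_enum = X86_CPU_REGISTER32_EAX }
 */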
452
453 typedef struct ExtSaveArea {
454 uint32_t feature, bits;
455 uint32_t offset, size;
456 } ExtSaveArea;
457
458 static const ExtSaveArea ext_save_areas[] = {
459 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
460 .offset = 0x240, .size = 0x100 },
461 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
462 .offset = 0x3c0, .size = 0x40 },
463 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
464 .offset = 0x400, .size = 0x40 },
465 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
466 .offset = 0x440, .size = 0x40 },
467 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
468 .offset = 0x480, .size = 0x200 },
469 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
470 .offset = 0x680, .size = 0x400 },
471 };
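/* The offsets above follow the standard (non-compacted) XSAVE layout: the
 * 512-byte legacy FXSAVE region plus the 64-byte XSAVE header end at offset
 * 0x240, which is where state component 2 (AVX) begins.
 */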
472
473 const char *get_register_name_32(unsigned int reg)
474 {
475 if (reg >= CPU_NB_REGS32) {
476 return NULL;
477 }
478 return x86_reg_info_32[reg].name;
479 }
480
481 /*
482 * Returns the set of feature flags that are supported and migratable by
483 * QEMU, for a given FeatureWord.
484 */
485 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
486 {
487 FeatureWordInfo *wi = &feature_word_info[w];
488 uint32_t r = 0;
489 int i;
490
491 for (i = 0; i < 32; i++) {
492 uint32_t f = 1U << i;
493 /* If the feature name is unknown, it is not supported by QEMU yet */
494 if (!wi->feat_names[i]) {
495 continue;
496 }
497 /* Skip features known to QEMU, but explicitly marked as unmigratable */
498 if (wi->unmigratable_flags & f) {
499 continue;
500 }
501 r |= f;
502 }
503 return r;
504 }
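/* Example: for FEAT_8000_0007_EDX the only named bit is "invtsc", and it is
 * listed in unmigratable_flags above, so x86_cpu_get_migratable_flags()
 * returns 0 for that word.
 */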
505
506 void host_cpuid(uint32_t function, uint32_t count,
507 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
508 {
509 uint32_t vec[4];
510
511 #ifdef __x86_64__
512 asm volatile("cpuid"
513 : "=a"(vec[0]), "=b"(vec[1]),
514 "=c"(vec[2]), "=d"(vec[3])
515 : "0"(function), "c"(count) : "cc");
516 #elif defined(__i386__)
517 asm volatile("pusha \n\t"
518 "cpuid \n\t"
519 "mov %%eax, 0(%2) \n\t"
520 "mov %%ebx, 4(%2) \n\t"
521 "mov %%ecx, 8(%2) \n\t"
522 "mov %%edx, 12(%2) \n\t"
523 "popa"
524 : : "a"(function), "c"(count), "S"(vec)
525 : "memory", "cc");
526 #else
527 abort();
528 #endif
529
530 if (eax)
531 *eax = vec[0];
532 if (ebx)
533 *ebx = vec[1];
534 if (ecx)
535 *ecx = vec[2];
536 if (edx)
537 *edx = vec[3];
538 }
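/* Typical use (see host_x86_cpu_class_init() further down): leaf 0 returns
 * the highest basic leaf in EAX and the vendor string in EBX/EDX/ECX, and
 * any output pointer that is not needed may be passed as NULL:
 *
 *   uint32_t eax, ebx, ecx, edx;
 *   host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
 */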
539
540 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
541
542 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
543 * a substring. ex if !NULL points to the first char after a substring,
544 * otherwise the string is assumed to be sized by a terminating nul.
545 * Return lexical ordering of *s1:*s2.
546 */
547 static int sstrcmp(const char *s1, const char *e1,
548 const char *s2, const char *e2)
549 {
550 for (;;) {
551 if (!*s1 || !*s2 || *s1 != *s2)
552 return (*s1 - *s2);
553 ++s1, ++s2;
554 if (s1 == e1 && s2 == e2)
555 return (0);
556 else if (s1 == e1)
557 return (*s2);
558 else if (s2 == e2)
559 return (*s1);
560 }
561 }
562
563 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
564 * '|' delimited (possibly empty) strings in which case search for a match
565 * within the alternatives proceeds left to right. Return 0 for success,
566 * non-zero otherwise.
567 */
568 static int altcmp(const char *s, const char *e, const char *altstr)
569 {
570 const char *p, *q;
571
572 for (q = p = altstr; ; ) {
573 while (*p && *p != '|')
574 ++p;
575 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
576 return (0);
577 if (!*p)
578 return (1);
579 else
580 q = ++p;
581 }
582 }
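/* Example: with altstr "sse4.1|sse4_1" (an entry of ext_feature_name[]
 * above), altcmp() returns 0 for either spelling of the flag, so both
 * spellings are accepted when a feature string is parsed.
 */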
583
584 /* search featureset for flag *[s..e), if found set corresponding bit in
585 * *pval and return true, otherwise return false
586 */
587 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
588 const char **featureset)
589 {
590 uint32_t mask;
591 const char **ppc;
592 bool found = false;
593
594 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
595 if (*ppc && !altcmp(s, e, *ppc)) {
596 *pval |= mask;
597 found = true;
598 }
599 }
600 return found;
601 }
602
603 static void add_flagname_to_bitmaps(const char *flagname,
604 FeatureWordArray words,
605 Error **errp)
606 {
607 FeatureWord w;
608 for (w = 0; w < FEATURE_WORDS; w++) {
609 FeatureWordInfo *wi = &feature_word_info[w];
610 if (wi->feat_names &&
611 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
612 break;
613 }
614 }
615 if (w == FEATURE_WORDS) {
616 error_setg(errp, "CPU feature %s not found", flagname);
617 }
618 }
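/* Example: add_flagname_to_bitmaps("sse4_1", words, &err) finds no match in
 * feature_name[], then matches the "sse4.1|sse4_1" entry (bit 19) of
 * ext_feature_name[] and sets that bit in words[FEAT_1_ECX]; an unknown
 * flag name leaves the words untouched and sets the error instead.
 */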
619
620 /* CPU class name definitions: */
621
622 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
623 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
624
625 /* Return type name for a given CPU model name
626 * Caller is responsible for freeing the returned string.
627 */
628 static char *x86_cpu_type_name(const char *model_name)
629 {
630 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
631 }
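/* For example, assuming TYPE_X86_CPU expands to the target-specific
 * "<arch>-cpu" string, x86_cpu_type_name("qemu64") would return the
 * heap-allocated string "qemu64-x86_64-cpu" on an x86_64 target; the caller
 * (such as x86_cpu_class_by_name() below) must g_free() it.
 */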
632
633 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
634 {
635 ObjectClass *oc;
636 char *typename;
637
638 if (cpu_model == NULL) {
639 return NULL;
640 }
641
642 typename = x86_cpu_type_name(cpu_model);
643 oc = object_class_by_name(typename);
644 g_free(typename);
645 return oc;
646 }
647
648 struct X86CPUDefinition {
649 const char *name;
650 uint32_t level;
651 uint32_t xlevel;
652 uint32_t xlevel2;
653 /* vendor is zero-terminated, 12 character ASCII string */
654 char vendor[CPUID_VENDOR_SZ + 1];
655 int family;
656 int model;
657 int stepping;
658 FeatureWordArray features;
659 char model_id[48];
660 bool cache_info_passthrough;
661 };
662
663 static X86CPUDefinition builtin_x86_defs[] = {
664 {
665 .name = "qemu64",
666 .level = 0xd,
667 .vendor = CPUID_VENDOR_AMD,
668 .family = 6,
669 .model = 6,
670 .stepping = 3,
671 .features[FEAT_1_EDX] =
672 PPRO_FEATURES |
673 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
674 CPUID_PSE36,
675 .features[FEAT_1_ECX] =
676 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
677 .features[FEAT_8000_0001_EDX] =
678 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
679 .features[FEAT_8000_0001_ECX] =
680 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
681 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
682 .xlevel = 0x8000000A,
683 },
684 {
685 .name = "phenom",
686 .level = 5,
687 .vendor = CPUID_VENDOR_AMD,
688 .family = 16,
689 .model = 2,
690 .stepping = 3,
691 /* Missing: CPUID_HT */
692 .features[FEAT_1_EDX] =
693 PPRO_FEATURES |
694 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
695 CPUID_PSE36 | CPUID_VME,
696 .features[FEAT_1_ECX] =
697 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
698 CPUID_EXT_POPCNT,
699 .features[FEAT_8000_0001_EDX] =
700 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
701 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
702 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
703 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
704 CPUID_EXT3_CR8LEG,
705 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
706 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
707 .features[FEAT_8000_0001_ECX] =
708 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
709 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
710 /* Missing: CPUID_SVM_LBRV */
711 .features[FEAT_SVM] =
712 CPUID_SVM_NPT,
713 .xlevel = 0x8000001A,
714 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
715 },
716 {
717 .name = "core2duo",
718 .level = 10,
719 .vendor = CPUID_VENDOR_INTEL,
720 .family = 6,
721 .model = 15,
722 .stepping = 11,
723 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
724 .features[FEAT_1_EDX] =
725 PPRO_FEATURES |
726 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
727 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
728 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
729 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
730 .features[FEAT_1_ECX] =
731 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
732 CPUID_EXT_CX16,
733 .features[FEAT_8000_0001_EDX] =
734 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
735 .features[FEAT_8000_0001_ECX] =
736 CPUID_EXT3_LAHF_LM,
737 .xlevel = 0x80000008,
738 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
739 },
740 {
741 .name = "kvm64",
742 .level = 0xd,
743 .vendor = CPUID_VENDOR_INTEL,
744 .family = 15,
745 .model = 6,
746 .stepping = 1,
747 /* Missing: CPUID_HT */
748 .features[FEAT_1_EDX] =
749 PPRO_FEATURES | CPUID_VME |
750 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
751 CPUID_PSE36,
752 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
753 .features[FEAT_1_ECX] =
754 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
755 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
756 .features[FEAT_8000_0001_EDX] =
757 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
758 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
759 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
760 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
761 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
762 .features[FEAT_8000_0001_ECX] =
763 0,
764 .xlevel = 0x80000008,
765 .model_id = "Common KVM processor"
766 },
767 {
768 .name = "qemu32",
769 .level = 4,
770 .vendor = CPUID_VENDOR_INTEL,
771 .family = 6,
772 .model = 6,
773 .stepping = 3,
774 .features[FEAT_1_EDX] =
775 PPRO_FEATURES,
776 .features[FEAT_1_ECX] =
777 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
778 .xlevel = 0x80000004,
779 },
780 {
781 .name = "kvm32",
782 .level = 5,
783 .vendor = CPUID_VENDOR_INTEL,
784 .family = 15,
785 .model = 6,
786 .stepping = 1,
787 .features[FEAT_1_EDX] =
788 PPRO_FEATURES | CPUID_VME |
789 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
790 .features[FEAT_1_ECX] =
791 CPUID_EXT_SSE3,
792 .features[FEAT_8000_0001_ECX] =
793 0,
794 .xlevel = 0x80000008,
795 .model_id = "Common 32-bit KVM processor"
796 },
797 {
798 .name = "coreduo",
799 .level = 10,
800 .vendor = CPUID_VENDOR_INTEL,
801 .family = 6,
802 .model = 14,
803 .stepping = 8,
804 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
805 .features[FEAT_1_EDX] =
806 PPRO_FEATURES | CPUID_VME |
807 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
808 CPUID_SS,
809 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
810 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
811 .features[FEAT_1_ECX] =
812 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
813 .features[FEAT_8000_0001_EDX] =
814 CPUID_EXT2_NX,
815 .xlevel = 0x80000008,
816 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
817 },
818 {
819 .name = "486",
820 .level = 1,
821 .vendor = CPUID_VENDOR_INTEL,
822 .family = 4,
823 .model = 8,
824 .stepping = 0,
825 .features[FEAT_1_EDX] =
826 I486_FEATURES,
827 .xlevel = 0,
828 },
829 {
830 .name = "pentium",
831 .level = 1,
832 .vendor = CPUID_VENDOR_INTEL,
833 .family = 5,
834 .model = 4,
835 .stepping = 3,
836 .features[FEAT_1_EDX] =
837 PENTIUM_FEATURES,
838 .xlevel = 0,
839 },
840 {
841 .name = "pentium2",
842 .level = 2,
843 .vendor = CPUID_VENDOR_INTEL,
844 .family = 6,
845 .model = 5,
846 .stepping = 2,
847 .features[FEAT_1_EDX] =
848 PENTIUM2_FEATURES,
849 .xlevel = 0,
850 },
851 {
852 .name = "pentium3",
853 .level = 3,
854 .vendor = CPUID_VENDOR_INTEL,
855 .family = 6,
856 .model = 7,
857 .stepping = 3,
858 .features[FEAT_1_EDX] =
859 PENTIUM3_FEATURES,
860 .xlevel = 0,
861 },
862 {
863 .name = "athlon",
864 .level = 2,
865 .vendor = CPUID_VENDOR_AMD,
866 .family = 6,
867 .model = 2,
868 .stepping = 3,
869 .features[FEAT_1_EDX] =
870 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
871 CPUID_MCA,
872 .features[FEAT_8000_0001_EDX] =
873 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
874 .xlevel = 0x80000008,
875 },
876 {
877 .name = "n270",
878 .level = 10,
879 .vendor = CPUID_VENDOR_INTEL,
880 .family = 6,
881 .model = 28,
882 .stepping = 2,
883 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
884 .features[FEAT_1_EDX] =
885 PPRO_FEATURES |
886 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
887 CPUID_ACPI | CPUID_SS,
888 /* Some CPUs have no CPUID_SEP */
889 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
890 * CPUID_EXT_XTPR */
891 .features[FEAT_1_ECX] =
892 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
893 CPUID_EXT_MOVBE,
894 .features[FEAT_8000_0001_EDX] =
895 CPUID_EXT2_NX,
896 .features[FEAT_8000_0001_ECX] =
897 CPUID_EXT3_LAHF_LM,
898 .xlevel = 0x80000008,
899 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
900 },
901 {
902 .name = "Conroe",
903 .level = 10,
904 .vendor = CPUID_VENDOR_INTEL,
905 .family = 6,
906 .model = 15,
907 .stepping = 3,
908 .features[FEAT_1_EDX] =
909 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
910 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
911 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
912 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
913 CPUID_DE | CPUID_FP87,
914 .features[FEAT_1_ECX] =
915 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
916 .features[FEAT_8000_0001_EDX] =
917 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
918 .features[FEAT_8000_0001_ECX] =
919 CPUID_EXT3_LAHF_LM,
920 .xlevel = 0x80000008,
921 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
922 },
923 {
924 .name = "Penryn",
925 .level = 10,
926 .vendor = CPUID_VENDOR_INTEL,
927 .family = 6,
928 .model = 23,
929 .stepping = 3,
930 .features[FEAT_1_EDX] =
931 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
932 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
933 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
934 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
935 CPUID_DE | CPUID_FP87,
936 .features[FEAT_1_ECX] =
937 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
938 CPUID_EXT_SSE3,
939 .features[FEAT_8000_0001_EDX] =
940 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
941 .features[FEAT_8000_0001_ECX] =
942 CPUID_EXT3_LAHF_LM,
943 .xlevel = 0x80000008,
944 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
945 },
946 {
947 .name = "Nehalem",
948 .level = 11,
949 .vendor = CPUID_VENDOR_INTEL,
950 .family = 6,
951 .model = 26,
952 .stepping = 3,
953 .features[FEAT_1_EDX] =
954 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
955 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
956 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
957 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
958 CPUID_DE | CPUID_FP87,
959 .features[FEAT_1_ECX] =
960 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
961 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
962 .features[FEAT_8000_0001_EDX] =
963 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
964 .features[FEAT_8000_0001_ECX] =
965 CPUID_EXT3_LAHF_LM,
966 .xlevel = 0x80000008,
967 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
968 },
969 {
970 .name = "Westmere",
971 .level = 11,
972 .vendor = CPUID_VENDOR_INTEL,
973 .family = 6,
974 .model = 44,
975 .stepping = 1,
976 .features[FEAT_1_EDX] =
977 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
978 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
979 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
980 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
981 CPUID_DE | CPUID_FP87,
982 .features[FEAT_1_ECX] =
983 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
984 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
985 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
986 .features[FEAT_8000_0001_EDX] =
987 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
988 .features[FEAT_8000_0001_ECX] =
989 CPUID_EXT3_LAHF_LM,
990 .features[FEAT_6_EAX] =
991 CPUID_6_EAX_ARAT,
992 .xlevel = 0x80000008,
993 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
994 },
995 {
996 .name = "SandyBridge",
997 .level = 0xd,
998 .vendor = CPUID_VENDOR_INTEL,
999 .family = 6,
1000 .model = 42,
1001 .stepping = 1,
1002 .features[FEAT_1_EDX] =
1003 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1004 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1005 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1006 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1007 CPUID_DE | CPUID_FP87,
1008 .features[FEAT_1_ECX] =
1009 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1010 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1011 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1012 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1013 CPUID_EXT_SSE3,
1014 .features[FEAT_8000_0001_EDX] =
1015 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1016 CPUID_EXT2_SYSCALL,
1017 .features[FEAT_8000_0001_ECX] =
1018 CPUID_EXT3_LAHF_LM,
1019 .features[FEAT_XSAVE] =
1020 CPUID_XSAVE_XSAVEOPT,
1021 .features[FEAT_6_EAX] =
1022 CPUID_6_EAX_ARAT,
1023 .xlevel = 0x80000008,
1024 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1025 },
1026 {
1027 .name = "IvyBridge",
1028 .level = 0xd,
1029 .vendor = CPUID_VENDOR_INTEL,
1030 .family = 6,
1031 .model = 58,
1032 .stepping = 9,
1033 .features[FEAT_1_EDX] =
1034 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1035 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1036 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1037 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1038 CPUID_DE | CPUID_FP87,
1039 .features[FEAT_1_ECX] =
1040 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1041 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1042 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1043 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1044 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1045 .features[FEAT_7_0_EBX] =
1046 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1047 CPUID_7_0_EBX_ERMS,
1048 .features[FEAT_8000_0001_EDX] =
1049 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1050 CPUID_EXT2_SYSCALL,
1051 .features[FEAT_8000_0001_ECX] =
1052 CPUID_EXT3_LAHF_LM,
1053 .features[FEAT_XSAVE] =
1054 CPUID_XSAVE_XSAVEOPT,
1055 .features[FEAT_6_EAX] =
1056 CPUID_6_EAX_ARAT,
1057 .xlevel = 0x80000008,
1058 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1059 },
1060 {
1061 .name = "Haswell-noTSX",
1062 .level = 0xd,
1063 .vendor = CPUID_VENDOR_INTEL,
1064 .family = 6,
1065 .model = 60,
1066 .stepping = 1,
1067 .features[FEAT_1_EDX] =
1068 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1069 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1070 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1071 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1072 CPUID_DE | CPUID_FP87,
1073 .features[FEAT_1_ECX] =
1074 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1075 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1076 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1077 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1078 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1079 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1080 .features[FEAT_8000_0001_EDX] =
1081 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1082 CPUID_EXT2_SYSCALL,
1083 .features[FEAT_8000_0001_ECX] =
1084 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1085 .features[FEAT_7_0_EBX] =
1086 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1087 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1088 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1089 .features[FEAT_XSAVE] =
1090 CPUID_XSAVE_XSAVEOPT,
1091 .features[FEAT_6_EAX] =
1092 CPUID_6_EAX_ARAT,
1093 .xlevel = 0x80000008,
1094 .model_id = "Intel Core Processor (Haswell, no TSX)",
1095 }, {
1096 .name = "Haswell",
1097 .level = 0xd,
1098 .vendor = CPUID_VENDOR_INTEL,
1099 .family = 6,
1100 .model = 60,
1101 .stepping = 1,
1102 .features[FEAT_1_EDX] =
1103 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1104 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1105 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1106 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1107 CPUID_DE | CPUID_FP87,
1108 .features[FEAT_1_ECX] =
1109 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1110 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1111 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1112 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1113 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1114 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1115 .features[FEAT_8000_0001_EDX] =
1116 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1117 CPUID_EXT2_SYSCALL,
1118 .features[FEAT_8000_0001_ECX] =
1119 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1120 .features[FEAT_7_0_EBX] =
1121 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1122 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1123 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1124 CPUID_7_0_EBX_RTM,
1125 .features[FEAT_XSAVE] =
1126 CPUID_XSAVE_XSAVEOPT,
1127 .features[FEAT_6_EAX] =
1128 CPUID_6_EAX_ARAT,
1129 .xlevel = 0x80000008,
1130 .model_id = "Intel Core Processor (Haswell)",
1131 },
1132 {
1133 .name = "Broadwell-noTSX",
1134 .level = 0xd,
1135 .vendor = CPUID_VENDOR_INTEL,
1136 .family = 6,
1137 .model = 61,
1138 .stepping = 2,
1139 .features[FEAT_1_EDX] =
1140 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1141 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1142 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1143 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1144 CPUID_DE | CPUID_FP87,
1145 .features[FEAT_1_ECX] =
1146 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1147 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1148 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1149 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1150 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1151 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1152 .features[FEAT_8000_0001_EDX] =
1153 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1154 CPUID_EXT2_SYSCALL,
1155 .features[FEAT_8000_0001_ECX] =
1156 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1157 .features[FEAT_7_0_EBX] =
1158 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1159 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1160 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1161 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1162 CPUID_7_0_EBX_SMAP,
1163 .features[FEAT_XSAVE] =
1164 CPUID_XSAVE_XSAVEOPT,
1165 .features[FEAT_6_EAX] =
1166 CPUID_6_EAX_ARAT,
1167 .xlevel = 0x80000008,
1168 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1169 },
1170 {
1171 .name = "Broadwell",
1172 .level = 0xd,
1173 .vendor = CPUID_VENDOR_INTEL,
1174 .family = 6,
1175 .model = 61,
1176 .stepping = 2,
1177 .features[FEAT_1_EDX] =
1178 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1179 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1180 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1181 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1182 CPUID_DE | CPUID_FP87,
1183 .features[FEAT_1_ECX] =
1184 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1185 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1186 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1187 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1188 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1189 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1190 .features[FEAT_8000_0001_EDX] =
1191 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1192 CPUID_EXT2_SYSCALL,
1193 .features[FEAT_8000_0001_ECX] =
1194 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1195 .features[FEAT_7_0_EBX] =
1196 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1197 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1198 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1199 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1200 CPUID_7_0_EBX_SMAP,
1201 .features[FEAT_XSAVE] =
1202 CPUID_XSAVE_XSAVEOPT,
1203 .features[FEAT_6_EAX] =
1204 CPUID_6_EAX_ARAT,
1205 .xlevel = 0x80000008,
1206 .model_id = "Intel Core Processor (Broadwell)",
1207 },
1208 {
1209 .name = "Opteron_G1",
1210 .level = 5,
1211 .vendor = CPUID_VENDOR_AMD,
1212 .family = 15,
1213 .model = 6,
1214 .stepping = 1,
1215 .features[FEAT_1_EDX] =
1216 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1217 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1218 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1219 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1220 CPUID_DE | CPUID_FP87,
1221 .features[FEAT_1_ECX] =
1222 CPUID_EXT_SSE3,
1223 .features[FEAT_8000_0001_EDX] =
1224 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1225 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1226 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1227 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1228 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1229 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1230 .xlevel = 0x80000008,
1231 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1232 },
1233 {
1234 .name = "Opteron_G2",
1235 .level = 5,
1236 .vendor = CPUID_VENDOR_AMD,
1237 .family = 15,
1238 .model = 6,
1239 .stepping = 1,
1240 .features[FEAT_1_EDX] =
1241 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1242 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1243 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1244 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1245 CPUID_DE | CPUID_FP87,
1246 .features[FEAT_1_ECX] =
1247 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1248 .features[FEAT_8000_0001_EDX] =
1249 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1250 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1251 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1252 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1253 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1254 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1255 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1256 .features[FEAT_8000_0001_ECX] =
1257 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1258 .xlevel = 0x80000008,
1259 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1260 },
1261 {
1262 .name = "Opteron_G3",
1263 .level = 5,
1264 .vendor = CPUID_VENDOR_AMD,
1265 .family = 15,
1266 .model = 6,
1267 .stepping = 1,
1268 .features[FEAT_1_EDX] =
1269 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1270 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1271 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1272 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1273 CPUID_DE | CPUID_FP87,
1274 .features[FEAT_1_ECX] =
1275 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1276 CPUID_EXT_SSE3,
1277 .features[FEAT_8000_0001_EDX] =
1278 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1279 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1280 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1281 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1282 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1283 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1284 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1285 .features[FEAT_8000_0001_ECX] =
1286 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1287 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1288 .xlevel = 0x80000008,
1289 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1290 },
1291 {
1292 .name = "Opteron_G4",
1293 .level = 0xd,
1294 .vendor = CPUID_VENDOR_AMD,
1295 .family = 21,
1296 .model = 1,
1297 .stepping = 2,
1298 .features[FEAT_1_EDX] =
1299 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1300 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1301 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1302 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1303 CPUID_DE | CPUID_FP87,
1304 .features[FEAT_1_ECX] =
1305 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1306 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1307 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1308 CPUID_EXT_SSE3,
1309 .features[FEAT_8000_0001_EDX] =
1310 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1311 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1312 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1313 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1314 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1315 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1316 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1317 .features[FEAT_8000_0001_ECX] =
1318 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1319 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1320 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1321 CPUID_EXT3_LAHF_LM,
1322 /* no xsaveopt! */
1323 .xlevel = 0x8000001A,
1324 .model_id = "AMD Opteron 62xx class CPU",
1325 },
1326 {
1327 .name = "Opteron_G5",
1328 .level = 0xd,
1329 .vendor = CPUID_VENDOR_AMD,
1330 .family = 21,
1331 .model = 2,
1332 .stepping = 0,
1333 .features[FEAT_1_EDX] =
1334 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1335 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1336 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1337 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1338 CPUID_DE | CPUID_FP87,
1339 .features[FEAT_1_ECX] =
1340 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1341 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1342 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1343 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1344 .features[FEAT_8000_0001_EDX] =
1345 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1346 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1347 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1348 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1349 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1350 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1351 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1352 .features[FEAT_8000_0001_ECX] =
1353 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1354 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1355 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1356 CPUID_EXT3_LAHF_LM,
1357 /* no xsaveopt! */
1358 .xlevel = 0x8000001A,
1359 .model_id = "AMD Opteron 63xx class CPU",
1360 },
1361 };
1362
1363 typedef struct PropValue {
1364 const char *prop, *value;
1365 } PropValue;
1366
1367 /* KVM-specific features that are automatically added/removed
1368 * from all CPU models when KVM is enabled.
1369 */
1370 static PropValue kvm_default_props[] = {
1371 { "kvmclock", "on" },
1372 { "kvm-nopiodelay", "on" },
1373 { "kvm-asyncpf", "on" },
1374 { "kvm-steal-time", "on" },
1375 { "kvm-pv-eoi", "on" },
1376 { "kvmclock-stable-bit", "on" },
1377 { "x2apic", "on" },
1378 { "acpi", "off" },
1379 { "monitor", "off" },
1380 { "svm", "off" },
1381 { NULL, NULL },
1382 };
1383
1384 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1385 {
1386 PropValue *pv;
1387 for (pv = kvm_default_props; pv->prop; pv++) {
1388 if (!strcmp(pv->prop, prop)) {
1389 pv->value = value;
1390 break;
1391 }
1392 }
1393
1394 /* It is valid to call this function only for properties that
1395 * are already present in the kvm_default_props table.
1396 */
1397 assert(pv->prop);
1398 }
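/* Illustrative (hypothetical) caller: a machine type that wants MONITOR/MWAIT
 * left enabled under KVM could call
 *
 *   x86_cpu_change_kvm_default("monitor", "on");
 *
 * before CPUs are created, flipping the "monitor" entry in kvm_default_props
 * above; a property name not present in that table trips the assert().
 */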
1399
1400 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1401 bool migratable_only);
1402
1403 #ifdef CONFIG_KVM
1404
1405 static int cpu_x86_fill_model_id(char *str)
1406 {
1407 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1408 int i;
1409
1410 for (i = 0; i < 3; i++) {
1411 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1412 memcpy(str + i * 16 + 0, &eax, 4);
1413 memcpy(str + i * 16 + 4, &ebx, 4);
1414 memcpy(str + i * 16 + 8, &ecx, 4);
1415 memcpy(str + i * 16 + 12, &edx, 4);
1416 }
1417 return 0;
1418 }
1419
1420 static X86CPUDefinition host_cpudef;
1421
1422 static Property host_x86_cpu_properties[] = {
1423 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1424 DEFINE_PROP_END_OF_LIST()
1425 };
1426
1427 /* class_init for the "host" CPU model
1428 *
1429 * This function may be called before KVM is initialized.
1430 */
1431 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1432 {
1433 DeviceClass *dc = DEVICE_CLASS(oc);
1434 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1435 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1436
1437 xcc->kvm_required = true;
1438
1439 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1440 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1441
1442 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1443 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1444 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1445 host_cpudef.stepping = eax & 0x0F;
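/* Example decode: EAX = 0x000306a9 (an Ivy Bridge signature) yields
 * family 6 + 0 = 6, model 0xa | 0x30 = 0x3a = 58 and stepping 9,
 * matching the "IvyBridge" entry in builtin_x86_defs above.
 */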
1446
1447 cpu_x86_fill_model_id(host_cpudef.model_id);
1448
1449 xcc->cpu_def = &host_cpudef;
1450 host_cpudef.cache_info_passthrough = true;
1451
1452 /* level, xlevel, xlevel2, and the feature words are initialized on
1453 * instance_init, because they require KVM to be initialized.
1454 */
1455
1456 dc->props = host_x86_cpu_properties;
1457 }
1458
1459 static void host_x86_cpu_initfn(Object *obj)
1460 {
1461 X86CPU *cpu = X86_CPU(obj);
1462 CPUX86State *env = &cpu->env;
1463 KVMState *s = kvm_state;
1464
1465 assert(kvm_enabled());
1466
1467 /* We can't fill the features array here because we don't know yet if
1468 * "migratable" is true or false.
1469 */
1470 cpu->host_features = true;
1471
1472 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1473 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1474 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1475
1476 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1477 }
1478
1479 static const TypeInfo host_x86_cpu_type_info = {
1480 .name = X86_CPU_TYPE_NAME("host"),
1481 .parent = TYPE_X86_CPU,
1482 .instance_init = host_x86_cpu_initfn,
1483 .class_init = host_x86_cpu_class_init,
1484 };
1485
1486 #endif
1487
1488 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1489 {
1490 FeatureWordInfo *f = &feature_word_info[w];
1491 int i;
1492
1493 for (i = 0; i < 32; ++i) {
1494 if (1 << i & mask) {
1495 const char *reg = get_register_name_32(f->cpuid_reg);
1496 assert(reg);
1497 fprintf(stderr, "warning: %s doesn't support requested feature: "
1498 "CPUID.%02XH:%s%s%s [bit %d]\n",
1499 kvm_enabled() ? "host" : "TCG",
1500 f->cpuid_eax, reg,
1501 f->feat_names[i] ? "." : "",
1502 f->feat_names[i] ? f->feat_names[i] : "", i);
1503 }
1504 }
1505 }
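/* Example output, derived from the format string above: requesting VMX on a
 * KVM host that lacks it would print something like
 *   warning: host doesn't support requested feature: CPUID.01H:ECX.vmx [bit 5]
 */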
1506
1507 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1508 const char *name, Error **errp)
1509 {
1510 X86CPU *cpu = X86_CPU(obj);
1511 CPUX86State *env = &cpu->env;
1512 int64_t value;
1513
1514 value = (env->cpuid_version >> 8) & 0xf;
1515 if (value == 0xf) {
1516 value += (env->cpuid_version >> 20) & 0xff;
1517 }
1518 visit_type_int(v, &value, name, errp);
1519 }
1520
1521 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1522 const char *name, Error **errp)
1523 {
1524 X86CPU *cpu = X86_CPU(obj);
1525 CPUX86State *env = &cpu->env;
1526 const int64_t min = 0;
1527 const int64_t max = 0xff + 0xf;
1528 Error *local_err = NULL;
1529 int64_t value;
1530
1531 visit_type_int(v, &value, name, &local_err);
1532 if (local_err) {
1533 error_propagate(errp, local_err);
1534 return;
1535 }
1536 if (value < min || value > max) {
1537 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1538 name ? name : "null", value, min, max);
1539 return;
1540 }
1541
1542 env->cpuid_version &= ~0xff00f00;
1543 if (value > 0x0f) {
1544 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1545 } else {
1546 env->cpuid_version |= value << 8;
1547 }
1548 }
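/* Worked example: the Opteron_G4/G5 definitions use family 21 (0x15). Since
 * 21 > 0x0f, the setter stores 0xf in bits 8-11 and 21 - 15 = 6 in the
 * extended-family field (bits 20-27); the getter above reverses this and
 * reports 21 again.
 */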
1549
1550 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1551 const char *name, Error **errp)
1552 {
1553 X86CPU *cpu = X86_CPU(obj);
1554 CPUX86State *env = &cpu->env;
1555 int64_t value;
1556
1557 value = (env->cpuid_version >> 4) & 0xf;
1558 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1559 visit_type_int(v, &value, name, errp);
1560 }
1561
1562 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1563 const char *name, Error **errp)
1564 {
1565 X86CPU *cpu = X86_CPU(obj);
1566 CPUX86State *env = &cpu->env;
1567 const int64_t min = 0;
1568 const int64_t max = 0xff;
1569 Error *local_err = NULL;
1570 int64_t value;
1571
1572 visit_type_int(v, &value, name, &local_err);
1573 if (local_err) {
1574 error_propagate(errp, local_err);
1575 return;
1576 }
1577 if (value < min || value > max) {
1578 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1579 name ? name : "null", value, min, max);
1580 return;
1581 }
1582
1583 env->cpuid_version &= ~0xf00f0;
1584 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1585 }
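/* Worked example: model 58 (0x3a, as in the IvyBridge definition) is stored
 * as 0xa in bits 4-7 and 0x3 in the extended-model field (bits 16-19); the
 * getter above reassembles the two nibbles into 58.
 */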
1586
1587 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1588 void *opaque, const char *name,
1589 Error **errp)
1590 {
1591 X86CPU *cpu = X86_CPU(obj);
1592 CPUX86State *env = &cpu->env;
1593 int64_t value;
1594
1595 value = env->cpuid_version & 0xf;
1596 visit_type_int(v, &value, name, errp);
1597 }
1598
1599 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1600 void *opaque, const char *name,
1601 Error **errp)
1602 {
1603 X86CPU *cpu = X86_CPU(obj);
1604 CPUX86State *env = &cpu->env;
1605 const int64_t min = 0;
1606 const int64_t max = 0xf;
1607 Error *local_err = NULL;
1608 int64_t value;
1609
1610 visit_type_int(v, &value, name, &local_err);
1611 if (local_err) {
1612 error_propagate(errp, local_err);
1613 return;
1614 }
1615 if (value < min || value > max) {
1616 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1617 name ? name : "null", value, min, max);
1618 return;
1619 }
1620
1621 env->cpuid_version &= ~0xf;
1622 env->cpuid_version |= value & 0xf;
1623 }
1624
1625 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1626 {
1627 X86CPU *cpu = X86_CPU(obj);
1628 CPUX86State *env = &cpu->env;
1629 char *value;
1630
1631 value = g_malloc(CPUID_VENDOR_SZ + 1);
1632 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1633 env->cpuid_vendor3);
1634 return value;
1635 }
1636
1637 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1638 Error **errp)
1639 {
1640 X86CPU *cpu = X86_CPU(obj);
1641 CPUX86State *env = &cpu->env;
1642 int i;
1643
1644 if (strlen(value) != CPUID_VENDOR_SZ) {
1645 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1646 return;
1647 }
1648
1649 env->cpuid_vendor1 = 0;
1650 env->cpuid_vendor2 = 0;
1651 env->cpuid_vendor3 = 0;
1652 for (i = 0; i < 4; i++) {
1653 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1654 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1655 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1656 }
1657 }
1658
1659 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1660 {
1661 X86CPU *cpu = X86_CPU(obj);
1662 CPUX86State *env = &cpu->env;
1663 char *value;
1664 int i;
1665
1666 value = g_malloc(48 + 1);
1667 for (i = 0; i < 48; i++) {
1668 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1669 }
1670 value[48] = '\0';
1671 return value;
1672 }
1673
1674 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1675 Error **errp)
1676 {
1677 X86CPU *cpu = X86_CPU(obj);
1678 CPUX86State *env = &cpu->env;
1679 int c, len, i;
1680
1681 if (model_id == NULL) {
1682 model_id = "";
1683 }
1684 len = strlen(model_id);
1685 memset(env->cpuid_model, 0, 48);
1686 for (i = 0; i < 48; i++) {
1687 if (i >= len) {
1688 c = '\0';
1689 } else {
1690 c = (uint8_t)model_id[i];
1691 }
1692 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1693 }
1694 }
1695
1696 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1697 const char *name, Error **errp)
1698 {
1699 X86CPU *cpu = X86_CPU(obj);
1700 int64_t value;
1701
1702 value = cpu->env.tsc_khz * 1000;
1703 visit_type_int(v, &value, name, errp);
1704 }
1705
1706 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1707 const char *name, Error **errp)
1708 {
1709 X86CPU *cpu = X86_CPU(obj);
1710 const int64_t min = 0;
1711 const int64_t max = INT64_MAX;
1712 Error *local_err = NULL;
1713 int64_t value;
1714
1715 visit_type_int(v, &value, name, &local_err);
1716 if (local_err) {
1717 error_propagate(errp, local_err);
1718 return;
1719 }
1720 if (value < min || value > max) {
1721 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1722 name ? name : "null", value, min, max);
1723 return;
1724 }
1725
1726 cpu->env.tsc_khz = value / 1000;
1727 }
1728
1729 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1730 const char *name, Error **errp)
1731 {
1732 X86CPU *cpu = X86_CPU(obj);
1733 int64_t value = cpu->apic_id;
1734
1735 visit_type_int(v, &value, name, errp);
1736 }
1737
1738 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1739 const char *name, Error **errp)
1740 {
1741 X86CPU *cpu = X86_CPU(obj);
1742 DeviceState *dev = DEVICE(obj);
1743 const int64_t min = 0;
1744 const int64_t max = UINT32_MAX;
1745 Error *error = NULL;
1746 int64_t value;
1747
1748 if (dev->realized) {
1749 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1750 "it was realized", name, object_get_typename(obj));
1751 return;
1752 }
1753
1754 visit_type_int(v, &value, name, &error);
1755 if (error) {
1756 error_propagate(errp, error);
1757 return;
1758 }
1759 if (value < min || value > max) {
1760 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1761 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1762 object_get_typename(obj), name, value, min, max);
1763 return;
1764 }
1765
1766 if ((value != cpu->apic_id) && cpu_exists(value)) {
1767 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1768 return;
1769 }
1770 cpu->apic_id = value;
1771 }
1772
1773 /* Generic getter for "feature-words" and "filtered-features" properties */
1774 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1775 const char *name, Error **errp)
1776 {
1777 uint32_t *array = (uint32_t *)opaque;
1778 FeatureWord w;
1779 Error *err = NULL;
1780 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1781 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1782 X86CPUFeatureWordInfoList *list = NULL;
1783
1784 for (w = 0; w < FEATURE_WORDS; w++) {
1785 FeatureWordInfo *wi = &feature_word_info[w];
1786 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1787 qwi->cpuid_input_eax = wi->cpuid_eax;
1788 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1789 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1790 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1791 qwi->features = array[w];
1792
1793 /* List will be in reverse order, but order shouldn't matter */
1794 list_entries[w].next = list;
1795 list_entries[w].value = &word_infos[w];
1796 list = &list_entries[w];
1797 }
1798
1799 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1800 error_propagate(errp, err);
1801 }
1802
1803 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1804 const char *name, Error **errp)
1805 {
1806 X86CPU *cpu = X86_CPU(obj);
1807 int64_t value = cpu->hyperv_spinlock_attempts;
1808
1809 visit_type_int(v, &value, name, errp);
1810 }
1811
1812 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1813 const char *name, Error **errp)
1814 {
1815 const int64_t min = 0xFFF;
1816 const int64_t max = UINT_MAX;
1817 X86CPU *cpu = X86_CPU(obj);
1818 Error *err = NULL;
1819 int64_t value;
1820
1821 visit_type_int(v, &value, name, &err);
1822 if (err) {
1823 error_propagate(errp, err);
1824 return;
1825 }
1826
1827 if (value < min || value > max) {
1828 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1829 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1830 object_get_typename(obj), name ? name : "null",
1831 value, min, max);
1832 return;
1833 }
1834 cpu->hyperv_spinlock_attempts = value;
1835 }
1836
1837 static PropertyInfo qdev_prop_spinlocks = {
1838 .name = "int",
1839 .get = x86_get_hv_spinlocks,
1840 .set = x86_set_hv_spinlocks,
1841 };
1842
1843 /* Convert all '_' in a feature string option name to '-', so that feature
1844 * names conform to the QOM property naming rule, which uses '-' instead of '_'.
1845 */
1846 static inline void feat2prop(char *s)
1847 {
1848 while ((s = strchr(s, '_'))) {
1849 *s = '-';
1850 }
1851 }
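/*
 * Illustrative example, not part of upstream cpu.c: feat2prop() rewrites the
 * option name in place, so the legacy spelling "lahf_lm" becomes the
 * QOM-style "lahf-lm".  Hypothetical helper, never called.
 */
static __attribute__((unused)) void feat2prop_example(void)
{
    char name[] = "lahf_lm";

    feat2prop(name);
    g_assert(strcmp(name, "lahf-lm") == 0);
}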
1852
1853 /* Parse "+feature,-feature,feature=foo" CPU feature string
1854 */
1855 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1856 Error **errp)
1857 {
1858 X86CPU *cpu = X86_CPU(cs);
1859 char *featurestr; /* Single "key=value" string being parsed */
1860 FeatureWord w;
1861 /* Features to be added */
1862 FeatureWordArray plus_features = { 0 };
1863 /* Features to be removed */
1864 FeatureWordArray minus_features = { 0 };
1865 uint32_t numvalue;
1866 CPUX86State *env = &cpu->env;
1867 Error *local_err = NULL;
1868
1869 featurestr = features ? strtok(features, ",") : NULL;
1870
1871 while (featurestr) {
1872 char *val;
1873 if (featurestr[0] == '+') {
1874 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1875 } else if (featurestr[0] == '-') {
1876 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1877 } else if ((val = strchr(featurestr, '='))) {
1878 *val = 0; val++;
1879 feat2prop(featurestr);
1880 if (!strcmp(featurestr, "xlevel")) {
1881 char *err;
1882 char num[32];
1883
1884 numvalue = strtoul(val, &err, 0);
1885 if (!*val || *err) {
1886 error_setg(errp, "bad numerical value %s", val);
1887 return;
1888 }
1889 if (numvalue < 0x80000000) {
1890 error_report("xlevel value shall always be >= 0x80000000"
1891 ", fixup will be removed in future versions");
1892 numvalue += 0x80000000;
1893 }
1894 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1895 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1896 } else if (!strcmp(featurestr, "tsc-freq")) {
1897 int64_t tsc_freq;
1898 char *err;
1899 char num[32];
1900
1901 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1902 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1903 if (tsc_freq < 0 || *err) {
1904 error_setg(errp, "bad numerical value %s", val);
1905 return;
1906 }
1907 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1908 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1909 &local_err);
1910 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1911 char *err;
1912 const int min = 0xFFF;
1913 char num[32];
1914 numvalue = strtoul(val, &err, 0);
1915 if (!*val || *err) {
1916 error_setg(errp, "bad numerical value %s", val);
1917 return;
1918 }
1919 if (numvalue < min) {
1920 error_report("hv-spinlocks value shall always be >= 0x%x"
1921 ", fixup will be removed in future versions",
1922 min);
1923 numvalue = min;
1924 }
1925 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1926 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1927 } else {
1928 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1929 }
1930 } else {
1931 feat2prop(featurestr);
1932 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1933 }
1934 if (local_err) {
1935 error_propagate(errp, local_err);
1936 return;
1937 }
1938 featurestr = strtok(NULL, ",");
1939 }
1940
1941 if (cpu->host_features) {
1942 for (w = 0; w < FEATURE_WORDS; w++) {
1943 env->features[w] =
1944 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1945 }
1946 }
1947
1948 for (w = 0; w < FEATURE_WORDS; w++) {
1949 env->features[w] |= plus_features[w];
1950 env->features[w] &= ~minus_features[w];
1951 }
1952 }
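/*
 * Illustrative sketch, not part of upstream cpu.c: a typical "-cpu" feature
 * string mixes '+'/'-' flags with key=value properties.  '+avx' and '-sse3'
 * only update the plus/minus bitmaps and are folded into env->features at
 * the end, while "xlevel=0x8000000A" is applied immediately as a QOM
 * property.  Hypothetical helper, never called.
 */
static __attribute__((unused)) void featurestr_example(X86CPU *cpu)
{
    /* The parser tokenizes with strtok(), so the string must be writable. */
    char features[] = "+avx,-sse3,xlevel=0x8000000A";

    x86_cpu_parse_featurestr(CPU(cpu), features, &error_abort);
}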
1953
1954 /* Print all cpuid feature names in featureset
1955 */
1956 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1957 {
1958 int bit;
1959 bool first = true;
1960
1961 for (bit = 0; bit < 32; bit++) {
1962 if (featureset[bit]) {
1963 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1964 first = false;
1965 }
1966 }
1967 }
1968
1969 /* generate CPU information. */
1970 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1971 {
1972 X86CPUDefinition *def;
1973 char buf[256];
1974 int i;
1975
1976 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1977 def = &builtin_x86_defs[i];
1978 snprintf(buf, sizeof(buf), "%s", def->name);
1979 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1980 }
1981 #ifdef CONFIG_KVM
1982 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1983 "KVM processor with all supported host features "
1984 "(only available in KVM mode)");
1985 #endif
1986
1987 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1988 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1989 FeatureWordInfo *fw = &feature_word_info[i];
1990
1991 (*cpu_fprintf)(f, " ");
1992 listflags(f, cpu_fprintf, fw->feat_names);
1993 (*cpu_fprintf)(f, "\n");
1994 }
1995 }
1996
1997 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1998 {
1999 CpuDefinitionInfoList *cpu_list = NULL;
2000 X86CPUDefinition *def;
2001 int i;
2002
2003 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2004 CpuDefinitionInfoList *entry;
2005 CpuDefinitionInfo *info;
2006
2007 def = &builtin_x86_defs[i];
2008 info = g_malloc0(sizeof(*info));
2009 info->name = g_strdup(def->name);
2010
2011 entry = g_malloc0(sizeof(*entry));
2012 entry->value = info;
2013 entry->next = cpu_list;
2014 cpu_list = entry;
2015 }
2016
2017 return cpu_list;
2018 }
2019
2020 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2021 bool migratable_only)
2022 {
2023 FeatureWordInfo *wi = &feature_word_info[w];
2024 uint32_t r;
2025
2026 if (kvm_enabled()) {
2027 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2028 wi->cpuid_ecx,
2029 wi->cpuid_reg);
2030 } else if (tcg_enabled()) {
2031 r = wi->tcg_features;
2032 } else {
2033 return ~0;
2034 }
2035 if (migratable_only) {
2036 r &= x86_cpu_get_migratable_flags(w);
2037 }
2038 return r;
2039 }
2040
2041 /*
2042 * Filters CPU feature words based on host availability of each feature.
2043 *
2044 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2045 */
2046 static int x86_cpu_filter_features(X86CPU *cpu)
2047 {
2048 CPUX86State *env = &cpu->env;
2049 FeatureWord w;
2050 int rv = 0;
2051
2052 for (w = 0; w < FEATURE_WORDS; w++) {
2053 uint32_t host_feat =
2054 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2055 uint32_t requested_features = env->features[w];
2056 env->features[w] &= host_feat;
2057 cpu->filtered_features[w] = requested_features & ~env->features[w];
2058 if (cpu->filtered_features[w]) {
2059 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2060 report_unavailable_features(w, cpu->filtered_features[w]);
2061 }
2062 rv = 1;
2063 }
2064 }
2065
2066 return rv;
2067 }
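/*
 * Worked example, not part of upstream cpu.c: the filtering above is plain
 * bit arithmetic.  If the guest requested bits 0, 1 and 3 of some feature
 * word but the host only offers bits 0 and 1, the word keeps 0x3 and
 * filtered_features records the dropped bit 0x8, which is what
 * report_unavailable_features() prints when "check"/"enforce" is set.
 */
static __attribute__((unused)) void filter_features_example(void)
{
    uint32_t requested = 0x0000000b;         /* bits 0, 1, 3 requested */
    uint32_t host      = 0x00000003;         /* host supports bits 0, 1 */
    uint32_t kept      = requested & host;
    uint32_t filtered  = requested & ~kept;

    g_assert(kept == 0x00000003);
    g_assert(filtered == 0x00000008);        /* bit 3 was filtered out */
}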
2068
2069 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2070 {
2071 PropValue *pv;
2072 for (pv = props; pv->prop; pv++) {
2073 if (!pv->value) {
2074 continue;
2075 }
2076 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2077 &error_abort);
2078 }
2079 }
2080
2081 /* Load data from X86CPUDefinition
2082 */
2083 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2084 {
2085 CPUX86State *env = &cpu->env;
2086 const char *vendor;
2087 char host_vendor[CPUID_VENDOR_SZ + 1];
2088 FeatureWord w;
2089
2090 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2091 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2092 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2093 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2094 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2095 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2096 cpu->cache_info_passthrough = def->cache_info_passthrough;
2097 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2098 for (w = 0; w < FEATURE_WORDS; w++) {
2099 env->features[w] = def->features[w];
2100 }
2101
2102 /* Special cases not set in the X86CPUDefinition structs: */
2103 if (kvm_enabled()) {
2104 x86_cpu_apply_props(cpu, kvm_default_props);
2105 }
2106
2107 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2108
2109 /* sysenter isn't supported in compatibility mode on AMD,
2110 * syscall isn't supported in compatibility mode on Intel.
2111 * Normally we advertise the actual CPU vendor, but you can
2112 * override this using the 'vendor' property if you want to use
2113 * KVM's sysenter/syscall emulation in compatibility mode and
2114 * when doing cross-vendor migration.
2115 */
2116 vendor = def->vendor;
2117 if (kvm_enabled()) {
2118 uint32_t ebx = 0, ecx = 0, edx = 0;
2119 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2120 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2121 vendor = host_vendor;
2122 }
2123
2124 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2125
2126 }
2127
2128 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2129 {
2130 X86CPU *cpu = NULL;
2131 X86CPUClass *xcc;
2132 ObjectClass *oc;
2133 gchar **model_pieces;
2134 char *name, *features;
2135 Error *error = NULL;
2136
2137 model_pieces = g_strsplit(cpu_model, ",", 2);
2138 if (!model_pieces[0]) {
2139 error_setg(&error, "Invalid/empty CPU model name");
2140 goto out;
2141 }
2142 name = model_pieces[0];
2143 features = model_pieces[1];
2144
2145 oc = x86_cpu_class_by_name(name);
2146 if (oc == NULL) {
2147 error_setg(&error, "Unable to find CPU definition: %s", name);
2148 goto out;
2149 }
2150 xcc = X86_CPU_CLASS(oc);
2151
2152 if (xcc->kvm_required && !kvm_enabled()) {
2153 error_setg(&error, "CPU model '%s' requires KVM", name);
2154 goto out;
2155 }
2156
2157 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2158
2159 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2160 if (error) {
2161 goto out;
2162 }
2163
2164 out:
2165 if (error != NULL) {
2166 error_propagate(errp, error);
2167 if (cpu) {
2168 object_unref(OBJECT(cpu));
2169 cpu = NULL;
2170 }
2171 }
2172 g_strfreev(model_pieces);
2173 return cpu;
2174 }
2175
2176 X86CPU *cpu_x86_init(const char *cpu_model)
2177 {
2178 Error *error = NULL;
2179 X86CPU *cpu;
2180
2181 cpu = cpu_x86_create(cpu_model, &error);
2182 if (error) {
2183 goto out;
2184 }
2185
2186 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2187
2188 out:
2189 if (error) {
2190 error_report_err(error);
2191 if (cpu != NULL) {
2192 object_unref(OBJECT(cpu));
2193 cpu = NULL;
2194 }
2195 }
2196 return cpu;
2197 }
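/*
 * Illustrative usage sketch, not part of upstream cpu.c: board code can
 * create and realize a CPU in one step by passing the model name plus an
 * optional feature string, exactly as given to the "-cpu" option.  A NULL
 * return means creation or realization failed and the error was already
 * reported.  Hypothetical helper, never called.
 */
static __attribute__((unused)) X86CPU *cpu_x86_init_example(void)
{
    return cpu_x86_init("qemu64,+ssse3");
}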
2198
2199 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2200 {
2201 X86CPUDefinition *cpudef = data;
2202 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2203
2204 xcc->cpu_def = cpudef;
2205 }
2206
2207 static void x86_register_cpudef_type(X86CPUDefinition *def)
2208 {
2209 char *typename = x86_cpu_type_name(def->name);
2210 TypeInfo ti = {
2211 .name = typename,
2212 .parent = TYPE_X86_CPU,
2213 .class_init = x86_cpu_cpudef_class_init,
2214 .class_data = def,
2215 };
2216
2217 type_register(&ti);
2218 g_free(typename);
2219 }
2220
2221 #if !defined(CONFIG_USER_ONLY)
2222
2223 void cpu_clear_apic_feature(CPUX86State *env)
2224 {
2225 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2226 }
2227
2228 #endif /* !CONFIG_USER_ONLY */
2229
2230 /* Initialize list of CPU models, filling some non-static fields if necessary
2231 */
2232 void x86_cpudef_setup(void)
2233 {
2234 int i, j;
2235 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2236
2237 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2238 X86CPUDefinition *def = &builtin_x86_defs[i];
2239
2240 /* Look for specific "cpudef" models that have
2241 * the QEMU version in .model_id */
2242 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2243 if (strcmp(model_with_versions[j], def->name) == 0) {
2244 pstrcpy(def->model_id, sizeof(def->model_id),
2245 "QEMU Virtual CPU version ");
2246 pstrcat(def->model_id, sizeof(def->model_id),
2247 qemu_get_version());
2248 break;
2249 }
2250 }
2251 }
2252 }
2253
2254 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2255 uint32_t *eax, uint32_t *ebx,
2256 uint32_t *ecx, uint32_t *edx)
2257 {
2258 X86CPU *cpu = x86_env_get_cpu(env);
2259 CPUState *cs = CPU(cpu);
2260
2261 /* test if maximum index reached */
2262 if (index & 0x80000000) {
2263 if (index > env->cpuid_xlevel) {
2264 if (env->cpuid_xlevel2 > 0) {
2265 /* Handle the Centaur's CPUID instruction. */
2266 if (index > env->cpuid_xlevel2) {
2267 index = env->cpuid_xlevel2;
2268 } else if (index < 0xC0000000) {
2269 index = env->cpuid_xlevel;
2270 }
2271 } else {
2272 /* Intel documentation states that invalid EAX input will
2273 * return the same information as EAX=cpuid_level
2274 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2275 */
2276 index = env->cpuid_level;
2277 }
2278 }
2279 } else {
2280 if (index > env->cpuid_level)
2281 index = env->cpuid_level;
2282 }
2283
2284 switch(index) {
2285 case 0:
2286 *eax = env->cpuid_level;
2287 *ebx = env->cpuid_vendor1;
2288 *edx = env->cpuid_vendor2;
2289 *ecx = env->cpuid_vendor3;
2290 break;
2291 case 1:
2292 *eax = env->cpuid_version;
2293 *ebx = (cpu->apic_id << 24) |
2294 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2295 *ecx = env->features[FEAT_1_ECX];
2296 *edx = env->features[FEAT_1_EDX];
2297 if (cs->nr_cores * cs->nr_threads > 1) {
2298 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2299 *edx |= 1 << 28; /* HTT bit */
2300 }
2301 break;
2302 case 2:
2303 /* cache info: needed for Pentium Pro compatibility */
2304 if (cpu->cache_info_passthrough) {
2305 host_cpuid(index, 0, eax, ebx, ecx, edx);
2306 break;
2307 }
2308 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2309 *ebx = 0;
2310 *ecx = 0;
2311 *edx = (L1D_DESCRIPTOR << 16) | \
2312 (L1I_DESCRIPTOR << 8) | \
2313 (L2_DESCRIPTOR);
2314 break;
2315 case 4:
2316 /* cache info: needed for Core compatibility */
2317 if (cpu->cache_info_passthrough) {
2318 host_cpuid(index, count, eax, ebx, ecx, edx);
2319 *eax &= ~0xFC000000;
2320 } else {
2321 *eax = 0;
2322 switch (count) {
2323 case 0: /* L1 dcache info */
2324 *eax |= CPUID_4_TYPE_DCACHE | \
2325 CPUID_4_LEVEL(1) | \
2326 CPUID_4_SELF_INIT_LEVEL;
2327 *ebx = (L1D_LINE_SIZE - 1) | \
2328 ((L1D_PARTITIONS - 1) << 12) | \
2329 ((L1D_ASSOCIATIVITY - 1) << 22);
2330 *ecx = L1D_SETS - 1;
2331 *edx = CPUID_4_NO_INVD_SHARING;
2332 break;
2333 case 1: /* L1 icache info */
2334 *eax |= CPUID_4_TYPE_ICACHE | \
2335 CPUID_4_LEVEL(1) | \
2336 CPUID_4_SELF_INIT_LEVEL;
2337 *ebx = (L1I_LINE_SIZE - 1) | \
2338 ((L1I_PARTITIONS - 1) << 12) | \
2339 ((L1I_ASSOCIATIVITY - 1) << 22);
2340 *ecx = L1I_SETS - 1;
2341 *edx = CPUID_4_NO_INVD_SHARING;
2342 break;
2343 case 2: /* L2 cache info */
2344 *eax |= CPUID_4_TYPE_UNIFIED | \
2345 CPUID_4_LEVEL(2) | \
2346 CPUID_4_SELF_INIT_LEVEL;
2347 if (cs->nr_threads > 1) {
2348 *eax |= (cs->nr_threads - 1) << 14;
2349 }
2350 *ebx = (L2_LINE_SIZE - 1) | \
2351 ((L2_PARTITIONS - 1) << 12) | \
2352 ((L2_ASSOCIATIVITY - 1) << 22);
2353 *ecx = L2_SETS - 1;
2354 *edx = CPUID_4_NO_INVD_SHARING;
2355 break;
2356 default: /* end of info */
2357 *eax = 0;
2358 *ebx = 0;
2359 *ecx = 0;
2360 *edx = 0;
2361 break;
2362 }
2363 }
2364
2365 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2366 if ((*eax & 31) && cs->nr_cores > 1) {
2367 *eax |= (cs->nr_cores - 1) << 26;
2368 }
2369 break;
2370 case 5:
2371 /* mwait info: needed for Core compatibility */
2372 *eax = 0; /* Smallest monitor-line size in bytes */
2373 *ebx = 0; /* Largest monitor-line size in bytes */
2374 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2375 *edx = 0;
2376 break;
2377 case 6:
2378 /* Thermal and Power Leaf */
2379 *eax = env->features[FEAT_6_EAX];
2380 *ebx = 0;
2381 *ecx = 0;
2382 *edx = 0;
2383 break;
2384 case 7:
2385 /* Structured Extended Feature Flags Enumeration Leaf */
2386 if (count == 0) {
2387 *eax = 0; /* Maximum ECX value for sub-leaves */
2388 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2389 *ecx = 0; /* Reserved */
2390 *edx = 0; /* Reserved */
2391 } else {
2392 *eax = 0;
2393 *ebx = 0;
2394 *ecx = 0;
2395 *edx = 0;
2396 }
2397 break;
2398 case 9:
2399 /* Direct Cache Access Information Leaf */
2400 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2401 *ebx = 0;
2402 *ecx = 0;
2403 *edx = 0;
2404 break;
2405 case 0xA:
2406 /* Architectural Performance Monitoring Leaf */
2407 if (kvm_enabled() && cpu->enable_pmu) {
2408 KVMState *s = cs->kvm_state;
2409
2410 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2411 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2412 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2413 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2414 } else {
2415 *eax = 0;
2416 *ebx = 0;
2417 *ecx = 0;
2418 *edx = 0;
2419 }
2420 break;
2421 case 0xD: {
2422 KVMState *s = cs->kvm_state;
2423 uint64_t kvm_mask;
2424 int i;
2425
2426 /* Processor Extended State */
2427 *eax = 0;
2428 *ebx = 0;
2429 *ecx = 0;
2430 *edx = 0;
2431 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2432 break;
2433 }
2434 kvm_mask =
2435 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2436 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2437
2438 if (count == 0) {
2439 *ecx = 0x240;
2440 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2441 const ExtSaveArea *esa = &ext_save_areas[i];
2442 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2443 (kvm_mask & (1 << i)) != 0) {
2444 if (i < 32) {
2445 *eax |= 1 << i;
2446 } else {
2447 *edx |= 1 << (i - 32);
2448 }
2449 *ecx = MAX(*ecx, esa->offset + esa->size);
2450 }
2451 }
2452 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2453 *ebx = *ecx;
2454 } else if (count == 1) {
2455 *eax = env->features[FEAT_XSAVE];
2456 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2457 const ExtSaveArea *esa = &ext_save_areas[count];
2458 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2459 (kvm_mask & (1 << count)) != 0) {
2460 *eax = esa->size;
2461 *ebx = esa->offset;
2462 }
2463 }
2464 break;
2465 }
2466 case 0x80000000:
2467 *eax = env->cpuid_xlevel;
2468 *ebx = env->cpuid_vendor1;
2469 *edx = env->cpuid_vendor2;
2470 *ecx = env->cpuid_vendor3;
2471 break;
2472 case 0x80000001:
2473 *eax = env->cpuid_version;
2474 *ebx = 0;
2475 *ecx = env->features[FEAT_8000_0001_ECX];
2476 *edx = env->features[FEAT_8000_0001_EDX];
2477
2478 /* The Linux kernel checks for the CMPLegacy bit and
2479 * discards multiple thread information if it is set.
2480 * So don't set it here for Intel to make Linux guests happy.
2481 */
2482 if (cs->nr_cores * cs->nr_threads > 1) {
2483 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2484 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2485 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2486 *ecx |= 1 << 1; /* CmpLegacy bit */
2487 }
2488 }
2489 break;
2490 case 0x80000002:
2491 case 0x80000003:
2492 case 0x80000004:
2493 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2494 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2495 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2496 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2497 break;
2498 case 0x80000005:
2499 /* cache info (L1 cache) */
2500 if (cpu->cache_info_passthrough) {
2501 host_cpuid(index, 0, eax, ebx, ecx, edx);
2502 break;
2503 }
2504 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2505 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2506 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2507 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2508 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2509 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2510 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2511 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2512 break;
2513 case 0x80000006:
2514 /* cache info (L2 cache) */
2515 if (cpu->cache_info_passthrough) {
2516 host_cpuid(index, 0, eax, ebx, ecx, edx);
2517 break;
2518 }
2519 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2520 (L2_DTLB_2M_ENTRIES << 16) | \
2521 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2522 (L2_ITLB_2M_ENTRIES);
2523 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2524 (L2_DTLB_4K_ENTRIES << 16) | \
2525 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2526 (L2_ITLB_4K_ENTRIES);
2527 *ecx = (L2_SIZE_KB_AMD << 16) | \
2528 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2529 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2530 *edx = ((L3_SIZE_KB/512) << 18) | \
2531 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2532 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2533 break;
2534 case 0x80000007:
2535 *eax = 0;
2536 *ebx = 0;
2537 *ecx = 0;
2538 *edx = env->features[FEAT_8000_0007_EDX];
2539 break;
2540 case 0x80000008:
2541 /* virtual & phys address size in low 2 bytes. */
2542 /* XXX: This value must match the one used in the MMU code. */
2543 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2544 /* 64 bit processor */
2545 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2546 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2547 } else {
2548 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2549 *eax = 0x00000024; /* 36 bits physical */
2550 } else {
2551 *eax = 0x00000020; /* 32 bits physical */
2552 }
2553 }
2554 *ebx = 0;
2555 *ecx = 0;
2556 *edx = 0;
2557 if (cs->nr_cores * cs->nr_threads > 1) {
2558 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2559 }
2560 break;
2561 case 0x8000000A:
2562 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2563 *eax = 0x00000001; /* SVM Revision */
2564 *ebx = 0x00000010; /* nr of ASIDs */
2565 *ecx = 0;
2566 *edx = env->features[FEAT_SVM]; /* optional features */
2567 } else {
2568 *eax = 0;
2569 *ebx = 0;
2570 *ecx = 0;
2571 *edx = 0;
2572 }
2573 break;
2574 case 0xC0000000:
2575 *eax = env->cpuid_xlevel2;
2576 *ebx = 0;
2577 *ecx = 0;
2578 *edx = 0;
2579 break;
2580 case 0xC0000001:
2581 /* Support for VIA CPU's CPUID instruction */
2582 *eax = env->cpuid_version;
2583 *ebx = 0;
2584 *ecx = 0;
2585 *edx = env->features[FEAT_C000_0001_EDX];
2586 break;
2587 case 0xC0000002:
2588 case 0xC0000003:
2589 case 0xC0000004:
2590 /* Reserved for the future, and now filled with zero */
2591 *eax = 0;
2592 *ebx = 0;
2593 *ecx = 0;
2594 *edx = 0;
2595 break;
2596 default:
2597 /* reserved values: zero */
2598 *eax = 0;
2599 *ebx = 0;
2600 *ecx = 0;
2601 *edx = 0;
2602 break;
2603 }
2604 }
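/*
 * Illustrative sketch, not part of upstream cpu.c: querying leaf 1 and
 * unpacking EBX, which the code above fills with the APIC ID in bits
 * 31..24, the logical processor count in bits 23..16 (only when HTT is
 * advertised) and the CLFLUSH line size in 8-byte units in bits 15..8.
 * Hypothetical helper, never called.
 */
static __attribute__((unused)) void cpuid_leaf1_example(CPUX86State *env)
{
    uint32_t eax, ebx, ecx, edx;
    uint32_t apic_id, clflush_bytes;

    cpu_x86_cpuid(env, 1, 0, &eax, &ebx, &ecx, &edx);
    apic_id = ebx >> 24;                       /* bits 31..24 */
    clflush_bytes = ((ebx >> 8) & 0xff) * 8;   /* 8 quadwords -> 64 bytes */
    (void)apic_id;
    (void)clflush_bytes;
    (void)eax; (void)ecx; (void)edx;
}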
2605
2606 /* CPUClass::reset() */
2607 static void x86_cpu_reset(CPUState *s)
2608 {
2609 X86CPU *cpu = X86_CPU(s);
2610 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2611 CPUX86State *env = &cpu->env;
2612 int i;
2613
2614 xcc->parent_reset(s);
2615
2616 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2617
2618 tlb_flush(s, 1);
2619
2620 env->old_exception = -1;
2621
2622 /* init to reset state */
2623
2624 #ifdef CONFIG_SOFTMMU
2625 env->hflags |= HF_SOFTMMU_MASK;
2626 #endif
2627 env->hflags2 |= HF2_GIF_MASK;
2628
2629 cpu_x86_update_cr0(env, 0x60000010);
2630 env->a20_mask = ~0x0;
2631 env->smbase = 0x30000;
2632
2633 env->idt.limit = 0xffff;
2634 env->gdt.limit = 0xffff;
2635 env->ldt.limit = 0xffff;
2636 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2637 env->tr.limit = 0xffff;
2638 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2639
2640 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2641 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2642 DESC_R_MASK | DESC_A_MASK);
2643 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2644 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2645 DESC_A_MASK);
2646 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2647 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2648 DESC_A_MASK);
2649 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2650 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2651 DESC_A_MASK);
2652 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2653 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2654 DESC_A_MASK);
2655 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2656 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2657 DESC_A_MASK);
2658
2659 env->eip = 0xfff0;
2660 env->regs[R_EDX] = env->cpuid_version;
2661
2662 env->eflags = 0x2;
2663
2664 /* FPU init */
2665 for (i = 0; i < 8; i++) {
2666 env->fptags[i] = 1;
2667 }
2668 cpu_set_fpuc(env, 0x37f);
2669
2670 env->mxcsr = 0x1f80;
2671 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2672
2673 env->pat = 0x0007040600070406ULL;
2674 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2675
2676 memset(env->dr, 0, sizeof(env->dr));
2677 env->dr[6] = DR6_FIXED_1;
2678 env->dr[7] = DR7_FIXED_1;
2679 cpu_breakpoint_remove_all(s, BP_CPU);
2680 cpu_watchpoint_remove_all(s, BP_CPU);
2681
2682 env->xcr0 = 1;
2683
2684 /*
2685 * SDM 11.11.5 requires:
2686 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2687 * - IA32_MTRR_PHYSMASKn.V = 0
2688 * All other bits are undefined. For simplification, zero it all.
2689 */
2690 env->mtrr_deftype = 0;
2691 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2692 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2693
2694 #if !defined(CONFIG_USER_ONLY)
2695 /* We hard-wire the BSP to the first CPU. */
2696 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2697
2698 s->halted = !cpu_is_bsp(cpu);
2699
2700 if (kvm_enabled()) {
2701 kvm_arch_reset_vcpu(cpu);
2702 }
2703 #endif
2704 }
2705
2706 #ifndef CONFIG_USER_ONLY
2707 bool cpu_is_bsp(X86CPU *cpu)
2708 {
2709 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2710 }
2711
2712 /* TODO: remove me when reset over QOM tree is implemented */
2713 static void x86_cpu_machine_reset_cb(void *opaque)
2714 {
2715 X86CPU *cpu = opaque;
2716 cpu_reset(CPU(cpu));
2717 }
2718 #endif
2719
2720 static void mce_init(X86CPU *cpu)
2721 {
2722 CPUX86State *cenv = &cpu->env;
2723 unsigned int bank;
2724
2725 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2726 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2727 (CPUID_MCE | CPUID_MCA)) {
2728 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2729 cenv->mcg_ctl = ~(uint64_t)0;
2730 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2731 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2732 }
2733 }
2734 }
2735
2736 #ifndef CONFIG_USER_ONLY
2737 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2738 {
2739 DeviceState *dev = DEVICE(cpu);
2740 APICCommonState *apic;
2741 const char *apic_type = "apic";
2742
2743 if (kvm_irqchip_in_kernel()) {
2744 apic_type = "kvm-apic";
2745 } else if (xen_enabled()) {
2746 apic_type = "xen-apic";
2747 }
2748
2749 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2750 if (cpu->apic_state == NULL) {
2751 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2752 return;
2753 }
2754
2755 object_property_add_child(OBJECT(cpu), "apic",
2756 OBJECT(cpu->apic_state), NULL);
2757 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2758 /* TODO: convert to link<> */
2759 apic = APIC_COMMON(cpu->apic_state);
2760 apic->cpu = cpu;
2761 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2762 }
2763
2764 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2765 {
2766 APICCommonState *apic;
2767 static bool apic_mmio_map_once;
2768
2769 if (cpu->apic_state == NULL) {
2770 return;
2771 }
2772 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2773 errp);
2774
2775 /* Map APIC MMIO area */
2776 apic = APIC_COMMON(cpu->apic_state);
2777 if (!apic_mmio_map_once) {
2778 memory_region_add_subregion_overlap(get_system_memory(),
2779 apic->apicbase &
2780 MSR_IA32_APICBASE_BASE,
2781 &apic->io_memory,
2782 0x1000);
2783 apic_mmio_map_once = true;
2784 }
2785 }
2786
2787 static void x86_cpu_machine_done(Notifier *n, void *unused)
2788 {
2789 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2790 MemoryRegion *smram =
2791 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2792
2793 if (smram) {
2794 cpu->smram = g_new(MemoryRegion, 1);
2795 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2796 smram, 0, 1ull << 32);
2797 memory_region_set_enabled(cpu->smram, false);
2798 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2799 }
2800 }
2801 #else
2802 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2803 {
2804 }
2805 #endif
2806
2807
2808 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2809 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2810 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2811 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2812 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2813 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2814 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2815 {
2816 CPUState *cs = CPU(dev);
2817 X86CPU *cpu = X86_CPU(dev);
2818 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2819 CPUX86State *env = &cpu->env;
2820 Error *local_err = NULL;
2821 static bool ht_warned;
2822
2823 if (cpu->apic_id < 0) {
2824 error_setg(errp, "apic-id property was not initialized properly");
2825 return;
2826 }
2827
2828 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2829 env->cpuid_level = 7;
2830 }
2831
2832 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2833 * CPUID[1].EDX.
2834 */
2835 if (IS_AMD_CPU(env)) {
2836 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2837 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2838 & CPUID_EXT2_AMD_ALIASES);
2839 }
2840
2841
2842 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2843 error_setg(&local_err,
2844 kvm_enabled() ?
2845 "Host doesn't support requested features" :
2846 "TCG doesn't support requested features");
2847 goto out;
2848 }
2849
2850 #ifndef CONFIG_USER_ONLY
2851 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2852
2853 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2854 x86_cpu_apic_create(cpu, &local_err);
2855 if (local_err != NULL) {
2856 goto out;
2857 }
2858 }
2859 #endif
2860
2861 mce_init(cpu);
2862
2863 #ifndef CONFIG_USER_ONLY
2864 if (tcg_enabled()) {
2865 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2866 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2867 cs->as = g_new(AddressSpace, 1);
2868
2869 /* Outer container... */
2870 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2871 memory_region_set_enabled(cpu->cpu_as_root, true);
2872
2873 /* ... with two regions inside: normal system memory with low
2874 * priority, and...
2875 */
2876 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2877 get_system_memory(), 0, ~0ull);
2878 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2879 memory_region_set_enabled(cpu->cpu_as_mem, true);
2880 address_space_init(cs->as, cpu->cpu_as_root, "CPU");
2881
2882 /* ... SMRAM with higher priority, linked from /machine/smram. */
2883 cpu->machine_done.notify = x86_cpu_machine_done;
2884 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2885 }
2886 #endif
2887
2888 qemu_init_vcpu(cs);
2889
2890 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2891 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2892 * based on inputs (sockets, cores, threads), it is still better to give
2893 * users a warning.
2894 *
2895 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2896 * cs->nr_threads hasn't been populated yet and the check is incorrect.
2897 */
2898 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2899 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2900 " -smp options properly.");
2901 ht_warned = true;
2902 }
2903
2904 x86_cpu_apic_realize(cpu, &local_err);
2905 if (local_err != NULL) {
2906 goto out;
2907 }
2908 cpu_reset(cs);
2909
2910 xcc->parent_realize(dev, &local_err);
2911
2912 out:
2913 if (local_err != NULL) {
2914 error_propagate(errp, local_err);
2915 return;
2916 }
2917 }
2918
2919 typedef struct BitProperty {
2920 uint32_t *ptr;
2921 uint32_t mask;
2922 } BitProperty;
2923
2924 static void x86_cpu_get_bit_prop(Object *obj,
2925 struct Visitor *v,
2926 void *opaque,
2927 const char *name,
2928 Error **errp)
2929 {
2930 BitProperty *fp = opaque;
2931 bool value = (*fp->ptr & fp->mask) == fp->mask;
2932 visit_type_bool(v, &value, name, errp);
2933 }
2934
2935 static void x86_cpu_set_bit_prop(Object *obj,
2936 struct Visitor *v,
2937 void *opaque,
2938 const char *name,
2939 Error **errp)
2940 {
2941 DeviceState *dev = DEVICE(obj);
2942 BitProperty *fp = opaque;
2943 Error *local_err = NULL;
2944 bool value;
2945
2946 if (dev->realized) {
2947 qdev_prop_set_after_realize(dev, name, errp);
2948 return;
2949 }
2950
2951 visit_type_bool(v, &value, name, &local_err);
2952 if (local_err) {
2953 error_propagate(errp, local_err);
2954 return;
2955 }
2956
2957 if (value) {
2958 *fp->ptr |= fp->mask;
2959 } else {
2960 *fp->ptr &= ~fp->mask;
2961 }
2962 }
2963
2964 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
2965 void *opaque)
2966 {
2967 BitProperty *prop = opaque;
2968 g_free(prop);
2969 }
2970
2971 /* Register a boolean property to get/set a single bit in a uint32_t field.
2972 *
2973 * The same property name can be registered multiple times to make it affect
2974 * multiple bits in the same FeatureWord. In that case, the getter will return
2975 * true only if all bits are set.
2976 */
2977 static void x86_cpu_register_bit_prop(X86CPU *cpu,
2978 const char *prop_name,
2979 uint32_t *field,
2980 int bitnr)
2981 {
2982 BitProperty *fp;
2983 ObjectProperty *op;
2984 uint32_t mask = (1UL << bitnr);
2985
2986 op = object_property_find(OBJECT(cpu), prop_name, NULL);
2987 if (op) {
2988 fp = op->opaque;
2989 assert(fp->ptr == field);
2990 fp->mask |= mask;
2991 } else {
2992 fp = g_new0(BitProperty, 1);
2993 fp->ptr = field;
2994 fp->mask = mask;
2995 object_property_add(OBJECT(cpu), prop_name, "bool",
2996 x86_cpu_get_bit_prop,
2997 x86_cpu_set_bit_prop,
2998 x86_cpu_release_bit_prop, fp, &error_abort);
2999 }
3000 }
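/*
 * Illustrative sketch, not part of upstream cpu.c: registering the same
 * property name twice for different bits of one feature word widens the
 * mask, so the boolean getter reports true only when every covered bit is
 * set.  The property name and bit numbers are hypothetical; never called.
 */
static __attribute__((unused)) void bit_prop_example(X86CPU *cpu)
{
    x86_cpu_register_bit_prop(cpu, "example-feat",
                              &cpu->env.features[FEAT_1_EDX], 3);
    x86_cpu_register_bit_prop(cpu, "example-feat",
                              &cpu->env.features[FEAT_1_EDX], 5);
}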
3001
3002 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3003 FeatureWord w,
3004 int bitnr)
3005 {
3006 Object *obj = OBJECT(cpu);
3007 int i;
3008 char **names;
3009 FeatureWordInfo *fi = &feature_word_info[w];
3010
3011 if (!fi->feat_names) {
3012 return;
3013 }
3014 if (!fi->feat_names[bitnr]) {
3015 return;
3016 }
3017
3018 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3019
3020 feat2prop(names[0]);
3021 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3022
3023 for (i = 1; names[i]; i++) {
3024 feat2prop(names[i]);
3025 object_property_add_alias(obj, names[i], obj, names[0],
3026 &error_abort);
3027 }
3028
3029 g_strfreev(names);
3030 }
3031
3032 static void x86_cpu_initfn(Object *obj)
3033 {
3034 CPUState *cs = CPU(obj);
3035 X86CPU *cpu = X86_CPU(obj);
3036 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3037 CPUX86State *env = &cpu->env;
3038 FeatureWord w;
3039 static int inited;
3040
3041 cs->env_ptr = env;
3042 cpu_exec_init(cs, &error_abort);
3043
3044 object_property_add(obj, "family", "int",
3045 x86_cpuid_version_get_family,
3046 x86_cpuid_version_set_family, NULL, NULL, NULL);
3047 object_property_add(obj, "model", "int",
3048 x86_cpuid_version_get_model,
3049 x86_cpuid_version_set_model, NULL, NULL, NULL);
3050 object_property_add(obj, "stepping", "int",
3051 x86_cpuid_version_get_stepping,
3052 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3053 object_property_add_str(obj, "vendor",
3054 x86_cpuid_get_vendor,
3055 x86_cpuid_set_vendor, NULL);
3056 object_property_add_str(obj, "model-id",
3057 x86_cpuid_get_model_id,
3058 x86_cpuid_set_model_id, NULL);
3059 object_property_add(obj, "tsc-frequency", "int",
3060 x86_cpuid_get_tsc_freq,
3061 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3062 object_property_add(obj, "apic-id", "int",
3063 x86_cpuid_get_apic_id,
3064 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3065 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3066 x86_cpu_get_feature_words,
3067 NULL, NULL, (void *)env->features, NULL);
3068 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3069 x86_cpu_get_feature_words,
3070 NULL, NULL, (void *)cpu->filtered_features, NULL);
3071
3072 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3073
3074 #ifndef CONFIG_USER_ONLY
3075 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3076 cpu->apic_id = -1;
3077 #endif
3078
3079 for (w = 0; w < FEATURE_WORDS; w++) {
3080 int bitnr;
3081
3082 for (bitnr = 0; bitnr < 32; bitnr++) {
3083 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3084 }
3085 }
3086
3087 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3088
3089 /* init various static tables used in TCG mode */
3090 if (tcg_enabled() && !inited) {
3091 inited = 1;
3092 optimize_flags_init();
3093 }
3094 }
3095
3096 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3097 {
3098 X86CPU *cpu = X86_CPU(cs);
3099
3100 return cpu->apic_id;
3101 }
3102
3103 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3104 {
3105 X86CPU *cpu = X86_CPU(cs);
3106
3107 return cpu->env.cr[0] & CR0_PG_MASK;
3108 }
3109
3110 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3111 {
3112 X86CPU *cpu = X86_CPU(cs);
3113
3114 cpu->env.eip = value;
3115 }
3116
3117 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3118 {
3119 X86CPU *cpu = X86_CPU(cs);
3120
3121 cpu->env.eip = tb->pc - tb->cs_base;
3122 }
3123
3124 static bool x86_cpu_has_work(CPUState *cs)
3125 {
3126 X86CPU *cpu = X86_CPU(cs);
3127 CPUX86State *env = &cpu->env;
3128
3129 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3130 CPU_INTERRUPT_POLL)) &&
3131 (env->eflags & IF_MASK)) ||
3132 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3133 CPU_INTERRUPT_INIT |
3134 CPU_INTERRUPT_SIPI |
3135 CPU_INTERRUPT_MCE)) ||
3136 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3137 !(env->hflags & HF_SMM_MASK));
3138 }
3139
3140 static Property x86_cpu_properties[] = {
3141 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3142 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3143 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3144 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3145 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3146 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3147 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
3148 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3149 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3150 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3151 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3152 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3153 DEFINE_PROP_END_OF_LIST()
3154 };
3155
3156 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3157 {
3158 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3159 CPUClass *cc = CPU_CLASS(oc);
3160 DeviceClass *dc = DEVICE_CLASS(oc);
3161
3162 xcc->parent_realize = dc->realize;
3163 dc->realize = x86_cpu_realizefn;
3164 dc->bus_type = TYPE_ICC_BUS;
3165 dc->props = x86_cpu_properties;
3166
3167 xcc->parent_reset = cc->reset;
3168 cc->reset = x86_cpu_reset;
3169 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3170
3171 cc->class_by_name = x86_cpu_class_by_name;
3172 cc->parse_features = x86_cpu_parse_featurestr;
3173 cc->has_work = x86_cpu_has_work;
3174 cc->do_interrupt = x86_cpu_do_interrupt;
3175 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3176 cc->dump_state = x86_cpu_dump_state;
3177 cc->set_pc = x86_cpu_set_pc;
3178 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3179 cc->gdb_read_register = x86_cpu_gdb_read_register;
3180 cc->gdb_write_register = x86_cpu_gdb_write_register;
3181 cc->get_arch_id = x86_cpu_get_arch_id;
3182 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3183 #ifdef CONFIG_USER_ONLY
3184 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3185 #else
3186 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3187 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3188 cc->write_elf64_note = x86_cpu_write_elf64_note;
3189 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3190 cc->write_elf32_note = x86_cpu_write_elf32_note;
3191 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3192 cc->vmsd = &vmstate_x86_cpu;
3193 #endif
3194 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3195 #ifndef CONFIG_USER_ONLY
3196 cc->debug_excp_handler = breakpoint_handler;
3197 #endif
3198 cc->cpu_exec_enter = x86_cpu_exec_enter;
3199 cc->cpu_exec_exit = x86_cpu_exec_exit;
3200 }
3201
3202 static const TypeInfo x86_cpu_type_info = {
3203 .name = TYPE_X86_CPU,
3204 .parent = TYPE_CPU,
3205 .instance_size = sizeof(X86CPU),
3206 .instance_init = x86_cpu_initfn,
3207 .abstract = true,
3208 .class_size = sizeof(X86CPUClass),
3209 .class_init = x86_cpu_common_class_init,
3210 };
3211
3212 static void x86_cpu_register_types(void)
3213 {
3214 int i;
3215
3216 type_register_static(&x86_cpu_type_info);
3217 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3218 x86_register_cpudef_type(&builtin_x86_defs[i]);
3219 }
3220 #ifdef CONFIG_KVM
3221 type_register_static(&host_x86_cpu_type_info);
3222 #endif
3223 }
3224
3225 type_init(x86_cpu_register_types)