/* (git web-viewer navigation header removed — not part of the source file:
 * this is QEMU target-i386/cpu.c) */
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #ifndef CONFIG_USER_ONLY
45 #include "exec/address-spaces.h"
46 #include "hw/hw.h"
47 #include "hw/xen/xen.h"
48 #include "hw/i386/apic_internal.h"
49 #endif
50
51
52 /* Cache topology CPUID constants: */
53
54 /* CPUID Leaf 2 Descriptors */
55
56 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
57 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
58 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
59
60
61 /* CPUID Leaf 4 constants: */
62
63 /* EAX: */
64 #define CPUID_4_TYPE_DCACHE 1
65 #define CPUID_4_TYPE_ICACHE 2
66 #define CPUID_4_TYPE_UNIFIED 3
67
68 #define CPUID_4_LEVEL(l) ((l) << 5)
69
70 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
71 #define CPUID_4_FULLY_ASSOC (1 << 9)
72
73 /* EDX: */
74 #define CPUID_4_NO_INVD_SHARING (1 << 0)
75 #define CPUID_4_INCLUSIVE (1 << 1)
76 #define CPUID_4_COMPLEX_IDX (1 << 2)
77
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a way count to AMD's 4-bit associativity field; any way count
 * not listed encodes as 0 (invalid).
 * Fix: the macro argument and the whole expansion are now parenthesized,
 * so expression arguments (e.g. AMD_ENC_ASSOC(x + y)) and use inside a
 * larger expression expand correctly. */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)  : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
93
94
95 /* Definitions of the hardcoded cache entries we expose: */
96
97 /* L1 data cache: */
98 #define L1D_LINE_SIZE 64
99 #define L1D_ASSOCIATIVITY 8
100 #define L1D_SETS 64
101 #define L1D_PARTITIONS 1
102 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
103 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
104 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
105 #define L1D_LINES_PER_TAG 1
106 #define L1D_SIZE_KB_AMD 64
107 #define L1D_ASSOCIATIVITY_AMD 2
108
109 /* L1 instruction cache: */
110 #define L1I_LINE_SIZE 64
111 #define L1I_ASSOCIATIVITY 8
112 #define L1I_SETS 64
113 #define L1I_PARTITIONS 1
114 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
115 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
116 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
117 #define L1I_LINES_PER_TAG 1
118 #define L1I_SIZE_KB_AMD 64
119 #define L1I_ASSOCIATIVITY_AMD 2
120
121 /* Level 2 unified cache: */
122 #define L2_LINE_SIZE 64
123 #define L2_ASSOCIATIVITY 16
124 #define L2_SETS 4096
125 #define L2_PARTITIONS 1
126 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
127 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
128 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
129 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
130 #define L2_LINES_PER_TAG 1
131 #define L2_SIZE_KB_AMD 512
132
133 /* No L3 cache: */
134 #define L3_SIZE_KB 0 /* disabled */
135 #define L3_ASSOCIATIVITY 0 /* disabled */
136 #define L3_LINES_PER_TAG 0 /* disabled */
137 #define L3_LINE_SIZE 0 /* disabled */
138
139 /* TLB definitions: */
140
141 #define L1_DTLB_2M_ASSOC 1
142 #define L1_DTLB_2M_ENTRIES 255
143 #define L1_DTLB_4K_ASSOC 1
144 #define L1_DTLB_4K_ENTRIES 255
145
146 #define L1_ITLB_2M_ASSOC 1
147 #define L1_ITLB_2M_ENTRIES 255
148 #define L1_ITLB_4K_ASSOC 1
149 #define L1_ITLB_4K_ENTRIES 255
150
151 #define L2_DTLB_2M_ASSOC 0 /* disabled */
152 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
153 #define L2_DTLB_4K_ASSOC 4
154 #define L2_DTLB_4K_ENTRIES 512
155
156 #define L2_ITLB_2M_ASSOC 0 /* disabled */
157 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
158 #define L2_ITLB_4K_ASSOC 4
159 #define L2_ITLB_4K_ENTRIES 512
160
161
162
163 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
164 uint32_t vendor2, uint32_t vendor3)
165 {
166 int i;
167 for (i = 0; i < 4; i++) {
168 dst[i] = vendor1 >> (8 * i);
169 dst[i + 4] = vendor2 >> (8 * i);
170 dst[i + 8] = vendor3 >> (8 * i);
171 }
172 dst[CPUID_VENDOR_SZ] = '\0';
173 }
174
175 /* feature flags taken from "Intel Processor Identification and the CPUID
176 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
177 * between feature naming conventions, aliases may be added.
178 */
179 static const char *feature_name[] = {
180 "fpu", "vme", "de", "pse",
181 "tsc", "msr", "pae", "mce",
182 "cx8", "apic", NULL, "sep",
183 "mtrr", "pge", "mca", "cmov",
184 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
185 NULL, "ds" /* Intel dts */, "acpi", "mmx",
186 "fxsr", "sse", "sse2", "ss",
187 "ht" /* Intel htt */, "tm", "ia64", "pbe",
188 };
189 static const char *ext_feature_name[] = {
190 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
191 "ds_cpl", "vmx", "smx", "est",
192 "tm2", "ssse3", "cid", NULL,
193 "fma", "cx16", "xtpr", "pdcm",
194 NULL, "pcid", "dca", "sse4.1|sse4_1",
195 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
196 "tsc-deadline", "aes", "xsave", "osxsave",
197 "avx", "f16c", "rdrand", "hypervisor",
198 };
199 /* Feature names that are already defined on feature_name[] but are set on
200 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
201 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
202 * if and only if CPU vendor is AMD.
203 */
204 static const char *ext2_feature_name[] = {
205 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
206 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
207 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
208 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
209 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
210 "nx|xd", NULL, "mmxext", NULL /* mmx */,
211 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
212 NULL, "lm|i64", "3dnowext", "3dnow",
213 };
214 static const char *ext3_feature_name[] = {
215 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
216 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
217 "3dnowprefetch", "osvw", "ibs", "xop",
218 "skinit", "wdt", NULL, "lwp",
219 "fma4", "tce", NULL, "nodeid_msr",
220 NULL, "tbm", "topoext", "perfctr_core",
221 "perfctr_nb", NULL, NULL, NULL,
222 NULL, NULL, NULL, NULL,
223 };
224
225 static const char *ext4_feature_name[] = {
226 NULL, NULL, "xstore", "xstore-en",
227 NULL, NULL, "xcrypt", "xcrypt-en",
228 "ace2", "ace2-en", "phe", "phe-en",
229 "pmm", "pmm-en", NULL, NULL,
230 NULL, NULL, NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 };
235
/* KVM paravirtual feature names (CPUID leaf KVM_CPUID_FEATURES, EAX).
 * NOTE: "kvmclock" deliberately appears twice — presumably bits 0 and 3
 * are the two clocksource MSR interfaces (KVM_FEATURE_CLOCKSOURCE /
 * KVM_FEATURE_CLOCKSOURCE2); confirm against linux/kvm_para.h. */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
246
247 static const char *svm_feature_name[] = {
248 "npt", "lbrv", "svm_lock", "nrip_save",
249 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
250 NULL, NULL, "pause_filter", NULL,
251 "pfthreshold", NULL, NULL, NULL,
252 NULL, NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 };
257
258 static const char *cpuid_7_0_ebx_feature_name[] = {
259 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
260 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
261 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
262 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
263 };
264
265 static const char *cpuid_7_0_ecx_feature_name[] = {
266 NULL, NULL, NULL, "pku",
267 "ospke", NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 };
275
276 static const char *cpuid_apm_edx_feature_name[] = {
277 NULL, NULL, NULL, NULL,
278 NULL, NULL, NULL, NULL,
279 "invtsc", NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 };
286
287 static const char *cpuid_xsave_feature_name[] = {
288 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
289 NULL, NULL, NULL, NULL,
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 };
297
298 static const char *cpuid_6_feature_name[] = {
299 NULL, NULL, "arat", NULL,
300 NULL, NULL, NULL, NULL,
301 NULL, NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
305 NULL, NULL, NULL, NULL,
306 NULL, NULL, NULL, NULL,
307 };
308
309 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
310 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
311 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
312 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
313 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
314 CPUID_PSE36 | CPUID_FXSR)
315 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
316 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
317 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
318 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
319 CPUID_PAE | CPUID_SEP | CPUID_APIC)
320
321 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
322 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
323 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
324 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
325 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
326 /* partly implemented:
327 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
328 /* missing:
329 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
330 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
331 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
332 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
333 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
334 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
335 /* missing:
336 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
337 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
338 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
339 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
340 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
341
342 #ifdef TARGET_X86_64
343 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
344 #else
345 #define TCG_EXT2_X86_64_FEATURES 0
346 #endif
347
348 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
349 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
350 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
351 TCG_EXT2_X86_64_FEATURES)
352 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
353 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
354 #define TCG_EXT4_FEATURES 0
355 #define TCG_SVM_FEATURES 0
356 #define TCG_KVM_FEATURES 0
357 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
358 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
359 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
360 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
361 /* missing:
362 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
363 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
364 CPUID_7_0_EBX_RDSEED */
365 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
366 #define TCG_APM_FEATURES 0
367 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
368 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
369 /* missing:
370 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
371
/* Describes how one FeatureWord maps onto a CPUID leaf/register:
 * where to read the bits from, the per-bit flag names, and which of the
 * bits TCG can emulate / cannot be migrated. */
typedef struct FeatureWordInfo {
    const char **feat_names; /* bit-indexed flag names; NULL entry = unnamed bit */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
381
382 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
383 [FEAT_1_EDX] = {
384 .feat_names = feature_name,
385 .cpuid_eax = 1, .cpuid_reg = R_EDX,
386 .tcg_features = TCG_FEATURES,
387 },
388 [FEAT_1_ECX] = {
389 .feat_names = ext_feature_name,
390 .cpuid_eax = 1, .cpuid_reg = R_ECX,
391 .tcg_features = TCG_EXT_FEATURES,
392 },
393 [FEAT_8000_0001_EDX] = {
394 .feat_names = ext2_feature_name,
395 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
396 .tcg_features = TCG_EXT2_FEATURES,
397 },
398 [FEAT_8000_0001_ECX] = {
399 .feat_names = ext3_feature_name,
400 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
401 .tcg_features = TCG_EXT3_FEATURES,
402 },
403 [FEAT_C000_0001_EDX] = {
404 .feat_names = ext4_feature_name,
405 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
406 .tcg_features = TCG_EXT4_FEATURES,
407 },
408 [FEAT_KVM] = {
409 .feat_names = kvm_feature_name,
410 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
411 .tcg_features = TCG_KVM_FEATURES,
412 },
413 [FEAT_SVM] = {
414 .feat_names = svm_feature_name,
415 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
416 .tcg_features = TCG_SVM_FEATURES,
417 },
418 [FEAT_7_0_EBX] = {
419 .feat_names = cpuid_7_0_ebx_feature_name,
420 .cpuid_eax = 7,
421 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
422 .cpuid_reg = R_EBX,
423 .tcg_features = TCG_7_0_EBX_FEATURES,
424 },
425 [FEAT_7_0_ECX] = {
426 .feat_names = cpuid_7_0_ecx_feature_name,
427 .cpuid_eax = 7,
428 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
429 .cpuid_reg = R_ECX,
430 .tcg_features = TCG_7_0_ECX_FEATURES,
431 },
432 [FEAT_8000_0007_EDX] = {
433 .feat_names = cpuid_apm_edx_feature_name,
434 .cpuid_eax = 0x80000007,
435 .cpuid_reg = R_EDX,
436 .tcg_features = TCG_APM_FEATURES,
437 .unmigratable_flags = CPUID_APM_INVTSC,
438 },
439 [FEAT_XSAVE] = {
440 .feat_names = cpuid_xsave_feature_name,
441 .cpuid_eax = 0xd,
442 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
443 .cpuid_reg = R_EAX,
444 .tcg_features = TCG_XSAVE_FEATURES,
445 },
446 [FEAT_6_EAX] = {
447 .feat_names = cpuid_6_feature_name,
448 .cpuid_eax = 6, .cpuid_reg = R_EAX,
449 .tcg_features = TCG_6_EAX_FEATURES,
450 },
451 };
452
453 typedef struct X86RegisterInfo32 {
454 /* Name of register */
455 const char *name;
456 /* QAPI enum value register */
457 X86CPURegister32 qapi_enum;
458 } X86RegisterInfo32;
459
460 #define REGISTER(reg) \
461 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
462 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
463 REGISTER(EAX),
464 REGISTER(ECX),
465 REGISTER(EDX),
466 REGISTER(EBX),
467 REGISTER(ESP),
468 REGISTER(EBP),
469 REGISTER(ESI),
470 REGISTER(EDI),
471 };
472 #undef REGISTER
473
/* XSAVE state components, indexed by XSTATE_*_BIT.  Each entry names the
 * CPUID feature flag that advertises the component, plus its offset and
 * size within the XSAVE area.  NOTE(review): the offsets appear to follow
 * the standard (non-compacted) XSAVE layout — confirm against the SDM. */
const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = 0x240, .size = 0x100 },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = 0x3c0, .size = 0x40  },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = 0x400, .size = 0x40  },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = 0x440, .size = 0x40 },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = 0x480, .size = 0x200 },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = 0x680, .size = 0x400 },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = 0xA80, .size = 0x8 },
};
497
498 const char *get_register_name_32(unsigned int reg)
499 {
500 if (reg >= CPU_NB_REGS32) {
501 return NULL;
502 }
503 return x86_reg_info_32[reg].name;
504 }
505
506 /*
507 * Returns the set of feature flags that are supported and migratable by
508 * QEMU, for a given FeatureWord.
509 */
510 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
511 {
512 FeatureWordInfo *wi = &feature_word_info[w];
513 uint32_t r = 0;
514 int i;
515
516 for (i = 0; i < 32; i++) {
517 uint32_t f = 1U << i;
518 /* If the feature name is unknown, it is not supported by QEMU yet */
519 if (!wi->feat_names[i]) {
520 continue;
521 }
522 /* Skip features known to QEMU, but explicitly marked as unmigratable */
523 if (wi->unmigratable_flags & f) {
524 continue;
525 }
526 r |= f;
527 }
528 return r;
529 }
530
531 void host_cpuid(uint32_t function, uint32_t count,
532 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
533 {
534 uint32_t vec[4];
535
536 #ifdef __x86_64__
537 asm volatile("cpuid"
538 : "=a"(vec[0]), "=b"(vec[1]),
539 "=c"(vec[2]), "=d"(vec[3])
540 : "0"(function), "c"(count) : "cc");
541 #elif defined(__i386__)
542 asm volatile("pusha \n\t"
543 "cpuid \n\t"
544 "mov %%eax, 0(%2) \n\t"
545 "mov %%ebx, 4(%2) \n\t"
546 "mov %%ecx, 8(%2) \n\t"
547 "mov %%edx, 12(%2) \n\t"
548 "popa"
549 : : "a"(function), "c"(count), "S"(vec)
550 : "memory", "cc");
551 #else
552 abort();
553 #endif
554
555 if (eax)
556 *eax = vec[0];
557 if (ebx)
558 *ebx = vec[1];
559 if (ecx)
560 *ecx = vec[2];
561 if (edx)
562 *edx = vec[3];
563 }
564
/* NOTE: despite the name, this matches any non-NUL byte that is NOT a
 * printable ASCII character above space: whitespace/control characters
 * (<= ' ') and bytes beyond '~'. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
566
/* General substring compare of *[s1..e1) and *[s2..e2).  sX points at the
 * start of a substring; if eX is NULL that substring is delimited by a
 * terminating NUL instead of an end pointer.
 * Returns the lexical ordering of *s1 vs *s2 (0 when equal).
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    /* Walk both substrings while the current characters match and
     * neither has hit a NUL terminator. */
    while (*s1 && *s2 && *s1 == *s2) {
        ++s1;
        ++s2;
        /* End-pointer boundaries are checked after every advance. */
        if (s1 == e1) {
            return (s2 == e2) ? 0 : *s2;
        }
        if (s2 == e2) {
            return *s1;
        }
    }
    return *s1 - *s2;
}
587
/* Compare *[s..e) against *altstr.  altstr may be a plain string or a
 * '|'-separated list of (possibly empty) alternatives, searched left to
 * right.  Returns 0 when some alternative matches, non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *start = altstr;
    const char *p = altstr;

    for (;;) {
        /* Advance p to the end of the current alternative. */
        while (*p && *p != '|') {
            ++p;
        }
        if (start == p) {
            /* An empty alternative matches only the empty string. */
            if (!*s) {
                return 0;
            }
        } else if (!sstrcmp(s, e, start, p)) {
            return 0;
        }
        if (!*p) {
            return 1;
        }
        ++p;
        start = p;
    }
}
608
/* Search featureset (a 32-entry, bit-indexed name table) for flag
 * *[s..e).  On a match, set the corresponding bit in *pval and return
 * true; return false if no entry matched.
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    bool matched = false;
    int bit;

    for (bit = 0; bit < 32; bit++) {
        const char *name = featureset[bit];

        if (name && !altcmp(s, e, name)) {
            *pval |= 1U << bit;
            matched = true;
        }
    }
    return matched;
}
627
628 static void add_flagname_to_bitmaps(const char *flagname,
629 FeatureWordArray words,
630 Error **errp)
631 {
632 FeatureWord w;
633 for (w = 0; w < FEATURE_WORDS; w++) {
634 FeatureWordInfo *wi = &feature_word_info[w];
635 if (wi->feat_names &&
636 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
637 break;
638 }
639 }
640 if (w == FEATURE_WORDS) {
641 error_setg(errp, "CPU feature %s not found", flagname);
642 }
643 }
644
/* CPU class name definitions: */

#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    /* e.g. "qemu64" -> "qemu64" X86_CPU_TYPE_SUFFIX; must be g_free()d */
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
657
658 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
659 {
660 ObjectClass *oc;
661 char *typename;
662
663 if (cpu_model == NULL) {
664 return NULL;
665 }
666
667 typename = x86_cpu_type_name(cpu_model);
668 oc = object_class_by_name(typename);
669 g_free(typename);
670 return oc;
671 }
672
/* Static definition of a built-in CPU model: CPUID leaf limits, vendor,
 * family/model/stepping, default feature-word values and the model-ID
 * string exposed to the guest. */
struct X86CPUDefinition {
    const char *name;
    uint32_t level;   /* max basic CPUID leaf */
    uint32_t xlevel;  /* max extended CPUID leaf (0x8000xxxx range) */
    uint32_t xlevel2; /* presumably max Centaur leaf (0xC000xxxx) — confirm */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    char model_id[48];
};
686
687 static X86CPUDefinition builtin_x86_defs[] = {
688 {
689 .name = "qemu64",
690 .level = 0xd,
691 .vendor = CPUID_VENDOR_AMD,
692 .family = 6,
693 .model = 6,
694 .stepping = 3,
695 .features[FEAT_1_EDX] =
696 PPRO_FEATURES |
697 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
698 CPUID_PSE36,
699 .features[FEAT_1_ECX] =
700 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
701 .features[FEAT_8000_0001_EDX] =
702 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
703 .features[FEAT_8000_0001_ECX] =
704 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
705 .xlevel = 0x8000000A,
706 },
707 {
708 .name = "phenom",
709 .level = 5,
710 .vendor = CPUID_VENDOR_AMD,
711 .family = 16,
712 .model = 2,
713 .stepping = 3,
714 /* Missing: CPUID_HT */
715 .features[FEAT_1_EDX] =
716 PPRO_FEATURES |
717 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
718 CPUID_PSE36 | CPUID_VME,
719 .features[FEAT_1_ECX] =
720 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
721 CPUID_EXT_POPCNT,
722 .features[FEAT_8000_0001_EDX] =
723 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
724 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
725 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
726 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
727 CPUID_EXT3_CR8LEG,
728 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
729 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
730 .features[FEAT_8000_0001_ECX] =
731 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
732 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
733 /* Missing: CPUID_SVM_LBRV */
734 .features[FEAT_SVM] =
735 CPUID_SVM_NPT,
736 .xlevel = 0x8000001A,
737 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
738 },
739 {
740 .name = "core2duo",
741 .level = 10,
742 .vendor = CPUID_VENDOR_INTEL,
743 .family = 6,
744 .model = 15,
745 .stepping = 11,
746 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
747 .features[FEAT_1_EDX] =
748 PPRO_FEATURES |
749 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
750 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
751 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
752 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
753 .features[FEAT_1_ECX] =
754 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
755 CPUID_EXT_CX16,
756 .features[FEAT_8000_0001_EDX] =
757 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
758 .features[FEAT_8000_0001_ECX] =
759 CPUID_EXT3_LAHF_LM,
760 .xlevel = 0x80000008,
761 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
762 },
763 {
764 .name = "kvm64",
765 .level = 0xd,
766 .vendor = CPUID_VENDOR_INTEL,
767 .family = 15,
768 .model = 6,
769 .stepping = 1,
770 /* Missing: CPUID_HT */
771 .features[FEAT_1_EDX] =
772 PPRO_FEATURES | CPUID_VME |
773 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
774 CPUID_PSE36,
775 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
776 .features[FEAT_1_ECX] =
777 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
778 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
779 .features[FEAT_8000_0001_EDX] =
780 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
781 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
782 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
783 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
784 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
785 .features[FEAT_8000_0001_ECX] =
786 0,
787 .xlevel = 0x80000008,
788 .model_id = "Common KVM processor"
789 },
790 {
791 .name = "qemu32",
792 .level = 4,
793 .vendor = CPUID_VENDOR_INTEL,
794 .family = 6,
795 .model = 6,
796 .stepping = 3,
797 .features[FEAT_1_EDX] =
798 PPRO_FEATURES,
799 .features[FEAT_1_ECX] =
800 CPUID_EXT_SSE3,
801 .xlevel = 0x80000004,
802 },
803 {
804 .name = "kvm32",
805 .level = 5,
806 .vendor = CPUID_VENDOR_INTEL,
807 .family = 15,
808 .model = 6,
809 .stepping = 1,
810 .features[FEAT_1_EDX] =
811 PPRO_FEATURES | CPUID_VME |
812 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
813 .features[FEAT_1_ECX] =
814 CPUID_EXT_SSE3,
815 .features[FEAT_8000_0001_ECX] =
816 0,
817 .xlevel = 0x80000008,
818 .model_id = "Common 32-bit KVM processor"
819 },
820 {
821 .name = "coreduo",
822 .level = 10,
823 .vendor = CPUID_VENDOR_INTEL,
824 .family = 6,
825 .model = 14,
826 .stepping = 8,
827 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
828 .features[FEAT_1_EDX] =
829 PPRO_FEATURES | CPUID_VME |
830 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
831 CPUID_SS,
832 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
833 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
834 .features[FEAT_1_ECX] =
835 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
836 .features[FEAT_8000_0001_EDX] =
837 CPUID_EXT2_NX,
838 .xlevel = 0x80000008,
839 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
840 },
841 {
842 .name = "486",
843 .level = 1,
844 .vendor = CPUID_VENDOR_INTEL,
845 .family = 4,
846 .model = 8,
847 .stepping = 0,
848 .features[FEAT_1_EDX] =
849 I486_FEATURES,
850 .xlevel = 0,
851 },
852 {
853 .name = "pentium",
854 .level = 1,
855 .vendor = CPUID_VENDOR_INTEL,
856 .family = 5,
857 .model = 4,
858 .stepping = 3,
859 .features[FEAT_1_EDX] =
860 PENTIUM_FEATURES,
861 .xlevel = 0,
862 },
863 {
864 .name = "pentium2",
865 .level = 2,
866 .vendor = CPUID_VENDOR_INTEL,
867 .family = 6,
868 .model = 5,
869 .stepping = 2,
870 .features[FEAT_1_EDX] =
871 PENTIUM2_FEATURES,
872 .xlevel = 0,
873 },
874 {
875 .name = "pentium3",
876 .level = 3,
877 .vendor = CPUID_VENDOR_INTEL,
878 .family = 6,
879 .model = 7,
880 .stepping = 3,
881 .features[FEAT_1_EDX] =
882 PENTIUM3_FEATURES,
883 .xlevel = 0,
884 },
885 {
886 .name = "athlon",
887 .level = 2,
888 .vendor = CPUID_VENDOR_AMD,
889 .family = 6,
890 .model = 2,
891 .stepping = 3,
892 .features[FEAT_1_EDX] =
893 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
894 CPUID_MCA,
895 .features[FEAT_8000_0001_EDX] =
896 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
897 .xlevel = 0x80000008,
898 },
899 {
900 .name = "n270",
901 .level = 10,
902 .vendor = CPUID_VENDOR_INTEL,
903 .family = 6,
904 .model = 28,
905 .stepping = 2,
906 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
907 .features[FEAT_1_EDX] =
908 PPRO_FEATURES |
909 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
910 CPUID_ACPI | CPUID_SS,
911 /* Some CPUs got no CPUID_SEP */
912 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
913 * CPUID_EXT_XTPR */
914 .features[FEAT_1_ECX] =
915 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
916 CPUID_EXT_MOVBE,
917 .features[FEAT_8000_0001_EDX] =
918 CPUID_EXT2_NX,
919 .features[FEAT_8000_0001_ECX] =
920 CPUID_EXT3_LAHF_LM,
921 .xlevel = 0x80000008,
922 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
923 },
924 {
925 .name = "Conroe",
926 .level = 10,
927 .vendor = CPUID_VENDOR_INTEL,
928 .family = 6,
929 .model = 15,
930 .stepping = 3,
931 .features[FEAT_1_EDX] =
932 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
933 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
934 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
935 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
936 CPUID_DE | CPUID_FP87,
937 .features[FEAT_1_ECX] =
938 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
939 .features[FEAT_8000_0001_EDX] =
940 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
941 .features[FEAT_8000_0001_ECX] =
942 CPUID_EXT3_LAHF_LM,
943 .xlevel = 0x80000008,
944 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
945 },
946 {
947 .name = "Penryn",
948 .level = 10,
949 .vendor = CPUID_VENDOR_INTEL,
950 .family = 6,
951 .model = 23,
952 .stepping = 3,
953 .features[FEAT_1_EDX] =
954 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
955 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
956 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
957 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
958 CPUID_DE | CPUID_FP87,
959 .features[FEAT_1_ECX] =
960 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
961 CPUID_EXT_SSE3,
962 .features[FEAT_8000_0001_EDX] =
963 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
964 .features[FEAT_8000_0001_ECX] =
965 CPUID_EXT3_LAHF_LM,
966 .xlevel = 0x80000008,
967 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
968 },
969 {
970 .name = "Nehalem",
971 .level = 11,
972 .vendor = CPUID_VENDOR_INTEL,
973 .family = 6,
974 .model = 26,
975 .stepping = 3,
976 .features[FEAT_1_EDX] =
977 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
978 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
979 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
980 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
981 CPUID_DE | CPUID_FP87,
982 .features[FEAT_1_ECX] =
983 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
984 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
985 .features[FEAT_8000_0001_EDX] =
986 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
987 .features[FEAT_8000_0001_ECX] =
988 CPUID_EXT3_LAHF_LM,
989 .xlevel = 0x80000008,
990 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
991 },
992 {
993 .name = "Westmere",
994 .level = 11,
995 .vendor = CPUID_VENDOR_INTEL,
996 .family = 6,
997 .model = 44,
998 .stepping = 1,
999 .features[FEAT_1_EDX] =
1000 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1001 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1002 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1003 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1004 CPUID_DE | CPUID_FP87,
1005 .features[FEAT_1_ECX] =
1006 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1007 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1008 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1009 .features[FEAT_8000_0001_EDX] =
1010 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1011 .features[FEAT_8000_0001_ECX] =
1012 CPUID_EXT3_LAHF_LM,
1013 .features[FEAT_6_EAX] =
1014 CPUID_6_EAX_ARAT,
1015 .xlevel = 0x80000008,
1016 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1017 },
1018 {
1019 .name = "SandyBridge",
1020 .level = 0xd,
1021 .vendor = CPUID_VENDOR_INTEL,
1022 .family = 6,
1023 .model = 42,
1024 .stepping = 1,
1025 .features[FEAT_1_EDX] =
1026 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1027 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1028 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1029 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1030 CPUID_DE | CPUID_FP87,
1031 .features[FEAT_1_ECX] =
1032 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1033 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1034 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1035 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1036 CPUID_EXT_SSE3,
1037 .features[FEAT_8000_0001_EDX] =
1038 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1039 CPUID_EXT2_SYSCALL,
1040 .features[FEAT_8000_0001_ECX] =
1041 CPUID_EXT3_LAHF_LM,
1042 .features[FEAT_XSAVE] =
1043 CPUID_XSAVE_XSAVEOPT,
1044 .features[FEAT_6_EAX] =
1045 CPUID_6_EAX_ARAT,
1046 .xlevel = 0x80000008,
1047 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1048 },
1049 {
1050 .name = "IvyBridge",
1051 .level = 0xd,
1052 .vendor = CPUID_VENDOR_INTEL,
1053 .family = 6,
1054 .model = 58,
1055 .stepping = 9,
1056 .features[FEAT_1_EDX] =
1057 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1058 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1059 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1060 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1061 CPUID_DE | CPUID_FP87,
1062 .features[FEAT_1_ECX] =
1063 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1064 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1065 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1066 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1067 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1068 .features[FEAT_7_0_EBX] =
1069 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1070 CPUID_7_0_EBX_ERMS,
1071 .features[FEAT_8000_0001_EDX] =
1072 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1073 CPUID_EXT2_SYSCALL,
1074 .features[FEAT_8000_0001_ECX] =
1075 CPUID_EXT3_LAHF_LM,
1076 .features[FEAT_XSAVE] =
1077 CPUID_XSAVE_XSAVEOPT,
1078 .features[FEAT_6_EAX] =
1079 CPUID_6_EAX_ARAT,
1080 .xlevel = 0x80000008,
1081 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1082 },
1083 {
1084 .name = "Haswell-noTSX",
1085 .level = 0xd,
1086 .vendor = CPUID_VENDOR_INTEL,
1087 .family = 6,
1088 .model = 60,
1089 .stepping = 1,
1090 .features[FEAT_1_EDX] =
1091 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1092 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1093 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1094 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1095 CPUID_DE | CPUID_FP87,
1096 .features[FEAT_1_ECX] =
1097 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1098 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1099 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1100 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1101 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1102 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1103 .features[FEAT_8000_0001_EDX] =
1104 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1105 CPUID_EXT2_SYSCALL,
1106 .features[FEAT_8000_0001_ECX] =
1107 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1108 .features[FEAT_7_0_EBX] =
1109 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1110 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1111 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1112 .features[FEAT_XSAVE] =
1113 CPUID_XSAVE_XSAVEOPT,
1114 .features[FEAT_6_EAX] =
1115 CPUID_6_EAX_ARAT,
1116 .xlevel = 0x80000008,
1117 .model_id = "Intel Core Processor (Haswell, no TSX)",
1118 }, {
1119 .name = "Haswell",
1120 .level = 0xd,
1121 .vendor = CPUID_VENDOR_INTEL,
1122 .family = 6,
1123 .model = 60,
1124 .stepping = 1,
1125 .features[FEAT_1_EDX] =
1126 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1127 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1128 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1129 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1130 CPUID_DE | CPUID_FP87,
1131 .features[FEAT_1_ECX] =
1132 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1133 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1134 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1135 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1136 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1137 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1138 .features[FEAT_8000_0001_EDX] =
1139 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1140 CPUID_EXT2_SYSCALL,
1141 .features[FEAT_8000_0001_ECX] =
1142 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1143 .features[FEAT_7_0_EBX] =
1144 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1145 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1146 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1147 CPUID_7_0_EBX_RTM,
1148 .features[FEAT_XSAVE] =
1149 CPUID_XSAVE_XSAVEOPT,
1150 .features[FEAT_6_EAX] =
1151 CPUID_6_EAX_ARAT,
1152 .xlevel = 0x80000008,
1153 .model_id = "Intel Core Processor (Haswell)",
1154 },
1155 {
1156 .name = "Broadwell-noTSX",
1157 .level = 0xd,
1158 .vendor = CPUID_VENDOR_INTEL,
1159 .family = 6,
1160 .model = 61,
1161 .stepping = 2,
1162 .features[FEAT_1_EDX] =
1163 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1164 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1165 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1166 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1167 CPUID_DE | CPUID_FP87,
1168 .features[FEAT_1_ECX] =
1169 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1170 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1171 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1172 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1173 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1174 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1175 .features[FEAT_8000_0001_EDX] =
1176 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1177 CPUID_EXT2_SYSCALL,
1178 .features[FEAT_8000_0001_ECX] =
1179 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1180 .features[FEAT_7_0_EBX] =
1181 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1182 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1183 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1184 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1185 CPUID_7_0_EBX_SMAP,
1186 .features[FEAT_XSAVE] =
1187 CPUID_XSAVE_XSAVEOPT,
1188 .features[FEAT_6_EAX] =
1189 CPUID_6_EAX_ARAT,
1190 .xlevel = 0x80000008,
1191 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1192 },
1193 {
1194 .name = "Broadwell",
1195 .level = 0xd,
1196 .vendor = CPUID_VENDOR_INTEL,
1197 .family = 6,
1198 .model = 61,
1199 .stepping = 2,
1200 .features[FEAT_1_EDX] =
1201 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1202 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1203 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1204 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1205 CPUID_DE | CPUID_FP87,
1206 .features[FEAT_1_ECX] =
1207 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1208 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1209 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1210 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1211 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1212 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1213 .features[FEAT_8000_0001_EDX] =
1214 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1215 CPUID_EXT2_SYSCALL,
1216 .features[FEAT_8000_0001_ECX] =
1217 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1218 .features[FEAT_7_0_EBX] =
1219 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1220 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1221 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1222 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1223 CPUID_7_0_EBX_SMAP,
1224 .features[FEAT_XSAVE] =
1225 CPUID_XSAVE_XSAVEOPT,
1226 .features[FEAT_6_EAX] =
1227 CPUID_6_EAX_ARAT,
1228 .xlevel = 0x80000008,
1229 .model_id = "Intel Core Processor (Broadwell)",
1230 },
1231 {
1232 .name = "Opteron_G1",
1233 .level = 5,
1234 .vendor = CPUID_VENDOR_AMD,
1235 .family = 15,
1236 .model = 6,
1237 .stepping = 1,
1238 .features[FEAT_1_EDX] =
1239 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1240 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1241 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1242 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1243 CPUID_DE | CPUID_FP87,
1244 .features[FEAT_1_ECX] =
1245 CPUID_EXT_SSE3,
1246 .features[FEAT_8000_0001_EDX] =
1247 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1248 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1249 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1250 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1251 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1252 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1253 .xlevel = 0x80000008,
1254 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1255 },
1256 {
1257 .name = "Opteron_G2",
1258 .level = 5,
1259 .vendor = CPUID_VENDOR_AMD,
1260 .family = 15,
1261 .model = 6,
1262 .stepping = 1,
1263 .features[FEAT_1_EDX] =
1264 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1265 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1266 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1267 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1268 CPUID_DE | CPUID_FP87,
1269 .features[FEAT_1_ECX] =
1270 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1271 /* Missing: CPUID_EXT2_RDTSCP */
1272 .features[FEAT_8000_0001_EDX] =
1273 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1274 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1275 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1276 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1277 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1278 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1279 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1280 .features[FEAT_8000_0001_ECX] =
1281 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1282 .xlevel = 0x80000008,
1283 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1284 },
1285 {
1286 .name = "Opteron_G3",
1287 .level = 5,
1288 .vendor = CPUID_VENDOR_AMD,
1289 .family = 15,
1290 .model = 6,
1291 .stepping = 1,
1292 .features[FEAT_1_EDX] =
1293 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1294 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1295 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1296 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1297 CPUID_DE | CPUID_FP87,
1298 .features[FEAT_1_ECX] =
1299 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1300 CPUID_EXT_SSE3,
1301 /* Missing: CPUID_EXT2_RDTSCP */
1302 .features[FEAT_8000_0001_EDX] =
1303 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1304 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1305 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1306 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1307 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1308 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1309 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1310 .features[FEAT_8000_0001_ECX] =
1311 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1312 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1313 .xlevel = 0x80000008,
1314 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1315 },
1316 {
1317 .name = "Opteron_G4",
1318 .level = 0xd,
1319 .vendor = CPUID_VENDOR_AMD,
1320 .family = 21,
1321 .model = 1,
1322 .stepping = 2,
1323 .features[FEAT_1_EDX] =
1324 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1325 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1326 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1327 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1328 CPUID_DE | CPUID_FP87,
1329 .features[FEAT_1_ECX] =
1330 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1331 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1332 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1333 CPUID_EXT_SSE3,
1334 /* Missing: CPUID_EXT2_RDTSCP */
1335 .features[FEAT_8000_0001_EDX] =
1336 CPUID_EXT2_LM |
1337 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1338 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1339 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1340 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1341 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1342 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1343 .features[FEAT_8000_0001_ECX] =
1344 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1345 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1346 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1347 CPUID_EXT3_LAHF_LM,
1348 /* no xsaveopt! */
1349 .xlevel = 0x8000001A,
1350 .model_id = "AMD Opteron 62xx class CPU",
1351 },
1352 {
1353 .name = "Opteron_G5",
1354 .level = 0xd,
1355 .vendor = CPUID_VENDOR_AMD,
1356 .family = 21,
1357 .model = 2,
1358 .stepping = 0,
1359 .features[FEAT_1_EDX] =
1360 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1361 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1362 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1363 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1364 CPUID_DE | CPUID_FP87,
1365 .features[FEAT_1_ECX] =
1366 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1367 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1368 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1369 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1370 /* Missing: CPUID_EXT2_RDTSCP */
1371 .features[FEAT_8000_0001_EDX] =
1372 CPUID_EXT2_LM |
1373 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1374 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1375 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1376 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1377 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1378 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1379 .features[FEAT_8000_0001_ECX] =
1380 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1381 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1382 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1383 CPUID_EXT3_LAHF_LM,
1384 /* no xsaveopt! */
1385 .xlevel = 0x8000001A,
1386 .model_id = "AMD Opteron 63xx class CPU",
1387 },
1388 };
1389
/* A (QOM property name, property value) pair, used for tables of
 * default property settings such as kvm_default_props below. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1393
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * The table is NULL-terminated; entries may be overridden at run time
 * via x86_cpu_change_kvm_default().
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    /* Features disabled by default under KVM: */
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
1410
1411 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1412 {
1413 PropValue *pv;
1414 for (pv = kvm_default_props; pv->prop; pv++) {
1415 if (!strcmp(pv->prop, prop)) {
1416 pv->value = value;
1417 break;
1418 }
1419 }
1420
1421 /* It is valid to call this function only for properties that
1422 * are already present in the kvm_default_props table.
1423 */
1424 assert(pv->prop);
1425 }
1426
1427 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1428 bool migratable_only);
1429
1430 #ifdef CONFIG_KVM
1431
/* Fill @str (at least 48 bytes) with the host CPU brand string read from
 * CPUID leaves 0x80000002..0x80000004 (16 bytes per leaf, EAX..EDX in
 * register order).  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        uint32_t regs[4] = { 0, 0, 0, 0 };

        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* The four registers are laid out contiguously in the string. */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
1446
1447 static X86CPUDefinition host_cpudef;
1448
/* qdev properties specific to the "host" CPU model. */
static Property host_x86_cpu_properties[] = {
    /* Only expose features that can be safely migrated. */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* Pass the host's cache topology through to the guest. */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1454
/* class_init for the "host" CPU model
 *
 * Fills host_cpudef with the vendor/family/model/stepping and brand string
 * of the CPU QEMU is running on, read directly via CPUID.
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    /* The "host" model can only mirror a real CPU through KVM. */
    xcc->kvm_required = true;

    /* CPUID leaf 0: the 12-byte vendor string lives in EBX, EDX, ECX. */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* CPUID leaf 1, EAX: family/model/stepping fields.
     * NOTE(review): the extended family (bits 27:20) and extended model
     * (bits 19:16) fields are folded in unconditionally here, not only
     * when the base field is 0xF as the SDM describes — confirm this is
     * intentional for host passthrough. */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    /* Brand string from leaves 0x80000002..4. */
    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
1487
/* instance_init for the "host" CPU model: query KVM for the CPUID levels
 * the host kernel supports and enable the PMU. */
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* The "host" model requires KVM (see host_x86_cpu_class_init). */
    assert(kvm_enabled());

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* Maximum supported CPUID leaf in the basic (0x0), extended
     * (0x80000000) and 0xC0000000 ranges, as reported by KVM. */
    env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
    env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
    env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

    /* Enable the PMU by default for -cpu host. */
    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1507
/* QOM type registration for the "host" CPU model. */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1514
1515 #endif
1516
1517 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1518 {
1519 FeatureWordInfo *f = &feature_word_info[w];
1520 int i;
1521
1522 for (i = 0; i < 32; ++i) {
1523 if ((1UL << i) & mask) {
1524 const char *reg = get_register_name_32(f->cpuid_reg);
1525 assert(reg);
1526 fprintf(stderr, "warning: %s doesn't support requested feature: "
1527 "CPUID.%02XH:%s%s%s [bit %d]\n",
1528 kvm_enabled() ? "host" : "TCG",
1529 f->cpuid_eax, reg,
1530 f->feat_names[i] ? "." : "",
1531 f->feat_names[i] ? f->feat_names[i] : "", i);
1532 }
1533 }
1534 }
1535
1536 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1537 const char *name, void *opaque,
1538 Error **errp)
1539 {
1540 X86CPU *cpu = X86_CPU(obj);
1541 CPUX86State *env = &cpu->env;
1542 int64_t value;
1543
1544 value = (env->cpuid_version >> 8) & 0xf;
1545 if (value == 0xf) {
1546 value += (env->cpuid_version >> 20) & 0xff;
1547 }
1548 visit_type_int(v, name, &value, errp);
1549 }
1550
/* QOM setter for the "family" property: encode @value into the base
 * (bits 11:8) and extended (bits 27:20) family fields of cpuid_version. */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    /* 4-bit base field plus 8-bit extended field limits the range. */
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear both the base-family and extended-family fields. */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        /* Base field saturates at 0xf; the remainder goes in bits 27:20. */
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
1580
1581 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1582 const char *name, void *opaque,
1583 Error **errp)
1584 {
1585 X86CPU *cpu = X86_CPU(obj);
1586 CPUX86State *env = &cpu->env;
1587 int64_t value;
1588
1589 value = (env->cpuid_version >> 4) & 0xf;
1590 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1591 visit_type_int(v, name, &value, errp);
1592 }
1593
/* QOM setter for the "model" property: split @value into the base model
 * (bits 7:4, low nibble) and extended model (bits 19:16, high nibble)
 * fields of cpuid_version. */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    /* Two 4-bit fields give an 8-bit model number. */
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear both model fields, then store the two nibbles. */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
1619
1620 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1621 const char *name, void *opaque,
1622 Error **errp)
1623 {
1624 X86CPU *cpu = X86_CPU(obj);
1625 CPUX86State *env = &cpu->env;
1626 int64_t value;
1627
1628 value = env->cpuid_version & 0xf;
1629 visit_type_int(v, name, &value, errp);
1630 }
1631
1632 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1633 const char *name, void *opaque,
1634 Error **errp)
1635 {
1636 X86CPU *cpu = X86_CPU(obj);
1637 CPUX86State *env = &cpu->env;
1638 const int64_t min = 0;
1639 const int64_t max = 0xf;
1640 Error *local_err = NULL;
1641 int64_t value;
1642
1643 visit_type_int(v, name, &value, &local_err);
1644 if (local_err) {
1645 error_propagate(errp, local_err);
1646 return;
1647 }
1648 if (value < min || value > max) {
1649 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1650 name ? name : "null", value, min, max);
1651 return;
1652 }
1653
1654 env->cpuid_version &= ~0xf;
1655 env->cpuid_version |= value & 0xf;
1656 }
1657
1658 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1659 {
1660 X86CPU *cpu = X86_CPU(obj);
1661 CPUX86State *env = &cpu->env;
1662 char *value;
1663
1664 value = g_malloc(CPUID_VENDOR_SZ + 1);
1665 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1666 env->cpuid_vendor3);
1667 return value;
1668 }
1669
1670 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1671 Error **errp)
1672 {
1673 X86CPU *cpu = X86_CPU(obj);
1674 CPUX86State *env = &cpu->env;
1675 int i;
1676
1677 if (strlen(value) != CPUID_VENDOR_SZ) {
1678 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1679 return;
1680 }
1681
1682 env->cpuid_vendor1 = 0;
1683 env->cpuid_vendor2 = 0;
1684 env->cpuid_vendor3 = 0;
1685 for (i = 0; i < 4; i++) {
1686 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1687 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1688 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1689 }
1690 }
1691
1692 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1693 {
1694 X86CPU *cpu = X86_CPU(obj);
1695 CPUX86State *env = &cpu->env;
1696 char *value;
1697 int i;
1698
1699 value = g_malloc(48 + 1);
1700 for (i = 0; i < 48; i++) {
1701 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1702 }
1703 value[48] = '\0';
1704 return value;
1705 }
1706
1707 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1708 Error **errp)
1709 {
1710 X86CPU *cpu = X86_CPU(obj);
1711 CPUX86State *env = &cpu->env;
1712 int c, len, i;
1713
1714 if (model_id == NULL) {
1715 model_id = "";
1716 }
1717 len = strlen(model_id);
1718 memset(env->cpuid_model, 0, 48);
1719 for (i = 0; i < 48; i++) {
1720 if (i >= len) {
1721 c = '\0';
1722 } else {
1723 c = (uint8_t)model_id[i];
1724 }
1725 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1726 }
1727 }
1728
1729 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1730 void *opaque, Error **errp)
1731 {
1732 X86CPU *cpu = X86_CPU(obj);
1733 int64_t value;
1734
1735 value = cpu->env.tsc_khz * 1000;
1736 visit_type_int(v, name, &value, errp);
1737 }
1738
/* QOM setter for "tsc-frequency": accepts a non-negative value in Hz and
 * stores it as kHz in both tsc_khz and user_tsc_khz. */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Property is in Hz; internal state is in kHz. */
    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
1761
1762 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1763 void *opaque, Error **errp)
1764 {
1765 X86CPU *cpu = X86_CPU(obj);
1766 int64_t value = cpu->apic_id;
1767
1768 visit_type_int(v, name, &value, errp);
1769 }
1770
/* QOM setter for the "apic-id" property.
 *
 * Rejects the change if the device is already realized, if the value is
 * outside [0, UINT32_MAX], or if another CPU already uses that APIC ID.
 */
static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
                                  void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    DeviceState *dev = DEVICE(obj);
    const int64_t min = 0;
    const int64_t max = UINT32_MAX;
    Error *error = NULL;
    int64_t value;

    /* The APIC ID is fixed once the CPU device has been realized. */
    if (dev->realized) {
        error_setg(errp, "Attempt to set property '%s' on '%s' after "
                   "it was realized", name, object_get_typename(obj));
        return;
    }

    visit_type_int(v, name, &value, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
                   object_get_typename(obj), name, value, min, max);
        return;
    }

    /* APIC IDs must be unique among all CPUs (setting the current value
     * again is allowed). */
    if ((value != cpu->apic_id) && cpu_exists(value)) {
        error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
        return;
    }
    cpu->apic_id = value;
}
1805
/* Generic getter for "feature-words" and "filtered-features" properties
 *
 * @opaque points at the uint32_t feature-word array to expose.  Builds a
 * stack-allocated X86CPUFeatureWordInfoList describing every feature word
 * (CPUID leaf/register and current bit values) and visits it; nothing is
 * heap-allocated, so no cleanup is needed.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    Error *err = NULL;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
    error_propagate(errp, err);
}
1836
1837 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1838 void *opaque, Error **errp)
1839 {
1840 X86CPU *cpu = X86_CPU(obj);
1841 int64_t value = cpu->hyperv_spinlock_attempts;
1842
1843 visit_type_int(v, name, &value, errp);
1844 }
1845
/* QOM setter for the "hv-spinlocks" property (Hyper-V spinlock retry
 * count); values outside [0xFFF, UINT_MAX] are rejected. */
static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    /* 0xFFF is the smallest retry count the Hyper-V interface accepts
     * here; see also the fixup in x86_cpu_parse_featurestr(). */
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}
1870
/* qdev property descriptor for "hv-spinlocks"; range checking happens in
 * x86_set_hv_spinlocks(). */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1876
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = strchr(s, '_'); p != NULL; p = strchr(p + 1, '_')) {
        *p = '-';
    }
}
1886
1887 /* Parse "+feature,-feature,feature=foo" CPU feature string
1888 */
1889 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1890 Error **errp)
1891 {
1892 X86CPU *cpu = X86_CPU(cs);
1893 char *featurestr; /* Single 'key=value" string being parsed */
1894 FeatureWord w;
1895 /* Features to be added */
1896 FeatureWordArray plus_features = { 0 };
1897 /* Features to be removed */
1898 FeatureWordArray minus_features = { 0 };
1899 uint32_t numvalue;
1900 CPUX86State *env = &cpu->env;
1901 Error *local_err = NULL;
1902
1903 featurestr = features ? strtok(features, ",") : NULL;
1904
1905 while (featurestr) {
1906 char *val;
1907 if (featurestr[0] == '+') {
1908 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1909 } else if (featurestr[0] == '-') {
1910 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1911 } else if ((val = strchr(featurestr, '='))) {
1912 *val = 0; val++;
1913 feat2prop(featurestr);
1914 if (!strcmp(featurestr, "xlevel")) {
1915 char *err;
1916 char num[32];
1917
1918 numvalue = strtoul(val, &err, 0);
1919 if (!*val || *err) {
1920 error_setg(errp, "bad numerical value %s", val);
1921 return;
1922 }
1923 if (numvalue < 0x80000000) {
1924 error_report("xlevel value shall always be >= 0x80000000"
1925 ", fixup will be removed in future versions");
1926 numvalue += 0x80000000;
1927 }
1928 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1929 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1930 } else if (!strcmp(featurestr, "tsc-freq")) {
1931 int64_t tsc_freq;
1932 char *err;
1933 char num[32];
1934
1935 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1936 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1937 if (tsc_freq < 0 || *err) {
1938 error_setg(errp, "bad numerical value %s", val);
1939 return;
1940 }
1941 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1942 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1943 &local_err);
1944 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1945 char *err;
1946 const int min = 0xFFF;
1947 char num[32];
1948 numvalue = strtoul(val, &err, 0);
1949 if (!*val || *err) {
1950 error_setg(errp, "bad numerical value %s", val);
1951 return;
1952 }
1953 if (numvalue < min) {
1954 error_report("hv-spinlocks value shall always be >= 0x%x"
1955 ", fixup will be removed in future versions",
1956 min);
1957 numvalue = min;
1958 }
1959 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1960 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1961 } else {
1962 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1963 }
1964 } else {
1965 feat2prop(featurestr);
1966 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1967 }
1968 if (local_err) {
1969 error_propagate(errp, local_err);
1970 return;
1971 }
1972 featurestr = strtok(NULL, ",");
1973 }
1974
1975 if (cpu->host_features) {
1976 for (w = 0; w < FEATURE_WORDS; w++) {
1977 env->features[w] =
1978 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1979 }
1980 }
1981
1982 for (w = 0; w < FEATURE_WORDS; w++) {
1983 env->features[w] |= plus_features[w];
1984 env->features[w] &= ~minus_features[w];
1985 }
1986 }
1987
1988 /* Print all cpuid feature names in featureset
1989 */
1990 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1991 {
1992 int bit;
1993 bool first = true;
1994
1995 for (bit = 0; bit < 32; bit++) {
1996 if (featureset[bit]) {
1997 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1998 first = false;
1999 }
2000 }
2001 }
2002
2003 /* generate CPU information. */
2004 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2005 {
2006 X86CPUDefinition *def;
2007 char buf[256];
2008 int i;
2009
2010 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2011 def = &builtin_x86_defs[i];
2012 snprintf(buf, sizeof(buf), "%s", def->name);
2013 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2014 }
2015 #ifdef CONFIG_KVM
2016 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2017 "KVM processor with all supported host features "
2018 "(only available in KVM mode)");
2019 #endif
2020
2021 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2022 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2023 FeatureWordInfo *fw = &feature_word_info[i];
2024
2025 (*cpu_fprintf)(f, " ");
2026 listflags(f, cpu_fprintf, fw->feat_names);
2027 (*cpu_fprintf)(f, "\n");
2028 }
2029 }
2030
2031 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2032 {
2033 CpuDefinitionInfoList *cpu_list = NULL;
2034 X86CPUDefinition *def;
2035 int i;
2036
2037 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2038 CpuDefinitionInfoList *entry;
2039 CpuDefinitionInfo *info;
2040
2041 def = &builtin_x86_defs[i];
2042 info = g_malloc0(sizeof(*info));
2043 info->name = g_strdup(def->name);
2044
2045 entry = g_malloc0(sizeof(*entry));
2046 entry->value = info;
2047 entry->next = cpu_list;
2048 cpu_list = entry;
2049 }
2050
2051 return cpu_list;
2052 }
2053
2054 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2055 bool migratable_only)
2056 {
2057 FeatureWordInfo *wi = &feature_word_info[w];
2058 uint32_t r;
2059
2060 if (kvm_enabled()) {
2061 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2062 wi->cpuid_ecx,
2063 wi->cpuid_reg);
2064 } else if (tcg_enabled()) {
2065 r = wi->tcg_features;
2066 } else {
2067 return ~0;
2068 }
2069 if (migratable_only) {
2070 r &= x86_cpu_get_migratable_flags(w);
2071 }
2072 return r;
2073 }
2074
2075 /*
2076 * Filters CPU feature words based on host availability of each feature.
2077 *
2078 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2079 */
2080 static int x86_cpu_filter_features(X86CPU *cpu)
2081 {
2082 CPUX86State *env = &cpu->env;
2083 FeatureWord w;
2084 int rv = 0;
2085
2086 for (w = 0; w < FEATURE_WORDS; w++) {
2087 uint32_t host_feat =
2088 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2089 uint32_t requested_features = env->features[w];
2090 env->features[w] &= host_feat;
2091 cpu->filtered_features[w] = requested_features & ~env->features[w];
2092 if (cpu->filtered_features[w]) {
2093 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2094 report_unavailable_features(w, cpu->filtered_features[w]);
2095 }
2096 rv = 1;
2097 }
2098 }
2099
2100 return rv;
2101 }
2102
2103 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2104 {
2105 PropValue *pv;
2106 for (pv = props; pv->prop; pv++) {
2107 if (!pv->value) {
2108 continue;
2109 }
2110 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2111 &error_abort);
2112 }
2113 }
2114
/* Load data from X86CPUDefinition
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /* Scalar CPUID fields go through QOM properties so the usual
     * property setters/validation run. */
    object_property_set_int(OBJECT(cpu), def->level, "level", errp);
    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Feature words are copied directly, not via properties */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        /* Without an in-kernel irqchip, x2apic is forced off */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    /* Always advertise that the guest runs under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        /* Query the real host vendor string via CPUID leaf 0 */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2164
2165 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2166 {
2167 X86CPU *cpu = NULL;
2168 X86CPUClass *xcc;
2169 ObjectClass *oc;
2170 gchar **model_pieces;
2171 char *name, *features;
2172 Error *error = NULL;
2173
2174 model_pieces = g_strsplit(cpu_model, ",", 2);
2175 if (!model_pieces[0]) {
2176 error_setg(&error, "Invalid/empty CPU model name");
2177 goto out;
2178 }
2179 name = model_pieces[0];
2180 features = model_pieces[1];
2181
2182 oc = x86_cpu_class_by_name(name);
2183 if (oc == NULL) {
2184 error_setg(&error, "Unable to find CPU definition: %s", name);
2185 goto out;
2186 }
2187 xcc = X86_CPU_CLASS(oc);
2188
2189 if (xcc->kvm_required && !kvm_enabled()) {
2190 error_setg(&error, "CPU model '%s' requires KVM", name);
2191 goto out;
2192 }
2193
2194 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2195
2196 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2197 if (error) {
2198 goto out;
2199 }
2200
2201 out:
2202 if (error != NULL) {
2203 error_propagate(errp, error);
2204 if (cpu) {
2205 object_unref(OBJECT(cpu));
2206 cpu = NULL;
2207 }
2208 }
2209 g_strfreev(model_pieces);
2210 return cpu;
2211 }
2212
2213 X86CPU *cpu_x86_init(const char *cpu_model)
2214 {
2215 Error *error = NULL;
2216 X86CPU *cpu;
2217
2218 cpu = cpu_x86_create(cpu_model, &error);
2219 if (error) {
2220 goto out;
2221 }
2222
2223 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2224
2225 out:
2226 if (error) {
2227 error_report_err(error);
2228 if (cpu != NULL) {
2229 object_unref(OBJECT(cpu));
2230 cpu = NULL;
2231 }
2232 }
2233 return cpu;
2234 }
2235
2236 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2237 {
2238 X86CPUDefinition *cpudef = data;
2239 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2240
2241 xcc->cpu_def = cpudef;
2242 }
2243
2244 static void x86_register_cpudef_type(X86CPUDefinition *def)
2245 {
2246 char *typename = x86_cpu_type_name(def->name);
2247 TypeInfo ti = {
2248 .name = typename,
2249 .parent = TYPE_X86_CPU,
2250 .class_init = x86_cpu_cpudef_class_init,
2251 .class_data = def,
2252 };
2253
2254 type_register(&ti);
2255 g_free(typename);
2256 }
2257
2258 #if !defined(CONFIG_USER_ONLY)
2259
void cpu_clear_apic_feature(CPUX86State *env)
{
    /* Remove the APIC bit from the advertised CPUID[1].EDX feature word */
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2264
2265 #endif /* !CONFIG_USER_ONLY */
2266
2267 /* Initialize list of CPU models, filling some non-static fields if necessary
2268 */
2269 void x86_cpudef_setup(void)
2270 {
2271 int i, j;
2272 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2273
2274 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2275 X86CPUDefinition *def = &builtin_x86_defs[i];
2276
2277 /* Look for specific "cpudef" models that */
2278 /* have the QEMU version in .model_id */
2279 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2280 if (strcmp(model_with_versions[j], def->name) == 0) {
2281 pstrcpy(def->model_id, sizeof(def->model_id),
2282 "QEMU Virtual CPU version ");
2283 pstrcat(def->model_id, sizeof(def->model_id),
2284 qemu_hw_version());
2285 break;
2286 }
2287 }
2288 }
2289 }
2290
/* Compute the CPUID output registers for leaf @index / sub-leaf @count,
 * based on the configured feature words and topology of @env's CPU.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Maximum basic leaf and vendor ID string */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version info, initial APIC ID and basic feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE, so it is computed dynamically */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        *ecx = 0;
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR << 8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE, so it is computed dynamically */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            /* Pass through whatever PMU capabilities KVM supports */
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xD: {
        KVMState *s = cs->kvm_state;
        uint64_t ena_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        /* Leaf is all-zero unless XSAVE is advertised at all */
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }
        /* ena_mask: XSAVE state components usable by the guest
         * (EDX:EAX of host CPUID[0xD].0 under KVM, everything under TCG) */
        if (kvm_enabled()) {
            ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
            ena_mask <<= 32;
            ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
        } else {
            ena_mask = -1;
        }

        if (count == 0) {
            /* Sub-leaf 0: supported state bitmap and area sizes.
             * 0x240 = legacy FPU/SSE + XSAVE header size. */
            *ecx = 0x240;
            for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
                const ExtSaveArea *esa = &x86_ext_save_areas[i];
                if ((env->features[esa->feature] & esa->bits) == esa->bits
                    && ((ena_mask >> i) & 1) != 0) {
                    if (i < 32) {
                        *eax |= 1u << i;
                    } else {
                        *edx |= 1u << (i - 32);
                    }
                    *ecx = MAX(*ecx, esa->offset + esa->size);
                }
            }
            *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
            *ebx = *ecx;
        } else if (count == 1) {
            /* Sub-leaf 1: XSAVEOPT/XSAVEC/XGETBV1/XSAVES flags */
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Sub-leaves >= 2: size/offset of each enabled component */
            const ExtSaveArea *esa = &x86_ext_save_areas[count];
            if ((env->features[esa->feature] & esa->bits) == esa->bits
                && ((ena_mask >> count) & 1) != 0) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        /* Maximum extended leaf and vendor ID string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* Extended version info and feature flags */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        *edx = ((L3_SIZE_KB/512) << 18) | \
               (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
               (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        break;
    case 0x80000007:
        /* Advanced Power Management (invariant TSC lives here) */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
        /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
                *eax = 0x00000024; /* 36 bits physical */
            } else {
                *eax = 0x00000020; /* 32 bits physical */
            }
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM leaf: only populated when SVM is advertised */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        /* Centaur/VIA: maximum supported 0xC000_xxxx leaf */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2652
/* CPUClass::reset() */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero everything placed before cpuid_level in CPUX86State; the
     * CPUID configuration fields after it must survive reset. */
    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 power-on value: CD/NW/ET set, paging and protection off */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS starts at the reset vector base 0xffff0000, all other segments
     * at base 0, all with 64KiB real-mode limits */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((env->features[esa->feature] & esa->bits) == esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for INIT/SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2779
2780 #ifndef CONFIG_USER_ONLY
2781 bool cpu_is_bsp(X86CPU *cpu)
2782 {
2783 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2784 }
2785
2786 /* TODO: remove me, when reset over QOM tree is implemented */
2787 static void x86_cpu_machine_reset_cb(void *opaque)
2788 {
2789 X86CPU *cpu = opaque;
2790 cpu_reset(CPU(cpu));
2791 }
2792 #endif
2793
2794 static void mce_init(X86CPU *cpu)
2795 {
2796 CPUX86State *cenv = &cpu->env;
2797 unsigned int bank;
2798
2799 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2800 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2801 (CPUID_MCE | CPUID_MCA)) {
2802 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2803 cenv->mcg_ctl = ~(uint64_t)0;
2804 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2805 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2806 }
2807 }
2808 }
2809
2810 #ifndef CONFIG_USER_ONLY
2811 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2812 {
2813 APICCommonState *apic;
2814 const char *apic_type = "apic";
2815
2816 if (kvm_apic_in_kernel()) {
2817 apic_type = "kvm-apic";
2818 } else if (xen_enabled()) {
2819 apic_type = "xen-apic";
2820 }
2821
2822 cpu->apic_state = DEVICE(object_new(apic_type));
2823
2824 object_property_add_child(OBJECT(cpu), "apic",
2825 OBJECT(cpu->apic_state), NULL);
2826 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2827 /* TODO: convert to link<> */
2828 apic = APIC_COMMON(cpu->apic_state);
2829 apic->cpu = cpu;
2830 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2831 }
2832
2833 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2834 {
2835 APICCommonState *apic;
2836 static bool apic_mmio_map_once;
2837
2838 if (cpu->apic_state == NULL) {
2839 return;
2840 }
2841 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2842 errp);
2843
2844 /* Map APIC MMIO area */
2845 apic = APIC_COMMON(cpu->apic_state);
2846 if (!apic_mmio_map_once) {
2847 memory_region_add_subregion_overlap(get_system_memory(),
2848 apic->apicbase &
2849 MSR_IA32_APICBASE_BASE,
2850 &apic->io_memory,
2851 0x1000);
2852 apic_mmio_map_once = true;
2853 }
2854 }
2855
2856 static void x86_cpu_machine_done(Notifier *n, void *unused)
2857 {
2858 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2859 MemoryRegion *smram =
2860 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2861
2862 if (smram) {
2863 cpu->smram = g_new(MemoryRegion, 1);
2864 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2865 smram, 0, 1ull << 32);
2866 memory_region_set_enabled(cpu->smram, false);
2867 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2868 }
2869 }
2870 #else
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    /* No APIC device model in user-mode emulation; nothing to realize. */
}
2874 #endif
2875
2876
/* Vendor checks: compare all three CPUID[0] vendor-string registers
 * against the Intel/AMD constants. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU: validate configuration, filter
 * features against accelerator support, create the APIC, wire up the
 * CPU address space (TCG) and start the vCPU. */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (cpu->apic_id < 0) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /* CPUID[7] features are only visible if cpuid_level reaches 7 */
    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    /* With enforce_cpuid, any feature the accelerator cannot provide
     * is a hard error instead of being silently dropped */
    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }


#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed if the CPU advertises one or if SMP is used */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
2990
/* State for a QOM boolean property that controls bit(s) of a feature word. */
typedef struct BitProperty {
    uint32_t *ptr;  /* feature word the bit(s) live in */
    uint32_t mask;  /* bit(s) this property controls */
} BitProperty;
2995
2996 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
2997 void *opaque, Error **errp)
2998 {
2999 BitProperty *fp = opaque;
3000 bool value = (*fp->ptr & fp->mask) == fp->mask;
3001 visit_type_bool(v, name, &value, errp);
3002 }
3003
3004 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3005 void *opaque, Error **errp)
3006 {
3007 DeviceState *dev = DEVICE(obj);
3008 BitProperty *fp = opaque;
3009 Error *local_err = NULL;
3010 bool value;
3011
3012 if (dev->realized) {
3013 qdev_prop_set_after_realize(dev, name, errp);
3014 return;
3015 }
3016
3017 visit_type_bool(v, name, &value, &local_err);
3018 if (local_err) {
3019 error_propagate(errp, local_err);
3020 return;
3021 }
3022
3023 if (value) {
3024 *fp->ptr |= fp->mask;
3025 } else {
3026 *fp->ptr &= ~fp->mask;
3027 }
3028 }
3029
3030 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3031 void *opaque)
3032 {
3033 BitProperty *prop = opaque;
3034 g_free(prop);
3035 }
3036
3037 /* Register a boolean property to get/set a single bit in a uint32_t field.
3038 *
3039 * The same property name can be registered multiple times to make it affect
3040 * multiple bits in the same FeatureWord. In that case, the getter will return
3041 * true only if all bits are set.
3042 */
3043 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3044 const char *prop_name,
3045 uint32_t *field,
3046 int bitnr)
3047 {
3048 BitProperty *fp;
3049 ObjectProperty *op;
3050 uint32_t mask = (1UL << bitnr);
3051
3052 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3053 if (op) {
3054 fp = op->opaque;
3055 assert(fp->ptr == field);
3056 fp->mask |= mask;
3057 } else {
3058 fp = g_new0(BitProperty, 1);
3059 fp->ptr = field;
3060 fp->mask = mask;
3061 object_property_add(OBJECT(cpu), prop_name, "bool",
3062 x86_cpu_get_bit_prop,
3063 x86_cpu_set_bit_prop,
3064 x86_cpu_release_bit_prop, fp, &error_abort);
3065 }
3066 }
3067
3068 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3069 FeatureWord w,
3070 int bitnr)
3071 {
3072 Object *obj = OBJECT(cpu);
3073 int i;
3074 char **names;
3075 FeatureWordInfo *fi = &feature_word_info[w];
3076
3077 if (!fi->feat_names) {
3078 return;
3079 }
3080 if (!fi->feat_names[bitnr]) {
3081 return;
3082 }
3083
3084 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3085
3086 feat2prop(names[0]);
3087 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3088
3089 for (i = 1; names[i]; i++) {
3090 feat2prop(names[i]);
3091 object_property_add_alias(obj, names[i], obj, names[0],
3092 &error_abort);
3093 }
3094
3095 g_strfreev(names);
3096 }
3097
/* QOM instance_init for TYPE_X86_CPU: wires the CPUState to its env,
 * registers the per-CPU QOM properties (CPUID version fields, vendor,
 * model-id, tsc-frequency, apic-id, feature-word introspection, and one
 * boolean property per named feature bit), loads the class's CPU model
 * definition, and lazily runs the one-time TCG table setup. */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    static int inited;  /* guards the one-time tcg_x86_init() call */

    cs->env_ptr = env;
    cpu_exec_init(cs, &error_abort);

    /* Accessors for the CPUID version fields and identification strings */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "apic-id", "int",
                        x86_cpuid_get_apic_id,
                        x86_cpuid_set_apic_id, NULL, NULL, NULL);
    /* Read-only views of the feature words actually in use and of the
     * features that were requested but filtered out */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

#ifndef CONFIG_USER_ONLY
    /* Any code creating new X86CPU objects have to set apic-id explicitly */
    cpu->apic_id = -1;
#endif

    /* One boolean property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        tcg_x86_init();
    }
}
3161
3162 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3163 {
3164 X86CPU *cpu = X86_CPU(cs);
3165
3166 return cpu->apic_id;
3167 }
3168
3169 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3170 {
3171 X86CPU *cpu = X86_CPU(cs);
3172
3173 return cpu->env.cr[0] & CR0_PG_MASK;
3174 }
3175
3176 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3177 {
3178 X86CPU *cpu = X86_CPU(cs);
3179
3180 cpu->env.eip = value;
3181 }
3182
3183 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3184 {
3185 X86CPU *cpu = X86_CPU(cs);
3186
3187 cpu->env.eip = tb->pc - tb->cs_base;
3188 }
3189
3190 static bool x86_cpu_has_work(CPUState *cs)
3191 {
3192 X86CPU *cpu = X86_CPU(cs);
3193 CPUX86State *env = &cpu->env;
3194
3195 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3196 CPU_INTERRUPT_POLL)) &&
3197 (env->eflags & IF_MASK)) ||
3198 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3199 CPU_INTERRUPT_INIT |
3200 CPU_INTERRUPT_SIPI |
3201 CPU_INTERRUPT_MCE)) ||
3202 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3203 !(env->hflags & HF_SMM_MASK));
3204 }
3205
/* qdev properties common to every X86CPU subclass.  Installed on the
 * DeviceClass in x86_cpu_common_class_init(). */
static Property x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* hv-spinlocks uses a custom PropertyInfo (parses "never"/retry count) */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    /* Hyper-V enlightenments exposed to Windows guests */
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* check/enforce control how host-unavailable features are handled */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    /* CPUID level overrides (0 = use the model's defaults) */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_END_OF_LIST()
};
3227
/* class_init for TYPE_X86_CPU: chains the device realize/reset handlers
 * through X86CPUClass and fills in the CPUClass hook table. */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Save the parent handlers so the x86 implementations can chain to them */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System-emulation-only hooks: memory mapping, ELF core notes,
     * migration state */
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
3278
/* Abstract base type for all x86 CPU models; concrete per-model subtypes
 * are registered from x86_cpu_register_types(). */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,  /* only model-specific subclasses are instantiable */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3288
3289 static void x86_cpu_register_types(void)
3290 {
3291 int i;
3292
3293 type_register_static(&x86_cpu_type_info);
3294 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3295 x86_register_cpudef_type(&builtin_x86_defs[i]);
3296 }
3297 #ifdef CONFIG_KVM
3298 type_register_static(&host_x86_cpu_type_info);
3299 #endif
3300 }
3301
3302 type_init(x86_cpu_register_types)