]> git.proxmox.com Git - mirror_qemu.git/blob - target-i386/cpu.c
target-i386: Avoid using locals outside their scope
[mirror_qemu.git] / target-i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
53 /* Cache topology CPUID constants: */
54
55 /* CPUID Leaf 2 Descriptors */
56
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60
61
62 /* CPUID Leaf 4 constants: */
63
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
68
69 #define CPUID_4_LEVEL(l) ((l) << 5)
70
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
73
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
78
79 #define ASSOC_FULL 0xFF
80
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
94
95
96 /* Definitions of the hardcoded cache entries we expose: */
97
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
109
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
121
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
133
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
139
140 /* TLB definitions: */
141
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
146
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
151
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
156
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
161
162
163
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
166 {
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
172 }
173 dst[CPUID_VENDOR_SZ] = '\0';
174 }
175
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * between feature naming conventions, aliases may be added.
 */
/* Names for CPUID[1].EDX bits.  In all of these tables the array index is
 * the bit number; NULL means the bit has no name known to QEMU; a "a|b"
 * entry declares '|'-separated alias names for the same bit. */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* Names for CPUID[1].ECX bits. */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
/* Names for CPUID[0x80000001].EDX bits. */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
/* Names for CPUID[0x80000001].ECX bits. */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
225
/* Names for CPUID[0xC0000001].EDX bits (VIA/Centaur leaf). */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* Names for CPUID[KVM_CPUID_FEATURES].EAX bits.
 * Note "kvmclock" deliberately appears twice (bits 0 and 3): both
 * clocksource feature bits share the same user-visible flag name
 * (NOTE(review): presumably KVM_FEATURE_CLOCKSOURCE and
 * KVM_FEATURE_CLOCKSOURCE2 — confirm against linux/kvm_para.h). */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* Names for CPUID[0x8000000A].EDX (SVM capability) bits. */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
258
/* Names for CPUID[EAX=7,ECX=0].EBX bits. */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
    "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};

/* Names for CPUID[EAX=7,ECX=0].ECX bits. */
static const char *cpuid_7_0_ecx_feature_name[] = {
    NULL, NULL, NULL, "pku",
    "ospke", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* Names for CPUID[0x80000007].EDX bits. */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* Names for CPUID[EAX=0xD,ECX=1].EAX bits. */
static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* Names for CPUID[6].EAX bits. */
static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
309
310 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
311 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
312 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
313 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
314 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
315 CPUID_PSE36 | CPUID_FXSR)
316 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
317 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
318 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
319 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
320 CPUID_PAE | CPUID_SEP | CPUID_APIC)
321
322 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
323 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
324 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
325 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
326 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
327 /* partly implemented:
328 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
329 /* missing:
330 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
331 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
332 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
333 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
334 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
335 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
336 /* missing:
337 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
338 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
339 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
340 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
341 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
342
343 #ifdef TARGET_X86_64
344 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
345 #else
346 #define TCG_EXT2_X86_64_FEATURES 0
347 #endif
348
349 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
350 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
351 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
352 TCG_EXT2_X86_64_FEATURES)
353 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
354 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
355 #define TCG_EXT4_FEATURES 0
356 #define TCG_SVM_FEATURES 0
357 #define TCG_KVM_FEATURES 0
358 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
359 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
360 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
361 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
362 CPUID_7_0_EBX_ERMS)
363 /* missing:
364 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
365 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
366 CPUID_7_0_EBX_RDSEED */
367 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
368 #define TCG_APM_FEATURES 0
369 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
370 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
371 /* missing:
372 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
373
typedef struct FeatureWordInfo {
    const char **feat_names; /* Bit names, indexed by bit number (NULL = unnamed) */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;

/* Per-FeatureWord metadata: which CPUID leaf/register holds the word,
 * its bit names, and which bits TCG implements / cannot migrate. */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = ext_feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    [FEAT_8000_0001_EDX] = {
        .feat_names = ext2_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = ext3_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = ext4_feature_name,
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = kvm_feature_name,
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_SVM] = {
        .feat_names = svm_feature_name,
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = cpuid_7_0_ebx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = cpuid_7_0_ecx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = cpuid_apm_edx_feature_name,
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = cpuid_xsave_feature_name,
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = cpuid_6_feature_name,
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
};
454
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Expands to the initializer mapping R_<reg> to its name and QAPI enum. */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* Table of the 32-bit x86 GPRs, indexed by the R_* constants. */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
475
/* XSAVE state components, indexed by XSTATE_*_BIT: for each component,
 * the CPUID feature flag that makes it available, plus its offset and
 * size within X86XSaveArea. */
const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
506
507 const char *get_register_name_32(unsigned int reg)
508 {
509 if (reg >= CPU_NB_REGS32) {
510 return NULL;
511 }
512 return x86_reg_info_32[reg].name;
513 }
514
515 /*
516 * Returns the set of feature flags that are supported and migratable by
517 * QEMU, for a given FeatureWord.
518 */
519 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
520 {
521 FeatureWordInfo *wi = &feature_word_info[w];
522 uint32_t r = 0;
523 int i;
524
525 for (i = 0; i < 32; i++) {
526 uint32_t f = 1U << i;
527 /* If the feature name is unknown, it is not supported by QEMU yet */
528 if (!wi->feat_names[i]) {
529 continue;
530 }
531 /* Skip features known to QEMU, but explicitly marked as unmigratable */
532 if (wi->unmigratable_flags & f) {
533 continue;
534 }
535 r |= f;
536 }
537 return r;
538 }
539
540 void host_cpuid(uint32_t function, uint32_t count,
541 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
542 {
543 uint32_t vec[4];
544
545 #ifdef __x86_64__
546 asm volatile("cpuid"
547 : "=a"(vec[0]), "=b"(vec[1]),
548 "=c"(vec[2]), "=d"(vec[3])
549 : "0"(function), "c"(count) : "cc");
550 #elif defined(__i386__)
551 asm volatile("pusha \n\t"
552 "cpuid \n\t"
553 "mov %%eax, 0(%2) \n\t"
554 "mov %%ebx, 4(%2) \n\t"
555 "mov %%ecx, 8(%2) \n\t"
556 "mov %%edx, 12(%2) \n\t"
557 "popa"
558 : : "a"(function), "c"(count), "S"(vec)
559 : "memory", "cc");
560 #else
561 abort();
562 #endif
563
564 if (eax)
565 *eax = vec[0];
566 if (ebx)
567 *ebx = vec[1];
568 if (ecx)
569 *ecx = vec[2];
570 if (edx)
571 *edx = vec[3];
572 }
573
/* True for any non-NUL character outside the printable ASCII range
 * (<= ' ' or > '~'); NUL itself does not count as "white". */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
575
/* Compare the substrings *[s1..e1) and *[s2..e2).  Each end pointer, when
 * non-NULL, marks the first character past its substring; when NULL, the
 * substring runs up to its terminating nul.  Returns 0 when the two
 * substrings match, non-zero reflecting their ordering otherwise.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    while (*s1 && *s2 && *s1 == *s2) {
        s1++;
        s2++;
        if (s1 == e1 && s2 == e2) {
            return 0;       /* both substrings fully consumed: equal */
        }
        if (s1 == e1) {
            return *s2;     /* s1 exhausted first */
        }
        if (s2 == e2) {
            return *s1;     /* s2 exhausted first */
        }
    }
    /* nul reached on either side, or mismatching characters */
    return *s1 - *s2;
}
596
/* Match *[s..e) against @altstr, which is either a plain string or several
 * '|'-delimited (possibly empty) alternatives, tried left to right.
 * Returns 0 on a match, non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *alt_start = altstr;
    const char *alt_end = altstr;

    for (;;) {
        /* Advance alt_end to the end of the current alternative. */
        while (*alt_end && *alt_end != '|') {
            alt_end++;
        }
        if (alt_start == alt_end) {
            /* An empty alternative matches only the empty flag string. */
            if (!*s) {
                return 0;
            }
        } else if (!sstrcmp(s, e, alt_start, alt_end)) {
            return 0;
        }
        if (!*alt_end) {
            return 1;   /* no more alternatives to try */
        }
        alt_end++;
        alt_start = alt_end;
    }
}
617
/* Search @featureset (a 32-entry bit-name table) for the flag named by
 * *[s..e).  Sets the corresponding bit(s) in *pval and returns true when
 * found; returns false otherwise.
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    bool hit = false;
    int bit;

    for (bit = 0; bit < 32; bit++) {
        const char *name = featureset[bit];
        if (name && !altcmp(s, e, name)) {
            *pval |= 1U << bit;
            hit = true;
        }
    }
    return hit;
}
636
637 static void add_flagname_to_bitmaps(const char *flagname,
638 FeatureWordArray words,
639 Error **errp)
640 {
641 FeatureWord w;
642 for (w = 0; w < FEATURE_WORDS; w++) {
643 FeatureWordInfo *wi = &feature_word_info[w];
644 if (wi->feat_names &&
645 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
646 break;
647 }
648 }
649 if (w == FEATURE_WORDS) {
650 error_setg(errp, "CPU feature %s not found", flagname);
651 }
652 }
653
654 /* CPU class name definitions: */
655
656 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
657 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
658
/* Return the QOM type name for a given CPU model name
 * (model name + the "-" TYPE_X86_CPU suffix).
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
666
667 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
668 {
669 ObjectClass *oc;
670 char *typename;
671
672 if (cpu_model == NULL) {
673 return NULL;
674 }
675
676 typename = x86_cpu_type_name(cpu_model);
677 oc = object_class_by_name(typename);
678 g_free(typename);
679 return oc;
680 }
681
682 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
683 {
684 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
685 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
686 return g_strndup(class_name,
687 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
688 }
689
/* Static definition of a built-in CPU model. */
struct X86CPUDefinition {
    const char *name;     /* CPU model name */
    uint32_t level;       /* maximum basic CPUID leaf */
    uint32_t xlevel;      /* maximum extended CPUID leaf (0x8000xxxx) */
    uint32_t xlevel2;     /* presumably the 0xC000xxxx-range counterpart of
                           * xlevel (cf. FEAT_C000_0001_EDX) — confirm */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features; /* feature bits, indexed by FeatureWord */
    char model_id[48];    /* human-readable model identification string */
};
703
704 static X86CPUDefinition builtin_x86_defs[] = {
705 {
706 .name = "qemu64",
707 .level = 0xd,
708 .vendor = CPUID_VENDOR_AMD,
709 .family = 6,
710 .model = 6,
711 .stepping = 3,
712 .features[FEAT_1_EDX] =
713 PPRO_FEATURES |
714 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
715 CPUID_PSE36,
716 .features[FEAT_1_ECX] =
717 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
718 .features[FEAT_8000_0001_EDX] =
719 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
720 .features[FEAT_8000_0001_ECX] =
721 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
722 .xlevel = 0x8000000A,
723 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
724 },
725 {
726 .name = "phenom",
727 .level = 5,
728 .vendor = CPUID_VENDOR_AMD,
729 .family = 16,
730 .model = 2,
731 .stepping = 3,
732 /* Missing: CPUID_HT */
733 .features[FEAT_1_EDX] =
734 PPRO_FEATURES |
735 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
736 CPUID_PSE36 | CPUID_VME,
737 .features[FEAT_1_ECX] =
738 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
739 CPUID_EXT_POPCNT,
740 .features[FEAT_8000_0001_EDX] =
741 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
742 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
743 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
744 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
745 CPUID_EXT3_CR8LEG,
746 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
747 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
748 .features[FEAT_8000_0001_ECX] =
749 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
750 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
751 /* Missing: CPUID_SVM_LBRV */
752 .features[FEAT_SVM] =
753 CPUID_SVM_NPT,
754 .xlevel = 0x8000001A,
755 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
756 },
757 {
758 .name = "core2duo",
759 .level = 10,
760 .vendor = CPUID_VENDOR_INTEL,
761 .family = 6,
762 .model = 15,
763 .stepping = 11,
764 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
765 .features[FEAT_1_EDX] =
766 PPRO_FEATURES |
767 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
768 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
769 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
770 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
771 .features[FEAT_1_ECX] =
772 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
773 CPUID_EXT_CX16,
774 .features[FEAT_8000_0001_EDX] =
775 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
776 .features[FEAT_8000_0001_ECX] =
777 CPUID_EXT3_LAHF_LM,
778 .xlevel = 0x80000008,
779 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
780 },
781 {
782 .name = "kvm64",
783 .level = 0xd,
784 .vendor = CPUID_VENDOR_INTEL,
785 .family = 15,
786 .model = 6,
787 .stepping = 1,
788 /* Missing: CPUID_HT */
789 .features[FEAT_1_EDX] =
790 PPRO_FEATURES | CPUID_VME |
791 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
792 CPUID_PSE36,
793 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
794 .features[FEAT_1_ECX] =
795 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
796 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
797 .features[FEAT_8000_0001_EDX] =
798 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
799 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
800 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
801 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
802 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
803 .features[FEAT_8000_0001_ECX] =
804 0,
805 .xlevel = 0x80000008,
806 .model_id = "Common KVM processor"
807 },
808 {
809 .name = "qemu32",
810 .level = 4,
811 .vendor = CPUID_VENDOR_INTEL,
812 .family = 6,
813 .model = 6,
814 .stepping = 3,
815 .features[FEAT_1_EDX] =
816 PPRO_FEATURES,
817 .features[FEAT_1_ECX] =
818 CPUID_EXT_SSE3,
819 .xlevel = 0x80000004,
820 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
821 },
822 {
823 .name = "kvm32",
824 .level = 5,
825 .vendor = CPUID_VENDOR_INTEL,
826 .family = 15,
827 .model = 6,
828 .stepping = 1,
829 .features[FEAT_1_EDX] =
830 PPRO_FEATURES | CPUID_VME |
831 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
832 .features[FEAT_1_ECX] =
833 CPUID_EXT_SSE3,
834 .features[FEAT_8000_0001_ECX] =
835 0,
836 .xlevel = 0x80000008,
837 .model_id = "Common 32-bit KVM processor"
838 },
839 {
840 .name = "coreduo",
841 .level = 10,
842 .vendor = CPUID_VENDOR_INTEL,
843 .family = 6,
844 .model = 14,
845 .stepping = 8,
846 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
847 .features[FEAT_1_EDX] =
848 PPRO_FEATURES | CPUID_VME |
849 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
850 CPUID_SS,
851 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
852 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
853 .features[FEAT_1_ECX] =
854 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
855 .features[FEAT_8000_0001_EDX] =
856 CPUID_EXT2_NX,
857 .xlevel = 0x80000008,
858 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
859 },
860 {
861 .name = "486",
862 .level = 1,
863 .vendor = CPUID_VENDOR_INTEL,
864 .family = 4,
865 .model = 8,
866 .stepping = 0,
867 .features[FEAT_1_EDX] =
868 I486_FEATURES,
869 .xlevel = 0,
870 },
871 {
872 .name = "pentium",
873 .level = 1,
874 .vendor = CPUID_VENDOR_INTEL,
875 .family = 5,
876 .model = 4,
877 .stepping = 3,
878 .features[FEAT_1_EDX] =
879 PENTIUM_FEATURES,
880 .xlevel = 0,
881 },
882 {
883 .name = "pentium2",
884 .level = 2,
885 .vendor = CPUID_VENDOR_INTEL,
886 .family = 6,
887 .model = 5,
888 .stepping = 2,
889 .features[FEAT_1_EDX] =
890 PENTIUM2_FEATURES,
891 .xlevel = 0,
892 },
893 {
894 .name = "pentium3",
895 .level = 3,
896 .vendor = CPUID_VENDOR_INTEL,
897 .family = 6,
898 .model = 7,
899 .stepping = 3,
900 .features[FEAT_1_EDX] =
901 PENTIUM3_FEATURES,
902 .xlevel = 0,
903 },
904 {
905 .name = "athlon",
906 .level = 2,
907 .vendor = CPUID_VENDOR_AMD,
908 .family = 6,
909 .model = 2,
910 .stepping = 3,
911 .features[FEAT_1_EDX] =
912 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
913 CPUID_MCA,
914 .features[FEAT_8000_0001_EDX] =
915 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
916 .xlevel = 0x80000008,
917 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
918 },
919 {
920 .name = "n270",
921 .level = 10,
922 .vendor = CPUID_VENDOR_INTEL,
923 .family = 6,
924 .model = 28,
925 .stepping = 2,
926 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
927 .features[FEAT_1_EDX] =
928 PPRO_FEATURES |
929 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
930 CPUID_ACPI | CPUID_SS,
931 /* Some CPUs got no CPUID_SEP */
932 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
933 * CPUID_EXT_XTPR */
934 .features[FEAT_1_ECX] =
935 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
936 CPUID_EXT_MOVBE,
937 .features[FEAT_8000_0001_EDX] =
938 CPUID_EXT2_NX,
939 .features[FEAT_8000_0001_ECX] =
940 CPUID_EXT3_LAHF_LM,
941 .xlevel = 0x80000008,
942 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
943 },
944 {
945 .name = "Conroe",
946 .level = 10,
947 .vendor = CPUID_VENDOR_INTEL,
948 .family = 6,
949 .model = 15,
950 .stepping = 3,
951 .features[FEAT_1_EDX] =
952 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
953 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
954 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
955 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
956 CPUID_DE | CPUID_FP87,
957 .features[FEAT_1_ECX] =
958 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
959 .features[FEAT_8000_0001_EDX] =
960 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
961 .features[FEAT_8000_0001_ECX] =
962 CPUID_EXT3_LAHF_LM,
963 .xlevel = 0x80000008,
964 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
965 },
966 {
967 .name = "Penryn",
968 .level = 10,
969 .vendor = CPUID_VENDOR_INTEL,
970 .family = 6,
971 .model = 23,
972 .stepping = 3,
973 .features[FEAT_1_EDX] =
974 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
975 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
976 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
977 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
978 CPUID_DE | CPUID_FP87,
979 .features[FEAT_1_ECX] =
980 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
981 CPUID_EXT_SSE3,
982 .features[FEAT_8000_0001_EDX] =
983 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
984 .features[FEAT_8000_0001_ECX] =
985 CPUID_EXT3_LAHF_LM,
986 .xlevel = 0x80000008,
987 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
988 },
989 {
990 .name = "Nehalem",
991 .level = 11,
992 .vendor = CPUID_VENDOR_INTEL,
993 .family = 6,
994 .model = 26,
995 .stepping = 3,
996 .features[FEAT_1_EDX] =
997 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
998 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
999 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1000 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1001 CPUID_DE | CPUID_FP87,
1002 .features[FEAT_1_ECX] =
1003 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1004 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1005 .features[FEAT_8000_0001_EDX] =
1006 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1007 .features[FEAT_8000_0001_ECX] =
1008 CPUID_EXT3_LAHF_LM,
1009 .xlevel = 0x80000008,
1010 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1011 },
1012 {
1013 .name = "Westmere",
1014 .level = 11,
1015 .vendor = CPUID_VENDOR_INTEL,
1016 .family = 6,
1017 .model = 44,
1018 .stepping = 1,
1019 .features[FEAT_1_EDX] =
1020 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1021 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1022 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1023 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1024 CPUID_DE | CPUID_FP87,
1025 .features[FEAT_1_ECX] =
1026 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1027 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1028 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1029 .features[FEAT_8000_0001_EDX] =
1030 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1031 .features[FEAT_8000_0001_ECX] =
1032 CPUID_EXT3_LAHF_LM,
1033 .features[FEAT_6_EAX] =
1034 CPUID_6_EAX_ARAT,
1035 .xlevel = 0x80000008,
1036 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1037 },
1038 {
1039 .name = "SandyBridge",
1040 .level = 0xd,
1041 .vendor = CPUID_VENDOR_INTEL,
1042 .family = 6,
1043 .model = 42,
1044 .stepping = 1,
1045 .features[FEAT_1_EDX] =
1046 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1047 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1048 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1049 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1050 CPUID_DE | CPUID_FP87,
1051 .features[FEAT_1_ECX] =
1052 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1053 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1054 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1055 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1056 CPUID_EXT_SSE3,
1057 .features[FEAT_8000_0001_EDX] =
1058 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1059 CPUID_EXT2_SYSCALL,
1060 .features[FEAT_8000_0001_ECX] =
1061 CPUID_EXT3_LAHF_LM,
1062 .features[FEAT_XSAVE] =
1063 CPUID_XSAVE_XSAVEOPT,
1064 .features[FEAT_6_EAX] =
1065 CPUID_6_EAX_ARAT,
1066 .xlevel = 0x80000008,
1067 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1068 },
1069 {
1070 .name = "IvyBridge",
1071 .level = 0xd,
1072 .vendor = CPUID_VENDOR_INTEL,
1073 .family = 6,
1074 .model = 58,
1075 .stepping = 9,
1076 .features[FEAT_1_EDX] =
1077 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1078 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1079 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1080 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1081 CPUID_DE | CPUID_FP87,
1082 .features[FEAT_1_ECX] =
1083 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1084 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1085 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1086 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1087 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1088 .features[FEAT_7_0_EBX] =
1089 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1090 CPUID_7_0_EBX_ERMS,
1091 .features[FEAT_8000_0001_EDX] =
1092 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1093 CPUID_EXT2_SYSCALL,
1094 .features[FEAT_8000_0001_ECX] =
1095 CPUID_EXT3_LAHF_LM,
1096 .features[FEAT_XSAVE] =
1097 CPUID_XSAVE_XSAVEOPT,
1098 .features[FEAT_6_EAX] =
1099 CPUID_6_EAX_ARAT,
1100 .xlevel = 0x80000008,
1101 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1102 },
1103 {
1104 .name = "Haswell-noTSX",
1105 .level = 0xd,
1106 .vendor = CPUID_VENDOR_INTEL,
1107 .family = 6,
1108 .model = 60,
1109 .stepping = 1,
1110 .features[FEAT_1_EDX] =
1111 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1112 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1113 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1114 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1115 CPUID_DE | CPUID_FP87,
1116 .features[FEAT_1_ECX] =
1117 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1118 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1119 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1120 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1121 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1122 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1123 .features[FEAT_8000_0001_EDX] =
1124 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1125 CPUID_EXT2_SYSCALL,
1126 .features[FEAT_8000_0001_ECX] =
1127 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1128 .features[FEAT_7_0_EBX] =
1129 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1130 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1131 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1132 .features[FEAT_XSAVE] =
1133 CPUID_XSAVE_XSAVEOPT,
1134 .features[FEAT_6_EAX] =
1135 CPUID_6_EAX_ARAT,
1136 .xlevel = 0x80000008,
1137 .model_id = "Intel Core Processor (Haswell, no TSX)",
1138 }, {
1139 .name = "Haswell",
1140 .level = 0xd,
1141 .vendor = CPUID_VENDOR_INTEL,
1142 .family = 6,
1143 .model = 60,
1144 .stepping = 1,
1145 .features[FEAT_1_EDX] =
1146 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1147 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1148 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1149 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1150 CPUID_DE | CPUID_FP87,
1151 .features[FEAT_1_ECX] =
1152 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1153 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1154 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1155 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1156 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1157 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1158 .features[FEAT_8000_0001_EDX] =
1159 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1160 CPUID_EXT2_SYSCALL,
1161 .features[FEAT_8000_0001_ECX] =
1162 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1163 .features[FEAT_7_0_EBX] =
1164 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1165 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1166 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1167 CPUID_7_0_EBX_RTM,
1168 .features[FEAT_XSAVE] =
1169 CPUID_XSAVE_XSAVEOPT,
1170 .features[FEAT_6_EAX] =
1171 CPUID_6_EAX_ARAT,
1172 .xlevel = 0x80000008,
1173 .model_id = "Intel Core Processor (Haswell)",
1174 },
1175 {
1176 .name = "Broadwell-noTSX",
1177 .level = 0xd,
1178 .vendor = CPUID_VENDOR_INTEL,
1179 .family = 6,
1180 .model = 61,
1181 .stepping = 2,
1182 .features[FEAT_1_EDX] =
1183 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1184 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1185 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1186 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1187 CPUID_DE | CPUID_FP87,
1188 .features[FEAT_1_ECX] =
1189 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1190 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1191 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1192 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1193 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1194 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1195 .features[FEAT_8000_0001_EDX] =
1196 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1197 CPUID_EXT2_SYSCALL,
1198 .features[FEAT_8000_0001_ECX] =
1199 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1200 .features[FEAT_7_0_EBX] =
1201 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1202 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1203 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1204 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1205 CPUID_7_0_EBX_SMAP,
1206 .features[FEAT_XSAVE] =
1207 CPUID_XSAVE_XSAVEOPT,
1208 .features[FEAT_6_EAX] =
1209 CPUID_6_EAX_ARAT,
1210 .xlevel = 0x80000008,
1211 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1212 },
1213 {
1214 .name = "Broadwell",
1215 .level = 0xd,
1216 .vendor = CPUID_VENDOR_INTEL,
1217 .family = 6,
1218 .model = 61,
1219 .stepping = 2,
1220 .features[FEAT_1_EDX] =
1221 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1222 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1223 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1224 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1225 CPUID_DE | CPUID_FP87,
1226 .features[FEAT_1_ECX] =
1227 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1228 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1229 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1230 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1231 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1232 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1233 .features[FEAT_8000_0001_EDX] =
1234 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1235 CPUID_EXT2_SYSCALL,
1236 .features[FEAT_8000_0001_ECX] =
1237 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1238 .features[FEAT_7_0_EBX] =
1239 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1240 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1241 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1242 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1243 CPUID_7_0_EBX_SMAP,
1244 .features[FEAT_XSAVE] =
1245 CPUID_XSAVE_XSAVEOPT,
1246 .features[FEAT_6_EAX] =
1247 CPUID_6_EAX_ARAT,
1248 .xlevel = 0x80000008,
1249 .model_id = "Intel Core Processor (Broadwell)",
1250 },
1251 {
1252 .name = "Skylake-Client",
1253 .level = 0xd,
1254 .vendor = CPUID_VENDOR_INTEL,
1255 .family = 6,
1256 .model = 94,
1257 .stepping = 3,
1258 .features[FEAT_1_EDX] =
1259 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1260 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1261 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1262 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1263 CPUID_DE | CPUID_FP87,
1264 .features[FEAT_1_ECX] =
1265 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1266 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1267 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1268 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1269 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1270 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1271 .features[FEAT_8000_0001_EDX] =
1272 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1273 CPUID_EXT2_SYSCALL,
1274 .features[FEAT_8000_0001_ECX] =
1275 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1276 .features[FEAT_7_0_EBX] =
1277 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1278 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1279 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1280 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1281 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1282 /* Missing: XSAVES (not supported by some Linux versions,
1283 * including v4.1 to v4.6).
1284 * KVM doesn't yet expose any XSAVES state save component,
1285 * and the only one defined in Skylake (processor tracing)
1286 * probably will block migration anyway.
1287 */
1288 .features[FEAT_XSAVE] =
1289 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1290 CPUID_XSAVE_XGETBV1,
1291 .features[FEAT_6_EAX] =
1292 CPUID_6_EAX_ARAT,
1293 .xlevel = 0x80000008,
1294 .model_id = "Intel Core Processor (Skylake)",
1295 },
1296 {
1297 .name = "Opteron_G1",
1298 .level = 5,
1299 .vendor = CPUID_VENDOR_AMD,
1300 .family = 15,
1301 .model = 6,
1302 .stepping = 1,
1303 .features[FEAT_1_EDX] =
1304 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1305 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1306 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1307 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1308 CPUID_DE | CPUID_FP87,
1309 .features[FEAT_1_ECX] =
1310 CPUID_EXT_SSE3,
1311 .features[FEAT_8000_0001_EDX] =
1312 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1313 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1314 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1315 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1316 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1317 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1318 .xlevel = 0x80000008,
1319 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1320 },
1321 {
1322 .name = "Opteron_G2",
1323 .level = 5,
1324 .vendor = CPUID_VENDOR_AMD,
1325 .family = 15,
1326 .model = 6,
1327 .stepping = 1,
1328 .features[FEAT_1_EDX] =
1329 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1330 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1331 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1332 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1333 CPUID_DE | CPUID_FP87,
1334 .features[FEAT_1_ECX] =
1335 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1336 /* Missing: CPUID_EXT2_RDTSCP */
1337 .features[FEAT_8000_0001_EDX] =
1338 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1339 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1340 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1341 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1342 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1343 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1344 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1345 .features[FEAT_8000_0001_ECX] =
1346 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1347 .xlevel = 0x80000008,
1348 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1349 },
1350 {
1351 .name = "Opteron_G3",
1352 .level = 5,
1353 .vendor = CPUID_VENDOR_AMD,
1354 .family = 15,
1355 .model = 6,
1356 .stepping = 1,
1357 .features[FEAT_1_EDX] =
1358 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1359 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1360 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1361 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1362 CPUID_DE | CPUID_FP87,
1363 .features[FEAT_1_ECX] =
1364 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1365 CPUID_EXT_SSE3,
1366 /* Missing: CPUID_EXT2_RDTSCP */
1367 .features[FEAT_8000_0001_EDX] =
1368 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1369 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1370 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1371 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1372 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1373 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1374 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1375 .features[FEAT_8000_0001_ECX] =
1376 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1377 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1378 .xlevel = 0x80000008,
1379 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1380 },
1381 {
1382 .name = "Opteron_G4",
1383 .level = 0xd,
1384 .vendor = CPUID_VENDOR_AMD,
1385 .family = 21,
1386 .model = 1,
1387 .stepping = 2,
1388 .features[FEAT_1_EDX] =
1389 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1390 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1391 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1392 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1393 CPUID_DE | CPUID_FP87,
1394 .features[FEAT_1_ECX] =
1395 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1396 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1397 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1398 CPUID_EXT_SSE3,
1399 /* Missing: CPUID_EXT2_RDTSCP */
1400 .features[FEAT_8000_0001_EDX] =
1401 CPUID_EXT2_LM |
1402 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1403 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1404 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1405 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1406 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1407 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1408 .features[FEAT_8000_0001_ECX] =
1409 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1410 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1411 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1412 CPUID_EXT3_LAHF_LM,
1413 /* no xsaveopt! */
1414 .xlevel = 0x8000001A,
1415 .model_id = "AMD Opteron 62xx class CPU",
1416 },
1417 {
1418 .name = "Opteron_G5",
1419 .level = 0xd,
1420 .vendor = CPUID_VENDOR_AMD,
1421 .family = 21,
1422 .model = 2,
1423 .stepping = 0,
1424 .features[FEAT_1_EDX] =
1425 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1426 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1427 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1428 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1429 CPUID_DE | CPUID_FP87,
1430 .features[FEAT_1_ECX] =
1431 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1432 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1433 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1434 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1435 /* Missing: CPUID_EXT2_RDTSCP */
1436 .features[FEAT_8000_0001_EDX] =
1437 CPUID_EXT2_LM |
1438 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1439 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1440 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1441 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1442 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1443 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1444 .features[FEAT_8000_0001_ECX] =
1445 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1446 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1447 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1448 CPUID_EXT3_LAHF_LM,
1449 /* no xsaveopt! */
1450 .xlevel = 0x8000001A,
1451 .model_id = "AMD Opteron 63xx class CPU",
1452 },
1453 };
1454
/* Simple (property name, value) string pair, used for tables of QOM
 * property defaults such as kvm_default_props below. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1458
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * NULL-terminated table; individual entries may be overridden at
 * runtime through x86_cpu_change_kvm_default().
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
1475
1476 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1477 {
1478 PropValue *pv;
1479 for (pv = kvm_default_props; pv->prop; pv++) {
1480 if (!strcmp(pv->prop, prop)) {
1481 pv->value = value;
1482 break;
1483 }
1484 }
1485
1486 /* It is valid to call this function only for properties that
1487 * are already present in the kvm_default_props table.
1488 */
1489 assert(pv->prop);
1490 }
1491
1492 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1493 bool migratable_only);
1494
1495 #ifdef CONFIG_KVM
1496
/* Fill str with the 48-byte host CPU model string from CPUID leaves
 * 0x80000002..0x80000004.  str must have room for at least 48 bytes.
 * Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4];
    int i;

    for (i = 0; i < 3; i++) {
        /* Each leaf yields 16 bytes of the model string, in
         * EAX/EBX/ECX/EDX order — the same layout as regs[]. */
        host_cpuid(0x80000002 + i, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + i * 16, regs, sizeof(regs));
    }
    return 0;
}
1511
1512 static X86CPUDefinition host_cpudef;
1513
/* qdev properties specific to the "host" CPU model */
static Property host_x86_cpu_properties[] = {
    /* When true, only migration-safe features are used (see the
     * migratable_only argument of x86_cpu_get_supported_feature_word) */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* When true, pass the host's cache information through to the guest */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1519
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    /* The "host" model is usable only under KVM */
    xcc->kvm_required = true;

    /* CPUID leaf 0: vendor string is returned in EBX, EDX, ECX order */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* CPUID leaf 1, EAX: decode family/model/stepping, folding the
     * extended family/model fields into the base values */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
1552
/* instance_init for the "host" CPU model: query KVM for the supported
 * CPUID levels.  Requires KVM to already be initialized. */
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
    if (kvm_enabled()) {
        /* Maximum supported basic (0x0), extended (0x80000000) and
         * 0xC0000000-range CPUID leaves, as reported by KVM */
        env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
    }

    /* PMU is enabled by default for -cpu host */
    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1573
/* QOM type registration for the KVM-only "host" CPU model */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1580
1581 #endif
1582
1583 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1584 {
1585 FeatureWordInfo *f = &feature_word_info[w];
1586 int i;
1587
1588 for (i = 0; i < 32; ++i) {
1589 if ((1UL << i) & mask) {
1590 const char *reg = get_register_name_32(f->cpuid_reg);
1591 assert(reg);
1592 fprintf(stderr, "warning: %s doesn't support requested feature: "
1593 "CPUID.%02XH:%s%s%s [bit %d]\n",
1594 kvm_enabled() ? "host" : "TCG",
1595 f->cpuid_eax, reg,
1596 f->feat_names[i] ? "." : "",
1597 f->feat_names[i] ? f->feat_names[i] : "", i);
1598 }
1599 }
1600 }
1601
1602 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1603 const char *name, void *opaque,
1604 Error **errp)
1605 {
1606 X86CPU *cpu = X86_CPU(obj);
1607 CPUX86State *env = &cpu->env;
1608 int64_t value;
1609
1610 value = (env->cpuid_version >> 8) & 0xf;
1611 if (value == 0xf) {
1612 value += (env->cpuid_version >> 20) & 0xff;
1613 }
1614 visit_type_int(v, name, &value, errp);
1615 }
1616
1617 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1618 const char *name, void *opaque,
1619 Error **errp)
1620 {
1621 X86CPU *cpu = X86_CPU(obj);
1622 CPUX86State *env = &cpu->env;
1623 const int64_t min = 0;
1624 const int64_t max = 0xff + 0xf;
1625 Error *local_err = NULL;
1626 int64_t value;
1627
1628 visit_type_int(v, name, &value, &local_err);
1629 if (local_err) {
1630 error_propagate(errp, local_err);
1631 return;
1632 }
1633 if (value < min || value > max) {
1634 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1635 name ? name : "null", value, min, max);
1636 return;
1637 }
1638
1639 env->cpuid_version &= ~0xff00f00;
1640 if (value > 0x0f) {
1641 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1642 } else {
1643 env->cpuid_version |= value << 8;
1644 }
1645 }
1646
1647 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1648 const char *name, void *opaque,
1649 Error **errp)
1650 {
1651 X86CPU *cpu = X86_CPU(obj);
1652 CPUX86State *env = &cpu->env;
1653 int64_t value;
1654
1655 value = (env->cpuid_version >> 4) & 0xf;
1656 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1657 visit_type_int(v, name, &value, errp);
1658 }
1659
1660 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1661 const char *name, void *opaque,
1662 Error **errp)
1663 {
1664 X86CPU *cpu = X86_CPU(obj);
1665 CPUX86State *env = &cpu->env;
1666 const int64_t min = 0;
1667 const int64_t max = 0xff;
1668 Error *local_err = NULL;
1669 int64_t value;
1670
1671 visit_type_int(v, name, &value, &local_err);
1672 if (local_err) {
1673 error_propagate(errp, local_err);
1674 return;
1675 }
1676 if (value < min || value > max) {
1677 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1678 name ? name : "null", value, min, max);
1679 return;
1680 }
1681
1682 env->cpuid_version &= ~0xf00f0;
1683 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1684 }
1685
1686 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1687 const char *name, void *opaque,
1688 Error **errp)
1689 {
1690 X86CPU *cpu = X86_CPU(obj);
1691 CPUX86State *env = &cpu->env;
1692 int64_t value;
1693
1694 value = env->cpuid_version & 0xf;
1695 visit_type_int(v, name, &value, errp);
1696 }
1697
1698 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1699 const char *name, void *opaque,
1700 Error **errp)
1701 {
1702 X86CPU *cpu = X86_CPU(obj);
1703 CPUX86State *env = &cpu->env;
1704 const int64_t min = 0;
1705 const int64_t max = 0xf;
1706 Error *local_err = NULL;
1707 int64_t value;
1708
1709 visit_type_int(v, name, &value, &local_err);
1710 if (local_err) {
1711 error_propagate(errp, local_err);
1712 return;
1713 }
1714 if (value < min || value > max) {
1715 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1716 name ? name : "null", value, min, max);
1717 return;
1718 }
1719
1720 env->cpuid_version &= ~0xf;
1721 env->cpuid_version |= value & 0xf;
1722 }
1723
1724 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1725 {
1726 X86CPU *cpu = X86_CPU(obj);
1727 CPUX86State *env = &cpu->env;
1728 char *value;
1729
1730 value = g_malloc(CPUID_VENDOR_SZ + 1);
1731 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1732 env->cpuid_vendor3);
1733 return value;
1734 }
1735
1736 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1737 Error **errp)
1738 {
1739 X86CPU *cpu = X86_CPU(obj);
1740 CPUX86State *env = &cpu->env;
1741 int i;
1742
1743 if (strlen(value) != CPUID_VENDOR_SZ) {
1744 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1745 return;
1746 }
1747
1748 env->cpuid_vendor1 = 0;
1749 env->cpuid_vendor2 = 0;
1750 env->cpuid_vendor3 = 0;
1751 for (i = 0; i < 4; i++) {
1752 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1753 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1754 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1755 }
1756 }
1757
1758 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1759 {
1760 X86CPU *cpu = X86_CPU(obj);
1761 CPUX86State *env = &cpu->env;
1762 char *value;
1763 int i;
1764
1765 value = g_malloc(48 + 1);
1766 for (i = 0; i < 48; i++) {
1767 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1768 }
1769 value[48] = '\0';
1770 return value;
1771 }
1772
1773 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1774 Error **errp)
1775 {
1776 X86CPU *cpu = X86_CPU(obj);
1777 CPUX86State *env = &cpu->env;
1778 int c, len, i;
1779
1780 if (model_id == NULL) {
1781 model_id = "";
1782 }
1783 len = strlen(model_id);
1784 memset(env->cpuid_model, 0, 48);
1785 for (i = 0; i < 48; i++) {
1786 if (i >= len) {
1787 c = '\0';
1788 } else {
1789 c = (uint8_t)model_id[i];
1790 }
1791 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1792 }
1793 }
1794
1795 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1796 void *opaque, Error **errp)
1797 {
1798 X86CPU *cpu = X86_CPU(obj);
1799 int64_t value;
1800
1801 value = cpu->env.tsc_khz * 1000;
1802 visit_type_int(v, name, &value, errp);
1803 }
1804
1805 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1806 void *opaque, Error **errp)
1807 {
1808 X86CPU *cpu = X86_CPU(obj);
1809 const int64_t min = 0;
1810 const int64_t max = INT64_MAX;
1811 Error *local_err = NULL;
1812 int64_t value;
1813
1814 visit_type_int(v, name, &value, &local_err);
1815 if (local_err) {
1816 error_propagate(errp, local_err);
1817 return;
1818 }
1819 if (value < min || value > max) {
1820 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1821 name ? name : "null", value, min, max);
1822 return;
1823 }
1824
1825 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1826 }
1827
1828 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1829 void *opaque, Error **errp)
1830 {
1831 X86CPU *cpu = X86_CPU(obj);
1832 int64_t value = cpu->apic_id;
1833
1834 visit_type_int(v, name, &value, errp);
1835 }
1836
1837 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1838 void *opaque, Error **errp)
1839 {
1840 X86CPU *cpu = X86_CPU(obj);
1841 DeviceState *dev = DEVICE(obj);
1842 const int64_t min = 0;
1843 const int64_t max = UINT32_MAX;
1844 Error *error = NULL;
1845 int64_t value;
1846
1847 if (dev->realized) {
1848 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1849 "it was realized", name, object_get_typename(obj));
1850 return;
1851 }
1852
1853 visit_type_int(v, name, &value, &error);
1854 if (error) {
1855 error_propagate(errp, error);
1856 return;
1857 }
1858 if (value < min || value > max) {
1859 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1860 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1861 object_get_typename(obj), name, value, min, max);
1862 return;
1863 }
1864
1865 if ((value != cpu->apic_id) && cpu_exists(value)) {
1866 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1867 return;
1868 }
1869 cpu->apic_id = value;
1870 }
1871
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* opaque points at the FEATURE_WORDS-sized uint32_t array to report */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    /* Both arrays live on the stack: the visit below consumes the list
     * before this function returns, so no heap allocation is needed. */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        /* Describe which CPUID leaf/register this feature word comes from */
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
1900
1901 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1902 void *opaque, Error **errp)
1903 {
1904 X86CPU *cpu = X86_CPU(obj);
1905 int64_t value = cpu->hyperv_spinlock_attempts;
1906
1907 visit_type_int(v, name, &value, errp);
1908 }
1909
1910 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1911 void *opaque, Error **errp)
1912 {
1913 const int64_t min = 0xFFF;
1914 const int64_t max = UINT_MAX;
1915 X86CPU *cpu = X86_CPU(obj);
1916 Error *err = NULL;
1917 int64_t value;
1918
1919 visit_type_int(v, name, &value, &err);
1920 if (err) {
1921 error_propagate(errp, err);
1922 return;
1923 }
1924
1925 if (value < min || value > max) {
1926 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1927 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1928 object_get_typename(obj), name ? name : "null",
1929 value, min, max);
1930 return;
1931 }
1932 cpu->hyperv_spinlock_attempts = value;
1933 }
1934
/* qdev property type wiring the x86_get_hv_spinlocks/x86_set_hv_spinlocks
 * accessors above to a property (presumably the Hyper-V spinlocks
 * property; registration is not visible in this part of the file). */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1940
/* Convert all '_' in a feature string option name to '-', in place, so the
 * feature name conforms to the QOM property naming rule, which uses '-'
 * instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = s; *p != '\0'; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
1950
/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static FeatureWordArray plus_features = { 0 };
static FeatureWordArray minus_features = { 0 };
1958
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Legacy "+feat"/"-feat" entries are accumulated into the global
 * plus_features/minus_features bitmaps (applied later at realize time);
 * everything else is applied immediately as a QOM property on @cs.
 * Note: strtok() mutates @features and keeps static state, so this is
 * not reentrant.
 */
static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
                                     Error **errp)
{
    X86CPU *cpu = X86_CPU(cs);
    char *featurestr; /* Single 'key=value" string being parsed */
    Error *local_err = NULL;

    if (!features) {
        return;
    }

    /* The loop condition stops at the first error, so later options are
     * not applied once something fails. */
    for (featurestr = strtok(features, ",");
         featurestr && !local_err;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
            continue;
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
            continue;
        }

        /* Split "key=value"; a bare "feat" means "feat=on". */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Special case: "tsc-freq" takes a size-style value with an
         * optional suffix; convert it to a plain integer string for the
         * "tsc-frequency" property. */
        if (!strcmp(name, "tsc-freq")) {
            int64_t tsc_freq;
            char *err;

            tsc_freq = qemu_strtosz_suffix_unit(val, &err,
                                           QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
            /* *err != '\0' means trailing junk after the number */
            if (tsc_freq < 0 || *err) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        object_property_parse(OBJECT(cpu), val, name, &local_err);
    }

    if (local_err) {
        error_propagate(errp, local_err);
    }
}
2023
2024 /* Print all cpuid feature names in featureset
2025 */
2026 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2027 {
2028 int bit;
2029 bool first = true;
2030
2031 for (bit = 0; bit < 32; bit++) {
2032 if (featureset[bit]) {
2033 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2034 first = false;
2035 }
2036 }
2037 }
2038
2039 /* generate CPU information. */
2040 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2041 {
2042 X86CPUDefinition *def;
2043 char buf[256];
2044 int i;
2045
2046 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2047 def = &builtin_x86_defs[i];
2048 snprintf(buf, sizeof(buf), "%s", def->name);
2049 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2050 }
2051 #ifdef CONFIG_KVM
2052 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2053 "KVM processor with all supported host features "
2054 "(only available in KVM mode)");
2055 #endif
2056
2057 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2058 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2059 FeatureWordInfo *fw = &feature_word_info[i];
2060
2061 (*cpu_fprintf)(f, " ");
2062 listflags(f, cpu_fprintf, fw->feat_names);
2063 (*cpu_fprintf)(f, "\n");
2064 }
2065 }
2066
2067 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2068 {
2069 CpuDefinitionInfoList *cpu_list = NULL;
2070 X86CPUDefinition *def;
2071 int i;
2072
2073 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2074 CpuDefinitionInfoList *entry;
2075 CpuDefinitionInfo *info;
2076
2077 def = &builtin_x86_defs[i];
2078 info = g_malloc0(sizeof(*info));
2079 info->name = g_strdup(def->name);
2080
2081 entry = g_malloc0(sizeof(*entry));
2082 entry->value = info;
2083 entry->next = cpu_list;
2084 cpu_list = entry;
2085 }
2086
2087 return cpu_list;
2088 }
2089
2090 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2091 bool migratable_only)
2092 {
2093 FeatureWordInfo *wi = &feature_word_info[w];
2094 uint32_t r;
2095
2096 if (kvm_enabled()) {
2097 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2098 wi->cpuid_ecx,
2099 wi->cpuid_reg);
2100 } else if (tcg_enabled()) {
2101 r = wi->tcg_features;
2102 } else {
2103 return ~0;
2104 }
2105 if (migratable_only) {
2106 r &= x86_cpu_get_migratable_flags(w);
2107 }
2108 return r;
2109 }
2110
2111 /*
2112 * Filters CPU feature words based on host availability of each feature.
2113 *
2114 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2115 */
2116 static int x86_cpu_filter_features(X86CPU *cpu)
2117 {
2118 CPUX86State *env = &cpu->env;
2119 FeatureWord w;
2120 int rv = 0;
2121
2122 for (w = 0; w < FEATURE_WORDS; w++) {
2123 uint32_t host_feat =
2124 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2125 uint32_t requested_features = env->features[w];
2126 env->features[w] &= host_feat;
2127 cpu->filtered_features[w] = requested_features & ~env->features[w];
2128 if (cpu->filtered_features[w]) {
2129 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2130 report_unavailable_features(w, cpu->filtered_features[w]);
2131 }
2132 rv = 1;
2133 }
2134 }
2135
2136 return rv;
2137 }
2138
2139 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2140 {
2141 PropValue *pv;
2142 for (pv = props; pv->prop; pv++) {
2143 if (!pv->value) {
2144 continue;
2145 }
2146 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2147 &error_abort);
2148 }
2149 }
2150
/* Load data from X86CPUDefinition
 *
 * Copies the model definition (CPUID levels, version fields, feature
 * words, vendor string) into the CPU object's properties, then applies
 * the KVM-specific overrides.  Errors from the individual property sets
 * accumulate into *errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    object_property_set_int(OBJECT(cpu), def->level, "level", errp);
    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Feature words are copied directly rather than going through
     * properties. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    /* Always advertise that we are running under a hypervisor. */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2200
2201 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2202 {
2203 X86CPU *cpu = NULL;
2204 ObjectClass *oc;
2205 gchar **model_pieces;
2206 char *name, *features;
2207 Error *error = NULL;
2208
2209 model_pieces = g_strsplit(cpu_model, ",", 2);
2210 if (!model_pieces[0]) {
2211 error_setg(&error, "Invalid/empty CPU model name");
2212 goto out;
2213 }
2214 name = model_pieces[0];
2215 features = model_pieces[1];
2216
2217 oc = x86_cpu_class_by_name(name);
2218 if (oc == NULL) {
2219 error_setg(&error, "Unable to find CPU definition: %s", name);
2220 goto out;
2221 }
2222
2223 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2224
2225 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2226 if (error) {
2227 goto out;
2228 }
2229
2230 out:
2231 if (error != NULL) {
2232 error_propagate(errp, error);
2233 if (cpu) {
2234 object_unref(OBJECT(cpu));
2235 cpu = NULL;
2236 }
2237 }
2238 g_strfreev(model_pieces);
2239 return cpu;
2240 }
2241
/* Convenience wrapper: create and realize an X86CPU from a
 * "model[,features...]" string via the generic CPU helper
 * (cpu_generic_init is defined elsewhere; presumably returns NULL on
 * failure -- confirm against its definition). */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
2246
/* class_init for per-model CPU subclasses: stash the X86CPUDefinition
 * passed as class_data into the class struct. */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
}
2254
/* Register a QOM type for one builtin CPU model; the definition is passed
 * through as class_data to x86_cpu_cpudef_class_init(). */
static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    /* NOTE(review): freeing typename right after registration assumes
     * type_register() copies the name string -- confirm. */
    type_register(&ti);
    g_free(typename);
}
2268
2269 #if !defined(CONFIG_USER_ONLY)
2270
/* Remove the APIC feature bit from CPUID[1].EDX. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2275
2276 #endif /* !CONFIG_USER_ONLY */
2277
/* Compute the CPUID leaf @index / sub-leaf @count register values presented
 * to the guest, based on the configured feature words and topology.
 * Out-of-range leaves are clamped to the maximum basic/extended/Centaur
 * level first.  All four output registers are always written.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, feature flags, APIC id / logical processor count */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE, so it is computed dynamically */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        *ecx = 0;
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR << 8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE, so it is computed dynamically */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(smp_cores, smp_threads);
            *ebx = smp_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(smp_cores, smp_threads);
            *ebx = smp_cores * smp_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        KVMState *s = cs->kvm_state;
        uint64_t ena_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }
        /* ena_mask limits the reported XSAVE components to what the
         * host (KVM) supports; under TCG everything is allowed. */
        if (kvm_enabled()) {
            ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
            ena_mask <<= 32;
            ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
        } else {
            ena_mask = -1;
        }

        if (count == 0) {
            *ecx = 0x240;
            for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
                const ExtSaveArea *esa = &x86_ext_save_areas[i];
                if ((env->features[esa->feature] & esa->bits) == esa->bits
                    && ((ena_mask >> i) & 1) != 0) {
                    if (i < 32) {
                        *eax |= 1u << i;
                    } else {
                        *edx |= 1u << (i - 32);
                    }
                    *ecx = MAX(*ecx, esa->offset + esa->size);
                }
            }
            *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            const ExtSaveArea *esa = &x86_ext_save_areas[count];
            if ((env->features[esa->feature] & esa->bits) == esa->bits
                && ((ena_mask >> count) & 1) != 0) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        /* Extended vendor string and maximum extended leaf */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        *edx = ((L3_SIZE_KB/512) << 18) | \
               (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
               (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        break;
    case 0x80000007:
        /* Advanced Power Management Information */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
        /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
                *eax = 0x00000024; /* 36 bits physical */
            } else {
                *eax = 0x00000020; /* 32 bits physical */
            }
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM information, only when SVM is advertised */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        /* Centaur maximum leaf */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2669
/* CPUClass::reset()
 *
 * Put the CPU into its architectural reset state: real mode at
 * CS:IP = F000:FFF0, FPU/SSE/XSAVE state initialized, debug registers,
 * MTRRs and APIC BSP flag set up.  Everything up to cpuid_level in
 * CPUX86State is zeroed first, so CPUID configuration survives reset.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero only the dynamic state; fields from cpuid_level onward hold
     * the configured CPU model and must be preserved. */
    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS base 0xffff0000 so the first fetch hits the
     * reset vector at physical 0xfffffff0. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((env->features[esa->feature] & esa->bits) == esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2796
2797 #ifndef CONFIG_USER_ONLY
/* Return true if this CPU is the bootstrap processor, i.e. the BSP bit
 * is set in its APIC base MSR. */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
2802
/* TODO: remove me, when reset over QOM tree is implemented */
/* qemu_register_reset() callback: forward a machine reset to cpu_reset(). */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
2809 #endif
2810
2811 static void mce_init(X86CPU *cpu)
2812 {
2813 CPUX86State *cenv = &cpu->env;
2814 unsigned int bank;
2815
2816 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2817 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2818 (CPUID_MCE | CPUID_MCA)) {
2819 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2820 cenv->mcg_ctl = ~(uint64_t)0;
2821 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2822 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2823 }
2824 }
2825 }
2826
2827 #ifndef CONFIG_USER_ONLY
2828 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2829 {
2830 APICCommonState *apic;
2831 const char *apic_type = "apic";
2832
2833 if (kvm_apic_in_kernel()) {
2834 apic_type = "kvm-apic";
2835 } else if (xen_enabled()) {
2836 apic_type = "xen-apic";
2837 }
2838
2839 cpu->apic_state = DEVICE(object_new(apic_type));
2840
2841 object_property_add_child(OBJECT(cpu), "apic",
2842 OBJECT(cpu->apic_state), NULL);
2843 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2844 /* TODO: convert to link<> */
2845 apic = APIC_COMMON(cpu->apic_state);
2846 apic->cpu = cpu;
2847 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2848 }
2849
2850 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2851 {
2852 APICCommonState *apic;
2853 static bool apic_mmio_map_once;
2854
2855 if (cpu->apic_state == NULL) {
2856 return;
2857 }
2858 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2859 errp);
2860
2861 /* Map APIC MMIO area */
2862 apic = APIC_COMMON(cpu->apic_state);
2863 if (!apic_mmio_map_once) {
2864 memory_region_add_subregion_overlap(get_system_memory(),
2865 apic->apicbase &
2866 MSR_IA32_APICBASE_BASE,
2867 &apic->io_memory,
2868 0x1000);
2869 apic_mmio_map_once = true;
2870 }
2871 }
2872
2873 static void x86_cpu_machine_done(Notifier *n, void *unused)
2874 {
2875 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2876 MemoryRegion *smram =
2877 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2878
2879 if (smram) {
2880 cpu->smram = g_new(MemoryRegion, 1);
2881 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2882 smram, 0, 1ull << 32);
2883 memory_region_set_enabled(cpu->smram, false);
2884 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2885 }
2886 }
2887 #else
/* User-mode emulation has no APIC; nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
2891 #endif
2892
2893
/* True iff all three CPUID vendor words match the well-known Intel/AMD
 * vendor signatures. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU.
 *
 * Resolves the final feature-word set (host features, legacy +/- flags,
 * host filtering), creates the APIC and per-CPU address space where
 * applicable, starts the vcpu and performs the initial reset.
 * NOTE(review): most errors go through local_err/out, but the apic-id
 * check reports via errp and returns directly -- inconsistent but
 * functionally equivalent.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;
    FeatureWord w;

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id < 0) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /*TODO: cpu->host_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->host_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Apply the legacy "+feat"/"-feat" bitmaps; "-" wins over "+". */
    for (w = 0; w < FEATURE_WORDS; w++) {
        cpu->env.features[w] |= plus_features[w];
        cpu->env.features[w] &= ~minus_features[w];
    }

    /* Leaf 7 features require cpuid_level >= 7 to be visible. */
    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }


    cpu_exec_init(cs, &error_abort);

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3038
/* Opaque state for a boolean feature-bit property: 'mask' selects the
 * bit(s) inside the uint32_t pointed to by 'ptr'. */
typedef struct BitProperty {
    uint32_t *ptr;
    uint32_t mask;
} BitProperty;
3043
3044 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3045 void *opaque, Error **errp)
3046 {
3047 BitProperty *fp = opaque;
3048 bool value = (*fp->ptr & fp->mask) == fp->mask;
3049 visit_type_bool(v, name, &value, errp);
3050 }
3051
3052 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3053 void *opaque, Error **errp)
3054 {
3055 DeviceState *dev = DEVICE(obj);
3056 BitProperty *fp = opaque;
3057 Error *local_err = NULL;
3058 bool value;
3059
3060 if (dev->realized) {
3061 qdev_prop_set_after_realize(dev, name, errp);
3062 return;
3063 }
3064
3065 visit_type_bool(v, name, &value, &local_err);
3066 if (local_err) {
3067 error_propagate(errp, local_err);
3068 return;
3069 }
3070
3071 if (value) {
3072 *fp->ptr |= fp->mask;
3073 } else {
3074 *fp->ptr &= ~fp->mask;
3075 }
3076 }
3077
3078 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3079 void *opaque)
3080 {
3081 BitProperty *prop = opaque;
3082 g_free(prop);
3083 }
3084
3085 /* Register a boolean property to get/set a single bit in a uint32_t field.
3086 *
3087 * The same property name can be registered multiple times to make it affect
3088 * multiple bits in the same FeatureWord. In that case, the getter will return
3089 * true only if all bits are set.
3090 */
3091 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3092 const char *prop_name,
3093 uint32_t *field,
3094 int bitnr)
3095 {
3096 BitProperty *fp;
3097 ObjectProperty *op;
3098 uint32_t mask = (1UL << bitnr);
3099
3100 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3101 if (op) {
3102 fp = op->opaque;
3103 assert(fp->ptr == field);
3104 fp->mask |= mask;
3105 } else {
3106 fp = g_new0(BitProperty, 1);
3107 fp->ptr = field;
3108 fp->mask = mask;
3109 object_property_add(OBJECT(cpu), prop_name, "bool",
3110 x86_cpu_get_bit_prop,
3111 x86_cpu_set_bit_prop,
3112 x86_cpu_release_bit_prop, fp, &error_abort);
3113 }
3114 }
3115
3116 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3117 FeatureWord w,
3118 int bitnr)
3119 {
3120 Object *obj = OBJECT(cpu);
3121 int i;
3122 char **names;
3123 FeatureWordInfo *fi = &feature_word_info[w];
3124
3125 if (!fi->feat_names) {
3126 return;
3127 }
3128 if (!fi->feat_names[bitnr]) {
3129 return;
3130 }
3131
3132 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3133
3134 feat2prop(names[0]);
3135 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3136
3137 for (i = 1; names[i]; i++) {
3138 feat2prop(names[i]);
3139 object_property_add_alias(obj, names[i], obj, names[0],
3140 &error_abort);
3141 }
3142
3143 g_strfreev(names);
3144 }
3145
/* QOM instance_init for X86CPU: registers the per-CPU properties
 * (CPUID versioning, vendor/model strings, TSC frequency, APIC ID,
 * feature words and per-feature-bit booleans) and then loads the CPU
 * model definition selected by the class.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* CPUID family/model/stepping with custom accessors */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "apic-id", "int",
                        x86_cpuid_get_apic_id,
                        x86_cpuid_set_apic_id, NULL, NULL, NULL);
    /* Read-only introspection of the feature words and of the features
     * that were filtered out (both use the same getter, different data).
     */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

#ifndef CONFIG_USER_ONLY
    /* Any code creating new X86CPU objects have to set apic-id explicitly */
    cpu->apic_id = -1;
#endif

    /* One boolean property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3201
3202 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3203 {
3204 X86CPU *cpu = X86_CPU(cs);
3205
3206 return cpu->apic_id;
3207 }
3208
3209 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3210 {
3211 X86CPU *cpu = X86_CPU(cs);
3212
3213 return cpu->env.cr[0] & CR0_PG_MASK;
3214 }
3215
3216 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3217 {
3218 X86CPU *cpu = X86_CPU(cs);
3219
3220 cpu->env.eip = value;
3221 }
3222
3223 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3224 {
3225 X86CPU *cpu = X86_CPU(cs);
3226
3227 cpu->env.eip = tb->pc - tb->cs_base;
3228 }
3229
3230 static bool x86_cpu_has_work(CPUState *cs)
3231 {
3232 X86CPU *cpu = X86_CPU(cs);
3233 CPUX86State *env = &cpu->env;
3234
3235 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3236 CPU_INTERRUPT_POLL)) &&
3237 (env->eflags & IF_MASK)) ||
3238 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3239 CPU_INTERRUPT_INIT |
3240 CPU_INTERRUPT_SIPI |
3241 CPU_INTERRUPT_MCE)) ||
3242 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3243 !(env->hflags & HF_SMM_MASK));
3244 }
3245
/* qdev properties common to every X86CPU subclass. */
static Property x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* hv-spinlocks uses a custom PropertyInfo (string/number parser) */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    /* Hyper-V enlightenments, all off by default */
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* CPUID checking/enforcement policy */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    /* Override maximum CPUID leaf levels (0 = use model default) */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_END_OF_LIST()
};
3268
/* QOM class_init shared by all X86CPU subclasses: install the x86
 * realize/reset overrides (saving the parent implementations), the qdev
 * property list, and every CPUClass hook.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain realize/reset: remember the parent handlers so the x86
     * implementations can call them.
     */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System emulation only: memory mapping, crash-dump notes, VM state */
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
3319
/* Abstract base QOM type for x86 CPUs; concrete per-model subtypes are
 * registered from builtin_x86_defs in x86_cpu_register_types().
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,   /* only the model subtypes are instantiable */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3329
3330 static void x86_cpu_register_types(void)
3331 {
3332 int i;
3333
3334 type_register_static(&x86_cpu_type_info);
3335 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3336 x86_register_cpudef_type(&builtin_x86_defs[i]);
3337 }
3338 #ifdef CONFIG_KVM
3339 type_register_static(&host_x86_cpu_type_info);
3340 #endif
3341 }
3342
3343 type_init(x86_cpu_register_types)