/* Source: QEMU (mirror_qemu.git), target-i386/cpu.c */
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */
/* One-byte descriptor values; the cache geometry each one encodes is
 * spelled out in the macro name (size / ways / line size). */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B 0x7d


/* CPUID Leaf 4 constants: */

/* EAX: */
/* Cache type values (low bits of EAX) */
#define CPUID_4_TYPE_DCACHE 1
#define CPUID_4_TYPE_ICACHE 2
#define CPUID_4_TYPE_UNIFIED 3

/* Cache level field starts at bit 5 */
#define CPUID_4_LEVEL(l) ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE (1 << 1)
#define CPUID_4_COMPLEX_IDX (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: maps a way
 * count to the 4-bit field value; way counts with no defined encoding
 * map to 0 (invalid).  The argument is fully parenthesized (and the
 * whole expansion wrapped in parentheses) so the macro stays correct
 * when 'a' is a compound expression such as a conditional.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a) : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
94
95
/* Definitions of the hardcoded cache entries we expose to the guest
 * (sizes in bytes unless the name says KB): */

/* L1 data cache: */
#define L1D_LINE_SIZE 64
#define L1D_ASSOCIATIVITY 8
#define L1D_SETS 64
#define L1D_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG 1
#define L1D_SIZE_KB_AMD 64
#define L1D_ASSOCIATIVITY_AMD 2

/* L1 instruction cache: */
#define L1I_LINE_SIZE 64
#define L1I_ASSOCIATIVITY 8
#define L1I_SETS 64
#define L1I_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG 1
#define L1I_SIZE_KB_AMD 64
#define L1I_ASSOCIATIVITY_AMD 2

/* Level 2 unified cache: */
#define L2_LINE_SIZE 64
#define L2_ASSOCIATIVITY 16
#define L2_SETS 4096
#define L2_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG 1
#define L2_SIZE_KB_AMD 512

/* No L3 cache: */
#define L3_SIZE_KB 0 /* disabled */
#define L3_ASSOCIATIVITY 0 /* disabled */
#define L3_LINES_PER_TAG 0 /* disabled */
#define L3_LINE_SIZE 0 /* disabled */
139
/* TLB definitions: hardcoded entry counts and associativities exposed to
 * the guest (consumed by the CPUID code elsewhere in this file). */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
161
162
163
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
166 {
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
172 }
173 dst[CPUID_VENDOR_SZ] = '\0';
174 }
175
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * between feature naming conventions, aliases may be added.
 *
 * In all of the tables below the array index is the bit number within
 * the CPUID register; a NULL entry means the bit has no name known to
 * QEMU; '|' separates accepted aliases for the same bit.
 */
/* CPUID[1].EDX feature names: */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID[1].ECX feature names: */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
/* CPUID[8000_0001].ECX feature names: */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
225
/* CPUID[C000_0001].EDX (VIA/Centaur) feature names: */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* KVM paravirtual feature names (CPUID[KVM_CPUID_FEATURES].EAX).
 * NOTE(review): "kvmclock" intentionally appears at both bit 0 and
 * bit 3 -- presumably KVM's two clocksource feature bits sharing one
 * name; confirm against linux/kvm_para.h. */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* SVM sub-feature names (CPUID[8000_000A].EDX): */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[EAX=7,ECX=0].EBX feature names: */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
    "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};

/* CPUID[EAX=7,ECX=0].ECX feature names: */
static const char *cpuid_7_0_ecx_feature_name[] = {
    NULL, NULL, NULL, "pku",
    "ospke", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[8000_0007].EDX (APM) feature names: */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[EAX=0xd,ECX=1].EAX (XSAVE sub-leaf) feature names: */
static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[6].EAX (thermal/power management) feature names: */
static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
309
/* CPUID[1].EDX feature sets for the classic named CPU models: */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* Per-feature-word masks of what the TCG emulator can provide: */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
372
/* Describes where each feature word's bits come from (which CPUID
 * leaf/register) and which of those bits TCG supports or cannot be
 * migrated. */
typedef struct FeatureWordInfo {
    const char **feat_names;     /* bit-indexed names; NULL = unnamed bit */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
382
/* Per-FeatureWord table: CPUID leaf/register source, TCG capability mask,
 * and migratability information.  Indexed by the FeatureWord enum. */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = ext_feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    [FEAT_8000_0001_EDX] = {
        .feat_names = ext2_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = ext3_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = ext4_feature_name,
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = kvm_feature_name,
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_SVM] = {
        .feat_names = svm_feature_name,
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = cpuid_7_0_ebx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = cpuid_7_0_ecx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = cpuid_apm_edx_feature_name,
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = cpuid_xsave_feature_name,
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = cpuid_6_feature_name,
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
};
453
/* Mapping from an R_* 32-bit register index to its name and QAPI enum. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* Table of the eight 32-bit GPRs, indexed by the R_* constants. */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
474
/* Per-XSAVE-component descriptors: the feature word/bit that must be
 * enabled for the component to exist, plus the component's offset and
 * size within X86XSaveArea.  Indexed by XSTATE_*_BIT. */
const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
505
506 const char *get_register_name_32(unsigned int reg)
507 {
508 if (reg >= CPU_NB_REGS32) {
509 return NULL;
510 }
511 return x86_reg_info_32[reg].name;
512 }
513
514 /*
515 * Returns the set of feature flags that are supported and migratable by
516 * QEMU, for a given FeatureWord.
517 */
518 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
519 {
520 FeatureWordInfo *wi = &feature_word_info[w];
521 uint32_t r = 0;
522 int i;
523
524 for (i = 0; i < 32; i++) {
525 uint32_t f = 1U << i;
526 /* If the feature name is unknown, it is not supported by QEMU yet */
527 if (!wi->feat_names[i]) {
528 continue;
529 }
530 /* Skip features known to QEMU, but explicitly marked as unmigratable */
531 if (wi->unmigratable_flags & f) {
532 continue;
533 }
534 r |= f;
535 }
536 return r;
537 }
538
/*
 * Execute the CPUID instruction on the *host* CPU with @function in EAX
 * and @count in ECX, storing the resulting register values through any
 * of the non-NULL output pointers.  Aborts on non-x86 hosts.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* pusha/popa save and restore all GPRs around CPUID (NOTE(review):
     * presumably because EBX may be reserved as the PIC register on
     * i386); the results are stored through %2 (= vec) before popa
     * restores the registers. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    /* Callers pass NULL for any register value they don't need */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
572
/* True for any non-NUL character at or below ' ' or above '~' -- i.e.
 * anything outside the printable ASCII range plus the space character;
 * despite the name, this matches more than just whitespace. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
574
/* General substring compare of *[s1..e1) and *[s2..e2).  sx points at
 * the start of a substring; ex, if non-NULL, points at the first char
 * after the substring, otherwise the substring is bounded by a
 * terminating NUL.  Returns the lexical ordering of *s1 vs *s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    while (*s1 && *s2 && *s1 == *s2) {
        ++s1;
        ++s2;
        /* Both substrings ended together: equal */
        if (s1 == e1 && s2 == e2) {
            return 0;
        }
        /* Only one substring ended: the leftover char decides order */
        if (s1 == e1) {
            return *s2;
        }
        if (s2 == e2) {
            return *s1;
        }
    }
    return *s1 - *s2;
}
595
/* Compare *[s..e) to *altstr.  *altstr may be a single string or
 * multiple '|'-delimited (possibly empty) alternatives, tried left to
 * right.  Returns 0 on a match, non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *alt_start = altstr;
    const char *p = altstr;

    for (;;) {
        if (*p != '\0' && *p != '|') {
            ++p;
            continue;
        }
        /* [alt_start..p) is one alternative; an empty alternative
         * matches only an empty s */
        if ((alt_start == p && !*s) ||
            (alt_start != p && !sstrcmp(s, e, alt_start, p))) {
            return 0;
        }
        if (*p == '\0') {
            return 1;
        }
        alt_start = ++p;
    }
}
616
/* Search @featureset for the flag named by *[s..e); if found, set the
 * corresponding bit(s) in *pval and return true, otherwise return
 * false.
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    bool found = false;
    int bit;

    for (bit = 0; bit < 32; bit++) {
        const char *name = featureset[bit];
        if (name && !altcmp(s, e, name)) {
            *pval |= 1U << bit;
            found = true;
        }
    }
    return found;
}
635
636 static void add_flagname_to_bitmaps(const char *flagname,
637 FeatureWordArray words,
638 Error **errp)
639 {
640 FeatureWord w;
641 for (w = 0; w < FEATURE_WORDS; w++) {
642 FeatureWordInfo *wi = &feature_word_info[w];
643 if (wi->feat_names &&
644 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
645 break;
646 }
647 }
648 if (w == FEATURE_WORDS) {
649 error_setg(errp, "CPU feature %s not found", flagname);
650 }
651 }
652
/* CPU class name definitions: */

/* QOM type names for CPU model classes are "<model>" + this suffix */
#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
/* Relies on string-literal concatenation: @name must be a literal */
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
665
666 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
667 {
668 ObjectClass *oc;
669 char *typename;
670
671 if (cpu_model == NULL) {
672 return NULL;
673 }
674
675 typename = x86_cpu_type_name(cpu_model);
676 oc = object_class_by_name(typename);
677 g_free(typename);
678 return oc;
679 }
680
681 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
682 {
683 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
684 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
685 return g_strndup(class_name,
686 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
687 }
688
/* Static definition of one built-in CPU model. */
struct X86CPUDefinition {
    const char *name;     /* model name as used on the command line */
    uint32_t level;       /* highest basic CPUID level supported */
    uint32_t xlevel;      /* highest extended CPUID level (0x8000xxxx) */
    uint32_t xlevel2;     /* highest Centaur CPUID level (0xC000xxxx) --
                           * NOTE(review): presumably paired with
                           * FEAT_C000_0001_EDX; confirm in cpu.h */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;  /* initial feature bits per FeatureWord */
    char model_id[48];    /* model-id string; 48 chars matches the
                           * CPUID 0x80000002..4 brand string length --
                           * TODO confirm against the CPUID code */
};
702
703 static X86CPUDefinition builtin_x86_defs[] = {
704 {
705 .name = "qemu64",
706 .level = 0xd,
707 .vendor = CPUID_VENDOR_AMD,
708 .family = 6,
709 .model = 6,
710 .stepping = 3,
711 .features[FEAT_1_EDX] =
712 PPRO_FEATURES |
713 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
714 CPUID_PSE36,
715 .features[FEAT_1_ECX] =
716 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
717 .features[FEAT_8000_0001_EDX] =
718 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
719 .features[FEAT_8000_0001_ECX] =
720 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
721 .xlevel = 0x8000000A,
722 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
723 },
724 {
725 .name = "phenom",
726 .level = 5,
727 .vendor = CPUID_VENDOR_AMD,
728 .family = 16,
729 .model = 2,
730 .stepping = 3,
731 /* Missing: CPUID_HT */
732 .features[FEAT_1_EDX] =
733 PPRO_FEATURES |
734 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
735 CPUID_PSE36 | CPUID_VME,
736 .features[FEAT_1_ECX] =
737 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
738 CPUID_EXT_POPCNT,
739 .features[FEAT_8000_0001_EDX] =
740 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
741 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
742 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
743 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
744 CPUID_EXT3_CR8LEG,
745 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
746 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
747 .features[FEAT_8000_0001_ECX] =
748 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
749 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
750 /* Missing: CPUID_SVM_LBRV */
751 .features[FEAT_SVM] =
752 CPUID_SVM_NPT,
753 .xlevel = 0x8000001A,
754 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
755 },
756 {
757 .name = "core2duo",
758 .level = 10,
759 .vendor = CPUID_VENDOR_INTEL,
760 .family = 6,
761 .model = 15,
762 .stepping = 11,
763 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
764 .features[FEAT_1_EDX] =
765 PPRO_FEATURES |
766 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
767 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
768 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
769 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
770 .features[FEAT_1_ECX] =
771 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
772 CPUID_EXT_CX16,
773 .features[FEAT_8000_0001_EDX] =
774 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
775 .features[FEAT_8000_0001_ECX] =
776 CPUID_EXT3_LAHF_LM,
777 .xlevel = 0x80000008,
778 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
779 },
780 {
781 .name = "kvm64",
782 .level = 0xd,
783 .vendor = CPUID_VENDOR_INTEL,
784 .family = 15,
785 .model = 6,
786 .stepping = 1,
787 /* Missing: CPUID_HT */
788 .features[FEAT_1_EDX] =
789 PPRO_FEATURES | CPUID_VME |
790 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
791 CPUID_PSE36,
792 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
793 .features[FEAT_1_ECX] =
794 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
795 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
796 .features[FEAT_8000_0001_EDX] =
797 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
798 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
799 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
800 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
801 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
802 .features[FEAT_8000_0001_ECX] =
803 0,
804 .xlevel = 0x80000008,
805 .model_id = "Common KVM processor"
806 },
807 {
808 .name = "qemu32",
809 .level = 4,
810 .vendor = CPUID_VENDOR_INTEL,
811 .family = 6,
812 .model = 6,
813 .stepping = 3,
814 .features[FEAT_1_EDX] =
815 PPRO_FEATURES,
816 .features[FEAT_1_ECX] =
817 CPUID_EXT_SSE3,
818 .xlevel = 0x80000004,
819 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
820 },
821 {
822 .name = "kvm32",
823 .level = 5,
824 .vendor = CPUID_VENDOR_INTEL,
825 .family = 15,
826 .model = 6,
827 .stepping = 1,
828 .features[FEAT_1_EDX] =
829 PPRO_FEATURES | CPUID_VME |
830 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
831 .features[FEAT_1_ECX] =
832 CPUID_EXT_SSE3,
833 .features[FEAT_8000_0001_ECX] =
834 0,
835 .xlevel = 0x80000008,
836 .model_id = "Common 32-bit KVM processor"
837 },
838 {
839 .name = "coreduo",
840 .level = 10,
841 .vendor = CPUID_VENDOR_INTEL,
842 .family = 6,
843 .model = 14,
844 .stepping = 8,
845 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
846 .features[FEAT_1_EDX] =
847 PPRO_FEATURES | CPUID_VME |
848 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
849 CPUID_SS,
850 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
851 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
852 .features[FEAT_1_ECX] =
853 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
854 .features[FEAT_8000_0001_EDX] =
855 CPUID_EXT2_NX,
856 .xlevel = 0x80000008,
857 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
858 },
859 {
860 .name = "486",
861 .level = 1,
862 .vendor = CPUID_VENDOR_INTEL,
863 .family = 4,
864 .model = 8,
865 .stepping = 0,
866 .features[FEAT_1_EDX] =
867 I486_FEATURES,
868 .xlevel = 0,
869 },
870 {
871 .name = "pentium",
872 .level = 1,
873 .vendor = CPUID_VENDOR_INTEL,
874 .family = 5,
875 .model = 4,
876 .stepping = 3,
877 .features[FEAT_1_EDX] =
878 PENTIUM_FEATURES,
879 .xlevel = 0,
880 },
881 {
882 .name = "pentium2",
883 .level = 2,
884 .vendor = CPUID_VENDOR_INTEL,
885 .family = 6,
886 .model = 5,
887 .stepping = 2,
888 .features[FEAT_1_EDX] =
889 PENTIUM2_FEATURES,
890 .xlevel = 0,
891 },
892 {
893 .name = "pentium3",
894 .level = 3,
895 .vendor = CPUID_VENDOR_INTEL,
896 .family = 6,
897 .model = 7,
898 .stepping = 3,
899 .features[FEAT_1_EDX] =
900 PENTIUM3_FEATURES,
901 .xlevel = 0,
902 },
903 {
904 .name = "athlon",
905 .level = 2,
906 .vendor = CPUID_VENDOR_AMD,
907 .family = 6,
908 .model = 2,
909 .stepping = 3,
910 .features[FEAT_1_EDX] =
911 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
912 CPUID_MCA,
913 .features[FEAT_8000_0001_EDX] =
914 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
915 .xlevel = 0x80000008,
916 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
917 },
918 {
919 .name = "n270",
920 .level = 10,
921 .vendor = CPUID_VENDOR_INTEL,
922 .family = 6,
923 .model = 28,
924 .stepping = 2,
925 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
926 .features[FEAT_1_EDX] =
927 PPRO_FEATURES |
928 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
929 CPUID_ACPI | CPUID_SS,
930 /* Some CPUs got no CPUID_SEP */
931 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
932 * CPUID_EXT_XTPR */
933 .features[FEAT_1_ECX] =
934 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
935 CPUID_EXT_MOVBE,
936 .features[FEAT_8000_0001_EDX] =
937 CPUID_EXT2_NX,
938 .features[FEAT_8000_0001_ECX] =
939 CPUID_EXT3_LAHF_LM,
940 .xlevel = 0x80000008,
941 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
942 },
943 {
944 .name = "Conroe",
945 .level = 10,
946 .vendor = CPUID_VENDOR_INTEL,
947 .family = 6,
948 .model = 15,
949 .stepping = 3,
950 .features[FEAT_1_EDX] =
951 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
952 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
953 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
954 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
955 CPUID_DE | CPUID_FP87,
956 .features[FEAT_1_ECX] =
957 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
958 .features[FEAT_8000_0001_EDX] =
959 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
960 .features[FEAT_8000_0001_ECX] =
961 CPUID_EXT3_LAHF_LM,
962 .xlevel = 0x80000008,
963 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
964 },
965 {
966 .name = "Penryn",
967 .level = 10,
968 .vendor = CPUID_VENDOR_INTEL,
969 .family = 6,
970 .model = 23,
971 .stepping = 3,
972 .features[FEAT_1_EDX] =
973 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
974 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
975 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
976 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
977 CPUID_DE | CPUID_FP87,
978 .features[FEAT_1_ECX] =
979 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
980 CPUID_EXT_SSE3,
981 .features[FEAT_8000_0001_EDX] =
982 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
983 .features[FEAT_8000_0001_ECX] =
984 CPUID_EXT3_LAHF_LM,
985 .xlevel = 0x80000008,
986 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
987 },
988 {
989 .name = "Nehalem",
990 .level = 11,
991 .vendor = CPUID_VENDOR_INTEL,
992 .family = 6,
993 .model = 26,
994 .stepping = 3,
995 .features[FEAT_1_EDX] =
996 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
997 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
998 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
999 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1000 CPUID_DE | CPUID_FP87,
1001 .features[FEAT_1_ECX] =
1002 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1003 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1004 .features[FEAT_8000_0001_EDX] =
1005 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1006 .features[FEAT_8000_0001_ECX] =
1007 CPUID_EXT3_LAHF_LM,
1008 .xlevel = 0x80000008,
1009 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1010 },
1011 {
1012 .name = "Westmere",
1013 .level = 11,
1014 .vendor = CPUID_VENDOR_INTEL,
1015 .family = 6,
1016 .model = 44,
1017 .stepping = 1,
1018 .features[FEAT_1_EDX] =
1019 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1020 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1021 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1022 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1023 CPUID_DE | CPUID_FP87,
1024 .features[FEAT_1_ECX] =
1025 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1026 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1027 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1028 .features[FEAT_8000_0001_EDX] =
1029 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1030 .features[FEAT_8000_0001_ECX] =
1031 CPUID_EXT3_LAHF_LM,
1032 .features[FEAT_6_EAX] =
1033 CPUID_6_EAX_ARAT,
1034 .xlevel = 0x80000008,
1035 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1036 },
1037 {
1038 .name = "SandyBridge",
1039 .level = 0xd,
1040 .vendor = CPUID_VENDOR_INTEL,
1041 .family = 6,
1042 .model = 42,
1043 .stepping = 1,
1044 .features[FEAT_1_EDX] =
1045 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1046 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1047 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1048 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1049 CPUID_DE | CPUID_FP87,
1050 .features[FEAT_1_ECX] =
1051 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1052 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1053 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1054 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1055 CPUID_EXT_SSE3,
1056 .features[FEAT_8000_0001_EDX] =
1057 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1058 CPUID_EXT2_SYSCALL,
1059 .features[FEAT_8000_0001_ECX] =
1060 CPUID_EXT3_LAHF_LM,
1061 .features[FEAT_XSAVE] =
1062 CPUID_XSAVE_XSAVEOPT,
1063 .features[FEAT_6_EAX] =
1064 CPUID_6_EAX_ARAT,
1065 .xlevel = 0x80000008,
1066 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1067 },
1068 {
1069 .name = "IvyBridge",
1070 .level = 0xd,
1071 .vendor = CPUID_VENDOR_INTEL,
1072 .family = 6,
1073 .model = 58,
1074 .stepping = 9,
1075 .features[FEAT_1_EDX] =
1076 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1077 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1078 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1079 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1080 CPUID_DE | CPUID_FP87,
1081 .features[FEAT_1_ECX] =
1082 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1083 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1084 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1085 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1086 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1087 .features[FEAT_7_0_EBX] =
1088 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1089 CPUID_7_0_EBX_ERMS,
1090 .features[FEAT_8000_0001_EDX] =
1091 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1092 CPUID_EXT2_SYSCALL,
1093 .features[FEAT_8000_0001_ECX] =
1094 CPUID_EXT3_LAHF_LM,
1095 .features[FEAT_XSAVE] =
1096 CPUID_XSAVE_XSAVEOPT,
1097 .features[FEAT_6_EAX] =
1098 CPUID_6_EAX_ARAT,
1099 .xlevel = 0x80000008,
1100 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1101 },
1102 {
1103 .name = "Haswell-noTSX",
1104 .level = 0xd,
1105 .vendor = CPUID_VENDOR_INTEL,
1106 .family = 6,
1107 .model = 60,
1108 .stepping = 1,
1109 .features[FEAT_1_EDX] =
1110 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1111 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1112 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1113 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1114 CPUID_DE | CPUID_FP87,
1115 .features[FEAT_1_ECX] =
1116 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1117 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1118 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1119 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1120 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1121 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1122 .features[FEAT_8000_0001_EDX] =
1123 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1124 CPUID_EXT2_SYSCALL,
1125 .features[FEAT_8000_0001_ECX] =
1126 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1127 .features[FEAT_7_0_EBX] =
1128 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1129 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1130 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1131 .features[FEAT_XSAVE] =
1132 CPUID_XSAVE_XSAVEOPT,
1133 .features[FEAT_6_EAX] =
1134 CPUID_6_EAX_ARAT,
1135 .xlevel = 0x80000008,
1136 .model_id = "Intel Core Processor (Haswell, no TSX)",
1137 }, {
1138 .name = "Haswell",
1139 .level = 0xd,
1140 .vendor = CPUID_VENDOR_INTEL,
1141 .family = 6,
1142 .model = 60,
1143 .stepping = 1,
1144 .features[FEAT_1_EDX] =
1145 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1146 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1147 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1148 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1149 CPUID_DE | CPUID_FP87,
1150 .features[FEAT_1_ECX] =
1151 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1152 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1153 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1154 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1155 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1156 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1157 .features[FEAT_8000_0001_EDX] =
1158 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1159 CPUID_EXT2_SYSCALL,
1160 .features[FEAT_8000_0001_ECX] =
1161 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1162 .features[FEAT_7_0_EBX] =
1163 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1164 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1165 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1166 CPUID_7_0_EBX_RTM,
1167 .features[FEAT_XSAVE] =
1168 CPUID_XSAVE_XSAVEOPT,
1169 .features[FEAT_6_EAX] =
1170 CPUID_6_EAX_ARAT,
1171 .xlevel = 0x80000008,
1172 .model_id = "Intel Core Processor (Haswell)",
1173 },
1174 {
1175 .name = "Broadwell-noTSX",
1176 .level = 0xd,
1177 .vendor = CPUID_VENDOR_INTEL,
1178 .family = 6,
1179 .model = 61,
1180 .stepping = 2,
1181 .features[FEAT_1_EDX] =
1182 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1183 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1184 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1185 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1186 CPUID_DE | CPUID_FP87,
1187 .features[FEAT_1_ECX] =
1188 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1189 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1190 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1191 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1192 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1193 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1194 .features[FEAT_8000_0001_EDX] =
1195 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1196 CPUID_EXT2_SYSCALL,
1197 .features[FEAT_8000_0001_ECX] =
1198 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1199 .features[FEAT_7_0_EBX] =
1200 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1201 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1202 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1203 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1204 CPUID_7_0_EBX_SMAP,
1205 .features[FEAT_XSAVE] =
1206 CPUID_XSAVE_XSAVEOPT,
1207 .features[FEAT_6_EAX] =
1208 CPUID_6_EAX_ARAT,
1209 .xlevel = 0x80000008,
1210 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1211 },
1212 {
1213 .name = "Broadwell",
1214 .level = 0xd,
1215 .vendor = CPUID_VENDOR_INTEL,
1216 .family = 6,
1217 .model = 61,
1218 .stepping = 2,
1219 .features[FEAT_1_EDX] =
1220 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1221 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1222 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1223 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1224 CPUID_DE | CPUID_FP87,
1225 .features[FEAT_1_ECX] =
1226 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1227 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1228 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1229 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1230 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1231 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1232 .features[FEAT_8000_0001_EDX] =
1233 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1234 CPUID_EXT2_SYSCALL,
1235 .features[FEAT_8000_0001_ECX] =
1236 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1237 .features[FEAT_7_0_EBX] =
1238 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1239 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1240 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1241 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1242 CPUID_7_0_EBX_SMAP,
1243 .features[FEAT_XSAVE] =
1244 CPUID_XSAVE_XSAVEOPT,
1245 .features[FEAT_6_EAX] =
1246 CPUID_6_EAX_ARAT,
1247 .xlevel = 0x80000008,
1248 .model_id = "Intel Core Processor (Broadwell)",
1249 },
1250 {
1251 .name = "Skylake-Client",
1252 .level = 0xd,
1253 .vendor = CPUID_VENDOR_INTEL,
1254 .family = 6,
1255 .model = 94,
1256 .stepping = 3,
1257 .features[FEAT_1_EDX] =
1258 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1259 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1260 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1261 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1262 CPUID_DE | CPUID_FP87,
1263 .features[FEAT_1_ECX] =
1264 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1265 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1266 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1267 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1268 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1269 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1270 .features[FEAT_8000_0001_EDX] =
1271 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1272 CPUID_EXT2_SYSCALL,
1273 .features[FEAT_8000_0001_ECX] =
1274 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1275 .features[FEAT_7_0_EBX] =
1276 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1277 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1278 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1279 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1280 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1281 /* Missing: XSAVES (not supported by some Linux versions,
1282 * including v4.1 to v4.6).
1283 * KVM doesn't yet expose any XSAVES state save component,
1284 * and the only one defined in Skylake (processor tracing)
1285 * probably will block migration anyway.
1286 */
1287 .features[FEAT_XSAVE] =
1288 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1289 CPUID_XSAVE_XGETBV1,
1290 .features[FEAT_6_EAX] =
1291 CPUID_6_EAX_ARAT,
1292 .xlevel = 0x80000008,
1293 .model_id = "Intel Core Processor (Skylake)",
1294 },
1295 {
1296 .name = "Opteron_G1",
1297 .level = 5,
1298 .vendor = CPUID_VENDOR_AMD,
1299 .family = 15,
1300 .model = 6,
1301 .stepping = 1,
1302 .features[FEAT_1_EDX] =
1303 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1304 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1305 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1306 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1307 CPUID_DE | CPUID_FP87,
1308 .features[FEAT_1_ECX] =
1309 CPUID_EXT_SSE3,
1310 .features[FEAT_8000_0001_EDX] =
1311 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1312 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1313 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1314 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1315 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1316 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1317 .xlevel = 0x80000008,
1318 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1319 },
1320 {
1321 .name = "Opteron_G2",
1322 .level = 5,
1323 .vendor = CPUID_VENDOR_AMD,
1324 .family = 15,
1325 .model = 6,
1326 .stepping = 1,
1327 .features[FEAT_1_EDX] =
1328 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1329 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1330 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1331 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1332 CPUID_DE | CPUID_FP87,
1333 .features[FEAT_1_ECX] =
1334 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1335 /* Missing: CPUID_EXT2_RDTSCP */
1336 .features[FEAT_8000_0001_EDX] =
1337 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1338 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1339 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1340 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1341 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1342 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1343 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1344 .features[FEAT_8000_0001_ECX] =
1345 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1346 .xlevel = 0x80000008,
1347 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1348 },
1349 {
1350 .name = "Opteron_G3",
1351 .level = 5,
1352 .vendor = CPUID_VENDOR_AMD,
1353 .family = 15,
1354 .model = 6,
1355 .stepping = 1,
1356 .features[FEAT_1_EDX] =
1357 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1358 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1359 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1360 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1361 CPUID_DE | CPUID_FP87,
1362 .features[FEAT_1_ECX] =
1363 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1364 CPUID_EXT_SSE3,
1365 /* Missing: CPUID_EXT2_RDTSCP */
1366 .features[FEAT_8000_0001_EDX] =
1367 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1368 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1369 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1370 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1371 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1372 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1373 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1374 .features[FEAT_8000_0001_ECX] =
1375 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1376 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1377 .xlevel = 0x80000008,
1378 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1379 },
1380 {
1381 .name = "Opteron_G4",
1382 .level = 0xd,
1383 .vendor = CPUID_VENDOR_AMD,
1384 .family = 21,
1385 .model = 1,
1386 .stepping = 2,
1387 .features[FEAT_1_EDX] =
1388 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1389 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1390 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1391 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1392 CPUID_DE | CPUID_FP87,
1393 .features[FEAT_1_ECX] =
1394 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1395 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1396 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1397 CPUID_EXT_SSE3,
1398 /* Missing: CPUID_EXT2_RDTSCP */
1399 .features[FEAT_8000_0001_EDX] =
1400 CPUID_EXT2_LM |
1401 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1402 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1403 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1404 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1405 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1406 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1407 .features[FEAT_8000_0001_ECX] =
1408 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1409 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1410 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1411 CPUID_EXT3_LAHF_LM,
1412 /* no xsaveopt! */
1413 .xlevel = 0x8000001A,
1414 .model_id = "AMD Opteron 62xx class CPU",
1415 },
1416 {
1417 .name = "Opteron_G5",
1418 .level = 0xd,
1419 .vendor = CPUID_VENDOR_AMD,
1420 .family = 21,
1421 .model = 2,
1422 .stepping = 0,
1423 .features[FEAT_1_EDX] =
1424 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1425 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1426 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1427 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1428 CPUID_DE | CPUID_FP87,
1429 .features[FEAT_1_ECX] =
1430 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1431 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1432 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1433 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1434 /* Missing: CPUID_EXT2_RDTSCP */
1435 .features[FEAT_8000_0001_EDX] =
1436 CPUID_EXT2_LM |
1437 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1438 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1439 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1440 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1441 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1442 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1443 .features[FEAT_8000_0001_ECX] =
1444 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1445 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1446 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1447 CPUID_EXT3_LAHF_LM,
1448 /* no xsaveopt! */
1449 .xlevel = 0x8000001A,
1450 .model_id = "AMD Opteron 63xx class CPU",
1451 },
1452 };
1453
/* A (qdev property name, property value) string pair, used for tables of
 * property defaults such as kvm_default_props.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1457
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * Note: entries in this table may be overwritten at runtime by
 * x86_cpu_change_kvm_default(), so it is deliberately not const.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL }, /* end-of-list marker */
};
1474
1475 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1476 {
1477 PropValue *pv;
1478 for (pv = kvm_default_props; pv->prop; pv++) {
1479 if (!strcmp(pv->prop, prop)) {
1480 pv->value = value;
1481 break;
1482 }
1483 }
1484
1485 /* It is valid to call this function only for properties that
1486 * are already present in the kvm_default_props table.
1487 */
1488 assert(pv->prop);
1489 }
1490
1491 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1492 bool migratable_only);
1493
1494 #ifdef CONFIG_KVM
1495
/* Fill @str (at least 48 bytes) with the host CPU model string.
 *
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the model
 * string in EAX, EBX, ECX, EDX, in that order.  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4];
    int i;

    for (i = 0; i < 3; i++) {
        regs[0] = regs[1] = regs[2] = regs[3] = 0;
        host_cpuid(0x80000002 + i, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* regs[] is contiguous, so one copy covers EAX..EDX */
        memcpy(str + i * 16, regs, sizeof(regs));
    }
    return 0;
}
1510
1511 static X86CPUDefinition host_cpudef;
1512
/* qdev properties specific to the "host" CPU model */
static Property host_x86_cpu_properties[] = {
    /* Only expose migratable features by default */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* Pass the host's cache topology through to the guest */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1518
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    /* "-cpu host" requires KVM; realize will fail otherwise */
    xcc->kvm_required = true;

    /* Leaf 0: vendor string comes back in EBX, EDX, ECX order */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* Leaf 1 EAX layout: stepping [3:0], model [7:4], family [11:8],
     * extended model [19:16], extended family [27:20].
     * NOTE(review): extended family/model are folded in unconditionally
     * here, not only for family 0xf/0x6 as the SDM describes -- harmless
     * in practice since the extended fields are zero otherwise; confirm.
     */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
1551
/* instance_init for the "host" CPU model: query KVM for the supported
 * CPUID levels and enable the PMU by default.
 */
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
    if (kvm_enabled()) {
        /* Maximum basic, extended, and Centaur CPUID leaves, respectively */
        env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1572
/* QOM type registration for the "host" CPU model (KVM-only) */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1579
1580 #endif
1581
1582 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1583 {
1584 FeatureWordInfo *f = &feature_word_info[w];
1585 int i;
1586
1587 for (i = 0; i < 32; ++i) {
1588 if ((1UL << i) & mask) {
1589 const char *reg = get_register_name_32(f->cpuid_reg);
1590 assert(reg);
1591 fprintf(stderr, "warning: %s doesn't support requested feature: "
1592 "CPUID.%02XH:%s%s%s [bit %d]\n",
1593 kvm_enabled() ? "host" : "TCG",
1594 f->cpuid_eax, reg,
1595 f->feat_names[i] ? "." : "",
1596 f->feat_names[i] ? f->feat_names[i] : "", i);
1597 }
1598 }
1599 }
1600
1601 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1602 const char *name, void *opaque,
1603 Error **errp)
1604 {
1605 X86CPU *cpu = X86_CPU(obj);
1606 CPUX86State *env = &cpu->env;
1607 int64_t value;
1608
1609 value = (env->cpuid_version >> 8) & 0xf;
1610 if (value == 0xf) {
1611 value += (env->cpuid_version >> 20) & 0xff;
1612 }
1613 visit_type_int(v, name, &value, errp);
1614 }
1615
1616 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1617 const char *name, void *opaque,
1618 Error **errp)
1619 {
1620 X86CPU *cpu = X86_CPU(obj);
1621 CPUX86State *env = &cpu->env;
1622 const int64_t min = 0;
1623 const int64_t max = 0xff + 0xf;
1624 Error *local_err = NULL;
1625 int64_t value;
1626
1627 visit_type_int(v, name, &value, &local_err);
1628 if (local_err) {
1629 error_propagate(errp, local_err);
1630 return;
1631 }
1632 if (value < min || value > max) {
1633 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1634 name ? name : "null", value, min, max);
1635 return;
1636 }
1637
1638 env->cpuid_version &= ~0xff00f00;
1639 if (value > 0x0f) {
1640 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1641 } else {
1642 env->cpuid_version |= value << 8;
1643 }
1644 }
1645
1646 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1647 const char *name, void *opaque,
1648 Error **errp)
1649 {
1650 X86CPU *cpu = X86_CPU(obj);
1651 CPUX86State *env = &cpu->env;
1652 int64_t value;
1653
1654 value = (env->cpuid_version >> 4) & 0xf;
1655 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1656 visit_type_int(v, name, &value, errp);
1657 }
1658
1659 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1660 const char *name, void *opaque,
1661 Error **errp)
1662 {
1663 X86CPU *cpu = X86_CPU(obj);
1664 CPUX86State *env = &cpu->env;
1665 const int64_t min = 0;
1666 const int64_t max = 0xff;
1667 Error *local_err = NULL;
1668 int64_t value;
1669
1670 visit_type_int(v, name, &value, &local_err);
1671 if (local_err) {
1672 error_propagate(errp, local_err);
1673 return;
1674 }
1675 if (value < min || value > max) {
1676 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1677 name ? name : "null", value, min, max);
1678 return;
1679 }
1680
1681 env->cpuid_version &= ~0xf00f0;
1682 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1683 }
1684
1685 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1686 const char *name, void *opaque,
1687 Error **errp)
1688 {
1689 X86CPU *cpu = X86_CPU(obj);
1690 CPUX86State *env = &cpu->env;
1691 int64_t value;
1692
1693 value = env->cpuid_version & 0xf;
1694 visit_type_int(v, name, &value, errp);
1695 }
1696
1697 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1698 const char *name, void *opaque,
1699 Error **errp)
1700 {
1701 X86CPU *cpu = X86_CPU(obj);
1702 CPUX86State *env = &cpu->env;
1703 const int64_t min = 0;
1704 const int64_t max = 0xf;
1705 Error *local_err = NULL;
1706 int64_t value;
1707
1708 visit_type_int(v, name, &value, &local_err);
1709 if (local_err) {
1710 error_propagate(errp, local_err);
1711 return;
1712 }
1713 if (value < min || value > max) {
1714 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1715 name ? name : "null", value, min, max);
1716 return;
1717 }
1718
1719 env->cpuid_version &= ~0xf;
1720 env->cpuid_version |= value & 0xf;
1721 }
1722
1723 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1724 {
1725 X86CPU *cpu = X86_CPU(obj);
1726 CPUX86State *env = &cpu->env;
1727 char *value;
1728
1729 value = g_malloc(CPUID_VENDOR_SZ + 1);
1730 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1731 env->cpuid_vendor3);
1732 return value;
1733 }
1734
1735 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1736 Error **errp)
1737 {
1738 X86CPU *cpu = X86_CPU(obj);
1739 CPUX86State *env = &cpu->env;
1740 int i;
1741
1742 if (strlen(value) != CPUID_VENDOR_SZ) {
1743 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1744 return;
1745 }
1746
1747 env->cpuid_vendor1 = 0;
1748 env->cpuid_vendor2 = 0;
1749 env->cpuid_vendor3 = 0;
1750 for (i = 0; i < 4; i++) {
1751 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1752 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1753 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1754 }
1755 }
1756
1757 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1758 {
1759 X86CPU *cpu = X86_CPU(obj);
1760 CPUX86State *env = &cpu->env;
1761 char *value;
1762 int i;
1763
1764 value = g_malloc(48 + 1);
1765 for (i = 0; i < 48; i++) {
1766 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1767 }
1768 value[48] = '\0';
1769 return value;
1770 }
1771
1772 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1773 Error **errp)
1774 {
1775 X86CPU *cpu = X86_CPU(obj);
1776 CPUX86State *env = &cpu->env;
1777 int c, len, i;
1778
1779 if (model_id == NULL) {
1780 model_id = "";
1781 }
1782 len = strlen(model_id);
1783 memset(env->cpuid_model, 0, 48);
1784 for (i = 0; i < 48; i++) {
1785 if (i >= len) {
1786 c = '\0';
1787 } else {
1788 c = (uint8_t)model_id[i];
1789 }
1790 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1791 }
1792 }
1793
1794 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1795 void *opaque, Error **errp)
1796 {
1797 X86CPU *cpu = X86_CPU(obj);
1798 int64_t value;
1799
1800 value = cpu->env.tsc_khz * 1000;
1801 visit_type_int(v, name, &value, errp);
1802 }
1803
1804 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1805 void *opaque, Error **errp)
1806 {
1807 X86CPU *cpu = X86_CPU(obj);
1808 const int64_t min = 0;
1809 const int64_t max = INT64_MAX;
1810 Error *local_err = NULL;
1811 int64_t value;
1812
1813 visit_type_int(v, name, &value, &local_err);
1814 if (local_err) {
1815 error_propagate(errp, local_err);
1816 return;
1817 }
1818 if (value < min || value > max) {
1819 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1820 name ? name : "null", value, min, max);
1821 return;
1822 }
1823
1824 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1825 }
1826
1827 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1828 void *opaque, Error **errp)
1829 {
1830 X86CPU *cpu = X86_CPU(obj);
1831 int64_t value = cpu->apic_id;
1832
1833 visit_type_int(v, name, &value, errp);
1834 }
1835
1836 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1837 void *opaque, Error **errp)
1838 {
1839 X86CPU *cpu = X86_CPU(obj);
1840 DeviceState *dev = DEVICE(obj);
1841 const int64_t min = 0;
1842 const int64_t max = UINT32_MAX;
1843 Error *error = NULL;
1844 int64_t value;
1845
1846 if (dev->realized) {
1847 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1848 "it was realized", name, object_get_typename(obj));
1849 return;
1850 }
1851
1852 visit_type_int(v, name, &value, &error);
1853 if (error) {
1854 error_propagate(errp, error);
1855 return;
1856 }
1857 if (value < min || value > max) {
1858 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1859 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1860 object_get_typename(obj), name, value, min, max);
1861 return;
1862 }
1863
1864 if ((value != cpu->apic_id) && cpu_exists(value)) {
1865 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1866 return;
1867 }
1868 cpu->apic_id = value;
1869 }
1870
/* Generic getter for "feature-words" and "filtered-features" properties
 *
 * @opaque points to the FeatureWordArray (array of uint32_t, one entry per
 * FeatureWord) whose contents should be reported.  Builds a QAPI
 * X86CPUFeatureWordInfoList describing every feature word and emits it
 * through the visitor.  The list nodes live on the stack; this is safe
 * because the visitor consumes the list before the function returns.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    Error *err = NULL;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        /* Describe which CPUID leaf/register this word corresponds to */
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
    error_propagate(errp, err);
}
1901
1902 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1903 void *opaque, Error **errp)
1904 {
1905 X86CPU *cpu = X86_CPU(obj);
1906 int64_t value = cpu->hyperv_spinlock_attempts;
1907
1908 visit_type_int(v, name, &value, errp);
1909 }
1910
1911 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1912 void *opaque, Error **errp)
1913 {
1914 const int64_t min = 0xFFF;
1915 const int64_t max = UINT_MAX;
1916 X86CPU *cpu = X86_CPU(obj);
1917 Error *err = NULL;
1918 int64_t value;
1919
1920 visit_type_int(v, name, &value, &err);
1921 if (err) {
1922 error_propagate(errp, err);
1923 return;
1924 }
1925
1926 if (value < min || value > max) {
1927 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1928 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1929 object_get_typename(obj), name ? name : "null",
1930 value, min, max);
1931 return;
1932 }
1933 cpu->hyperv_spinlock_attempts = value;
1934 }
1935
/* qdev property descriptor backing the "hv-spinlocks" CPU property. */
static PropertyInfo qdev_prop_spinlocks = {
    .name  = "int",
    .get  = x86_get_hv_spinlocks,
    .set  = x86_set_hv_spinlocks,
};
1941
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 * The string is rewritten in place.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = s; *p != '\0'; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
1951
/* Compatibily hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the later is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 *
 * These accumulate across x86_cpu_parse_featurestr() calls and are
 * applied last in x86_cpu_realizefn().
 */
static FeatureWordArray plus_features = { 0 };
static FeatureWordArray minus_features = { 0 };
1959
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * "+feat"/"-feat" update the global plus_features/minus_features bitmaps
 * (legacy semantics, applied at realize time); "feat=val" and bare "feat"
 * are translated into QOM property sets on the CPU object.
 * NOTE: strtok() modifies 'features' in place and is not reentrant; callers
 * must pass a private writable copy of the string.
 */
static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
                                     Error **errp)
{
    X86CPU *cpu = X86_CPU(cs);
    char *featurestr; /* Single 'key=value" string being parsed */
    Error *local_err = NULL;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr && !local_err;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
            continue;
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
            continue;
        }

        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            /* Bare "feat" is shorthand for "feat=on" */
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Special case: */
        if (!strcmp(name, "tsc-freq")) {
            int64_t tsc_freq;
            char *err;
            char num[32];

            /* Accept suffixed values like "2G"; canonicalize to Hz */
            tsc_freq = qemu_strtosz_suffix_unit(val, &err,
                                           QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
            if (tsc_freq < 0 || *err) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        object_property_parse(OBJECT(cpu), val, name, &local_err);
    }

    if (local_err) {
        error_propagate(errp, local_err);
    }
}
2024
2025 /* Print all cpuid feature names in featureset
2026 */
2027 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2028 {
2029 int bit;
2030 bool first = true;
2031
2032 for (bit = 0; bit < 32; bit++) {
2033 if (featureset[bit]) {
2034 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2035 first = false;
2036 }
2037 }
2038 }
2039
/* generate CPU information.
 *
 * Prints all builtin CPU model names plus (under KVM builds) the "host"
 * model, followed by every recognized CPUID flag name, to stream f.
 * Used by "-cpu help".
 */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    X86CPUDefinition *def;
    char buf[256];
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        def = &builtin_x86_defs[i];
        snprintf(buf, sizeof(buf), "%s", def->name);
        (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
    }
#ifdef CONFIG_KVM
    (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
                   "KVM processor with all supported host features "
                   "(only available in KVM mode)");
#endif

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        (*cpu_fprintf)(f, "  ");
        listflags(f, cpu_fprintf, fw->feat_names);
        (*cpu_fprintf)(f, "\n");
    }
}
2067
2068 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2069 {
2070 CpuDefinitionInfoList *cpu_list = NULL;
2071 X86CPUDefinition *def;
2072 int i;
2073
2074 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2075 CpuDefinitionInfoList *entry;
2076 CpuDefinitionInfo *info;
2077
2078 def = &builtin_x86_defs[i];
2079 info = g_malloc0(sizeof(*info));
2080 info->name = g_strdup(def->name);
2081
2082 entry = g_malloc0(sizeof(*entry));
2083 entry->value = info;
2084 entry->next = cpu_list;
2085 cpu_list = entry;
2086 }
2087
2088 return cpu_list;
2089 }
2090
2091 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2092 bool migratable_only)
2093 {
2094 FeatureWordInfo *wi = &feature_word_info[w];
2095 uint32_t r;
2096
2097 if (kvm_enabled()) {
2098 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2099 wi->cpuid_ecx,
2100 wi->cpuid_reg);
2101 } else if (tcg_enabled()) {
2102 r = wi->tcg_features;
2103 } else {
2104 return ~0;
2105 }
2106 if (migratable_only) {
2107 r &= x86_cpu_get_migratable_flags(w);
2108 }
2109 return r;
2110 }
2111
2112 /*
2113 * Filters CPU feature words based on host availability of each feature.
2114 *
2115 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2116 */
2117 static int x86_cpu_filter_features(X86CPU *cpu)
2118 {
2119 CPUX86State *env = &cpu->env;
2120 FeatureWord w;
2121 int rv = 0;
2122
2123 for (w = 0; w < FEATURE_WORDS; w++) {
2124 uint32_t host_feat =
2125 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2126 uint32_t requested_features = env->features[w];
2127 env->features[w] &= host_feat;
2128 cpu->filtered_features[w] = requested_features & ~env->features[w];
2129 if (cpu->filtered_features[w]) {
2130 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2131 report_unavailable_features(w, cpu->filtered_features[w]);
2132 }
2133 rv = 1;
2134 }
2135 }
2136
2137 return rv;
2138 }
2139
2140 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2141 {
2142 PropValue *pv;
2143 for (pv = props; pv->prop; pv++) {
2144 if (!pv->value) {
2145 continue;
2146 }
2147 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2148 &error_abort);
2149 }
2150 }
2151
/* Load data from X86CPUDefinition
 *
 * Copies the model definition's CPUID levels, version fields and feature
 * words into the CPU object, applies KVM-specific default properties,
 * and sets the vendor string (host vendor under KVM unless overridden).
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    object_property_set_int(OBJECT(cpu), def->level, "level", errp);
    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            /* Userspace APIC cannot keep up with x2apic expectations */
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    /* Guests can detect they run under a hypervisor via this bit */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2201
/* Create an X86CPU object from a "model[,features...]" string.
 *
 * Returns a new (unrealized) CPU on success, NULL on failure with *errp
 * set.  On any error after allocation the partially built CPU is unref'd.
 */
X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
{
    X86CPU *cpu = NULL;
    ObjectClass *oc;
    gchar **model_pieces;
    char *name, *features;
    Error *error = NULL;

    /* Split only once: everything after the first ',' is the feature list */
    model_pieces = g_strsplit(cpu_model, ",", 2);
    if (!model_pieces[0]) {
        error_setg(&error, "Invalid/empty CPU model name");
        goto out;
    }
    name = model_pieces[0];
    features = model_pieces[1];

    oc = x86_cpu_class_by_name(name);
    if (oc == NULL) {
        error_setg(&error, "Unable to find CPU definition: %s", name);
        goto out;
    }

    cpu = X86_CPU(object_new(object_class_get_name(oc)));

    x86_cpu_parse_featurestr(CPU(cpu), features, &error);
    if (error) {
        goto out;
    }

out:
    if (error != NULL) {
        error_propagate(errp, error);
        if (cpu) {
            object_unref(OBJECT(cpu));
            cpu = NULL;
        }
    }
    g_strfreev(model_pieces);
    return cpu;
}
2242
/* Convenience wrapper: create and realize an x86 CPU from a model string. */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
2247
/* class_init for per-model CPU types: stash the model definition
 * (passed as class_data) in the class for later use at instance init.
 */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
}
2255
2256 static void x86_register_cpudef_type(X86CPUDefinition *def)
2257 {
2258 char *typename = x86_cpu_type_name(def->name);
2259 TypeInfo ti = {
2260 .name = typename,
2261 .parent = TYPE_X86_CPU,
2262 .class_init = x86_cpu_cpudef_class_init,
2263 .class_data = def,
2264 };
2265
2266 type_register(&ti);
2267 g_free(typename);
2268 }
2269
#if !defined(CONFIG_USER_ONLY)

/* Hide the APIC from the guest by clearing CPUID[1].EDX bit 9. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */
2278
/* Emulate the CPUID instruction: fill *eax..*edx for leaf 'index',
 * subleaf 'count', from the CPU's configured feature/topology state.
 * Out-of-range leaves are clamped per Intel SDM rules (and the Centaur
 * 0xC0000000 range when xlevel2 is set).
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, APIC ID, and the two main feature words */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        *ecx = 0;
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(smp_cores, smp_threads);
            *ebx = smp_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(smp_cores, smp_threads);
            *ebx = smp_cores * smp_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        KVMState *s = cs->kvm_state;
        uint64_t ena_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }
        if (kvm_enabled()) {
            /* XCR0 bits the host kernel allows, as a 64-bit mask */
            ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
            ena_mask <<= 32;
            ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
        } else {
            ena_mask = -1;
        }

        if (count == 0) {
            *ecx = 0x240;
            for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
                const ExtSaveArea *esa = &x86_ext_save_areas[i];
                if ((env->features[esa->feature] & esa->bits) == esa->bits
                    && ((ena_mask >> i) & 1) != 0) {
                    if (i < 32) {
                        *eax |= 1u << i;
                    } else {
                        *edx |= 1u << (i - 32);
                    }
                    *ecx = MAX(*ecx, esa->offset + esa->size);
                }
            }
            *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            const ExtSaveArea *esa = &x86_ext_save_areas[count];
            if ((env->features[esa->feature] & esa->bits) == esa->bits
                && ((ena_mask >> count) & 1) != 0) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        *edx = ((L3_SIZE_KB/512) << 18) | \
               (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
               (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        break;
    case 0x80000007:
        /* Advanced Power Management (invariant TSC etc.) */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        /* XXX: This value must match the one used in the MMU code. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            /* XXX: The physical address space is limited to 42 bits in exec.c. */
            *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
        } else {
            if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
                *eax = 0x00000024; /* 36 bits physical */
            } else {
                *eax = 0x00000020; /* 32 bits physical */
            }
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2670
/* CPUClass::reset()
 *
 * Put the CPU into the architectural power-on/RESET state: real mode,
 * CS:IP = F000:FFF0, FPU/SSE defaults, cleared MSR/MTRR state.
 * CPUID configuration (everything at/after cpuid_level in CPUX86State)
 * is deliberately preserved across reset.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero only the dynamic state; fields from cpuid_level on are config */
    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS base 0xffff0000 so the first fetch hits the
     * reset vector at physical 0xfffffff0.
     */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((env->features[esa->feature] & esa->bits) == esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2797
#ifndef CONFIG_USER_ONLY
/* A CPU is the bootstrap processor iff its APIC base MSR has the BSP bit. */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif
2811
2812 static void mce_init(X86CPU *cpu)
2813 {
2814 CPUX86State *cenv = &cpu->env;
2815 unsigned int bank;
2816
2817 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2818 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2819 (CPUID_MCE | CPUID_MCA)) {
2820 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2821 cenv->mcg_ctl = ~(uint64_t)0;
2822 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2823 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2824 }
2825 }
2826 }
2827
2828 #ifndef CONFIG_USER_ONLY
/* Create the per-CPU APIC device, picking the implementation that matches
 * the accelerator (in-kernel KVM APIC, Xen APIC, or the emulated one),
 * and attach it as a QOM child of the CPU.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = DEVICE(object_new(apic_type));

    object_property_add_child(OBJECT(cpu), "apic",
                              OBJECT(cpu->apic_state), NULL);
    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
2850
/* Realize the CPU's APIC device (if one was created) and map the APIC
 * MMIO window.  The MMIO region is shared by all CPUs, so it is only
 * mapped once, by the first CPU to get here.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
2873
/* machine-init-done notifier: if the machine exposes /machine/smram,
 * alias it into this CPU's address space (disabled by default; enabled
 * while the CPU is in SMM).
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
2888 #else
/* No APIC device in user-mode emulation: realize is a no-op. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
2892 #endif
2893
2894
/* Vendor checks against the configured CPUID vendor string words */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for x86 CPUs.
 *
 * Finalizes the feature words (host features, legacy +/- flags, host
 * filtering), creates the APIC and per-CPU address space where needed,
 * starts the vcpu, and chains to the parent realize.  Order of the steps
 * matters; see the inline notes.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;
    FeatureWord w;

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id < 0) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /*TODO: cpu->host_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->host_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Legacy "+feat"/"-feat" semantics: applied last, overriding feat=on|off */
    for (w = 0; w < FEATURE_WORDS; w++) {
        cpu->env.features[w] |= plus_features[w];
        cpu->env.features[w] &= ~minus_features[w];
    }

    /* Leaf 7 features require cpuid_level >= 7 to be visible to the guest */
    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }


    cpu_exec_init(cs, &error_abort);

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3039
/* Backing state for one boolean QOM feature-bit property: the feature
 * word it points into and the bit mask it controls.  The mask may cover
 * more than one bit when the same property name is registered multiple
 * times (see x86_cpu_register_bit_prop()).
 */
typedef struct BitProperty {
    uint32_t *ptr;  /* points at the CPU's feature word */
    uint32_t mask;  /* bit(s) this property reads/writes */
} BitProperty;
3044
3045 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3046 void *opaque, Error **errp)
3047 {
3048 BitProperty *fp = opaque;
3049 bool value = (*fp->ptr & fp->mask) == fp->mask;
3050 visit_type_bool(v, name, &value, errp);
3051 }
3052
3053 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3054 void *opaque, Error **errp)
3055 {
3056 DeviceState *dev = DEVICE(obj);
3057 BitProperty *fp = opaque;
3058 Error *local_err = NULL;
3059 bool value;
3060
3061 if (dev->realized) {
3062 qdev_prop_set_after_realize(dev, name, errp);
3063 return;
3064 }
3065
3066 visit_type_bool(v, name, &value, &local_err);
3067 if (local_err) {
3068 error_propagate(errp, local_err);
3069 return;
3070 }
3071
3072 if (value) {
3073 *fp->ptr |= fp->mask;
3074 } else {
3075 *fp->ptr &= ~fp->mask;
3076 }
3077 }
3078
3079 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3080 void *opaque)
3081 {
3082 BitProperty *prop = opaque;
3083 g_free(prop);
3084 }
3085
3086 /* Register a boolean property to get/set a single bit in a uint32_t field.
3087 *
3088 * The same property name can be registered multiple times to make it affect
3089 * multiple bits in the same FeatureWord. In that case, the getter will return
3090 * true only if all bits are set.
3091 */
3092 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3093 const char *prop_name,
3094 uint32_t *field,
3095 int bitnr)
3096 {
3097 BitProperty *fp;
3098 ObjectProperty *op;
3099 uint32_t mask = (1UL << bitnr);
3100
3101 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3102 if (op) {
3103 fp = op->opaque;
3104 assert(fp->ptr == field);
3105 fp->mask |= mask;
3106 } else {
3107 fp = g_new0(BitProperty, 1);
3108 fp->ptr = field;
3109 fp->mask = mask;
3110 object_property_add(OBJECT(cpu), prop_name, "bool",
3111 x86_cpu_get_bit_prop,
3112 x86_cpu_set_bit_prop,
3113 x86_cpu_release_bit_prop, fp, &error_abort);
3114 }
3115 }
3116
3117 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3118 FeatureWord w,
3119 int bitnr)
3120 {
3121 Object *obj = OBJECT(cpu);
3122 int i;
3123 char **names;
3124 FeatureWordInfo *fi = &feature_word_info[w];
3125
3126 if (!fi->feat_names) {
3127 return;
3128 }
3129 if (!fi->feat_names[bitnr]) {
3130 return;
3131 }
3132
3133 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3134
3135 feat2prop(names[0]);
3136 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3137
3138 for (i = 1; names[i]; i++) {
3139 feat2prop(names[i]);
3140 object_property_add_alias(obj, names[i], obj, names[0],
3141 &error_abort);
3142 }
3143
3144 g_strfreev(names);
3145 }
3146
/* QOM instance_init for X86CPU: wires up the CPUID-related properties,
 * registers one boolean property per named feature bit, and loads the
 * defaults of the CPU model class.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* CPUID identification fields exposed as QOM properties */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "apic-id", "int",
                        x86_cpuid_get_apic_id,
                        x86_cpuid_set_apic_id, NULL, NULL, NULL);
    /* Read-only views backed by the enabled and host-filtered feature words */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

#ifndef CONFIG_USER_ONLY
    /* Any code creating new X86CPU objects have to set apic-id explicitly */
    cpu->apic_id = -1;
#endif

    /* One boolean property per (feature word, bit) that has a name */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3202
3203 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3204 {
3205 X86CPU *cpu = X86_CPU(cs);
3206
3207 return cpu->apic_id;
3208 }
3209
3210 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3211 {
3212 X86CPU *cpu = X86_CPU(cs);
3213
3214 return cpu->env.cr[0] & CR0_PG_MASK;
3215 }
3216
3217 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3218 {
3219 X86CPU *cpu = X86_CPU(cs);
3220
3221 cpu->env.eip = value;
3222 }
3223
3224 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3225 {
3226 X86CPU *cpu = X86_CPU(cs);
3227
3228 cpu->env.eip = tb->pc - tb->cs_base;
3229 }
3230
3231 static bool x86_cpu_has_work(CPUState *cs)
3232 {
3233 X86CPU *cpu = X86_CPU(cs);
3234 CPUX86State *env = &cpu->env;
3235
3236 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3237 CPU_INTERRUPT_POLL)) &&
3238 (env->eflags & IF_MASK)) ||
3239 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3240 CPU_INTERRUPT_INIT |
3241 CPU_INTERRUPT_SIPI |
3242 CPU_INTERRUPT_MCE)) ||
3243 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3244 !(env->hflags & HF_SMM_MASK));
3245 }
3246
/* qdev properties available on every X86CPU (settable via -cpu model,prop=...) */
static Property x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments exposed to Windows guests */
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* CPUID checking/enforcement policy against host capabilities */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    /* CPUID level overrides (0 = use the model's defaults) */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_END_OF_LIST()
};
3269
/* Class init for the abstract TYPE_X86_CPU: installs the realize/reset
 * overrides (saving the parent implementations for chaining) and fills
 * in the CPUClass hook table shared by all x86 CPU models.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Save parent realize before overriding, so realizefn can chain up */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->props = x86_cpu_properties;

    /* Same pattern for reset */
    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    /* User-mode emulation handles faults directly, no softmmu */
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System emulation: memory introspection, crash notes and migration */
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
3320
/* Abstract base QOM type for x86 CPUs; concrete model types are
 * registered separately in x86_cpu_register_types().
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,  /* only model subtypes are instantiable */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3330
3331 static void x86_cpu_register_types(void)
3332 {
3333 int i;
3334
3335 type_register_static(&x86_cpu_type_info);
3336 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3337 x86_register_cpudef_type(&builtin_x86_defs[i]);
3338 }
3339 #ifdef CONFIG_KVM
3340 type_register_static(&host_x86_cpu_type_info);
3341 #endif
3342 }
3343
3344 type_init(x86_cpu_register_types)