target-i386: Use xsave structs for ext_save_area
target-i386/cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #ifndef CONFIG_USER_ONLY
45 #include "exec/address-spaces.h"
46 #include "hw/hw.h"
47 #include "hw/xen/xen.h"
48 #include "hw/i386/apic_internal.h"
49 #endif
50
51
52 /* Cache topology CPUID constants: */
53
54 /* CPUID Leaf 2 Descriptors */
55
56 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
57 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
58 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
59
60
61 /* CPUID Leaf 4 constants: */
62
63 /* EAX: */
64 #define CPUID_4_TYPE_DCACHE 1
65 #define CPUID_4_TYPE_ICACHE 2
66 #define CPUID_4_TYPE_UNIFIED 3
67
68 #define CPUID_4_LEVEL(l) ((l) << 5)
69
70 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
71 #define CPUID_4_FULLY_ASSOC (1 << 9)
72
73 /* EDX: */
74 #define CPUID_4_NO_INVD_SHARING (1 << 0)
75 #define CPUID_4_INCLUSIVE (1 << 1)
76 #define CPUID_4_COMPLEX_IDX (1 << 2)
77
78 #define ASSOC_FULL 0xFF
79
80 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
81 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
82 a == 2 ? 0x2 : \
83 a == 4 ? 0x4 : \
84 a == 8 ? 0x6 : \
85 a == 16 ? 0x8 : \
86 a == 32 ? 0xA : \
87 a == 48 ? 0xB : \
88 a == 64 ? 0xC : \
89 a == 96 ? 0xD : \
90 a == 128 ? 0xE : \
91 a == ASSOC_FULL ? 0xF : \
92 0 /* invalid value */)
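/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * AMD_ENC_ASSOC() compresses a ways-of-associativity count into the 4-bit
 * code that CPUID leaf 0x80000006 expects in its associativity fields.
 * The helper name below is hypothetical; the real code expands the macro
 * inline when it builds the leaf.
 */
static inline uint32_t example_l2_assoc_field(unsigned ways)
{
    /* e.g. 16 ways -> 0x8, ASSOC_FULL (0xFF) -> 0xF, 3 ways -> 0 (invalid) */
    return AMD_ENC_ASSOC(ways) << 12;   /* L2 associativity, ECX bits 15:12 */
}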
93
94
95 /* Definitions of the hardcoded cache entries we expose: */
96
97 /* L1 data cache: */
98 #define L1D_LINE_SIZE 64
99 #define L1D_ASSOCIATIVITY 8
100 #define L1D_SETS 64
101 #define L1D_PARTITIONS 1
102 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
103 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
104 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
105 #define L1D_LINES_PER_TAG 1
106 #define L1D_SIZE_KB_AMD 64
107 #define L1D_ASSOCIATIVITY_AMD 2
108
109 /* L1 instruction cache: */
110 #define L1I_LINE_SIZE 64
111 #define L1I_ASSOCIATIVITY 8
112 #define L1I_SETS 64
113 #define L1I_PARTITIONS 1
114 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
115 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
116 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
117 #define L1I_LINES_PER_TAG 1
118 #define L1I_SIZE_KB_AMD 64
119 #define L1I_ASSOCIATIVITY_AMD 2
120
121 /* Level 2 unified cache: */
122 #define L2_LINE_SIZE 64
123 #define L2_ASSOCIATIVITY 16
124 #define L2_SETS 4096
125 #define L2_PARTITIONS 1
126 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
127 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
128 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
129 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
130 #define L2_LINES_PER_TAG 1
131 #define L2_SIZE_KB_AMD 512
132
133 /* No L3 cache: */
134 #define L3_SIZE_KB 0 /* disabled */
135 #define L3_ASSOCIATIVITY 0 /* disabled */
136 #define L3_LINES_PER_TAG 0 /* disabled */
137 #define L3_LINE_SIZE 0 /* disabled */
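/*
 * Worked example (editor's addition, not part of the original file): the
 * "Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS" formula quoted above,
 * checked against the hard-coded geometries.  QEMU_BUILD_BUG_ON is available
 * via qemu/osdep.h; the checks are purely illustrative.
 */
QEMU_BUILD_BUG_ON(L1D_LINE_SIZE * L1D_ASSOCIATIVITY * L1D_SETS *
                  L1D_PARTITIONS != 32 * 1024);        /* 64 * 8 * 64 * 1    */
QEMU_BUILD_BUG_ON(L2_LINE_SIZE * L2_ASSOCIATIVITY * L2_SETS *
                  L2_PARTITIONS != 4 * 1024 * 1024);   /* 64 * 16 * 4096 * 1 */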
138
139 /* TLB definitions: */
140
141 #define L1_DTLB_2M_ASSOC 1
142 #define L1_DTLB_2M_ENTRIES 255
143 #define L1_DTLB_4K_ASSOC 1
144 #define L1_DTLB_4K_ENTRIES 255
145
146 #define L1_ITLB_2M_ASSOC 1
147 #define L1_ITLB_2M_ENTRIES 255
148 #define L1_ITLB_4K_ASSOC 1
149 #define L1_ITLB_4K_ENTRIES 255
150
151 #define L2_DTLB_2M_ASSOC 0 /* disabled */
152 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
153 #define L2_DTLB_4K_ASSOC 4
154 #define L2_DTLB_4K_ENTRIES 512
155
156 #define L2_ITLB_2M_ASSOC 0 /* disabled */
157 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
158 #define L2_ITLB_4K_ASSOC 4
159 #define L2_ITLB_4K_ENTRIES 512
160
161
162
163 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
164 uint32_t vendor2, uint32_t vendor3)
165 {
166 int i;
167 for (i = 0; i < 4; i++) {
168 dst[i] = vendor1 >> (8 * i);
169 dst[i + 4] = vendor2 >> (8 * i);
170 dst[i + 8] = vendor3 >> (8 * i);
171 }
172 dst[CPUID_VENDOR_SZ] = '\0';
173 }
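/*
 * Illustrative usage (editor's addition, not part of the original file):
 * CPUID.0 returns the vendor string spread across EBX, EDX and ECX, four
 * bytes per register, least-significant byte first.  For a GenuineIntel
 * part EBX=0x756e6547 ("Genu"), EDX=0x49656e69 ("ineI"), ECX=0x6c65746e
 * ("ntel"), so the call below produces "GenuineIntel".  The function name
 * is hypothetical.
 */
static void example_decode_intel_vendor(void)
{
    char vendor[CPUID_VENDOR_SZ + 1];

    x86_cpu_vendor_words2str(vendor, 0x756e6547, 0x49656e69, 0x6c65746e);
    /* vendor now holds "GenuineIntel" */
}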
174
175 /* feature flags taken from "Intel Processor Identification and the CPUID
176 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
177 * between feature naming conventions, aliases may be added.
178 */
179 static const char *feature_name[] = {
180 "fpu", "vme", "de", "pse",
181 "tsc", "msr", "pae", "mce",
182 "cx8", "apic", NULL, "sep",
183 "mtrr", "pge", "mca", "cmov",
184 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
185 NULL, "ds" /* Intel dts */, "acpi", "mmx",
186 "fxsr", "sse", "sse2", "ss",
187 "ht" /* Intel htt */, "tm", "ia64", "pbe",
188 };
189 static const char *ext_feature_name[] = {
190 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
191 "ds_cpl", "vmx", "smx", "est",
192 "tm2", "ssse3", "cid", NULL,
193 "fma", "cx16", "xtpr", "pdcm",
194 NULL, "pcid", "dca", "sse4.1|sse4_1",
195 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
196 "tsc-deadline", "aes", "xsave", "osxsave",
197 "avx", "f16c", "rdrand", "hypervisor",
198 };
199 /* Feature names that are already defined in feature_name[] but are set in
200  * CPUID[8000_0001].EDX on AMD CPUs do not have entries in
201  * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
202  * if and only if the CPU vendor is AMD.
203 */
204 static const char *ext2_feature_name[] = {
205 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
206 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
207 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
208 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
209 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
210 "nx|xd", NULL, "mmxext", NULL /* mmx */,
211 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
212 NULL, "lm|i64", "3dnowext", "3dnow",
213 };
214 static const char *ext3_feature_name[] = {
215 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
216 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
217 "3dnowprefetch", "osvw", "ibs", "xop",
218 "skinit", "wdt", NULL, "lwp",
219 "fma4", "tce", NULL, "nodeid_msr",
220 NULL, "tbm", "topoext", "perfctr_core",
221 "perfctr_nb", NULL, NULL, NULL,
222 NULL, NULL, NULL, NULL,
223 };
224
225 static const char *ext4_feature_name[] = {
226 NULL, NULL, "xstore", "xstore-en",
227 NULL, NULL, "xcrypt", "xcrypt-en",
228 "ace2", "ace2-en", "phe", "phe-en",
229 "pmm", "pmm-en", NULL, NULL,
230 NULL, NULL, NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 };
235
236 static const char *kvm_feature_name[] = {
237 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
238 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
239 NULL, NULL, NULL, NULL,
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 "kvmclock-stable-bit", NULL, NULL, NULL,
244 NULL, NULL, NULL, NULL,
245 };
246
247 static const char *svm_feature_name[] = {
248 "npt", "lbrv", "svm_lock", "nrip_save",
249 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
250 NULL, NULL, "pause_filter", NULL,
251 "pfthreshold", NULL, NULL, NULL,
252 NULL, NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 };
257
258 static const char *cpuid_7_0_ebx_feature_name[] = {
259 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
260 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
261 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
262 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
263 };
264
265 static const char *cpuid_7_0_ecx_feature_name[] = {
266 NULL, NULL, NULL, "pku",
267 "ospke", NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 };
275
276 static const char *cpuid_apm_edx_feature_name[] = {
277 NULL, NULL, NULL, NULL,
278 NULL, NULL, NULL, NULL,
279 "invtsc", NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 };
286
287 static const char *cpuid_xsave_feature_name[] = {
288 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
289 NULL, NULL, NULL, NULL,
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 };
297
298 static const char *cpuid_6_feature_name[] = {
299 NULL, NULL, "arat", NULL,
300 NULL, NULL, NULL, NULL,
301 NULL, NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
305 NULL, NULL, NULL, NULL,
306 NULL, NULL, NULL, NULL,
307 };
308
309 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
310 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
311 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
312 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
313 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
314 CPUID_PSE36 | CPUID_FXSR)
315 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
316 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
317 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
318 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
319 CPUID_PAE | CPUID_SEP | CPUID_APIC)
320
321 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
322 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
323 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
324 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
325 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
326 /* partly implemented:
327 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
328 /* missing:
329 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
330 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
331 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
332 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
333 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
334 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
335 /* missing:
336 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
337 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
338 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
339 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
340 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
341
342 #ifdef TARGET_X86_64
343 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
344 #else
345 #define TCG_EXT2_X86_64_FEATURES 0
346 #endif
347
348 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
349 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
350 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
351 TCG_EXT2_X86_64_FEATURES)
352 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
353 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
354 #define TCG_EXT4_FEATURES 0
355 #define TCG_SVM_FEATURES 0
356 #define TCG_KVM_FEATURES 0
357 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
358 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
359 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
360 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
361 /* missing:
362 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
363 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
364 CPUID_7_0_EBX_RDSEED */
365 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
366 #define TCG_APM_FEATURES 0
367 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
368 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
369 /* missing:
370 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
371
372 typedef struct FeatureWordInfo {
373 const char **feat_names;
374 uint32_t cpuid_eax; /* Input EAX for CPUID */
375 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
376 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
377 int cpuid_reg; /* output register (R_* constant) */
378 uint32_t tcg_features; /* Feature flags supported by TCG */
379 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
380 } FeatureWordInfo;
381
382 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
383 [FEAT_1_EDX] = {
384 .feat_names = feature_name,
385 .cpuid_eax = 1, .cpuid_reg = R_EDX,
386 .tcg_features = TCG_FEATURES,
387 },
388 [FEAT_1_ECX] = {
389 .feat_names = ext_feature_name,
390 .cpuid_eax = 1, .cpuid_reg = R_ECX,
391 .tcg_features = TCG_EXT_FEATURES,
392 },
393 [FEAT_8000_0001_EDX] = {
394 .feat_names = ext2_feature_name,
395 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
396 .tcg_features = TCG_EXT2_FEATURES,
397 },
398 [FEAT_8000_0001_ECX] = {
399 .feat_names = ext3_feature_name,
400 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
401 .tcg_features = TCG_EXT3_FEATURES,
402 },
403 [FEAT_C000_0001_EDX] = {
404 .feat_names = ext4_feature_name,
405 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
406 .tcg_features = TCG_EXT4_FEATURES,
407 },
408 [FEAT_KVM] = {
409 .feat_names = kvm_feature_name,
410 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
411 .tcg_features = TCG_KVM_FEATURES,
412 },
413 [FEAT_SVM] = {
414 .feat_names = svm_feature_name,
415 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
416 .tcg_features = TCG_SVM_FEATURES,
417 },
418 [FEAT_7_0_EBX] = {
419 .feat_names = cpuid_7_0_ebx_feature_name,
420 .cpuid_eax = 7,
421 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
422 .cpuid_reg = R_EBX,
423 .tcg_features = TCG_7_0_EBX_FEATURES,
424 },
425 [FEAT_7_0_ECX] = {
426 .feat_names = cpuid_7_0_ecx_feature_name,
427 .cpuid_eax = 7,
428 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
429 .cpuid_reg = R_ECX,
430 .tcg_features = TCG_7_0_ECX_FEATURES,
431 },
432 [FEAT_8000_0007_EDX] = {
433 .feat_names = cpuid_apm_edx_feature_name,
434 .cpuid_eax = 0x80000007,
435 .cpuid_reg = R_EDX,
436 .tcg_features = TCG_APM_FEATURES,
437 .unmigratable_flags = CPUID_APM_INVTSC,
438 },
439 [FEAT_XSAVE] = {
440 .feat_names = cpuid_xsave_feature_name,
441 .cpuid_eax = 0xd,
442 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
443 .cpuid_reg = R_EAX,
444 .tcg_features = TCG_XSAVE_FEATURES,
445 },
446 [FEAT_6_EAX] = {
447 .feat_names = cpuid_6_feature_name,
448 .cpuid_eax = 6, .cpuid_reg = R_EAX,
449 .tcg_features = TCG_6_EAX_FEATURES,
450 },
451 };
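/*
 * Example (editor's addition): reading the table above, FEAT_XSAVE is
 * CPUID.(EAX=0DH,ECX=1):EAX -- the sub-leaf that advertises xsaveopt/xsavec/
 * xgetbv1/xsaves -- while FEAT_7_0_EBX is CPUID.(EAX=07H,ECX=0):EBX.  The
 * cpuid_needs_ecx/cpuid_ecx pair is what distinguishes sub-leaf queries from
 * plain leaves.
 */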
452
453 typedef struct X86RegisterInfo32 {
454 /* Name of register */
455 const char *name;
456 /* QAPI enum value register */
457 X86CPURegister32 qapi_enum;
458 } X86RegisterInfo32;
459
460 #define REGISTER(reg) \
461 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
462 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
463 REGISTER(EAX),
464 REGISTER(ECX),
465 REGISTER(EDX),
466 REGISTER(EBX),
467 REGISTER(ESP),
468 REGISTER(EBP),
469 REGISTER(ESI),
470 REGISTER(EDI),
471 };
472 #undef REGISTER
473
474 const ExtSaveArea x86_ext_save_areas[] = {
475 [XSTATE_YMM_BIT] =
476 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
477 .offset = offsetof(X86XSaveArea, avx_state),
478 .size = sizeof(XSaveAVX) },
479 [XSTATE_BNDREGS_BIT] =
480 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
481 .offset = offsetof(X86XSaveArea, bndreg_state),
482 .size = sizeof(XSaveBNDREG) },
483 [XSTATE_BNDCSR_BIT] =
484 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
485 .offset = offsetof(X86XSaveArea, bndcsr_state),
486 .size = sizeof(XSaveBNDCSR) },
487 [XSTATE_OPMASK_BIT] =
488 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
489 .offset = offsetof(X86XSaveArea, opmask_state),
490 .size = sizeof(XSaveOpmask) },
491 [XSTATE_ZMM_Hi256_BIT] =
492 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
493 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
494 .size = sizeof(XSaveZMM_Hi256) },
495 [XSTATE_Hi16_ZMM_BIT] =
496 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
497 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
498 .size = sizeof(XSaveHi16_ZMM) },
499 [XSTATE_PKRU_BIT] =
500 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
501 .offset = offsetof(X86XSaveArea, pkru_state),
502 .size = sizeof(XSavePKRU) },
503 };
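/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * what the table above feeds.  CPUID leaf 0xD, sub-leaf i (i >= 2) reports
 * the size (EAX) and offset (EBX) of XSAVE state component i, and with this
 * patch both values come straight from the X86XSaveArea layout via
 * sizeof()/offsetof().  The helper name is hypothetical; cpu_x86_cpuid()
 * does the real lookup.
 */
static void example_xsave_subleaf(int component, uint32_t *eax, uint32_t *ebx)
{
    const ExtSaveArea *esa = &x86_ext_save_areas[component];

    *eax = esa->size;    /* size in bytes of this state component */
    *ebx = esa->offset;  /* offset from the start of the XSAVE area */
}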
504
505 const char *get_register_name_32(unsigned int reg)
506 {
507 if (reg >= CPU_NB_REGS32) {
508 return NULL;
509 }
510 return x86_reg_info_32[reg].name;
511 }
512
513 /*
514 * Returns the set of feature flags that are supported and migratable by
515 * QEMU, for a given FeatureWord.
516 */
517 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
518 {
519 FeatureWordInfo *wi = &feature_word_info[w];
520 uint32_t r = 0;
521 int i;
522
523 for (i = 0; i < 32; i++) {
524 uint32_t f = 1U << i;
525 /* If the feature name is unknown, it is not supported by QEMU yet */
526 if (!wi->feat_names[i]) {
527 continue;
528 }
529 /* Skip features known to QEMU, but explicitly marked as unmigratable */
530 if (wi->unmigratable_flags & f) {
531 continue;
532 }
533 r |= f;
534 }
535 return r;
536 }
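/*
 * Example (editor's addition): for FEAT_8000_0007_EDX only bit 8 ("invtsc")
 * has a name, and that same bit is listed in unmigratable_flags, so
 * x86_cpu_get_migratable_flags(FEAT_8000_0007_EDX) returns 0: the flag is
 * known to QEMU but never reported as migratable.
 */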
537
538 void host_cpuid(uint32_t function, uint32_t count,
539 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
540 {
541 uint32_t vec[4];
542
543 #ifdef __x86_64__
544 asm volatile("cpuid"
545 : "=a"(vec[0]), "=b"(vec[1]),
546 "=c"(vec[2]), "=d"(vec[3])
547 : "0"(function), "c"(count) : "cc");
548 #elif defined(__i386__)
549 asm volatile("pusha \n\t"
550 "cpuid \n\t"
551 "mov %%eax, 0(%2) \n\t"
552 "mov %%ebx, 4(%2) \n\t"
553 "mov %%ecx, 8(%2) \n\t"
554 "mov %%edx, 12(%2) \n\t"
555 "popa"
556 : : "a"(function), "c"(count), "S"(vec)
557 : "memory", "cc");
558 #else
559 abort();
560 #endif
561
562 if (eax)
563 *eax = vec[0];
564 if (ebx)
565 *ebx = vec[1];
566 if (ecx)
567 *ecx = vec[2];
568 if (edx)
569 *edx = vec[3];
570 }
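/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * how a FeatureWordInfo entry and host_cpuid() combine to read one feature
 * word from the host.  The helper name is hypothetical, and when KVM is in
 * use the real probing goes through kvm_arch_get_supported_cpuid() instead.
 */
static uint32_t example_host_feature_word(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t regs[4] = { 0, 0, 0, 0 };

    host_cpuid(wi->cpuid_eax, wi->cpuid_needs_ecx ? wi->cpuid_ecx : 0,
               &regs[R_EAX], &regs[R_EBX], &regs[R_ECX], &regs[R_EDX]);
    return regs[wi->cpuid_reg];
}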
571
572 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
573
574 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
575 * a substring. ex if !NULL points to the first char after a substring,
576 * otherwise the string is assumed to sized by a terminating nul.
577 * Return lexical ordering of *s1:*s2.
578 */
579 static int sstrcmp(const char *s1, const char *e1,
580 const char *s2, const char *e2)
581 {
582 for (;;) {
583 if (!*s1 || !*s2 || *s1 != *s2)
584 return (*s1 - *s2);
585 ++s1, ++s2;
586 if (s1 == e1 && s2 == e2)
587 return (0);
588 else if (s1 == e1)
589 return (*s2);
590 else if (s2 == e2)
591 return (*s1);
592 }
593 }
594
595 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
596  * '|'-delimited (possibly empty) strings, in which case the search for a
597  * match proceeds left to right across the alternatives. Return 0 for success,
598 * non-zero otherwise.
599 */
600 static int altcmp(const char *s, const char *e, const char *altstr)
601 {
602 const char *p, *q;
603
604 for (q = p = altstr; ; ) {
605 while (*p && *p != '|')
606 ++p;
607 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
608 return (0);
609 if (!*p)
610 return (1);
611 else
612 q = ++p;
613 }
614 }
615
616 /* search featureset for flag *[s..e), if found set corresponding bit in
617 * *pval and return true, otherwise return false
618 */
619 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
620 const char **featureset)
621 {
622 uint32_t mask;
623 const char **ppc;
624 bool found = false;
625
626 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
627 if (*ppc && !altcmp(s, e, *ppc)) {
628 *pval |= mask;
629 found = true;
630 }
631 }
632 return found;
633 }
634
635 static void add_flagname_to_bitmaps(const char *flagname,
636 FeatureWordArray words,
637 Error **errp)
638 {
639 FeatureWord w;
640 for (w = 0; w < FEATURE_WORDS; w++) {
641 FeatureWordInfo *wi = &feature_word_info[w];
642 if (wi->feat_names &&
643 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
644 break;
645 }
646 }
647 if (w == FEATURE_WORDS) {
648 error_setg(errp, "CPU feature %s not found", flagname);
649 }
650 }
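/*
 * Illustrative usage (editor's addition, not part of the original file):
 * the '|' alternatives in the name tables let either spelling of a flag
 * select the same bit.  ext_feature_name[19] is "sse4.1|sse4_1", so both
 * calls below set CPUID_EXT_SSE41 (bit 19) in words[FEAT_1_ECX].  The
 * function name is hypothetical.
 */
static void example_parse_sse41_aliases(Error **errp)
{
    FeatureWordArray words = { 0 };

    add_flagname_to_bitmaps("sse4.1", words, errp);
    add_flagname_to_bitmaps("sse4_1", words, errp);
    /* words[FEAT_1_ECX] now has CPUID_EXT_SSE41 set */
}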
651
652 /* CPU class name definitions: */
653
654 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
655 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
656
657 /* Return type name for a given CPU model name
658 * Caller is responsible for freeing the returned string.
659 */
660 static char *x86_cpu_type_name(const char *model_name)
661 {
662 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
663 }
664
665 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
666 {
667 ObjectClass *oc;
668 char *typename;
669
670 if (cpu_model == NULL) {
671 return NULL;
672 }
673
674 typename = x86_cpu_type_name(cpu_model);
675 oc = object_class_by_name(typename);
676 g_free(typename);
677 return oc;
678 }
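/*
 * Example (editor's addition, not part of the original file): on a 64-bit
 * target TYPE_X86_CPU is "x86_64-cpu", so x86_cpu_type_name("qemu64")
 * returns the heap string "qemu64-x86_64-cpu", which is the QOM type name
 * that x86_cpu_class_by_name() resolves for "-cpu qemu64".  The function
 * below is a hypothetical caller.
 */
static void example_lookup_qemu64(void)
{
    char *typename = x86_cpu_type_name("qemu64");
    ObjectClass *oc = object_class_by_name(typename);

    g_free(typename);
    (void)oc;   /* non-NULL once the CPU types are registered */
}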
679
680 struct X86CPUDefinition {
681 const char *name;
682 uint32_t level;
683 uint32_t xlevel;
684 uint32_t xlevel2;
685 /* vendor is zero-terminated, 12 character ASCII string */
686 char vendor[CPUID_VENDOR_SZ + 1];
687 int family;
688 int model;
689 int stepping;
690 FeatureWordArray features;
691 char model_id[48];
692 };
693
694 static X86CPUDefinition builtin_x86_defs[] = {
695 {
696 .name = "qemu64",
697 .level = 0xd,
698 .vendor = CPUID_VENDOR_AMD,
699 .family = 6,
700 .model = 6,
701 .stepping = 3,
702 .features[FEAT_1_EDX] =
703 PPRO_FEATURES |
704 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
705 CPUID_PSE36,
706 .features[FEAT_1_ECX] =
707 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
708 .features[FEAT_8000_0001_EDX] =
709 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
710 .features[FEAT_8000_0001_ECX] =
711 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
712 .xlevel = 0x8000000A,
713 },
714 {
715 .name = "phenom",
716 .level = 5,
717 .vendor = CPUID_VENDOR_AMD,
718 .family = 16,
719 .model = 2,
720 .stepping = 3,
721 /* Missing: CPUID_HT */
722 .features[FEAT_1_EDX] =
723 PPRO_FEATURES |
724 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
725 CPUID_PSE36 | CPUID_VME,
726 .features[FEAT_1_ECX] =
727 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
728 CPUID_EXT_POPCNT,
729 .features[FEAT_8000_0001_EDX] =
730 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
731 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
732 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
733 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
734 CPUID_EXT3_CR8LEG,
735 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
736 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
737 .features[FEAT_8000_0001_ECX] =
738 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
739 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
740 /* Missing: CPUID_SVM_LBRV */
741 .features[FEAT_SVM] =
742 CPUID_SVM_NPT,
743 .xlevel = 0x8000001A,
744 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
745 },
746 {
747 .name = "core2duo",
748 .level = 10,
749 .vendor = CPUID_VENDOR_INTEL,
750 .family = 6,
751 .model = 15,
752 .stepping = 11,
753 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
754 .features[FEAT_1_EDX] =
755 PPRO_FEATURES |
756 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
757 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
758 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
759 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
760 .features[FEAT_1_ECX] =
761 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
762 CPUID_EXT_CX16,
763 .features[FEAT_8000_0001_EDX] =
764 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
765 .features[FEAT_8000_0001_ECX] =
766 CPUID_EXT3_LAHF_LM,
767 .xlevel = 0x80000008,
768 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
769 },
770 {
771 .name = "kvm64",
772 .level = 0xd,
773 .vendor = CPUID_VENDOR_INTEL,
774 .family = 15,
775 .model = 6,
776 .stepping = 1,
777 /* Missing: CPUID_HT */
778 .features[FEAT_1_EDX] =
779 PPRO_FEATURES | CPUID_VME |
780 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
781 CPUID_PSE36,
782 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
783 .features[FEAT_1_ECX] =
784 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
785 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
786 .features[FEAT_8000_0001_EDX] =
787 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
788 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
789 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
790 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
791 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
792 .features[FEAT_8000_0001_ECX] =
793 0,
794 .xlevel = 0x80000008,
795 .model_id = "Common KVM processor"
796 },
797 {
798 .name = "qemu32",
799 .level = 4,
800 .vendor = CPUID_VENDOR_INTEL,
801 .family = 6,
802 .model = 6,
803 .stepping = 3,
804 .features[FEAT_1_EDX] =
805 PPRO_FEATURES,
806 .features[FEAT_1_ECX] =
807 CPUID_EXT_SSE3,
808 .xlevel = 0x80000004,
809 },
810 {
811 .name = "kvm32",
812 .level = 5,
813 .vendor = CPUID_VENDOR_INTEL,
814 .family = 15,
815 .model = 6,
816 .stepping = 1,
817 .features[FEAT_1_EDX] =
818 PPRO_FEATURES | CPUID_VME |
819 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
820 .features[FEAT_1_ECX] =
821 CPUID_EXT_SSE3,
822 .features[FEAT_8000_0001_ECX] =
823 0,
824 .xlevel = 0x80000008,
825 .model_id = "Common 32-bit KVM processor"
826 },
827 {
828 .name = "coreduo",
829 .level = 10,
830 .vendor = CPUID_VENDOR_INTEL,
831 .family = 6,
832 .model = 14,
833 .stepping = 8,
834 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
835 .features[FEAT_1_EDX] =
836 PPRO_FEATURES | CPUID_VME |
837 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
838 CPUID_SS,
839 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
840 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
841 .features[FEAT_1_ECX] =
842 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
843 .features[FEAT_8000_0001_EDX] =
844 CPUID_EXT2_NX,
845 .xlevel = 0x80000008,
846 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
847 },
848 {
849 .name = "486",
850 .level = 1,
851 .vendor = CPUID_VENDOR_INTEL,
852 .family = 4,
853 .model = 8,
854 .stepping = 0,
855 .features[FEAT_1_EDX] =
856 I486_FEATURES,
857 .xlevel = 0,
858 },
859 {
860 .name = "pentium",
861 .level = 1,
862 .vendor = CPUID_VENDOR_INTEL,
863 .family = 5,
864 .model = 4,
865 .stepping = 3,
866 .features[FEAT_1_EDX] =
867 PENTIUM_FEATURES,
868 .xlevel = 0,
869 },
870 {
871 .name = "pentium2",
872 .level = 2,
873 .vendor = CPUID_VENDOR_INTEL,
874 .family = 6,
875 .model = 5,
876 .stepping = 2,
877 .features[FEAT_1_EDX] =
878 PENTIUM2_FEATURES,
879 .xlevel = 0,
880 },
881 {
882 .name = "pentium3",
883 .level = 3,
884 .vendor = CPUID_VENDOR_INTEL,
885 .family = 6,
886 .model = 7,
887 .stepping = 3,
888 .features[FEAT_1_EDX] =
889 PENTIUM3_FEATURES,
890 .xlevel = 0,
891 },
892 {
893 .name = "athlon",
894 .level = 2,
895 .vendor = CPUID_VENDOR_AMD,
896 .family = 6,
897 .model = 2,
898 .stepping = 3,
899 .features[FEAT_1_EDX] =
900 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
901 CPUID_MCA,
902 .features[FEAT_8000_0001_EDX] =
903 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
904 .xlevel = 0x80000008,
905 },
906 {
907 .name = "n270",
908 .level = 10,
909 .vendor = CPUID_VENDOR_INTEL,
910 .family = 6,
911 .model = 28,
912 .stepping = 2,
913 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
914 .features[FEAT_1_EDX] =
915 PPRO_FEATURES |
916 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
917 CPUID_ACPI | CPUID_SS,
918         /* Some CPUs have no CPUID_SEP */
919 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
920 * CPUID_EXT_XTPR */
921 .features[FEAT_1_ECX] =
922 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
923 CPUID_EXT_MOVBE,
924 .features[FEAT_8000_0001_EDX] =
925 CPUID_EXT2_NX,
926 .features[FEAT_8000_0001_ECX] =
927 CPUID_EXT3_LAHF_LM,
928 .xlevel = 0x80000008,
929 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
930 },
931 {
932 .name = "Conroe",
933 .level = 10,
934 .vendor = CPUID_VENDOR_INTEL,
935 .family = 6,
936 .model = 15,
937 .stepping = 3,
938 .features[FEAT_1_EDX] =
939 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
940 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
941 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
942 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
943 CPUID_DE | CPUID_FP87,
944 .features[FEAT_1_ECX] =
945 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
946 .features[FEAT_8000_0001_EDX] =
947 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
948 .features[FEAT_8000_0001_ECX] =
949 CPUID_EXT3_LAHF_LM,
950 .xlevel = 0x80000008,
951 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
952 },
953 {
954 .name = "Penryn",
955 .level = 10,
956 .vendor = CPUID_VENDOR_INTEL,
957 .family = 6,
958 .model = 23,
959 .stepping = 3,
960 .features[FEAT_1_EDX] =
961 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
962 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
963 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
964 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
965 CPUID_DE | CPUID_FP87,
966 .features[FEAT_1_ECX] =
967 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
968 CPUID_EXT_SSE3,
969 .features[FEAT_8000_0001_EDX] =
970 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
971 .features[FEAT_8000_0001_ECX] =
972 CPUID_EXT3_LAHF_LM,
973 .xlevel = 0x80000008,
974 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
975 },
976 {
977 .name = "Nehalem",
978 .level = 11,
979 .vendor = CPUID_VENDOR_INTEL,
980 .family = 6,
981 .model = 26,
982 .stepping = 3,
983 .features[FEAT_1_EDX] =
984 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
985 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
986 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
987 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
988 CPUID_DE | CPUID_FP87,
989 .features[FEAT_1_ECX] =
990 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
991 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
992 .features[FEAT_8000_0001_EDX] =
993 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
994 .features[FEAT_8000_0001_ECX] =
995 CPUID_EXT3_LAHF_LM,
996 .xlevel = 0x80000008,
997 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
998 },
999 {
1000 .name = "Westmere",
1001 .level = 11,
1002 .vendor = CPUID_VENDOR_INTEL,
1003 .family = 6,
1004 .model = 44,
1005 .stepping = 1,
1006 .features[FEAT_1_EDX] =
1007 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1008 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1009 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1010 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1011 CPUID_DE | CPUID_FP87,
1012 .features[FEAT_1_ECX] =
1013 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1014 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1015 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1016 .features[FEAT_8000_0001_EDX] =
1017 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1018 .features[FEAT_8000_0001_ECX] =
1019 CPUID_EXT3_LAHF_LM,
1020 .features[FEAT_6_EAX] =
1021 CPUID_6_EAX_ARAT,
1022 .xlevel = 0x80000008,
1023 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1024 },
1025 {
1026 .name = "SandyBridge",
1027 .level = 0xd,
1028 .vendor = CPUID_VENDOR_INTEL,
1029 .family = 6,
1030 .model = 42,
1031 .stepping = 1,
1032 .features[FEAT_1_EDX] =
1033 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1034 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1035 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1036 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1037 CPUID_DE | CPUID_FP87,
1038 .features[FEAT_1_ECX] =
1039 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1040 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1041 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1042 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1043 CPUID_EXT_SSE3,
1044 .features[FEAT_8000_0001_EDX] =
1045 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1046 CPUID_EXT2_SYSCALL,
1047 .features[FEAT_8000_0001_ECX] =
1048 CPUID_EXT3_LAHF_LM,
1049 .features[FEAT_XSAVE] =
1050 CPUID_XSAVE_XSAVEOPT,
1051 .features[FEAT_6_EAX] =
1052 CPUID_6_EAX_ARAT,
1053 .xlevel = 0x80000008,
1054 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1055 },
1056 {
1057 .name = "IvyBridge",
1058 .level = 0xd,
1059 .vendor = CPUID_VENDOR_INTEL,
1060 .family = 6,
1061 .model = 58,
1062 .stepping = 9,
1063 .features[FEAT_1_EDX] =
1064 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1065 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1066 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1067 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1068 CPUID_DE | CPUID_FP87,
1069 .features[FEAT_1_ECX] =
1070 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1071 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1072 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1073 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1074 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1075 .features[FEAT_7_0_EBX] =
1076 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1077 CPUID_7_0_EBX_ERMS,
1078 .features[FEAT_8000_0001_EDX] =
1079 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1080 CPUID_EXT2_SYSCALL,
1081 .features[FEAT_8000_0001_ECX] =
1082 CPUID_EXT3_LAHF_LM,
1083 .features[FEAT_XSAVE] =
1084 CPUID_XSAVE_XSAVEOPT,
1085 .features[FEAT_6_EAX] =
1086 CPUID_6_EAX_ARAT,
1087 .xlevel = 0x80000008,
1088 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1089 },
1090 {
1091 .name = "Haswell-noTSX",
1092 .level = 0xd,
1093 .vendor = CPUID_VENDOR_INTEL,
1094 .family = 6,
1095 .model = 60,
1096 .stepping = 1,
1097 .features[FEAT_1_EDX] =
1098 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1099 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1100 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1101 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1102 CPUID_DE | CPUID_FP87,
1103 .features[FEAT_1_ECX] =
1104 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1105 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1106 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1107 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1108 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1109 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1110 .features[FEAT_8000_0001_EDX] =
1111 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1112 CPUID_EXT2_SYSCALL,
1113 .features[FEAT_8000_0001_ECX] =
1114 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1115 .features[FEAT_7_0_EBX] =
1116 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1117 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1118 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1119 .features[FEAT_XSAVE] =
1120 CPUID_XSAVE_XSAVEOPT,
1121 .features[FEAT_6_EAX] =
1122 CPUID_6_EAX_ARAT,
1123 .xlevel = 0x80000008,
1124 .model_id = "Intel Core Processor (Haswell, no TSX)",
1125 }, {
1126 .name = "Haswell",
1127 .level = 0xd,
1128 .vendor = CPUID_VENDOR_INTEL,
1129 .family = 6,
1130 .model = 60,
1131 .stepping = 1,
1132 .features[FEAT_1_EDX] =
1133 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1134 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1135 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1136 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1137 CPUID_DE | CPUID_FP87,
1138 .features[FEAT_1_ECX] =
1139 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1140 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1141 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1142 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1143 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1144 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1145 .features[FEAT_8000_0001_EDX] =
1146 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1147 CPUID_EXT2_SYSCALL,
1148 .features[FEAT_8000_0001_ECX] =
1149 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1150 .features[FEAT_7_0_EBX] =
1151 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1152 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1153 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1154 CPUID_7_0_EBX_RTM,
1155 .features[FEAT_XSAVE] =
1156 CPUID_XSAVE_XSAVEOPT,
1157 .features[FEAT_6_EAX] =
1158 CPUID_6_EAX_ARAT,
1159 .xlevel = 0x80000008,
1160 .model_id = "Intel Core Processor (Haswell)",
1161 },
1162 {
1163 .name = "Broadwell-noTSX",
1164 .level = 0xd,
1165 .vendor = CPUID_VENDOR_INTEL,
1166 .family = 6,
1167 .model = 61,
1168 .stepping = 2,
1169 .features[FEAT_1_EDX] =
1170 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1171 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1172 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1173 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1174 CPUID_DE | CPUID_FP87,
1175 .features[FEAT_1_ECX] =
1176 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1177 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1178 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1179 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1180 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1181 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1182 .features[FEAT_8000_0001_EDX] =
1183 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1184 CPUID_EXT2_SYSCALL,
1185 .features[FEAT_8000_0001_ECX] =
1186 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1187 .features[FEAT_7_0_EBX] =
1188 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1189 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1190 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1191 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1192 CPUID_7_0_EBX_SMAP,
1193 .features[FEAT_XSAVE] =
1194 CPUID_XSAVE_XSAVEOPT,
1195 .features[FEAT_6_EAX] =
1196 CPUID_6_EAX_ARAT,
1197 .xlevel = 0x80000008,
1198 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1199 },
1200 {
1201 .name = "Broadwell",
1202 .level = 0xd,
1203 .vendor = CPUID_VENDOR_INTEL,
1204 .family = 6,
1205 .model = 61,
1206 .stepping = 2,
1207 .features[FEAT_1_EDX] =
1208 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1209 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1210 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1211 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1212 CPUID_DE | CPUID_FP87,
1213 .features[FEAT_1_ECX] =
1214 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1215 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1216 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1217 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1218 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1219 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1220 .features[FEAT_8000_0001_EDX] =
1221 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1222 CPUID_EXT2_SYSCALL,
1223 .features[FEAT_8000_0001_ECX] =
1224 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1225 .features[FEAT_7_0_EBX] =
1226 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1227 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1228 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1229 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1230 CPUID_7_0_EBX_SMAP,
1231 .features[FEAT_XSAVE] =
1232 CPUID_XSAVE_XSAVEOPT,
1233 .features[FEAT_6_EAX] =
1234 CPUID_6_EAX_ARAT,
1235 .xlevel = 0x80000008,
1236 .model_id = "Intel Core Processor (Broadwell)",
1237 },
1238 {
1239 .name = "Opteron_G1",
1240 .level = 5,
1241 .vendor = CPUID_VENDOR_AMD,
1242 .family = 15,
1243 .model = 6,
1244 .stepping = 1,
1245 .features[FEAT_1_EDX] =
1246 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1247 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1248 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1249 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1250 CPUID_DE | CPUID_FP87,
1251 .features[FEAT_1_ECX] =
1252 CPUID_EXT_SSE3,
1253 .features[FEAT_8000_0001_EDX] =
1254 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1255 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1256 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1257 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1258 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1259 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1260 .xlevel = 0x80000008,
1261 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1262 },
1263 {
1264 .name = "Opteron_G2",
1265 .level = 5,
1266 .vendor = CPUID_VENDOR_AMD,
1267 .family = 15,
1268 .model = 6,
1269 .stepping = 1,
1270 .features[FEAT_1_EDX] =
1271 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1272 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1273 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1274 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1275 CPUID_DE | CPUID_FP87,
1276 .features[FEAT_1_ECX] =
1277 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1278 /* Missing: CPUID_EXT2_RDTSCP */
1279 .features[FEAT_8000_0001_EDX] =
1280 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1281 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1282 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1283 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1284 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1285 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1286 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1287 .features[FEAT_8000_0001_ECX] =
1288 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1289 .xlevel = 0x80000008,
1290 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1291 },
1292 {
1293 .name = "Opteron_G3",
1294 .level = 5,
1295 .vendor = CPUID_VENDOR_AMD,
1296 .family = 15,
1297 .model = 6,
1298 .stepping = 1,
1299 .features[FEAT_1_EDX] =
1300 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1301 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1302 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1303 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1304 CPUID_DE | CPUID_FP87,
1305 .features[FEAT_1_ECX] =
1306 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1307 CPUID_EXT_SSE3,
1308 /* Missing: CPUID_EXT2_RDTSCP */
1309 .features[FEAT_8000_0001_EDX] =
1310 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1311 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1312 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1313 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1314 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1315 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1316 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1317 .features[FEAT_8000_0001_ECX] =
1318 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1319 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1320 .xlevel = 0x80000008,
1321 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1322 },
1323 {
1324 .name = "Opteron_G4",
1325 .level = 0xd,
1326 .vendor = CPUID_VENDOR_AMD,
1327 .family = 21,
1328 .model = 1,
1329 .stepping = 2,
1330 .features[FEAT_1_EDX] =
1331 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1332 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1333 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1334 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1335 CPUID_DE | CPUID_FP87,
1336 .features[FEAT_1_ECX] =
1337 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1338 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1339 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1340 CPUID_EXT_SSE3,
1341 /* Missing: CPUID_EXT2_RDTSCP */
1342 .features[FEAT_8000_0001_EDX] =
1343 CPUID_EXT2_LM |
1344 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1345 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1346 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1347 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1348 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1349 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1350 .features[FEAT_8000_0001_ECX] =
1351 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1352 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1353 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1354 CPUID_EXT3_LAHF_LM,
1355 /* no xsaveopt! */
1356 .xlevel = 0x8000001A,
1357 .model_id = "AMD Opteron 62xx class CPU",
1358 },
1359 {
1360 .name = "Opteron_G5",
1361 .level = 0xd,
1362 .vendor = CPUID_VENDOR_AMD,
1363 .family = 21,
1364 .model = 2,
1365 .stepping = 0,
1366 .features[FEAT_1_EDX] =
1367 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1368 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1369 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1370 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1371 CPUID_DE | CPUID_FP87,
1372 .features[FEAT_1_ECX] =
1373 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1374 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1375 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1376 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1377 /* Missing: CPUID_EXT2_RDTSCP */
1378 .features[FEAT_8000_0001_EDX] =
1379 CPUID_EXT2_LM |
1380 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1381 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1382 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1383 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1384 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1385 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1386 .features[FEAT_8000_0001_ECX] =
1387 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1388 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1389 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1390 CPUID_EXT3_LAHF_LM,
1391 /* no xsaveopt! */
1392 .xlevel = 0x8000001A,
1393 .model_id = "AMD Opteron 63xx class CPU",
1394 },
1395 };
1396
1397 typedef struct PropValue {
1398 const char *prop, *value;
1399 } PropValue;
1400
1401 /* KVM-specific features that are automatically added/removed
1402 * from all CPU models when KVM is enabled.
1403 */
1404 static PropValue kvm_default_props[] = {
1405 { "kvmclock", "on" },
1406 { "kvm-nopiodelay", "on" },
1407 { "kvm-asyncpf", "on" },
1408 { "kvm-steal-time", "on" },
1409 { "kvm-pv-eoi", "on" },
1410 { "kvmclock-stable-bit", "on" },
1411 { "x2apic", "on" },
1412 { "acpi", "off" },
1413 { "monitor", "off" },
1414 { "svm", "off" },
1415 { NULL, NULL },
1416 };
1417
1418 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1419 {
1420 PropValue *pv;
1421 for (pv = kvm_default_props; pv->prop; pv++) {
1422 if (!strcmp(pv->prop, prop)) {
1423 pv->value = value;
1424 break;
1425 }
1426 }
1427
1428 /* It is valid to call this function only for properties that
1429 * are already present in the kvm_default_props table.
1430 */
1431 assert(pv->prop);
1432 }
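/*
 * Illustrative usage (editor's addition, not part of the original file):
 * machine-type or accelerator setup code can adjust one of the defaults
 * above before CPUs are created, e.g. to stop x2apic from being enabled by
 * default.  The call site shown is hypothetical.
 */
static void example_tweak_kvm_defaults(void)
{
    x86_cpu_change_kvm_default("x2apic", "off");
}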
1433
1434 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1435 bool migratable_only);
1436
1437 #ifdef CONFIG_KVM
1438
1439 static int cpu_x86_fill_model_id(char *str)
1440 {
1441 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1442 int i;
1443
1444 for (i = 0; i < 3; i++) {
1445 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1446 memcpy(str + i * 16 + 0, &eax, 4);
1447 memcpy(str + i * 16 + 4, &ebx, 4);
1448 memcpy(str + i * 16 + 8, &ecx, 4);
1449 memcpy(str + i * 16 + 12, &edx, 4);
1450 }
1451 return 0;
1452 }
1453
1454 static X86CPUDefinition host_cpudef;
1455
1456 static Property host_x86_cpu_properties[] = {
1457 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1458 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1459 DEFINE_PROP_END_OF_LIST()
1460 };
1461
1462 /* class_init for the "host" CPU model
1463 *
1464 * This function may be called before KVM is initialized.
1465 */
1466 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1467 {
1468 DeviceClass *dc = DEVICE_CLASS(oc);
1469 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1470 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1471
1472 xcc->kvm_required = true;
1473
1474 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1475 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1476
1477 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1478 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1479 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1480 host_cpudef.stepping = eax & 0x0F;
1481
1482 cpu_x86_fill_model_id(host_cpudef.model_id);
1483
1484 xcc->cpu_def = &host_cpudef;
1485
1486     /* level, xlevel, xlevel2, and the feature words are initialized in
1487 * instance_init, because they require KVM to be initialized.
1488 */
1489
1490 dc->props = host_x86_cpu_properties;
1491 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1492 dc->cannot_destroy_with_object_finalize_yet = true;
1493 }
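/*
 * Worked example (editor's addition): on a Haswell host CPUID.1 EAX reads
 * 0x000306c3, so the decoding above gives
 *   family   = ((0x306c3 >> 8) & 0xf) + ((0x306c3 >> 20) & 0xff) = 6 + 0 = 6
 *   model    = ((0x306c3 >> 4) & 0xf) | ((0x306c3 & 0xf0000) >> 12)
 *            = 0xc | 0x30 = 60
 *   stepping = 0x306c3 & 0xf = 3
 * matching the family/model of the built-in "Haswell" definition above.
 */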
1494
1495 static void host_x86_cpu_initfn(Object *obj)
1496 {
1497 X86CPU *cpu = X86_CPU(obj);
1498 CPUX86State *env = &cpu->env;
1499 KVMState *s = kvm_state;
1500
1501 assert(kvm_enabled());
1502
1503 /* We can't fill the features array here because we don't know yet if
1504 * "migratable" is true or false.
1505 */
1506 cpu->host_features = true;
1507
1508 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1509 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1510 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1511
1512 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1513 }
1514
1515 static const TypeInfo host_x86_cpu_type_info = {
1516 .name = X86_CPU_TYPE_NAME("host"),
1517 .parent = TYPE_X86_CPU,
1518 .instance_init = host_x86_cpu_initfn,
1519 .class_init = host_x86_cpu_class_init,
1520 };
1521
1522 #endif
1523
1524 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1525 {
1526 FeatureWordInfo *f = &feature_word_info[w];
1527 int i;
1528
1529 for (i = 0; i < 32; ++i) {
1530 if ((1UL << i) & mask) {
1531 const char *reg = get_register_name_32(f->cpuid_reg);
1532 assert(reg);
1533 fprintf(stderr, "warning: %s doesn't support requested feature: "
1534 "CPUID.%02XH:%s%s%s [bit %d]\n",
1535 kvm_enabled() ? "host" : "TCG",
1536 f->cpuid_eax, reg,
1537 f->feat_names[i] ? "." : "",
1538 f->feat_names[i] ? f->feat_names[i] : "", i);
1539 }
1540 }
1541 }
1542
1543 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1544 const char *name, void *opaque,
1545 Error **errp)
1546 {
1547 X86CPU *cpu = X86_CPU(obj);
1548 CPUX86State *env = &cpu->env;
1549 int64_t value;
1550
1551 value = (env->cpuid_version >> 8) & 0xf;
1552 if (value == 0xf) {
1553 value += (env->cpuid_version >> 20) & 0xff;
1554 }
1555 visit_type_int(v, name, &value, errp);
1556 }
1557
1558 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1559 const char *name, void *opaque,
1560 Error **errp)
1561 {
1562 X86CPU *cpu = X86_CPU(obj);
1563 CPUX86State *env = &cpu->env;
1564 const int64_t min = 0;
1565 const int64_t max = 0xff + 0xf;
1566 Error *local_err = NULL;
1567 int64_t value;
1568
1569 visit_type_int(v, name, &value, &local_err);
1570 if (local_err) {
1571 error_propagate(errp, local_err);
1572 return;
1573 }
1574 if (value < min || value > max) {
1575 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1576 name ? name : "null", value, min, max);
1577 return;
1578 }
1579
1580 env->cpuid_version &= ~0xff00f00;
1581 if (value > 0x0f) {
1582 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1583 } else {
1584 env->cpuid_version |= value << 8;
1585 }
1586 }
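/*
 * Worked example (editor's addition): "Opteron_G4" above uses family 21,
 * which does not fit the 4-bit base field, so the setter stores
 *   0xf00 | ((21 - 0xf) << 20) = 0x00600f00
 * i.e. base family 0xf plus extended family 6.  The getter reverses this by
 * adding the two fields back together: 0xf + 6 = 21.
 */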
1587
1588 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1589 const char *name, void *opaque,
1590 Error **errp)
1591 {
1592 X86CPU *cpu = X86_CPU(obj);
1593 CPUX86State *env = &cpu->env;
1594 int64_t value;
1595
1596 value = (env->cpuid_version >> 4) & 0xf;
1597 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1598 visit_type_int(v, name, &value, errp);
1599 }
1600
1601 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1602 const char *name, void *opaque,
1603 Error **errp)
1604 {
1605 X86CPU *cpu = X86_CPU(obj);
1606 CPUX86State *env = &cpu->env;
1607 const int64_t min = 0;
1608 const int64_t max = 0xff;
1609 Error *local_err = NULL;
1610 int64_t value;
1611
1612 visit_type_int(v, name, &value, &local_err);
1613 if (local_err) {
1614 error_propagate(errp, local_err);
1615 return;
1616 }
1617 if (value < min || value > max) {
1618 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1619 name ? name : "null", value, min, max);
1620 return;
1621 }
1622
1623 env->cpuid_version &= ~0xf00f0;
1624 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1625 }
1626
1627 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1628 const char *name, void *opaque,
1629 Error **errp)
1630 {
1631 X86CPU *cpu = X86_CPU(obj);
1632 CPUX86State *env = &cpu->env;
1633 int64_t value;
1634
1635 value = env->cpuid_version & 0xf;
1636 visit_type_int(v, name, &value, errp);
1637 }
1638
1639 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1640 const char *name, void *opaque,
1641 Error **errp)
1642 {
1643 X86CPU *cpu = X86_CPU(obj);
1644 CPUX86State *env = &cpu->env;
1645 const int64_t min = 0;
1646 const int64_t max = 0xf;
1647 Error *local_err = NULL;
1648 int64_t value;
1649
1650 visit_type_int(v, name, &value, &local_err);
1651 if (local_err) {
1652 error_propagate(errp, local_err);
1653 return;
1654 }
1655 if (value < min || value > max) {
1656 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1657 name ? name : "null", value, min, max);
1658 return;
1659 }
1660
1661 env->cpuid_version &= ~0xf;
1662 env->cpuid_version |= value & 0xf;
1663 }
1664
1665 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1666 {
1667 X86CPU *cpu = X86_CPU(obj);
1668 CPUX86State *env = &cpu->env;
1669 char *value;
1670
1671 value = g_malloc(CPUID_VENDOR_SZ + 1);
1672 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1673 env->cpuid_vendor3);
1674 return value;
1675 }
1676
1677 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1678 Error **errp)
1679 {
1680 X86CPU *cpu = X86_CPU(obj);
1681 CPUX86State *env = &cpu->env;
1682 int i;
1683
1684 if (strlen(value) != CPUID_VENDOR_SZ) {
1685 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1686 return;
1687 }
1688
1689 env->cpuid_vendor1 = 0;
1690 env->cpuid_vendor2 = 0;
1691 env->cpuid_vendor3 = 0;
1692 for (i = 0; i < 4; i++) {
1693 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1694 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1695 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1696 }
1697 }
1698
1699 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1700 {
1701 X86CPU *cpu = X86_CPU(obj);
1702 CPUX86State *env = &cpu->env;
1703 char *value;
1704 int i;
1705
1706 value = g_malloc(48 + 1);
1707 for (i = 0; i < 48; i++) {
1708 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1709 }
1710 value[48] = '\0';
1711 return value;
1712 }
1713
1714 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1715 Error **errp)
1716 {
1717 X86CPU *cpu = X86_CPU(obj);
1718 CPUX86State *env = &cpu->env;
1719 int c, len, i;
1720
1721 if (model_id == NULL) {
1722 model_id = "";
1723 }
1724 len = strlen(model_id);
1725 memset(env->cpuid_model, 0, 48);
1726 for (i = 0; i < 48; i++) {
1727 if (i >= len) {
1728 c = '\0';
1729 } else {
1730 c = (uint8_t)model_id[i];
1731 }
1732 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1733 }
1734 }
1735
1736 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1737 void *opaque, Error **errp)
1738 {
1739 X86CPU *cpu = X86_CPU(obj);
1740 int64_t value;
1741
1742 value = cpu->env.tsc_khz * 1000;
1743 visit_type_int(v, name, &value, errp);
1744 }
1745
1746 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1747 void *opaque, Error **errp)
1748 {
1749 X86CPU *cpu = X86_CPU(obj);
1750 const int64_t min = 0;
1751 const int64_t max = INT64_MAX;
1752 Error *local_err = NULL;
1753 int64_t value;
1754
1755 visit_type_int(v, name, &value, &local_err);
1756 if (local_err) {
1757 error_propagate(errp, local_err);
1758 return;
1759 }
1760 if (value < min || value > max) {
1761 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1762 name ? name : "null", value, min, max);
1763 return;
1764 }
1765
1766 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1767 }
1768
1769 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1770 void *opaque, Error **errp)
1771 {
1772 X86CPU *cpu = X86_CPU(obj);
1773 int64_t value = cpu->apic_id;
1774
1775 visit_type_int(v, name, &value, errp);
1776 }
1777
1778 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1779 void *opaque, Error **errp)
1780 {
1781 X86CPU *cpu = X86_CPU(obj);
1782 DeviceState *dev = DEVICE(obj);
1783 const int64_t min = 0;
1784 const int64_t max = UINT32_MAX;
1785 Error *error = NULL;
1786 int64_t value;
1787
1788 if (dev->realized) {
1789 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1790 "it was realized", name, object_get_typename(obj));
1791 return;
1792 }
1793
1794 visit_type_int(v, name, &value, &error);
1795 if (error) {
1796 error_propagate(errp, error);
1797 return;
1798 }
1799 if (value < min || value > max) {
1800 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1801 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1802 object_get_typename(obj), name, value, min, max);
1803 return;
1804 }
1805
1806 if ((value != cpu->apic_id) && cpu_exists(value)) {
1807 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1808 return;
1809 }
1810 cpu->apic_id = value;
1811 }
1812
1813 /* Generic getter for "feature-words" and "filtered-features" properties */
1814 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1815 const char *name, void *opaque,
1816 Error **errp)
1817 {
1818 uint32_t *array = (uint32_t *)opaque;
1819 FeatureWord w;
1820 Error *err = NULL;
1821 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1822 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1823 X86CPUFeatureWordInfoList *list = NULL;
1824
1825 for (w = 0; w < FEATURE_WORDS; w++) {
1826 FeatureWordInfo *wi = &feature_word_info[w];
1827 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1828 qwi->cpuid_input_eax = wi->cpuid_eax;
1829 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1830 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1831 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1832 qwi->features = array[w];
1833
1834 /* List will be in reverse order, but order shouldn't matter */
1835 list_entries[w].next = list;
1836 list_entries[w].value = &word_infos[w];
1837 list = &list_entries[w];
1838 }
1839
1840 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1841 error_propagate(errp, err);
1842 }
1843
1844 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1845 void *opaque, Error **errp)
1846 {
1847 X86CPU *cpu = X86_CPU(obj);
1848 int64_t value = cpu->hyperv_spinlock_attempts;
1849
1850 visit_type_int(v, name, &value, errp);
1851 }
1852
1853 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1854 void *opaque, Error **errp)
1855 {
1856 const int64_t min = 0xFFF;
1857 const int64_t max = UINT_MAX;
1858 X86CPU *cpu = X86_CPU(obj);
1859 Error *err = NULL;
1860 int64_t value;
1861
1862 visit_type_int(v, name, &value, &err);
1863 if (err) {
1864 error_propagate(errp, err);
1865 return;
1866 }
1867
1868 if (value < min || value > max) {
1869 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1870 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1871 object_get_typename(obj), name ? name : "null",
1872 value, min, max);
1873 return;
1874 }
1875 cpu->hyperv_spinlock_attempts = value;
1876 }
1877
1878 static PropertyInfo qdev_prop_spinlocks = {
1879 .name = "int",
1880 .get = x86_get_hv_spinlocks,
1881 .set = x86_set_hv_spinlocks,
1882 };
1883
1884 /* Convert all '_' in a feature string option name to '-', so that the
1885 * name conforms to the QOM property naming rule, which uses '-' not '_'.
1886 */
1887 static inline void feat2prop(char *s)
1888 {
1889 while ((s = strchr(s, '_'))) {
1890 *s = '-';
1891 }
1892 }
1893
1894 /* Parse "+feature,-feature,feature=foo" CPU feature string
1895 */
1896 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1897 Error **errp)
1898 {
1899 X86CPU *cpu = X86_CPU(cs);
1900 char *featurestr; /* Single "key=value" string being parsed */
1901 FeatureWord w;
1902 /* Features to be added */
1903 FeatureWordArray plus_features = { 0 };
1904 /* Features to be removed */
1905 FeatureWordArray minus_features = { 0 };
1906 uint32_t numvalue;
1907 CPUX86State *env = &cpu->env;
1908 Error *local_err = NULL;
1909
1910 featurestr = features ? strtok(features, ",") : NULL;
1911
1912 while (featurestr) {
1913 char *val;
1914 if (featurestr[0] == '+') {
1915 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1916 } else if (featurestr[0] == '-') {
1917 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1918 } else if ((val = strchr(featurestr, '='))) {
1919 *val = 0; val++;
1920 feat2prop(featurestr);
1921 if (!strcmp(featurestr, "xlevel")) {
1922 char *err;
1923 char num[32];
1924
1925 numvalue = strtoul(val, &err, 0);
1926 if (!*val || *err) {
1927 error_setg(errp, "bad numerical value %s", val);
1928 return;
1929 }
1930 if (numvalue < 0x80000000) {
1931 error_report("xlevel value shall always be >= 0x80000000"
1932 ", fixup will be removed in future versions");
1933 numvalue += 0x80000000;
1934 }
1935 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1936 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1937 } else if (!strcmp(featurestr, "tsc-freq")) {
1938 int64_t tsc_freq;
1939 char *err;
1940 char num[32];
1941
1942 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1943 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1944 if (tsc_freq < 0 || *err) {
1945 error_setg(errp, "bad numerical value %s", val);
1946 return;
1947 }
1948 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1949 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1950 &local_err);
1951 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1952 char *err;
1953 const int min = 0xFFF;
1954 char num[32];
1955 numvalue = strtoul(val, &err, 0);
1956 if (!*val || *err) {
1957 error_setg(errp, "bad numerical value %s", val);
1958 return;
1959 }
1960 if (numvalue < min) {
1961 error_report("hv-spinlocks value shall always be >= 0x%x"
1962 ", fixup will be removed in future versions",
1963 min);
1964 numvalue = min;
1965 }
1966 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1967 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1968 } else {
1969 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1970 }
1971 } else {
1972 feat2prop(featurestr);
1973 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1974 }
1975 if (local_err) {
1976 error_propagate(errp, local_err);
1977 return;
1978 }
1979 featurestr = strtok(NULL, ",");
1980 }
1981
1982 if (cpu->host_features) {
1983 for (w = 0; w < FEATURE_WORDS; w++) {
1984 env->features[w] =
1985 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1986 }
1987 }
1988
1989 for (w = 0; w < FEATURE_WORDS; w++) {
1990 env->features[w] |= plus_features[w];
1991 env->features[w] &= ~minus_features[w];
1992 }
1993 }
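
/*
 * Illustrative feature strings accepted by the parser above (the model
 * name itself is split off earlier, in cpu_x86_create()):
 *
 *     "+avx,-x2apic"          add/remove flags via the plus/minus bitmaps
 *     "xlevel=0x8000000a"     numeric property, parsed with strtoul()
 *     "tsc-freq=2G"           size suffixes are accepted, with unit 1000
 *     "hv-spinlocks=0x1fff"   values below 0xFFF are raised to the minimum
 *     "pmu=on"                any other key is forwarded as a QOM property
 *     "check"                 a bare name is parsed as "<name>=on"
 *
 * Underscores in property names are converted to '-' by feat2prop()
 * before the QOM lookup.
 */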
1994
1995 /* Print all cpuid feature names in featureset
1996 */
1997 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1998 {
1999 int bit;
2000 bool first = true;
2001
2002 for (bit = 0; bit < 32; bit++) {
2003 if (featureset[bit]) {
2004 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2005 first = false;
2006 }
2007 }
2008 }
2009
2010 /* Print the list of available CPU models and the recognized CPUID flags. */
2011 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2012 {
2013 X86CPUDefinition *def;
2014 char buf[256];
2015 int i;
2016
2017 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2018 def = &builtin_x86_defs[i];
2019 snprintf(buf, sizeof(buf), "%s", def->name);
2020 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2021 }
2022 #ifdef CONFIG_KVM
2023 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2024 "KVM processor with all supported host features "
2025 "(only available in KVM mode)");
2026 #endif
2027
2028 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2029 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2030 FeatureWordInfo *fw = &feature_word_info[i];
2031
2032 (*cpu_fprintf)(f, " ");
2033 listflags(f, cpu_fprintf, fw->feat_names);
2034 (*cpu_fprintf)(f, "\n");
2035 }
2036 }
2037
2038 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2039 {
2040 CpuDefinitionInfoList *cpu_list = NULL;
2041 X86CPUDefinition *def;
2042 int i;
2043
2044 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2045 CpuDefinitionInfoList *entry;
2046 CpuDefinitionInfo *info;
2047
2048 def = &builtin_x86_defs[i];
2049 info = g_malloc0(sizeof(*info));
2050 info->name = g_strdup(def->name);
2051
2052 entry = g_malloc0(sizeof(*entry));
2053 entry->value = info;
2054 entry->next = cpu_list;
2055 cpu_list = entry;
2056 }
2057
2058 return cpu_list;
2059 }
2060
2061 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2062 bool migratable_only)
2063 {
2064 FeatureWordInfo *wi = &feature_word_info[w];
2065 uint32_t r;
2066
2067 if (kvm_enabled()) {
2068 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2069 wi->cpuid_ecx,
2070 wi->cpuid_reg);
2071 } else if (tcg_enabled()) {
2072 r = wi->tcg_features;
2073 } else {
2074 return ~0;
2075 }
2076 if (migratable_only) {
2077 r &= x86_cpu_get_migratable_flags(w);
2078 }
2079 return r;
2080 }
2081
2082 /*
2083 * Filters CPU feature words based on host availability of each feature.
2084 *
2085 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2086 */
2087 static int x86_cpu_filter_features(X86CPU *cpu)
2088 {
2089 CPUX86State *env = &cpu->env;
2090 FeatureWord w;
2091 int rv = 0;
2092
2093 for (w = 0; w < FEATURE_WORDS; w++) {
2094 uint32_t host_feat =
2095 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2096 uint32_t requested_features = env->features[w];
2097 env->features[w] &= host_feat;
2098 cpu->filtered_features[w] = requested_features & ~env->features[w];
2099 if (cpu->filtered_features[w]) {
2100 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2101 report_unavailable_features(w, cpu->filtered_features[w]);
2102 }
2103 rv = 1;
2104 }
2105 }
2106
2107 return rv;
2108 }
2109
2110 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2111 {
2112 PropValue *pv;
2113 for (pv = props; pv->prop; pv++) {
2114 if (!pv->value) {
2115 continue;
2116 }
2117 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2118 &error_abort);
2119 }
2120 }
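
/*
 * Sketch of the input expected by x86_cpu_apply_props(); the entries here
 * are hypothetical, while the real list (kvm_default_props) is applied in
 * x86_cpu_load_def() below:
 *
 *     static PropValue example_props[] = {
 *         { "x2apic", "on" },
 *         { "acpi", NULL },        NULL value: entry is skipped
 *         { NULL, NULL },          terminator (pv->prop == NULL)
 *     };
 *     x86_cpu_apply_props(cpu, example_props);
 */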
2121
2122 /* Load data from X86CPUDefinition
2123 */
2124 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2125 {
2126 CPUX86State *env = &cpu->env;
2127 const char *vendor;
2128 char host_vendor[CPUID_VENDOR_SZ + 1];
2129 FeatureWord w;
2130
2131 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2132 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2133 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2134 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2135 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2136 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2137 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2138 for (w = 0; w < FEATURE_WORDS; w++) {
2139 env->features[w] = def->features[w];
2140 }
2141
2142 /* Special cases not set in the X86CPUDefinition structs: */
2143 if (kvm_enabled()) {
2144 if (!kvm_irqchip_in_kernel()) {
2145 x86_cpu_change_kvm_default("x2apic", "off");
2146 }
2147
2148 x86_cpu_apply_props(cpu, kvm_default_props);
2149 }
2150
2151 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2152
2153 /* sysenter isn't supported in compatibility mode on AMD, and
2154 * syscall isn't supported in compatibility mode on Intel.
2155 * Normally we advertise the actual CPU vendor, but you can
2156 * override this using the 'vendor' property if you want to use
2157 * KVM's sysenter/syscall emulation in compatibility mode and
2158 * when doing cross-vendor migration.
2159 */
2160 vendor = def->vendor;
2161 if (kvm_enabled()) {
2162 uint32_t ebx = 0, ecx = 0, edx = 0;
2163 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2164 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2165 vendor = host_vendor;
2166 }
2167
2168 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2169
2170 }
2171
2172 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2173 {
2174 X86CPU *cpu = NULL;
2175 X86CPUClass *xcc;
2176 ObjectClass *oc;
2177 gchar **model_pieces;
2178 char *name, *features;
2179 Error *error = NULL;
2180
2181 model_pieces = g_strsplit(cpu_model, ",", 2);
2182 if (!model_pieces[0]) {
2183 error_setg(&error, "Invalid/empty CPU model name");
2184 goto out;
2185 }
2186 name = model_pieces[0];
2187 features = model_pieces[1];
2188
2189 oc = x86_cpu_class_by_name(name);
2190 if (oc == NULL) {
2191 error_setg(&error, "Unable to find CPU definition: %s", name);
2192 goto out;
2193 }
2194 xcc = X86_CPU_CLASS(oc);
2195
2196 if (xcc->kvm_required && !kvm_enabled()) {
2197 error_setg(&error, "CPU model '%s' requires KVM", name);
2198 goto out;
2199 }
2200
2201 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2202
2203 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2204 if (error) {
2205 goto out;
2206 }
2207
2208 out:
2209 if (error != NULL) {
2210 error_propagate(errp, error);
2211 if (cpu) {
2212 object_unref(OBJECT(cpu));
2213 cpu = NULL;
2214 }
2215 }
2216 g_strfreev(model_pieces);
2217 return cpu;
2218 }
2219
2220 X86CPU *cpu_x86_init(const char *cpu_model)
2221 {
2222 Error *error = NULL;
2223 X86CPU *cpu;
2224
2225 cpu = cpu_x86_create(cpu_model, &error);
2226 if (error) {
2227 goto out;
2228 }
2229
2230 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2231
2232 out:
2233 if (error) {
2234 error_report_err(error);
2235 if (cpu != NULL) {
2236 object_unref(OBJECT(cpu));
2237 cpu = NULL;
2238 }
2239 }
2240 return cpu;
2241 }
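
/*
 * Illustrative caller sketch (model string and APIC ID are hypothetical).
 * System emulation callers go through cpu_x86_create() and set "apic-id"
 * before realizing, since x86_cpu_realizefn() rejects the default of -1:
 *
 *     Error *err = NULL;
 *     X86CPU *cpu = cpu_x86_create("Haswell,+avx2,pmu=on", &err);
 *     if (cpu != NULL) {
 *         object_property_set_int(OBJECT(cpu), 0, "apic-id", &err);
 *         object_property_set_bool(OBJECT(cpu), true, "realized", &err);
 *     }
 */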
2242
2243 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2244 {
2245 X86CPUDefinition *cpudef = data;
2246 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2247
2248 xcc->cpu_def = cpudef;
2249 }
2250
2251 static void x86_register_cpudef_type(X86CPUDefinition *def)
2252 {
2253 char *typename = x86_cpu_type_name(def->name);
2254 TypeInfo ti = {
2255 .name = typename,
2256 .parent = TYPE_X86_CPU,
2257 .class_init = x86_cpu_cpudef_class_init,
2258 .class_data = def,
2259 };
2260
2261 type_register(&ti);
2262 g_free(typename);
2263 }
2264
2265 #if !defined(CONFIG_USER_ONLY)
2266
2267 void cpu_clear_apic_feature(CPUX86State *env)
2268 {
2269 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2270 }
2271
2272 #endif /* !CONFIG_USER_ONLY */
2273
2274 /* Initialize list of CPU models, filling some non-static fields if necessary
2275 */
2276 void x86_cpudef_setup(void)
2277 {
2278 int i, j;
2279 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2280
2281 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2282 X86CPUDefinition *def = &builtin_x86_defs[i];
2283
2284 /* Look for specific "cpudef" models that
2285 * have the QEMU version in .model_id */
2286 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2287 if (strcmp(model_with_versions[j], def->name) == 0) {
2288 pstrcpy(def->model_id, sizeof(def->model_id),
2289 "QEMU Virtual CPU version ");
2290 pstrcat(def->model_id, sizeof(def->model_id),
2291 qemu_hw_version());
2292 break;
2293 }
2294 }
2295 }
2296 }
2297
2298 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2299 uint32_t *eax, uint32_t *ebx,
2300 uint32_t *ecx, uint32_t *edx)
2301 {
2302 X86CPU *cpu = x86_env_get_cpu(env);
2303 CPUState *cs = CPU(cpu);
2304
2305 /* Clamp out-of-range CPUID leaf indexes to a supported leaf */
2306 if (index & 0x80000000) {
2307 if (index > env->cpuid_xlevel) {
2308 if (env->cpuid_xlevel2 > 0) {
2309 /* Handle the Centaur's CPUID instruction. */
2310 if (index > env->cpuid_xlevel2) {
2311 index = env->cpuid_xlevel2;
2312 } else if (index < 0xC0000000) {
2313 index = env->cpuid_xlevel;
2314 }
2315 } else {
2316 /* Intel documentation states that invalid EAX input will
2317 * return the same information as EAX=cpuid_level
2318 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2319 */
2320 index = env->cpuid_level;
2321 }
2322 }
2323 } else {
2324 if (index > env->cpuid_level)
2325 index = env->cpuid_level;
2326 }
2327
2328 switch(index) {
2329 case 0:
2330 *eax = env->cpuid_level;
2331 *ebx = env->cpuid_vendor1;
2332 *edx = env->cpuid_vendor2;
2333 *ecx = env->cpuid_vendor3;
2334 break;
2335 case 1:
2336 *eax = env->cpuid_version;
2337 *ebx = (cpu->apic_id << 24) |
2338 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2339 *ecx = env->features[FEAT_1_ECX];
2340 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2341 *ecx |= CPUID_EXT_OSXSAVE;
2342 }
2343 *edx = env->features[FEAT_1_EDX];
2344 if (cs->nr_cores * cs->nr_threads > 1) {
2345 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2346 *edx |= CPUID_HT;
2347 }
2348 break;
2349 case 2:
2350 /* cache info: needed for Pentium Pro compatibility */
2351 if (cpu->cache_info_passthrough) {
2352 host_cpuid(index, 0, eax, ebx, ecx, edx);
2353 break;
2354 }
2355 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2356 *ebx = 0;
2357 *ecx = 0;
2358 *edx = (L1D_DESCRIPTOR << 16) | \
2359 (L1I_DESCRIPTOR << 8) | \
2360 (L2_DESCRIPTOR);
2361 break;
2362 case 4:
2363 /* cache info: needed for Core compatibility */
2364 if (cpu->cache_info_passthrough) {
2365 host_cpuid(index, count, eax, ebx, ecx, edx);
2366 *eax &= ~0xFC000000;
2367 } else {
2368 *eax = 0;
2369 switch (count) {
2370 case 0: /* L1 dcache info */
2371 *eax |= CPUID_4_TYPE_DCACHE | \
2372 CPUID_4_LEVEL(1) | \
2373 CPUID_4_SELF_INIT_LEVEL;
2374 *ebx = (L1D_LINE_SIZE - 1) | \
2375 ((L1D_PARTITIONS - 1) << 12) | \
2376 ((L1D_ASSOCIATIVITY - 1) << 22);
2377 *ecx = L1D_SETS - 1;
2378 *edx = CPUID_4_NO_INVD_SHARING;
2379 break;
2380 case 1: /* L1 icache info */
2381 *eax |= CPUID_4_TYPE_ICACHE | \
2382 CPUID_4_LEVEL(1) | \
2383 CPUID_4_SELF_INIT_LEVEL;
2384 *ebx = (L1I_LINE_SIZE - 1) | \
2385 ((L1I_PARTITIONS - 1) << 12) | \
2386 ((L1I_ASSOCIATIVITY - 1) << 22);
2387 *ecx = L1I_SETS - 1;
2388 *edx = CPUID_4_NO_INVD_SHARING;
2389 break;
2390 case 2: /* L2 cache info */
2391 *eax |= CPUID_4_TYPE_UNIFIED | \
2392 CPUID_4_LEVEL(2) | \
2393 CPUID_4_SELF_INIT_LEVEL;
2394 if (cs->nr_threads > 1) {
2395 *eax |= (cs->nr_threads - 1) << 14;
2396 }
2397 *ebx = (L2_LINE_SIZE - 1) | \
2398 ((L2_PARTITIONS - 1) << 12) | \
2399 ((L2_ASSOCIATIVITY - 1) << 22);
2400 *ecx = L2_SETS - 1;
2401 *edx = CPUID_4_NO_INVD_SHARING;
2402 break;
2403 default: /* end of info */
2404 *eax = 0;
2405 *ebx = 0;
2406 *ecx = 0;
2407 *edx = 0;
2408 break;
2409 }
2410 }
2411
2412 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2413 if ((*eax & 31) && cs->nr_cores > 1) {
2414 *eax |= (cs->nr_cores - 1) << 26;
2415 }
2416 break;
2417 case 5:
2418 /* mwait info: needed for Core compatibility */
2419 *eax = 0; /* Smallest monitor-line size in bytes */
2420 *ebx = 0; /* Largest monitor-line size in bytes */
2421 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2422 *edx = 0;
2423 break;
2424 case 6:
2425 /* Thermal and Power Leaf */
2426 *eax = env->features[FEAT_6_EAX];
2427 *ebx = 0;
2428 *ecx = 0;
2429 *edx = 0;
2430 break;
2431 case 7:
2432 /* Structured Extended Feature Flags Enumeration Leaf */
2433 if (count == 0) {
2434 *eax = 0; /* Maximum ECX value for sub-leaves */
2435 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2436 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2437 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2438 *ecx |= CPUID_7_0_ECX_OSPKE;
2439 }
2440 *edx = 0; /* Reserved */
2441 } else {
2442 *eax = 0;
2443 *ebx = 0;
2444 *ecx = 0;
2445 *edx = 0;
2446 }
2447 break;
2448 case 9:
2449 /* Direct Cache Access Information Leaf */
2450 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2451 *ebx = 0;
2452 *ecx = 0;
2453 *edx = 0;
2454 break;
2455 case 0xA:
2456 /* Architectural Performance Monitoring Leaf */
2457 if (kvm_enabled() && cpu->enable_pmu) {
2458 KVMState *s = cs->kvm_state;
2459
2460 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2461 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2462 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2463 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2464 } else {
2465 *eax = 0;
2466 *ebx = 0;
2467 *ecx = 0;
2468 *edx = 0;
2469 }
2470 break;
2471 case 0xD: {
2472 KVMState *s = cs->kvm_state;
2473 uint64_t ena_mask;
2474 int i;
2475
2476 /* Processor Extended State */
2477 *eax = 0;
2478 *ebx = 0;
2479 *ecx = 0;
2480 *edx = 0;
2481 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2482 break;
2483 }
2484 if (kvm_enabled()) {
2485 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2486 ena_mask <<= 32;
2487 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2488 } else {
2489 ena_mask = -1;
2490 }
2491
2492 if (count == 0) {
2493 *ecx = 0x240;
2494 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2495 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2496 if ((env->features[esa->feature] & esa->bits) == esa->bits
2497 && ((ena_mask >> i) & 1) != 0) {
2498 if (i < 32) {
2499 *eax |= 1u << i;
2500 } else {
2501 *edx |= 1u << (i - 32);
2502 }
2503 *ecx = MAX(*ecx, esa->offset + esa->size);
2504 }
2505 }
2506 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2507 *ebx = *ecx;
2508 } else if (count == 1) {
2509 *eax = env->features[FEAT_XSAVE];
2510 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2511 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2512 if ((env->features[esa->feature] & esa->bits) == esa->bits
2513 && ((ena_mask >> count) & 1) != 0) {
2514 *eax = esa->size;
2515 *ebx = esa->offset;
2516 }
2517 }
2518 break;
2519 }
2520 case 0x80000000:
2521 *eax = env->cpuid_xlevel;
2522 *ebx = env->cpuid_vendor1;
2523 *edx = env->cpuid_vendor2;
2524 *ecx = env->cpuid_vendor3;
2525 break;
2526 case 0x80000001:
2527 *eax = env->cpuid_version;
2528 *ebx = 0;
2529 *ecx = env->features[FEAT_8000_0001_ECX];
2530 *edx = env->features[FEAT_8000_0001_EDX];
2531
2532 /* The Linux kernel checks for the CMPLegacy bit and
2533 * discards multiple thread information if it is set.
2534 * So don't set it here for Intel to make Linux guests happy.
2535 */
2536 if (cs->nr_cores * cs->nr_threads > 1) {
2537 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2538 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2539 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2540 *ecx |= 1 << 1; /* CmpLegacy bit */
2541 }
2542 }
2543 break;
2544 case 0x80000002:
2545 case 0x80000003:
2546 case 0x80000004:
2547 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2548 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2549 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2550 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2551 break;
2552 case 0x80000005:
2553 /* cache info (L1 cache) */
2554 if (cpu->cache_info_passthrough) {
2555 host_cpuid(index, 0, eax, ebx, ecx, edx);
2556 break;
2557 }
2558 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2559 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2560 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2561 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2562 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2563 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2564 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2565 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2566 break;
2567 case 0x80000006:
2568 /* cache info (L2 cache) */
2569 if (cpu->cache_info_passthrough) {
2570 host_cpuid(index, 0, eax, ebx, ecx, edx);
2571 break;
2572 }
2573 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2574 (L2_DTLB_2M_ENTRIES << 16) | \
2575 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2576 (L2_ITLB_2M_ENTRIES);
2577 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2578 (L2_DTLB_4K_ENTRIES << 16) | \
2579 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2580 (L2_ITLB_4K_ENTRIES);
2581 *ecx = (L2_SIZE_KB_AMD << 16) | \
2582 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2583 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2584 *edx = ((L3_SIZE_KB/512) << 18) | \
2585 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2586 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2587 break;
2588 case 0x80000007:
2589 *eax = 0;
2590 *ebx = 0;
2591 *ecx = 0;
2592 *edx = env->features[FEAT_8000_0007_EDX];
2593 break;
2594 case 0x80000008:
2595 /* virtual & phys address size in low 2 bytes. */
2596 /* XXX: This value must match the one used in the MMU code. */
2597 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2598 /* 64 bit processor */
2599 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2600 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2601 } else {
2602 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2603 *eax = 0x00000024; /* 36 bits physical */
2604 } else {
2605 *eax = 0x00000020; /* 32 bits physical */
2606 }
2607 }
2608 *ebx = 0;
2609 *ecx = 0;
2610 *edx = 0;
2611 if (cs->nr_cores * cs->nr_threads > 1) {
2612 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2613 }
2614 break;
2615 case 0x8000000A:
2616 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2617 *eax = 0x00000001; /* SVM Revision */
2618 *ebx = 0x00000010; /* nr of ASIDs */
2619 *ecx = 0;
2620 *edx = env->features[FEAT_SVM]; /* optional features */
2621 } else {
2622 *eax = 0;
2623 *ebx = 0;
2624 *ecx = 0;
2625 *edx = 0;
2626 }
2627 break;
2628 case 0xC0000000:
2629 *eax = env->cpuid_xlevel2;
2630 *ebx = 0;
2631 *ecx = 0;
2632 *edx = 0;
2633 break;
2634 case 0xC0000001:
2635 /* Support for VIA CPU's CPUID instruction */
2636 *eax = env->cpuid_version;
2637 *ebx = 0;
2638 *ecx = 0;
2639 *edx = env->features[FEAT_C000_0001_EDX];
2640 break;
2641 case 0xC0000002:
2642 case 0xC0000003:
2643 case 0xC0000004:
2644 /* Reserved for future use; currently filled with zeros */
2645 *eax = 0;
2646 *ebx = 0;
2647 *ecx = 0;
2648 *edx = 0;
2649 break;
2650 default:
2651 /* reserved values: zero */
2652 *eax = 0;
2653 *ebx = 0;
2654 *ecx = 0;
2655 *edx = 0;
2656 break;
2657 }
2658 }
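
/*
 * Minimal usage sketch (the leaf and the flag checked are illustrative);
 * the values returned come from env->features and the constants above,
 * not from the host CPU:
 *
 *     uint32_t eax, ebx, ecx, edx;
 *     cpu_x86_cpuid(env, 1, 0, &eax, &ebx, &ecx, &edx);
 *     bool has_sse2 = edx & CPUID_SSE2;    guest-visible CPUID.01H:EDX bit
 */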
2659
2660 /* CPUClass::reset() */
2661 static void x86_cpu_reset(CPUState *s)
2662 {
2663 X86CPU *cpu = X86_CPU(s);
2664 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2665 CPUX86State *env = &cpu->env;
2666 target_ulong cr4;
2667 uint64_t xcr0;
2668 int i;
2669
2670 xcc->parent_reset(s);
2671
2672 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2673
2674 tlb_flush(s, 1);
2675
2676 env->old_exception = -1;
2677
2678 /* init to reset state */
2679
2680 #ifdef CONFIG_SOFTMMU
2681 env->hflags |= HF_SOFTMMU_MASK;
2682 #endif
2683 env->hflags2 |= HF2_GIF_MASK;
2684
2685 cpu_x86_update_cr0(env, 0x60000010);
2686 env->a20_mask = ~0x0;
2687 env->smbase = 0x30000;
2688
2689 env->idt.limit = 0xffff;
2690 env->gdt.limit = 0xffff;
2691 env->ldt.limit = 0xffff;
2692 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2693 env->tr.limit = 0xffff;
2694 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2695
2696 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2697 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2698 DESC_R_MASK | DESC_A_MASK);
2699 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2700 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2701 DESC_A_MASK);
2702 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2703 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2704 DESC_A_MASK);
2705 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2706 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2707 DESC_A_MASK);
2708 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2709 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2710 DESC_A_MASK);
2711 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2712 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2713 DESC_A_MASK);
2714
2715 env->eip = 0xfff0;
2716 env->regs[R_EDX] = env->cpuid_version;
2717
2718 env->eflags = 0x2;
2719
2720 /* FPU init */
2721 for (i = 0; i < 8; i++) {
2722 env->fptags[i] = 1;
2723 }
2724 cpu_set_fpuc(env, 0x37f);
2725
2726 env->mxcsr = 0x1f80;
2727 /* All units are in INIT state. */
2728 env->xstate_bv = 0;
2729
2730 env->pat = 0x0007040600070406ULL;
2731 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2732
2733 memset(env->dr, 0, sizeof(env->dr));
2734 env->dr[6] = DR6_FIXED_1;
2735 env->dr[7] = DR7_FIXED_1;
2736 cpu_breakpoint_remove_all(s, BP_CPU);
2737 cpu_watchpoint_remove_all(s, BP_CPU);
2738
2739 cr4 = 0;
2740 xcr0 = XSTATE_FP_MASK;
2741
2742 #ifdef CONFIG_USER_ONLY
2743 /* Enable all the features for user-mode. */
2744 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2745 xcr0 |= XSTATE_SSE_MASK;
2746 }
2747 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2748 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2749 if ((env->features[esa->feature] & esa->bits) == esa->bits) {
2750 xcr0 |= 1ull << i;
2751 }
2752 }
2753
2754 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2755 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2756 }
2757 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2758 cr4 |= CR4_FSGSBASE_MASK;
2759 }
2760 #endif
2761
2762 env->xcr0 = xcr0;
2763 cpu_x86_update_cr4(env, cr4);
2764
2765 /*
2766 * SDM 11.11.5 requires:
2767 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2768 * - IA32_MTRR_PHYSMASKn.V = 0
2769 * All other bits are undefined. For simplification, zero it all.
2770 */
2771 env->mtrr_deftype = 0;
2772 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2773 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2774
2775 #if !defined(CONFIG_USER_ONLY)
2776 /* We hard-wire the BSP to the first CPU. */
2777 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2778
2779 s->halted = !cpu_is_bsp(cpu);
2780
2781 if (kvm_enabled()) {
2782 kvm_arch_reset_vcpu(cpu);
2783 }
2784 #endif
2785 }
2786
2787 #ifndef CONFIG_USER_ONLY
2788 bool cpu_is_bsp(X86CPU *cpu)
2789 {
2790 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2791 }
2792
2793 /* TODO: remove me when reset over QOM tree is implemented */
2794 static void x86_cpu_machine_reset_cb(void *opaque)
2795 {
2796 X86CPU *cpu = opaque;
2797 cpu_reset(CPU(cpu));
2798 }
2799 #endif
2800
2801 static void mce_init(X86CPU *cpu)
2802 {
2803 CPUX86State *cenv = &cpu->env;
2804 unsigned int bank;
2805
2806 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2807 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2808 (CPUID_MCE | CPUID_MCA)) {
2809 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2810 cenv->mcg_ctl = ~(uint64_t)0;
2811 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2812 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2813 }
2814 }
2815 }
2816
2817 #ifndef CONFIG_USER_ONLY
2818 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2819 {
2820 APICCommonState *apic;
2821 const char *apic_type = "apic";
2822
2823 if (kvm_apic_in_kernel()) {
2824 apic_type = "kvm-apic";
2825 } else if (xen_enabled()) {
2826 apic_type = "xen-apic";
2827 }
2828
2829 cpu->apic_state = DEVICE(object_new(apic_type));
2830
2831 object_property_add_child(OBJECT(cpu), "apic",
2832 OBJECT(cpu->apic_state), NULL);
2833 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2834 /* TODO: convert to link<> */
2835 apic = APIC_COMMON(cpu->apic_state);
2836 apic->cpu = cpu;
2837 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2838 }
2839
2840 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2841 {
2842 APICCommonState *apic;
2843 static bool apic_mmio_map_once;
2844
2845 if (cpu->apic_state == NULL) {
2846 return;
2847 }
2848 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2849 errp);
2850
2851 /* Map APIC MMIO area */
2852 apic = APIC_COMMON(cpu->apic_state);
2853 if (!apic_mmio_map_once) {
2854 memory_region_add_subregion_overlap(get_system_memory(),
2855 apic->apicbase &
2856 MSR_IA32_APICBASE_BASE,
2857 &apic->io_memory,
2858 0x1000);
2859 apic_mmio_map_once = true;
2860 }
2861 }
2862
2863 static void x86_cpu_machine_done(Notifier *n, void *unused)
2864 {
2865 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2866 MemoryRegion *smram =
2867 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2868
2869 if (smram) {
2870 cpu->smram = g_new(MemoryRegion, 1);
2871 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2872 smram, 0, 1ull << 32);
2873 memory_region_set_enabled(cpu->smram, false);
2874 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2875 }
2876 }
2877 #else
2878 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2879 {
2880 }
2881 #endif
2882
2883
2884 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2885 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2886 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2887 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2888 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2889 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2890 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2891 {
2892 CPUState *cs = CPU(dev);
2893 X86CPU *cpu = X86_CPU(dev);
2894 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2895 CPUX86State *env = &cpu->env;
2896 Error *local_err = NULL;
2897 static bool ht_warned;
2898
2899 if (cpu->apic_id < 0) {
2900 error_setg(errp, "apic-id property was not initialized properly");
2901 return;
2902 }
2903
2904 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2905 env->cpuid_level = 7;
2906 }
2907
2908 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2909 error_setg(&local_err,
2910 kvm_enabled() ?
2911 "Host doesn't support requested features" :
2912 "TCG doesn't support requested features");
2913 goto out;
2914 }
2915
2916 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2917 * CPUID[1].EDX.
2918 */
2919 if (IS_AMD_CPU(env)) {
2920 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2921 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2922 & CPUID_EXT2_AMD_ALIASES);
2923 }
2924
2925
2926 #ifndef CONFIG_USER_ONLY
2927 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2928
2929 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2930 x86_cpu_apic_create(cpu, &local_err);
2931 if (local_err != NULL) {
2932 goto out;
2933 }
2934 }
2935 #endif
2936
2937 mce_init(cpu);
2938
2939 #ifndef CONFIG_USER_ONLY
2940 if (tcg_enabled()) {
2941 AddressSpace *newas = g_new(AddressSpace, 1);
2942
2943 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2944 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2945
2946 /* Outer container... */
2947 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2948 memory_region_set_enabled(cpu->cpu_as_root, true);
2949
2950 /* ... with two regions inside: normal system memory with low
2951 * priority, and...
2952 */
2953 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2954 get_system_memory(), 0, ~0ull);
2955 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2956 memory_region_set_enabled(cpu->cpu_as_mem, true);
2957 address_space_init(newas, cpu->cpu_as_root, "CPU");
2958 cs->num_ases = 1;
2959 cpu_address_space_init(cs, newas, 0);
2960
2961 /* ... SMRAM with higher priority, linked from /machine/smram. */
2962 cpu->machine_done.notify = x86_cpu_machine_done;
2963 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2964 }
2965 #endif
2966
2967 qemu_init_vcpu(cs);
2968
2969 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2970 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2971 * based on inputs (sockets, cores, threads), it is still better to
2972 * give users a warning.
2973 *
2974 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2975 * cs->nr_threads hasn't been populated yet and the check is incorrect.
2976 */
2977 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2978 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2979 " -smp options properly.");
2980 ht_warned = true;
2981 }
2982
2983 x86_cpu_apic_realize(cpu, &local_err);
2984 if (local_err != NULL) {
2985 goto out;
2986 }
2987 cpu_reset(cs);
2988
2989 xcc->parent_realize(dev, &local_err);
2990
2991 out:
2992 if (local_err != NULL) {
2993 error_propagate(errp, local_err);
2994 return;
2995 }
2996 }
2997
2998 typedef struct BitProperty {
2999 uint32_t *ptr;
3000 uint32_t mask;
3001 } BitProperty;
3002
3003 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3004 void *opaque, Error **errp)
3005 {
3006 BitProperty *fp = opaque;
3007 bool value = (*fp->ptr & fp->mask) == fp->mask;
3008 visit_type_bool(v, name, &value, errp);
3009 }
3010
3011 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3012 void *opaque, Error **errp)
3013 {
3014 DeviceState *dev = DEVICE(obj);
3015 BitProperty *fp = opaque;
3016 Error *local_err = NULL;
3017 bool value;
3018
3019 if (dev->realized) {
3020 qdev_prop_set_after_realize(dev, name, errp);
3021 return;
3022 }
3023
3024 visit_type_bool(v, name, &value, &local_err);
3025 if (local_err) {
3026 error_propagate(errp, local_err);
3027 return;
3028 }
3029
3030 if (value) {
3031 *fp->ptr |= fp->mask;
3032 } else {
3033 *fp->ptr &= ~fp->mask;
3034 }
3035 }
3036
3037 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3038 void *opaque)
3039 {
3040 BitProperty *prop = opaque;
3041 g_free(prop);
3042 }
3043
3044 /* Register a boolean property to get/set a single bit in a uint32_t field.
3045 *
3046 * The same property name can be registered multiple times to make it affect
3047 * multiple bits in the same FeatureWord. In that case, the getter will return
3048 * true only if all bits are set.
3049 */
3050 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3051 const char *prop_name,
3052 uint32_t *field,
3053 int bitnr)
3054 {
3055 BitProperty *fp;
3056 ObjectProperty *op;
3057 uint32_t mask = (1UL << bitnr);
3058
3059 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3060 if (op) {
3061 fp = op->opaque;
3062 assert(fp->ptr == field);
3063 fp->mask |= mask;
3064 } else {
3065 fp = g_new0(BitProperty, 1);
3066 fp->ptr = field;
3067 fp->mask = mask;
3068 object_property_add(OBJECT(cpu), prop_name, "bool",
3069 x86_cpu_get_bit_prop,
3070 x86_cpu_set_bit_prop,
3071 x86_cpu_release_bit_prop, fp, &error_abort);
3072 }
3073 }
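
/*
 * Hypothetical sketch of the multi-bit case described above: registering
 * the same name for two bits of one feature word widens the mask, so the
 * boolean property reads back as true only when both bits are set:
 *
 *     x86_cpu_register_bit_prop(cpu, "foo", &cpu->env.features[FEAT_1_EDX], 0);
 *     x86_cpu_register_bit_prop(cpu, "foo", &cpu->env.features[FEAT_1_EDX], 3);
 *         -> the shared BitProperty ends up with mask == 0x9
 */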
3074
3075 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3076 FeatureWord w,
3077 int bitnr)
3078 {
3079 Object *obj = OBJECT(cpu);
3080 int i;
3081 char **names;
3082 FeatureWordInfo *fi = &feature_word_info[w];
3083
3084 if (!fi->feat_names) {
3085 return;
3086 }
3087 if (!fi->feat_names[bitnr]) {
3088 return;
3089 }
3090
3091 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3092
3093 feat2prop(names[0]);
3094 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3095
3096 for (i = 1; names[i]; i++) {
3097 feat2prop(names[i]);
3098 object_property_add_alias(obj, names[i], obj, names[0],
3099 &error_abort);
3100 }
3101
3102 g_strfreev(names);
3103 }
3104
3105 static void x86_cpu_initfn(Object *obj)
3106 {
3107 CPUState *cs = CPU(obj);
3108 X86CPU *cpu = X86_CPU(obj);
3109 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3110 CPUX86State *env = &cpu->env;
3111 FeatureWord w;
3112 static int inited;
3113
3114 cs->env_ptr = env;
3115 cpu_exec_init(cs, &error_abort);
3116
3117 object_property_add(obj, "family", "int",
3118 x86_cpuid_version_get_family,
3119 x86_cpuid_version_set_family, NULL, NULL, NULL);
3120 object_property_add(obj, "model", "int",
3121 x86_cpuid_version_get_model,
3122 x86_cpuid_version_set_model, NULL, NULL, NULL);
3123 object_property_add(obj, "stepping", "int",
3124 x86_cpuid_version_get_stepping,
3125 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3126 object_property_add_str(obj, "vendor",
3127 x86_cpuid_get_vendor,
3128 x86_cpuid_set_vendor, NULL);
3129 object_property_add_str(obj, "model-id",
3130 x86_cpuid_get_model_id,
3131 x86_cpuid_set_model_id, NULL);
3132 object_property_add(obj, "tsc-frequency", "int",
3133 x86_cpuid_get_tsc_freq,
3134 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3135 object_property_add(obj, "apic-id", "int",
3136 x86_cpuid_get_apic_id,
3137 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3138 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3139 x86_cpu_get_feature_words,
3140 NULL, NULL, (void *)env->features, NULL);
3141 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3142 x86_cpu_get_feature_words,
3143 NULL, NULL, (void *)cpu->filtered_features, NULL);
3144
3145 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3146
3147 #ifndef CONFIG_USER_ONLY
3148 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3149 cpu->apic_id = -1;
3150 #endif
3151
3152 for (w = 0; w < FEATURE_WORDS; w++) {
3153 int bitnr;
3154
3155 for (bitnr = 0; bitnr < 32; bitnr++) {
3156 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3157 }
3158 }
3159
3160 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3161
3162 /* init various static tables used in TCG mode */
3163 if (tcg_enabled() && !inited) {
3164 inited = 1;
3165 tcg_x86_init();
3166 }
3167 }
3168
3169 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3170 {
3171 X86CPU *cpu = X86_CPU(cs);
3172
3173 return cpu->apic_id;
3174 }
3175
3176 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3177 {
3178 X86CPU *cpu = X86_CPU(cs);
3179
3180 return cpu->env.cr[0] & CR0_PG_MASK;
3181 }
3182
3183 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3184 {
3185 X86CPU *cpu = X86_CPU(cs);
3186
3187 cpu->env.eip = value;
3188 }
3189
3190 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3191 {
3192 X86CPU *cpu = X86_CPU(cs);
3193
3194 cpu->env.eip = tb->pc - tb->cs_base;
3195 }
3196
3197 static bool x86_cpu_has_work(CPUState *cs)
3198 {
3199 X86CPU *cpu = X86_CPU(cs);
3200 CPUX86State *env = &cpu->env;
3201
3202 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3203 CPU_INTERRUPT_POLL)) &&
3204 (env->eflags & IF_MASK)) ||
3205 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3206 CPU_INTERRUPT_INIT |
3207 CPU_INTERRUPT_SIPI |
3208 CPU_INTERRUPT_MCE)) ||
3209 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3210 !(env->hflags & HF_SMM_MASK));
3211 }
3212
3213 static Property x86_cpu_properties[] = {
3214 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3215 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3216 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3217 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3218 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3219 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3220 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3221 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3222 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3223 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3224 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3225 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3226 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3227 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3228 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3229 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3230 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3231 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3232 DEFINE_PROP_END_OF_LIST()
3233 };
3234
3235 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3236 {
3237 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3238 CPUClass *cc = CPU_CLASS(oc);
3239 DeviceClass *dc = DEVICE_CLASS(oc);
3240
3241 xcc->parent_realize = dc->realize;
3242 dc->realize = x86_cpu_realizefn;
3243 dc->props = x86_cpu_properties;
3244
3245 xcc->parent_reset = cc->reset;
3246 cc->reset = x86_cpu_reset;
3247 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3248
3249 cc->class_by_name = x86_cpu_class_by_name;
3250 cc->parse_features = x86_cpu_parse_featurestr;
3251 cc->has_work = x86_cpu_has_work;
3252 cc->do_interrupt = x86_cpu_do_interrupt;
3253 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3254 cc->dump_state = x86_cpu_dump_state;
3255 cc->set_pc = x86_cpu_set_pc;
3256 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3257 cc->gdb_read_register = x86_cpu_gdb_read_register;
3258 cc->gdb_write_register = x86_cpu_gdb_write_register;
3259 cc->get_arch_id = x86_cpu_get_arch_id;
3260 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3261 #ifdef CONFIG_USER_ONLY
3262 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3263 #else
3264 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3265 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3266 cc->write_elf64_note = x86_cpu_write_elf64_note;
3267 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3268 cc->write_elf32_note = x86_cpu_write_elf32_note;
3269 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3270 cc->vmsd = &vmstate_x86_cpu;
3271 #endif
3272 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3273 #ifndef CONFIG_USER_ONLY
3274 cc->debug_excp_handler = breakpoint_handler;
3275 #endif
3276 cc->cpu_exec_enter = x86_cpu_exec_enter;
3277 cc->cpu_exec_exit = x86_cpu_exec_exit;
3278
3279 /*
3280 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3281 * object in cpus -> dangling pointer after final object_unref().
3282 */
3283 dc->cannot_destroy_with_object_finalize_yet = true;
3284 }
3285
3286 static const TypeInfo x86_cpu_type_info = {
3287 .name = TYPE_X86_CPU,
3288 .parent = TYPE_CPU,
3289 .instance_size = sizeof(X86CPU),
3290 .instance_init = x86_cpu_initfn,
3291 .abstract = true,
3292 .class_size = sizeof(X86CPUClass),
3293 .class_init = x86_cpu_common_class_init,
3294 };
3295
3296 static void x86_cpu_register_types(void)
3297 {
3298 int i;
3299
3300 type_register_static(&x86_cpu_type_info);
3301 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3302 x86_register_cpudef_type(&builtin_x86_defs[i]);
3303 }
3304 #ifdef CONFIG_KVM
3305 type_register_static(&host_x86_cpu_type_info);
3306 #endif
3307 }
3308
3309 type_init(x86_cpu_register_types)