1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #ifndef CONFIG_USER_ONLY
45 #include "exec/address-spaces.h"
46 #include "hw/hw.h"
47 #include "hw/xen/xen.h"
48 #include "hw/i386/apic_internal.h"
49 #endif
50
51
52 /* Cache topology CPUID constants: */
53
54 /* CPUID Leaf 2 Descriptors */
55
56 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
57 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
58 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
59
60
61 /* CPUID Leaf 4 constants: */
62
63 /* EAX: */
64 #define CPUID_4_TYPE_DCACHE 1
65 #define CPUID_4_TYPE_ICACHE 2
66 #define CPUID_4_TYPE_UNIFIED 3
67
68 #define CPUID_4_LEVEL(l) ((l) << 5)
69
70 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
71 #define CPUID_4_FULLY_ASSOC (1 << 9)
72
73 /* EDX: */
74 #define CPUID_4_NO_INVD_SHARING (1 << 0)
75 #define CPUID_4_INCLUSIVE (1 << 1)
76 #define CPUID_4_COMPLEX_IDX (1 << 2)
77
78 #define ASSOC_FULL 0xFF
79
80 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
81 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
82 a == 2 ? 0x2 : \
83 a == 4 ? 0x4 : \
84 a == 8 ? 0x6 : \
85 a == 16 ? 0x8 : \
86 a == 32 ? 0xA : \
87 a == 48 ? 0xB : \
88 a == 64 ? 0xC : \
89 a == 96 ? 0xD : \
90 a == 128 ? 0xE : \
91 a == ASSOC_FULL ? 0xF : \
92 0 /* invalid value */)
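/* For example: AMD_ENC_ASSOC(8) evaluates to 0x6, AMD_ENC_ASSOC(ASSOC_FULL)
 * to 0xF, and any associativity not listed above (e.g. 24) falls through to
 * 0, the "invalid" encoding.
 */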
93
94
95 /* Definitions of the hardcoded cache entries we expose: */
96
97 /* L1 data cache: */
98 #define L1D_LINE_SIZE 64
99 #define L1D_ASSOCIATIVITY 8
100 #define L1D_SETS 64
101 #define L1D_PARTITIONS 1
102 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
103 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
104 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
105 #define L1D_LINES_PER_TAG 1
106 #define L1D_SIZE_KB_AMD 64
107 #define L1D_ASSOCIATIVITY_AMD 2
108
109 /* L1 instruction cache: */
110 #define L1I_LINE_SIZE 64
111 #define L1I_ASSOCIATIVITY 8
112 #define L1I_SETS 64
113 #define L1I_PARTITIONS 1
114 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
115 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
116 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
117 #define L1I_LINES_PER_TAG 1
118 #define L1I_SIZE_KB_AMD 64
119 #define L1I_ASSOCIATIVITY_AMD 2
120
121 /* Level 2 unified cache: */
122 #define L2_LINE_SIZE 64
123 #define L2_ASSOCIATIVITY 16
124 #define L2_SETS 4096
125 #define L2_PARTITIONS 1
126 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
127 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
128 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
129 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
130 #define L2_LINES_PER_TAG 1
131 #define L2_SIZE_KB_AMD 512
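/* Cross-check of the leaf-4 numbers above: 64 bytes per line x 16 ways x
 * 4096 sets x 1 partition = 4 MiB, while the leaf-2 descriptor advertises
 * only a 2 MB, 8-way cache; that mismatch is what the FIXME notes refer to.
 */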
132
133 /* No L3 cache: */
134 #define L3_SIZE_KB 0 /* disabled */
135 #define L3_ASSOCIATIVITY 0 /* disabled */
136 #define L3_LINES_PER_TAG 0 /* disabled */
137 #define L3_LINE_SIZE 0 /* disabled */
138
139 /* TLB definitions: */
140
141 #define L1_DTLB_2M_ASSOC 1
142 #define L1_DTLB_2M_ENTRIES 255
143 #define L1_DTLB_4K_ASSOC 1
144 #define L1_DTLB_4K_ENTRIES 255
145
146 #define L1_ITLB_2M_ASSOC 1
147 #define L1_ITLB_2M_ENTRIES 255
148 #define L1_ITLB_4K_ASSOC 1
149 #define L1_ITLB_4K_ENTRIES 255
150
151 #define L2_DTLB_2M_ASSOC 0 /* disabled */
152 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
153 #define L2_DTLB_4K_ASSOC 4
154 #define L2_DTLB_4K_ENTRIES 512
155
156 #define L2_ITLB_2M_ASSOC 0 /* disabled */
157 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
158 #define L2_ITLB_4K_ASSOC 4
159 #define L2_ITLB_4K_ENTRIES 512
160
161
162
163 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
164 uint32_t vendor2, uint32_t vendor3)
165 {
166 int i;
167 for (i = 0; i < 4; i++) {
168 dst[i] = vendor1 >> (8 * i);
169 dst[i + 4] = vendor2 >> (8 * i);
170 dst[i + 8] = vendor3 >> (8 * i);
171 }
172 dst[CPUID_VENDOR_SZ] = '\0';
173 }
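/* Example: for the vendor string "GenuineIntel" as returned by CPUID leaf 0
 * (EBX = "Genu", EDX = "ineI", ECX = "ntel"), calling
 * x86_cpu_vendor_words2str(buf, ebx, edx, ecx) reassembles the readable
 * 12-character string, as done in host_x86_cpu_class_init() below.
 */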
174
175 /* feature flags taken from "Intel Processor Identification and the CPUID
176 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
177 * between feature naming conventions, aliases may be added.
178 */
179 static const char *feature_name[] = {
180 "fpu", "vme", "de", "pse",
181 "tsc", "msr", "pae", "mce",
182 "cx8", "apic", NULL, "sep",
183 "mtrr", "pge", "mca", "cmov",
184 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
185 NULL, "ds" /* Intel dts */, "acpi", "mmx",
186 "fxsr", "sse", "sse2", "ss",
187 "ht" /* Intel htt */, "tm", "ia64", "pbe",
188 };
189 static const char *ext_feature_name[] = {
190 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
191 "ds_cpl", "vmx", "smx", "est",
192 "tm2", "ssse3", "cid", NULL,
193 "fma", "cx16", "xtpr", "pdcm",
194 NULL, "pcid", "dca", "sse4.1|sse4_1",
195 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
196 "tsc-deadline", "aes", "xsave", "osxsave",
197 "avx", "f16c", "rdrand", "hypervisor",
198 };
199 /* Feature names that are already defined in feature_name[] but are also set
200 * in CPUID[8000_0001].EDX on AMD CPUs are not repeated in
201 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
202 * if and only if the CPU vendor is AMD.
203 */
204 static const char *ext2_feature_name[] = {
205 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
206 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
207 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
208 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
209 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
210 "nx|xd", NULL, "mmxext", NULL /* mmx */,
211 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
212 NULL, "lm|i64", "3dnowext", "3dnow",
213 };
214 static const char *ext3_feature_name[] = {
215 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
216 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
217 "3dnowprefetch", "osvw", "ibs", "xop",
218 "skinit", "wdt", NULL, "lwp",
219 "fma4", "tce", NULL, "nodeid_msr",
220 NULL, "tbm", "topoext", "perfctr_core",
221 "perfctr_nb", NULL, NULL, NULL,
222 NULL, NULL, NULL, NULL,
223 };
224
225 static const char *ext4_feature_name[] = {
226 NULL, NULL, "xstore", "xstore-en",
227 NULL, NULL, "xcrypt", "xcrypt-en",
228 "ace2", "ace2-en", "phe", "phe-en",
229 "pmm", "pmm-en", NULL, NULL,
230 NULL, NULL, NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 };
235
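/* Note: "kvmclock" intentionally appears twice below; KVM exposes two
 * clocksource feature bits (bit 0 and bit 3, presumably
 * KVM_FEATURE_CLOCKSOURCE and KVM_FEATURE_CLOCKSOURCE2) that QEMU treats as
 * a single "kvmclock" property.
 */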
236 static const char *kvm_feature_name[] = {
237 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
238 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
239 NULL, NULL, NULL, NULL,
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 "kvmclock-stable-bit", NULL, NULL, NULL,
244 NULL, NULL, NULL, NULL,
245 };
246
247 static const char *svm_feature_name[] = {
248 "npt", "lbrv", "svm_lock", "nrip_save",
249 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
250 NULL, NULL, "pause_filter", NULL,
251 "pfthreshold", NULL, NULL, NULL,
252 NULL, NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 };
257
258 static const char *cpuid_7_0_ebx_feature_name[] = {
259 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
260 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
261 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
262 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
263 };
264
265 static const char *cpuid_7_0_ecx_feature_name[] = {
266 NULL, NULL, NULL, "pku",
267 "ospke", NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 NULL, NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 };
275
276 static const char *cpuid_apm_edx_feature_name[] = {
277 NULL, NULL, NULL, NULL,
278 NULL, NULL, NULL, NULL,
279 "invtsc", NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 };
286
287 static const char *cpuid_xsave_feature_name[] = {
288 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
289 NULL, NULL, NULL, NULL,
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 };
297
298 static const char *cpuid_6_feature_name[] = {
299 NULL, NULL, "arat", NULL,
300 NULL, NULL, NULL, NULL,
301 NULL, NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
305 NULL, NULL, NULL, NULL,
306 NULL, NULL, NULL, NULL,
307 };
308
309 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
310 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
311 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
312 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
313 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
314 CPUID_PSE36 | CPUID_FXSR)
315 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
316 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
317 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
318 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
319 CPUID_PAE | CPUID_SEP | CPUID_APIC)
320
321 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
322 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
323 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
324 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
325 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
326 /* partly implemented:
327 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
328 /* missing:
329 CPUID_VME, CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
330 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
331 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
332 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
333 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
334 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
335 /* missing:
336 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
337 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
338 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
339 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
340 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
341
342 #ifdef TARGET_X86_64
343 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
344 #else
345 #define TCG_EXT2_X86_64_FEATURES 0
346 #endif
347
348 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
349 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
350 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
351 TCG_EXT2_X86_64_FEATURES)
352 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
353 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
354 #define TCG_EXT4_FEATURES 0
355 #define TCG_SVM_FEATURES 0
356 #define TCG_KVM_FEATURES 0
357 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
358 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
359 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
360 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
361 /* missing:
362 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
363 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
364 CPUID_7_0_EBX_RDSEED */
365 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
366 #define TCG_APM_FEATURES 0
367 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
368 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
369 /* missing:
370 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
371
372 typedef struct FeatureWordInfo {
373 const char **feat_names;
374 uint32_t cpuid_eax; /* Input EAX for CPUID */
375 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
376 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
377 int cpuid_reg; /* output register (R_* constant) */
378 uint32_t tcg_features; /* Feature flags supported by TCG */
379 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
380 } FeatureWordInfo;
381
382 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
383 [FEAT_1_EDX] = {
384 .feat_names = feature_name,
385 .cpuid_eax = 1, .cpuid_reg = R_EDX,
386 .tcg_features = TCG_FEATURES,
387 },
388 [FEAT_1_ECX] = {
389 .feat_names = ext_feature_name,
390 .cpuid_eax = 1, .cpuid_reg = R_ECX,
391 .tcg_features = TCG_EXT_FEATURES,
392 },
393 [FEAT_8000_0001_EDX] = {
394 .feat_names = ext2_feature_name,
395 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
396 .tcg_features = TCG_EXT2_FEATURES,
397 },
398 [FEAT_8000_0001_ECX] = {
399 .feat_names = ext3_feature_name,
400 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
401 .tcg_features = TCG_EXT3_FEATURES,
402 },
403 [FEAT_C000_0001_EDX] = {
404 .feat_names = ext4_feature_name,
405 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
406 .tcg_features = TCG_EXT4_FEATURES,
407 },
408 [FEAT_KVM] = {
409 .feat_names = kvm_feature_name,
410 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
411 .tcg_features = TCG_KVM_FEATURES,
412 },
413 [FEAT_SVM] = {
414 .feat_names = svm_feature_name,
415 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
416 .tcg_features = TCG_SVM_FEATURES,
417 },
418 [FEAT_7_0_EBX] = {
419 .feat_names = cpuid_7_0_ebx_feature_name,
420 .cpuid_eax = 7,
421 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
422 .cpuid_reg = R_EBX,
423 .tcg_features = TCG_7_0_EBX_FEATURES,
424 },
425 [FEAT_7_0_ECX] = {
426 .feat_names = cpuid_7_0_ecx_feature_name,
427 .cpuid_eax = 7,
428 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
429 .cpuid_reg = R_ECX,
430 .tcg_features = TCG_7_0_ECX_FEATURES,
431 },
432 [FEAT_8000_0007_EDX] = {
433 .feat_names = cpuid_apm_edx_feature_name,
434 .cpuid_eax = 0x80000007,
435 .cpuid_reg = R_EDX,
436 .tcg_features = TCG_APM_FEATURES,
437 .unmigratable_flags = CPUID_APM_INVTSC,
438 },
439 [FEAT_XSAVE] = {
440 .feat_names = cpuid_xsave_feature_name,
441 .cpuid_eax = 0xd,
442 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
443 .cpuid_reg = R_EAX,
444 .tcg_features = TCG_XSAVE_FEATURES,
445 },
446 [FEAT_6_EAX] = {
447 .feat_names = cpuid_6_feature_name,
448 .cpuid_eax = 6, .cpuid_reg = R_EAX,
449 .tcg_features = TCG_6_EAX_FEATURES,
450 },
451 };
452
453 typedef struct X86RegisterInfo32 {
454 /* Name of register */
455 const char *name;
456 /* QAPI enum value register */
457 X86CPURegister32 qapi_enum;
458 } X86RegisterInfo32;
459
460 #define REGISTER(reg) \
461 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
462 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
463 REGISTER(EAX),
464 REGISTER(ECX),
465 REGISTER(EDX),
466 REGISTER(EBX),
467 REGISTER(ESP),
468 REGISTER(EBP),
469 REGISTER(ESI),
470 REGISTER(EDI),
471 };
472 #undef REGISTER
473
474 const ExtSaveArea x86_ext_save_areas[] = {
475 [XSTATE_YMM_BIT] =
476 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
477 .offset = offsetof(X86XSaveArea, avx_state),
478 .size = sizeof(XSaveAVX) },
479 [XSTATE_BNDREGS_BIT] =
480 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
481 .offset = offsetof(X86XSaveArea, bndreg_state),
482 .size = sizeof(XSaveBNDREG) },
483 [XSTATE_BNDCSR_BIT] =
484 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
485 .offset = offsetof(X86XSaveArea, bndcsr_state),
486 .size = sizeof(XSaveBNDCSR) },
487 [XSTATE_OPMASK_BIT] =
488 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
489 .offset = offsetof(X86XSaveArea, opmask_state),
490 .size = sizeof(XSaveOpmask) },
491 [XSTATE_ZMM_Hi256_BIT] =
492 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
493 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
494 .size = sizeof(XSaveZMM_Hi256) },
495 [XSTATE_Hi16_ZMM_BIT] =
496 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
497 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
498 .size = sizeof(XSaveHi16_ZMM) },
499 [XSTATE_PKRU_BIT] =
500 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
501 .offset = offsetof(X86XSaveArea, pkru_state),
502 .size = sizeof(XSavePKRU) },
503 };
504
505 const char *get_register_name_32(unsigned int reg)
506 {
507 if (reg >= CPU_NB_REGS32) {
508 return NULL;
509 }
510 return x86_reg_info_32[reg].name;
511 }
512
513 /*
514 * Returns the set of feature flags that are supported and migratable by
515 * QEMU, for a given FeatureWord.
516 */
517 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
518 {
519 FeatureWordInfo *wi = &feature_word_info[w];
520 uint32_t r = 0;
521 int i;
522
523 for (i = 0; i < 32; i++) {
524 uint32_t f = 1U << i;
525 /* If the feature name is unknown, it is not supported by QEMU yet */
526 if (!wi->feat_names[i]) {
527 continue;
528 }
529 /* Skip features known to QEMU, but explicitly marked as unmigratable */
530 if (wi->unmigratable_flags & f) {
531 continue;
532 }
533 r |= f;
534 }
535 return r;
536 }
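/* For example, given the tables above, FEAT_8000_0007_EDX only names the
 * "invtsc" bit and also lists it in unmigratable_flags, so this function
 * returns 0 for that word; words with no unmigratable_flags simply return
 * the mask of all named bits.
 */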
537
538 void host_cpuid(uint32_t function, uint32_t count,
539 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
540 {
541 uint32_t vec[4];
542
543 #ifdef __x86_64__
544 asm volatile("cpuid"
545 : "=a"(vec[0]), "=b"(vec[1]),
546 "=c"(vec[2]), "=d"(vec[3])
547 : "0"(function), "c"(count) : "cc");
548 #elif defined(__i386__)
549 asm volatile("pusha \n\t"
550 "cpuid \n\t"
551 "mov %%eax, 0(%2) \n\t"
552 "mov %%ebx, 4(%2) \n\t"
553 "mov %%ecx, 8(%2) \n\t"
554 "mov %%edx, 12(%2) \n\t"
555 "popa"
556 : : "a"(function), "c"(count), "S"(vec)
557 : "memory", "cc");
558 #else
559 abort();
560 #endif
561
562 if (eax)
563 *eax = vec[0];
564 if (ebx)
565 *ebx = vec[1];
566 if (ecx)
567 *ecx = vec[2];
568 if (edx)
569 *edx = vec[3];
570 }
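/* Typical usage, mirroring host_x86_cpu_class_init() below (an illustrative
 * sketch):
 *
 *    uint32_t eax, ebx, ecx, edx;
 *    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);  // vendor string in EBX/EDX/ECX
 *    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);  // version signature in EAX
 */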
571
572 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
573
574 /* general substring compare of *[s1..e1) and *[s2..e2). sx is the start of
575 * a substring; ex, if not NULL, points to the first char after the substring,
576 * otherwise the string is assumed to be sized by a terminating nul.
577 * Return lexical ordering of *s1:*s2.
578 */
579 static int sstrcmp(const char *s1, const char *e1,
580 const char *s2, const char *e2)
581 {
582 for (;;) {
583 if (!*s1 || !*s2 || *s1 != *s2)
584 return (*s1 - *s2);
585 ++s1, ++s2;
586 if (s1 == e1 && s2 == e2)
587 return (0);
588 else if (s1 == e1)
589 return (*s2);
590 else if (s2 == e2)
591 return (*s1);
592 }
593 }
594
595 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
596 * '|' delimited (possibly empty) strings in which case search for a match
597 * within the alternatives proceeds left to right. Return 0 for success,
598 * non-zero otherwise.
599 */
600 static int altcmp(const char *s, const char *e, const char *altstr)
601 {
602 const char *p, *q;
603
604 for (q = p = altstr; ; ) {
605 while (*p && *p != '|')
606 ++p;
607 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
608 return (0);
609 if (!*p)
610 return (1);
611 else
612 q = ++p;
613 }
614 }
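/* Example: with the alias spelling "sse4.1|sse4_1" used in
 * ext_feature_name[] above, both altcmp("sse4.1", NULL, "sse4.1|sse4_1")
 * and altcmp("sse4_1", NULL, "sse4.1|sse4_1") return 0 (match), while an
 * unknown flag name returns non-zero.
 */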
615
616 /* search featureset for flag *[s..e), if found set corresponding bit in
617 * *pval and return true, otherwise return false
618 */
619 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
620 const char **featureset)
621 {
622 uint32_t mask;
623 const char **ppc;
624 bool found = false;
625
626 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
627 if (*ppc && !altcmp(s, e, *ppc)) {
628 *pval |= mask;
629 found = true;
630 }
631 }
632 return found;
633 }
634
635 static void add_flagname_to_bitmaps(const char *flagname,
636 FeatureWordArray words,
637 Error **errp)
638 {
639 FeatureWord w;
640 for (w = 0; w < FEATURE_WORDS; w++) {
641 FeatureWordInfo *wi = &feature_word_info[w];
642 if (wi->feat_names &&
643 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
644 break;
645 }
646 }
647 if (w == FEATURE_WORDS) {
648 error_setg(errp, "CPU feature %s not found", flagname);
649 }
650 }
651
652 /* CPU class name definitions: */
653
654 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
655 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
656
657 /* Return type name for a given CPU model name
658 * Caller is responsible for freeing the returned string.
659 */
660 static char *x86_cpu_type_name(const char *model_name)
661 {
662 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
663 }
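/* Example: x86_cpu_type_name("qemu64") returns a heap-allocated string of
 * the form "qemu64-" TYPE_X86_CPU, i.e. something like "qemu64-x86_64-cpu"
 * on 64-bit targets; the caller must g_free() it.
 */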
664
665 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
666 {
667 ObjectClass *oc;
668 char *typename;
669
670 if (cpu_model == NULL) {
671 return NULL;
672 }
673
674 typename = x86_cpu_type_name(cpu_model);
675 oc = object_class_by_name(typename);
676 g_free(typename);
677 return oc;
678 }
679
680 struct X86CPUDefinition {
681 const char *name;
682 uint32_t level;
683 uint32_t xlevel;
684 uint32_t xlevel2;
686 /* vendor is a zero-terminated, 12-character ASCII string */
686 char vendor[CPUID_VENDOR_SZ + 1];
687 int family;
688 int model;
689 int stepping;
690 FeatureWordArray features;
691 char model_id[48];
692 };
693
694 static X86CPUDefinition builtin_x86_defs[] = {
695 {
696 .name = "qemu64",
697 .level = 0xd,
698 .vendor = CPUID_VENDOR_AMD,
699 .family = 6,
700 .model = 6,
701 .stepping = 3,
702 .features[FEAT_1_EDX] =
703 PPRO_FEATURES |
704 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
705 CPUID_PSE36,
706 .features[FEAT_1_ECX] =
707 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
708 .features[FEAT_8000_0001_EDX] =
709 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
710 .features[FEAT_8000_0001_ECX] =
711 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
712 .xlevel = 0x8000000A,
713 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
714 },
715 {
716 .name = "phenom",
717 .level = 5,
718 .vendor = CPUID_VENDOR_AMD,
719 .family = 16,
720 .model = 2,
721 .stepping = 3,
722 /* Missing: CPUID_HT */
723 .features[FEAT_1_EDX] =
724 PPRO_FEATURES |
725 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
726 CPUID_PSE36 | CPUID_VME,
727 .features[FEAT_1_ECX] =
728 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
729 CPUID_EXT_POPCNT,
730 .features[FEAT_8000_0001_EDX] =
731 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
732 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
733 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
734 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
735 CPUID_EXT3_CR8LEG,
736 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
737 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
738 .features[FEAT_8000_0001_ECX] =
739 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
740 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
741 /* Missing: CPUID_SVM_LBRV */
742 .features[FEAT_SVM] =
743 CPUID_SVM_NPT,
744 .xlevel = 0x8000001A,
745 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
746 },
747 {
748 .name = "core2duo",
749 .level = 10,
750 .vendor = CPUID_VENDOR_INTEL,
751 .family = 6,
752 .model = 15,
753 .stepping = 11,
754 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
755 .features[FEAT_1_EDX] =
756 PPRO_FEATURES |
757 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
758 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
759 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
760 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
761 .features[FEAT_1_ECX] =
762 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
763 CPUID_EXT_CX16,
764 .features[FEAT_8000_0001_EDX] =
765 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
766 .features[FEAT_8000_0001_ECX] =
767 CPUID_EXT3_LAHF_LM,
768 .xlevel = 0x80000008,
769 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
770 },
771 {
772 .name = "kvm64",
773 .level = 0xd,
774 .vendor = CPUID_VENDOR_INTEL,
775 .family = 15,
776 .model = 6,
777 .stepping = 1,
778 /* Missing: CPUID_HT */
779 .features[FEAT_1_EDX] =
780 PPRO_FEATURES | CPUID_VME |
781 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
782 CPUID_PSE36,
783 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
784 .features[FEAT_1_ECX] =
785 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
786 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
787 .features[FEAT_8000_0001_EDX] =
788 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
789 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
790 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
791 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
792 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
793 .features[FEAT_8000_0001_ECX] =
794 0,
795 .xlevel = 0x80000008,
796 .model_id = "Common KVM processor"
797 },
798 {
799 .name = "qemu32",
800 .level = 4,
801 .vendor = CPUID_VENDOR_INTEL,
802 .family = 6,
803 .model = 6,
804 .stepping = 3,
805 .features[FEAT_1_EDX] =
806 PPRO_FEATURES,
807 .features[FEAT_1_ECX] =
808 CPUID_EXT_SSE3,
809 .xlevel = 0x80000004,
810 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
811 },
812 {
813 .name = "kvm32",
814 .level = 5,
815 .vendor = CPUID_VENDOR_INTEL,
816 .family = 15,
817 .model = 6,
818 .stepping = 1,
819 .features[FEAT_1_EDX] =
820 PPRO_FEATURES | CPUID_VME |
821 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
822 .features[FEAT_1_ECX] =
823 CPUID_EXT_SSE3,
824 .features[FEAT_8000_0001_ECX] =
825 0,
826 .xlevel = 0x80000008,
827 .model_id = "Common 32-bit KVM processor"
828 },
829 {
830 .name = "coreduo",
831 .level = 10,
832 .vendor = CPUID_VENDOR_INTEL,
833 .family = 6,
834 .model = 14,
835 .stepping = 8,
836 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
837 .features[FEAT_1_EDX] =
838 PPRO_FEATURES | CPUID_VME |
839 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
840 CPUID_SS,
841 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
842 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
843 .features[FEAT_1_ECX] =
844 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
845 .features[FEAT_8000_0001_EDX] =
846 CPUID_EXT2_NX,
847 .xlevel = 0x80000008,
848 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
849 },
850 {
851 .name = "486",
852 .level = 1,
853 .vendor = CPUID_VENDOR_INTEL,
854 .family = 4,
855 .model = 8,
856 .stepping = 0,
857 .features[FEAT_1_EDX] =
858 I486_FEATURES,
859 .xlevel = 0,
860 },
861 {
862 .name = "pentium",
863 .level = 1,
864 .vendor = CPUID_VENDOR_INTEL,
865 .family = 5,
866 .model = 4,
867 .stepping = 3,
868 .features[FEAT_1_EDX] =
869 PENTIUM_FEATURES,
870 .xlevel = 0,
871 },
872 {
873 .name = "pentium2",
874 .level = 2,
875 .vendor = CPUID_VENDOR_INTEL,
876 .family = 6,
877 .model = 5,
878 .stepping = 2,
879 .features[FEAT_1_EDX] =
880 PENTIUM2_FEATURES,
881 .xlevel = 0,
882 },
883 {
884 .name = "pentium3",
885 .level = 3,
886 .vendor = CPUID_VENDOR_INTEL,
887 .family = 6,
888 .model = 7,
889 .stepping = 3,
890 .features[FEAT_1_EDX] =
891 PENTIUM3_FEATURES,
892 .xlevel = 0,
893 },
894 {
895 .name = "athlon",
896 .level = 2,
897 .vendor = CPUID_VENDOR_AMD,
898 .family = 6,
899 .model = 2,
900 .stepping = 3,
901 .features[FEAT_1_EDX] =
902 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
903 CPUID_MCA,
904 .features[FEAT_8000_0001_EDX] =
905 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
906 .xlevel = 0x80000008,
907 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
908 },
909 {
910 .name = "n270",
911 .level = 10,
912 .vendor = CPUID_VENDOR_INTEL,
913 .family = 6,
914 .model = 28,
915 .stepping = 2,
916 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
917 .features[FEAT_1_EDX] =
918 PPRO_FEATURES |
919 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
920 CPUID_ACPI | CPUID_SS,
921 /* Some CPUs lack CPUID_SEP */
922 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
923 * CPUID_EXT_XTPR */
924 .features[FEAT_1_ECX] =
925 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
926 CPUID_EXT_MOVBE,
927 .features[FEAT_8000_0001_EDX] =
928 CPUID_EXT2_NX,
929 .features[FEAT_8000_0001_ECX] =
930 CPUID_EXT3_LAHF_LM,
931 .xlevel = 0x80000008,
932 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
933 },
934 {
935 .name = "Conroe",
936 .level = 10,
937 .vendor = CPUID_VENDOR_INTEL,
938 .family = 6,
939 .model = 15,
940 .stepping = 3,
941 .features[FEAT_1_EDX] =
942 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
943 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
944 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
945 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
946 CPUID_DE | CPUID_FP87,
947 .features[FEAT_1_ECX] =
948 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
949 .features[FEAT_8000_0001_EDX] =
950 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
951 .features[FEAT_8000_0001_ECX] =
952 CPUID_EXT3_LAHF_LM,
953 .xlevel = 0x80000008,
954 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
955 },
956 {
957 .name = "Penryn",
958 .level = 10,
959 .vendor = CPUID_VENDOR_INTEL,
960 .family = 6,
961 .model = 23,
962 .stepping = 3,
963 .features[FEAT_1_EDX] =
964 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
965 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
966 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
967 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
968 CPUID_DE | CPUID_FP87,
969 .features[FEAT_1_ECX] =
970 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
971 CPUID_EXT_SSE3,
972 .features[FEAT_8000_0001_EDX] =
973 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
974 .features[FEAT_8000_0001_ECX] =
975 CPUID_EXT3_LAHF_LM,
976 .xlevel = 0x80000008,
977 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
978 },
979 {
980 .name = "Nehalem",
981 .level = 11,
982 .vendor = CPUID_VENDOR_INTEL,
983 .family = 6,
984 .model = 26,
985 .stepping = 3,
986 .features[FEAT_1_EDX] =
987 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
988 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
989 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
990 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
991 CPUID_DE | CPUID_FP87,
992 .features[FEAT_1_ECX] =
993 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
994 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
995 .features[FEAT_8000_0001_EDX] =
996 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
997 .features[FEAT_8000_0001_ECX] =
998 CPUID_EXT3_LAHF_LM,
999 .xlevel = 0x80000008,
1000 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1001 },
1002 {
1003 .name = "Westmere",
1004 .level = 11,
1005 .vendor = CPUID_VENDOR_INTEL,
1006 .family = 6,
1007 .model = 44,
1008 .stepping = 1,
1009 .features[FEAT_1_EDX] =
1010 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1011 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1012 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1013 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1014 CPUID_DE | CPUID_FP87,
1015 .features[FEAT_1_ECX] =
1016 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1017 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1018 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1019 .features[FEAT_8000_0001_EDX] =
1020 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1021 .features[FEAT_8000_0001_ECX] =
1022 CPUID_EXT3_LAHF_LM,
1023 .features[FEAT_6_EAX] =
1024 CPUID_6_EAX_ARAT,
1025 .xlevel = 0x80000008,
1026 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1027 },
1028 {
1029 .name = "SandyBridge",
1030 .level = 0xd,
1031 .vendor = CPUID_VENDOR_INTEL,
1032 .family = 6,
1033 .model = 42,
1034 .stepping = 1,
1035 .features[FEAT_1_EDX] =
1036 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1037 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1038 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1039 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1040 CPUID_DE | CPUID_FP87,
1041 .features[FEAT_1_ECX] =
1042 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1043 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1044 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1045 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1046 CPUID_EXT_SSE3,
1047 .features[FEAT_8000_0001_EDX] =
1048 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1049 CPUID_EXT2_SYSCALL,
1050 .features[FEAT_8000_0001_ECX] =
1051 CPUID_EXT3_LAHF_LM,
1052 .features[FEAT_XSAVE] =
1053 CPUID_XSAVE_XSAVEOPT,
1054 .features[FEAT_6_EAX] =
1055 CPUID_6_EAX_ARAT,
1056 .xlevel = 0x80000008,
1057 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1058 },
1059 {
1060 .name = "IvyBridge",
1061 .level = 0xd,
1062 .vendor = CPUID_VENDOR_INTEL,
1063 .family = 6,
1064 .model = 58,
1065 .stepping = 9,
1066 .features[FEAT_1_EDX] =
1067 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1068 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1069 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1070 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1071 CPUID_DE | CPUID_FP87,
1072 .features[FEAT_1_ECX] =
1073 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1074 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1075 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1076 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1077 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1078 .features[FEAT_7_0_EBX] =
1079 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1080 CPUID_7_0_EBX_ERMS,
1081 .features[FEAT_8000_0001_EDX] =
1082 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1083 CPUID_EXT2_SYSCALL,
1084 .features[FEAT_8000_0001_ECX] =
1085 CPUID_EXT3_LAHF_LM,
1086 .features[FEAT_XSAVE] =
1087 CPUID_XSAVE_XSAVEOPT,
1088 .features[FEAT_6_EAX] =
1089 CPUID_6_EAX_ARAT,
1090 .xlevel = 0x80000008,
1091 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1092 },
1093 {
1094 .name = "Haswell-noTSX",
1095 .level = 0xd,
1096 .vendor = CPUID_VENDOR_INTEL,
1097 .family = 6,
1098 .model = 60,
1099 .stepping = 1,
1100 .features[FEAT_1_EDX] =
1101 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1102 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1103 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1104 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1105 CPUID_DE | CPUID_FP87,
1106 .features[FEAT_1_ECX] =
1107 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1108 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1109 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1110 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1111 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1112 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1113 .features[FEAT_8000_0001_EDX] =
1114 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1115 CPUID_EXT2_SYSCALL,
1116 .features[FEAT_8000_0001_ECX] =
1117 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1118 .features[FEAT_7_0_EBX] =
1119 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1120 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1121 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1122 .features[FEAT_XSAVE] =
1123 CPUID_XSAVE_XSAVEOPT,
1124 .features[FEAT_6_EAX] =
1125 CPUID_6_EAX_ARAT,
1126 .xlevel = 0x80000008,
1127 .model_id = "Intel Core Processor (Haswell, no TSX)",
1128 }, {
1129 .name = "Haswell",
1130 .level = 0xd,
1131 .vendor = CPUID_VENDOR_INTEL,
1132 .family = 6,
1133 .model = 60,
1134 .stepping = 1,
1135 .features[FEAT_1_EDX] =
1136 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1137 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1138 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1139 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1140 CPUID_DE | CPUID_FP87,
1141 .features[FEAT_1_ECX] =
1142 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1143 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1144 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1145 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1146 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1147 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1148 .features[FEAT_8000_0001_EDX] =
1149 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1150 CPUID_EXT2_SYSCALL,
1151 .features[FEAT_8000_0001_ECX] =
1152 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1153 .features[FEAT_7_0_EBX] =
1154 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1155 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1156 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1157 CPUID_7_0_EBX_RTM,
1158 .features[FEAT_XSAVE] =
1159 CPUID_XSAVE_XSAVEOPT,
1160 .features[FEAT_6_EAX] =
1161 CPUID_6_EAX_ARAT,
1162 .xlevel = 0x80000008,
1163 .model_id = "Intel Core Processor (Haswell)",
1164 },
1165 {
1166 .name = "Broadwell-noTSX",
1167 .level = 0xd,
1168 .vendor = CPUID_VENDOR_INTEL,
1169 .family = 6,
1170 .model = 61,
1171 .stepping = 2,
1172 .features[FEAT_1_EDX] =
1173 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1174 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1175 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1176 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1177 CPUID_DE | CPUID_FP87,
1178 .features[FEAT_1_ECX] =
1179 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1180 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1181 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1182 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1183 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1184 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1185 .features[FEAT_8000_0001_EDX] =
1186 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1187 CPUID_EXT2_SYSCALL,
1188 .features[FEAT_8000_0001_ECX] =
1189 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1190 .features[FEAT_7_0_EBX] =
1191 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1192 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1193 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1194 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1195 CPUID_7_0_EBX_SMAP,
1196 .features[FEAT_XSAVE] =
1197 CPUID_XSAVE_XSAVEOPT,
1198 .features[FEAT_6_EAX] =
1199 CPUID_6_EAX_ARAT,
1200 .xlevel = 0x80000008,
1201 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1202 },
1203 {
1204 .name = "Broadwell",
1205 .level = 0xd,
1206 .vendor = CPUID_VENDOR_INTEL,
1207 .family = 6,
1208 .model = 61,
1209 .stepping = 2,
1210 .features[FEAT_1_EDX] =
1211 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1212 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1213 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1214 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1215 CPUID_DE | CPUID_FP87,
1216 .features[FEAT_1_ECX] =
1217 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1218 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1219 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1220 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1221 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1222 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1223 .features[FEAT_8000_0001_EDX] =
1224 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1225 CPUID_EXT2_SYSCALL,
1226 .features[FEAT_8000_0001_ECX] =
1227 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1228 .features[FEAT_7_0_EBX] =
1229 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1230 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1231 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1232 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1233 CPUID_7_0_EBX_SMAP,
1234 .features[FEAT_XSAVE] =
1235 CPUID_XSAVE_XSAVEOPT,
1236 .features[FEAT_6_EAX] =
1237 CPUID_6_EAX_ARAT,
1238 .xlevel = 0x80000008,
1239 .model_id = "Intel Core Processor (Broadwell)",
1240 },
1241 {
1242 .name = "Opteron_G1",
1243 .level = 5,
1244 .vendor = CPUID_VENDOR_AMD,
1245 .family = 15,
1246 .model = 6,
1247 .stepping = 1,
1248 .features[FEAT_1_EDX] =
1249 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1250 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1251 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1252 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1253 CPUID_DE | CPUID_FP87,
1254 .features[FEAT_1_ECX] =
1255 CPUID_EXT_SSE3,
1256 .features[FEAT_8000_0001_EDX] =
1257 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1258 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1259 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1260 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1261 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1262 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1263 .xlevel = 0x80000008,
1264 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1265 },
1266 {
1267 .name = "Opteron_G2",
1268 .level = 5,
1269 .vendor = CPUID_VENDOR_AMD,
1270 .family = 15,
1271 .model = 6,
1272 .stepping = 1,
1273 .features[FEAT_1_EDX] =
1274 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1275 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1276 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1277 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1278 CPUID_DE | CPUID_FP87,
1279 .features[FEAT_1_ECX] =
1280 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1281 /* Missing: CPUID_EXT2_RDTSCP */
1282 .features[FEAT_8000_0001_EDX] =
1283 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1284 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1285 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1286 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1287 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1288 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1289 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1290 .features[FEAT_8000_0001_ECX] =
1291 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1292 .xlevel = 0x80000008,
1293 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1294 },
1295 {
1296 .name = "Opteron_G3",
1297 .level = 5,
1298 .vendor = CPUID_VENDOR_AMD,
1299 .family = 15,
1300 .model = 6,
1301 .stepping = 1,
1302 .features[FEAT_1_EDX] =
1303 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1304 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1305 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1306 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1307 CPUID_DE | CPUID_FP87,
1308 .features[FEAT_1_ECX] =
1309 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1310 CPUID_EXT_SSE3,
1311 /* Missing: CPUID_EXT2_RDTSCP */
1312 .features[FEAT_8000_0001_EDX] =
1313 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1314 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1315 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1316 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1317 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1318 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1319 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1320 .features[FEAT_8000_0001_ECX] =
1321 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1322 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1323 .xlevel = 0x80000008,
1324 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1325 },
1326 {
1327 .name = "Opteron_G4",
1328 .level = 0xd,
1329 .vendor = CPUID_VENDOR_AMD,
1330 .family = 21,
1331 .model = 1,
1332 .stepping = 2,
1333 .features[FEAT_1_EDX] =
1334 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1335 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1336 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1337 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1338 CPUID_DE | CPUID_FP87,
1339 .features[FEAT_1_ECX] =
1340 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1341 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1342 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1343 CPUID_EXT_SSE3,
1344 /* Missing: CPUID_EXT2_RDTSCP */
1345 .features[FEAT_8000_0001_EDX] =
1346 CPUID_EXT2_LM |
1347 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1348 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1349 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1350 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1351 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1352 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1353 .features[FEAT_8000_0001_ECX] =
1354 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1355 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1356 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1357 CPUID_EXT3_LAHF_LM,
1358 /* no xsaveopt! */
1359 .xlevel = 0x8000001A,
1360 .model_id = "AMD Opteron 62xx class CPU",
1361 },
1362 {
1363 .name = "Opteron_G5",
1364 .level = 0xd,
1365 .vendor = CPUID_VENDOR_AMD,
1366 .family = 21,
1367 .model = 2,
1368 .stepping = 0,
1369 .features[FEAT_1_EDX] =
1370 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1371 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1372 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1373 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1374 CPUID_DE | CPUID_FP87,
1375 .features[FEAT_1_ECX] =
1376 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1377 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1378 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1379 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1380 /* Missing: CPUID_EXT2_RDTSCP */
1381 .features[FEAT_8000_0001_EDX] =
1382 CPUID_EXT2_LM |
1383 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1384 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1385 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1386 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1387 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1388 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1389 .features[FEAT_8000_0001_ECX] =
1390 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1391 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1392 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1393 CPUID_EXT3_LAHF_LM,
1394 /* no xsaveopt! */
1395 .xlevel = 0x8000001A,
1396 .model_id = "AMD Opteron 63xx class CPU",
1397 },
1398 };
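/* Each entry above becomes a selectable CPU model (e.g. "-cpu SandyBridge"
 * or "-cpu Opteron_G4" on the command line); the classes are registered
 * from these definitions later in this file.
 */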
1399
1400 typedef struct PropValue {
1401 const char *prop, *value;
1402 } PropValue;
1403
1404 /* KVM-specific features that are automatically added/removed
1405 * from all CPU models when KVM is enabled.
1406 */
1407 static PropValue kvm_default_props[] = {
1408 { "kvmclock", "on" },
1409 { "kvm-nopiodelay", "on" },
1410 { "kvm-asyncpf", "on" },
1411 { "kvm-steal-time", "on" },
1412 { "kvm-pv-eoi", "on" },
1413 { "kvmclock-stable-bit", "on" },
1414 { "x2apic", "on" },
1415 { "acpi", "off" },
1416 { "monitor", "off" },
1417 { "svm", "off" },
1418 { NULL, NULL },
1419 };
1420
1421 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1422 {
1423 PropValue *pv;
1424 for (pv = kvm_default_props; pv->prop; pv++) {
1425 if (!strcmp(pv->prop, prop)) {
1426 pv->value = value;
1427 break;
1428 }
1429 }
1430
1431 /* It is valid to call this function only for properties that
1432 * are already present in the kvm_default_props table.
1433 */
1434 assert(pv->prop);
1435 }
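/* Illustrative use: machine-type compat code can flip one of the defaults
 * above, e.g. x86_cpu_change_kvm_default("x2apic", "off"); calling it with
 * a property that is not in kvm_default_props trips the assert above.
 */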
1436
1437 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1438 bool migratable_only);
1439
1440 #ifdef CONFIG_KVM
1441
1442 static int cpu_x86_fill_model_id(char *str)
1443 {
1444 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1445 int i;
1446
1447 for (i = 0; i < 3; i++) {
1448 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1449 memcpy(str + i * 16 + 0, &eax, 4);
1450 memcpy(str + i * 16 + 4, &ebx, 4);
1451 memcpy(str + i * 16 + 8, &ecx, 4);
1452 memcpy(str + i * 16 + 12, &edx, 4);
1453 }
1454 return 0;
1455 }
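/* Note: the three CPUID leaves 0x80000002..0x80000004 each return 16 bytes,
 * so str must point to a buffer of at least 48 bytes (the model_id[48]
 * field of X86CPUDefinition is used for this below).
 */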
1456
1457 static X86CPUDefinition host_cpudef;
1458
1459 static Property host_x86_cpu_properties[] = {
1460 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1461 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1462 DEFINE_PROP_END_OF_LIST()
1463 };
1464
1465 /* class_init for the "host" CPU model
1466 *
1467 * This function may be called before KVM is initialized.
1468 */
1469 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1470 {
1471 DeviceClass *dc = DEVICE_CLASS(oc);
1472 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1473 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1474
1475 xcc->kvm_required = true;
1476
1477 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1478 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1479
1480 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1481 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1482 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1483 host_cpudef.stepping = eax & 0x0F;
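/* Worked example with an illustrative signature: if CPUID.1 returned
 * EAX = 0x000306C3, the code above yields family = 6 + 0 = 6,
 * model = 0xC | 0x30 = 60 and stepping = 3.
 */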
1484
1485 cpu_x86_fill_model_id(host_cpudef.model_id);
1486
1487 xcc->cpu_def = &host_cpudef;
1488
1489 /* level, xlevel, xlevel2, and the feature words are initialized on
1490 * instance_init, because they require KVM to be initialized.
1491 */
1492
1493 dc->props = host_x86_cpu_properties;
1494 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1495 dc->cannot_destroy_with_object_finalize_yet = true;
1496 }
1497
1498 static void host_x86_cpu_initfn(Object *obj)
1499 {
1500 X86CPU *cpu = X86_CPU(obj);
1501 CPUX86State *env = &cpu->env;
1502 KVMState *s = kvm_state;
1503
1504 assert(kvm_enabled());
1505
1506 /* We can't fill the features array here because we don't know yet if
1507 * "migratable" is true or false.
1508 */
1509 cpu->host_features = true;
1510
1511 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1512 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1513 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1514
1515 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1516 }
1517
1518 static const TypeInfo host_x86_cpu_type_info = {
1519 .name = X86_CPU_TYPE_NAME("host"),
1520 .parent = TYPE_X86_CPU,
1521 .instance_init = host_x86_cpu_initfn,
1522 .class_init = host_x86_cpu_class_init,
1523 };
1524
1525 #endif
1526
1527 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1528 {
1529 FeatureWordInfo *f = &feature_word_info[w];
1530 int i;
1531
1532 for (i = 0; i < 32; ++i) {
1533 if ((1UL << i) & mask) {
1534 const char *reg = get_register_name_32(f->cpuid_reg);
1535 assert(reg);
1536 fprintf(stderr, "warning: %s doesn't support requested feature: "
1537 "CPUID.%02XH:%s%s%s [bit %d]\n",
1538 kvm_enabled() ? "host" : "TCG",
1539 f->cpuid_eax, reg,
1540 f->feat_names[i] ? "." : "",
1541 f->feat_names[i] ? f->feat_names[i] : "", i);
1542 }
1543 }
1544 }
1545
1546 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1547 const char *name, void *opaque,
1548 Error **errp)
1549 {
1550 X86CPU *cpu = X86_CPU(obj);
1551 CPUX86State *env = &cpu->env;
1552 int64_t value;
1553
1554 value = (env->cpuid_version >> 8) & 0xf;
1555 if (value == 0xf) {
1556 value += (env->cpuid_version >> 20) & 0xff;
1557 }
1558 visit_type_int(v, name, &value, errp);
1559 }
1560
1561 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1562 const char *name, void *opaque,
1563 Error **errp)
1564 {
1565 X86CPU *cpu = X86_CPU(obj);
1566 CPUX86State *env = &cpu->env;
1567 const int64_t min = 0;
1568 const int64_t max = 0xff + 0xf;
1569 Error *local_err = NULL;
1570 int64_t value;
1571
1572 visit_type_int(v, name, &value, &local_err);
1573 if (local_err) {
1574 error_propagate(errp, local_err);
1575 return;
1576 }
1577 if (value < min || value > max) {
1578 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1579 name ? name : "null", value, min, max);
1580 return;
1581 }
1582
1583 env->cpuid_version &= ~0xff00f00;
1584 if (value > 0x0f) {
1585 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1586 } else {
1587 env->cpuid_version |= value << 8;
1588 }
1589 }
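/* Example: setting "family" to 21 (0x15, as the Opteron_G4/G5 definitions
 * above do) stores base family 0xF in bits 8..11 plus extended family
 * 21 - 15 = 6 in bits 20..27; the getter above reassembles the sum, 21.
 */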
1590
1591 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1592 const char *name, void *opaque,
1593 Error **errp)
1594 {
1595 X86CPU *cpu = X86_CPU(obj);
1596 CPUX86State *env = &cpu->env;
1597 int64_t value;
1598
1599 value = (env->cpuid_version >> 4) & 0xf;
1600 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1601 visit_type_int(v, name, &value, errp);
1602 }
1603
1604 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1605 const char *name, void *opaque,
1606 Error **errp)
1607 {
1608 X86CPU *cpu = X86_CPU(obj);
1609 CPUX86State *env = &cpu->env;
1610 const int64_t min = 0;
1611 const int64_t max = 0xff;
1612 Error *local_err = NULL;
1613 int64_t value;
1614
1615 visit_type_int(v, name, &value, &local_err);
1616 if (local_err) {
1617 error_propagate(errp, local_err);
1618 return;
1619 }
1620 if (value < min || value > max) {
1621 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1622 name ? name : "null", value, min, max);
1623 return;
1624 }
1625
1626 env->cpuid_version &= ~0xf00f0;
1627 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1628 }
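/* Example: setting "model" to 60 (0x3C, as the Haswell definitions above do)
 * stores the low nibble 0xC in bits 4..7 and the high nibble 0x3 in bits
 * 16..19; the getter above reassembles 0x3C.
 */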
1629
1630 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1631 const char *name, void *opaque,
1632 Error **errp)
1633 {
1634 X86CPU *cpu = X86_CPU(obj);
1635 CPUX86State *env = &cpu->env;
1636 int64_t value;
1637
1638 value = env->cpuid_version & 0xf;
1639 visit_type_int(v, name, &value, errp);
1640 }
1641
1642 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1643 const char *name, void *opaque,
1644 Error **errp)
1645 {
1646 X86CPU *cpu = X86_CPU(obj);
1647 CPUX86State *env = &cpu->env;
1648 const int64_t min = 0;
1649 const int64_t max = 0xf;
1650 Error *local_err = NULL;
1651 int64_t value;
1652
1653 visit_type_int(v, name, &value, &local_err);
1654 if (local_err) {
1655 error_propagate(errp, local_err);
1656 return;
1657 }
1658 if (value < min || value > max) {
1659 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1660 name ? name : "null", value, min, max);
1661 return;
1662 }
1663
1664 env->cpuid_version &= ~0xf;
1665 env->cpuid_version |= value & 0xf;
1666 }
1667
1668 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1669 {
1670 X86CPU *cpu = X86_CPU(obj);
1671 CPUX86State *env = &cpu->env;
1672 char *value;
1673
1674 value = g_malloc(CPUID_VENDOR_SZ + 1);
1675 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1676 env->cpuid_vendor3);
1677 return value;
1678 }
1679
1680 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1681 Error **errp)
1682 {
1683 X86CPU *cpu = X86_CPU(obj);
1684 CPUX86State *env = &cpu->env;
1685 int i;
1686
1687 if (strlen(value) != CPUID_VENDOR_SZ) {
1688 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1689 return;
1690 }
1691
1692 env->cpuid_vendor1 = 0;
1693 env->cpuid_vendor2 = 0;
1694 env->cpuid_vendor3 = 0;
1695 for (i = 0; i < 4; i++) {
1696 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1697 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1698 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1699 }
1700 }
1701
1702 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1703 {
1704 X86CPU *cpu = X86_CPU(obj);
1705 CPUX86State *env = &cpu->env;
1706 char *value;
1707 int i;
1708
1709 value = g_malloc(48 + 1);
1710 for (i = 0; i < 48; i++) {
1711 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1712 }
1713 value[48] = '\0';
1714 return value;
1715 }
1716
1717 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1718 Error **errp)
1719 {
1720 X86CPU *cpu = X86_CPU(obj);
1721 CPUX86State *env = &cpu->env;
1722 int c, len, i;
1723
1724 if (model_id == NULL) {
1725 model_id = "";
1726 }
1727 len = strlen(model_id);
1728 memset(env->cpuid_model, 0, 48);
1729 for (i = 0; i < 48; i++) {
1730 if (i >= len) {
1731 c = '\0';
1732 } else {
1733 c = (uint8_t)model_id[i];
1734 }
1735 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1736 }
1737 }
1738
1739 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1740 void *opaque, Error **errp)
1741 {
1742 X86CPU *cpu = X86_CPU(obj);
1743 int64_t value;
1744
1745 value = cpu->env.tsc_khz * 1000;
1746 visit_type_int(v, name, &value, errp);
1747 }
1748
1749 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1750 void *opaque, Error **errp)
1751 {
1752 X86CPU *cpu = X86_CPU(obj);
1753 const int64_t min = 0;
1754 const int64_t max = INT64_MAX;
1755 Error *local_err = NULL;
1756 int64_t value;
1757
1758 visit_type_int(v, name, &value, &local_err);
1759 if (local_err) {
1760 error_propagate(errp, local_err);
1761 return;
1762 }
1763 if (value < min || value > max) {
1764 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1765 name ? name : "null", value, min, max);
1766 return;
1767 }
1768
1769 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1770 }
1771
1772 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1773 void *opaque, Error **errp)
1774 {
1775 X86CPU *cpu = X86_CPU(obj);
1776 int64_t value = cpu->apic_id;
1777
1778 visit_type_int(v, name, &value, errp);
1779 }
1780
1781 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1782 void *opaque, Error **errp)
1783 {
1784 X86CPU *cpu = X86_CPU(obj);
1785 DeviceState *dev = DEVICE(obj);
1786 const int64_t min = 0;
1787 const int64_t max = UINT32_MAX;
1788 Error *error = NULL;
1789 int64_t value;
1790
1791 if (dev->realized) {
1792 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1793 "it was realized", name, object_get_typename(obj));
1794 return;
1795 }
1796
1797 visit_type_int(v, name, &value, &error);
1798 if (error) {
1799 error_propagate(errp, error);
1800 return;
1801 }
1802 if (value < min || value > max) {
1803 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1804 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1805 object_get_typename(obj), name, value, min, max);
1806 return;
1807 }
1808
1809 if ((value != cpu->apic_id) && cpu_exists(value)) {
1810 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1811 return;
1812 }
1813 cpu->apic_id = value;
1814 }
1815
1816 /* Generic getter for "feature-words" and "filtered-features" properties */
1817 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1818 const char *name, void *opaque,
1819 Error **errp)
1820 {
1821 uint32_t *array = (uint32_t *)opaque;
1822 FeatureWord w;
1823 Error *err = NULL;
1824 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1825 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1826 X86CPUFeatureWordInfoList *list = NULL;
1827
1828 for (w = 0; w < FEATURE_WORDS; w++) {
1829 FeatureWordInfo *wi = &feature_word_info[w];
1830 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1831 qwi->cpuid_input_eax = wi->cpuid_eax;
1832 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1833 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1834 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1835 qwi->features = array[w];
1836
1837 /* List will be in reverse order, but order shouldn't matter */
1838 list_entries[w].next = list;
1839 list_entries[w].value = &word_infos[w];
1840 list = &list_entries[w];
1841 }
1842
1843 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, &err);
1844 error_propagate(errp, err);
1845 }
1846
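/* "hv-spinlocks": number of spinlock acquisition attempts a Hyper-V guest
 * should make before notifying the hypervisor; the setter rejects values
 * below 0xFFF, matching the fixup done in x86_cpu_parse_featurestr().
 */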
1847 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1848 void *opaque, Error **errp)
1849 {
1850 X86CPU *cpu = X86_CPU(obj);
1851 int64_t value = cpu->hyperv_spinlock_attempts;
1852
1853 visit_type_int(v, name, &value, errp);
1854 }
1855
1856 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1857 void *opaque, Error **errp)
1858 {
1859 const int64_t min = 0xFFF;
1860 const int64_t max = UINT_MAX;
1861 X86CPU *cpu = X86_CPU(obj);
1862 Error *err = NULL;
1863 int64_t value;
1864
1865 visit_type_int(v, name, &value, &err);
1866 if (err) {
1867 error_propagate(errp, err);
1868 return;
1869 }
1870
1871 if (value < min || value > max) {
1872 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1873 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1874 object_get_typename(obj), name ? name : "null",
1875 value, min, max);
1876 return;
1877 }
1878 cpu->hyperv_spinlock_attempts = value;
1879 }
1880
1881 static PropertyInfo qdev_prop_spinlocks = {
1882 .name = "int",
1883 .get = x86_get_hv_spinlocks,
1884 .set = x86_set_hv_spinlocks,
1885 };
1886
1887 /* Convert all '_' in a feature string option name to '-', to make the
1888 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
1889 */
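/* For example, a flag spelled "lahf_lm" on the command line maps to the
 * QOM property name "lahf-lm". */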
1890 static inline void feat2prop(char *s)
1891 {
1892 while ((s = strchr(s, '_'))) {
1893 *s = '-';
1894 }
1895 }
1896
1897 /* Parse "+feature,-feature,feature=foo" CPU feature string
1898 */
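/* For example (illustrative values only):
 *   -cpu qemu64,+avx,-nx,xlevel=0x8000000a,hv-spinlocks=0x1fff
 * sets the avx bit, clears nx, and routes the two key=value options through
 * the corresponding QOM properties below.
 */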
1899 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1900 Error **errp)
1901 {
1902 X86CPU *cpu = X86_CPU(cs);
1903 char *featurestr; /* Single "key=value" string being parsed */
1904 FeatureWord w;
1905 /* Features to be added */
1906 FeatureWordArray plus_features = { 0 };
1907 /* Features to be removed */
1908 FeatureWordArray minus_features = { 0 };
1909 uint32_t numvalue;
1910 CPUX86State *env = &cpu->env;
1911 Error *local_err = NULL;
1912
1913 featurestr = features ? strtok(features, ",") : NULL;
1914
1915 while (featurestr) {
1916 char *val;
1917 if (featurestr[0] == '+') {
1918 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1919 } else if (featurestr[0] == '-') {
1920 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1921 } else if ((val = strchr(featurestr, '='))) {
1922 *val = 0; val++;
1923 feat2prop(featurestr);
1924 if (!strcmp(featurestr, "xlevel")) {
1925 char *err;
1926 char num[32];
1927
1928 numvalue = strtoul(val, &err, 0);
1929 if (!*val || *err) {
1930 error_setg(errp, "bad numerical value %s", val);
1931 return;
1932 }
1933 if (numvalue < 0x80000000) {
1934 error_report("xlevel value shall always be >= 0x80000000"
1935 ", fixup will be removed in future versions");
1936 numvalue += 0x80000000;
1937 }
1938 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1939 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1940 } else if (!strcmp(featurestr, "tsc-freq")) {
1941 int64_t tsc_freq;
1942 char *err;
1943 char num[32];
1944
1945 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1946 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1947 if (tsc_freq < 0 || *err) {
1948 error_setg(errp, "bad numerical value %s", val);
1949 return;
1950 }
1951 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1952 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1953 &local_err);
1954 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1955 char *err;
1956 const int min = 0xFFF;
1957 char num[32];
1958 numvalue = strtoul(val, &err, 0);
1959 if (!*val || *err) {
1960 error_setg(errp, "bad numerical value %s", val);
1961 return;
1962 }
1963 if (numvalue < min) {
1964 error_report("hv-spinlocks value shall always be >= 0x%x"
1965 ", fixup will be removed in future versions",
1966 min);
1967 numvalue = min;
1968 }
1969 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1970 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1971 } else {
1972 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1973 }
1974 } else {
1975 feat2prop(featurestr);
1976 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1977 }
1978 if (local_err) {
1979 error_propagate(errp, local_err);
1980 return;
1981 }
1982 featurestr = strtok(NULL, ",");
1983 }
1984
1985 if (cpu->host_features) {
1986 for (w = 0; w < FEATURE_WORDS; w++) {
1987 env->features[w] =
1988 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1989 }
1990 }
1991
1992 for (w = 0; w < FEATURE_WORDS; w++) {
1993 env->features[w] |= plus_features[w];
1994 env->features[w] &= ~minus_features[w];
1995 }
1996 }
1997
1998 /* Print all cpuid feature names in featureset
1999 */
2000 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2001 {
2002 int bit;
2003 bool first = true;
2004
2005 for (bit = 0; bit < 32; bit++) {
2006 if (featureset[bit]) {
2007 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2008 first = false;
2009 }
2010 }
2011 }
2012
2013 /* generate CPU information. */
2014 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2015 {
2016 X86CPUDefinition *def;
2017 char buf[256];
2018 int i;
2019
2020 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2021 def = &builtin_x86_defs[i];
2022 snprintf(buf, sizeof(buf), "%s", def->name);
2023 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2024 }
2025 #ifdef CONFIG_KVM
2026 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2027 "KVM processor with all supported host features "
2028 "(only available in KVM mode)");
2029 #endif
2030
2031 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2032 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2033 FeatureWordInfo *fw = &feature_word_info[i];
2034
2035 (*cpu_fprintf)(f, " ");
2036 listflags(f, cpu_fprintf, fw->feat_names);
2037 (*cpu_fprintf)(f, "\n");
2038 }
2039 }
2040
2041 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2042 {
2043 CpuDefinitionInfoList *cpu_list = NULL;
2044 X86CPUDefinition *def;
2045 int i;
2046
2047 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2048 CpuDefinitionInfoList *entry;
2049 CpuDefinitionInfo *info;
2050
2051 def = &builtin_x86_defs[i];
2052 info = g_malloc0(sizeof(*info));
2053 info->name = g_strdup(def->name);
2054
2055 entry = g_malloc0(sizeof(*entry));
2056 entry->value = info;
2057 entry->next = cpu_list;
2058 cpu_list = entry;
2059 }
2060
2061 return cpu_list;
2062 }
2063
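/* Return the bits of feature word 'w' that the current accelerator (KVM or
 * TCG) can actually provide; with migratable_only, also drop bits that would
 * block live migration.
 */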
2064 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2065 bool migratable_only)
2066 {
2067 FeatureWordInfo *wi = &feature_word_info[w];
2068 uint32_t r;
2069
2070 if (kvm_enabled()) {
2071 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2072 wi->cpuid_ecx,
2073 wi->cpuid_reg);
2074 } else if (tcg_enabled()) {
2075 r = wi->tcg_features;
2076 } else {
2077 return ~0;
2078 }
2079 if (migratable_only) {
2080 r &= x86_cpu_get_migratable_flags(w);
2081 }
2082 return r;
2083 }
2084
2085 /*
2086 * Filters CPU feature words based on host availability of each feature.
2087 *
2088 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2089 */
2090 static int x86_cpu_filter_features(X86CPU *cpu)
2091 {
2092 CPUX86State *env = &cpu->env;
2093 FeatureWord w;
2094 int rv = 0;
2095
2096 for (w = 0; w < FEATURE_WORDS; w++) {
2097 uint32_t host_feat =
2098 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2099 uint32_t requested_features = env->features[w];
2100 env->features[w] &= host_feat;
2101 cpu->filtered_features[w] = requested_features & ~env->features[w];
2102 if (cpu->filtered_features[w]) {
2103 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2104 report_unavailable_features(w, cpu->filtered_features[w]);
2105 }
2106 rv = 1;
2107 }
2108 }
2109
2110 return rv;
2111 }
2112
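/* Apply a list of property name/value pairs (terminated by a NULL prop name)
 * to the CPU object; entries with a NULL value are skipped.
 */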
2113 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2114 {
2115 PropValue *pv;
2116 for (pv = props; pv->prop; pv++) {
2117 if (!pv->value) {
2118 continue;
2119 }
2120 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2121 &error_abort);
2122 }
2123 }
2124
2125 /* Load data from X86CPUDefinition
2126 */
2127 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2128 {
2129 CPUX86State *env = &cpu->env;
2130 const char *vendor;
2131 char host_vendor[CPUID_VENDOR_SZ + 1];
2132 FeatureWord w;
2133
2134 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2135 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2136 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2137 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2138 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2139 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2140 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2141 for (w = 0; w < FEATURE_WORDS; w++) {
2142 env->features[w] = def->features[w];
2143 }
2144
2145 /* Special cases not set in the X86CPUDefinition structs: */
2146 if (kvm_enabled()) {
2147 if (!kvm_irqchip_in_kernel()) {
2148 x86_cpu_change_kvm_default("x2apic", "off");
2149 }
2150
2151 x86_cpu_apply_props(cpu, kvm_default_props);
2152 }
2153
2154 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2155
2156 /* sysenter isn't supported in compatibility mode on AMD,
2157 * syscall isn't supported in compatibility mode on Intel.
2158 * Normally we advertise the actual CPU vendor, but you can
2159 * override this using the 'vendor' property if you want to use
2160 * KVM's sysenter/syscall emulation in compatibility mode and
2161 * when doing cross-vendor migration.
2162 */
2163 vendor = def->vendor;
2164 if (kvm_enabled()) {
2165 uint32_t ebx = 0, ecx = 0, edx = 0;
2166 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2167 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2168 vendor = host_vendor;
2169 }
2170
2171 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2172
2173 }
2174
2175 X86CPU *cpu_x86_create(const char *cpu_model, Error **errp)
2176 {
2177 X86CPU *cpu = NULL;
2178 X86CPUClass *xcc;
2179 ObjectClass *oc;
2180 gchar **model_pieces;
2181 char *name, *features;
2182 Error *error = NULL;
2183
2184 model_pieces = g_strsplit(cpu_model, ",", 2);
2185 if (!model_pieces[0]) {
2186 error_setg(&error, "Invalid/empty CPU model name");
2187 goto out;
2188 }
2189 name = model_pieces[0];
2190 features = model_pieces[1];
2191
2192 oc = x86_cpu_class_by_name(name);
2193 if (oc == NULL) {
2194 error_setg(&error, "Unable to find CPU definition: %s", name);
2195 goto out;
2196 }
2197 xcc = X86_CPU_CLASS(oc);
2198
2199 if (xcc->kvm_required && !kvm_enabled()) {
2200 error_setg(&error, "CPU model '%s' requires KVM", name);
2201 goto out;
2202 }
2203
2204 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2205
2206 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2207 if (error) {
2208 goto out;
2209 }
2210
2211 out:
2212 if (error != NULL) {
2213 error_propagate(errp, error);
2214 if (cpu) {
2215 object_unref(OBJECT(cpu));
2216 cpu = NULL;
2217 }
2218 }
2219 g_strfreev(model_pieces);
2220 return cpu;
2221 }
2222
2223 X86CPU *cpu_x86_init(const char *cpu_model)
2224 {
2225 Error *error = NULL;
2226 X86CPU *cpu;
2227
2228 cpu = cpu_x86_create(cpu_model, &error);
2229 if (error) {
2230 goto out;
2231 }
2232
2233 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2234
2235 out:
2236 if (error) {
2237 error_report_err(error);
2238 if (cpu != NULL) {
2239 object_unref(OBJECT(cpu));
2240 cpu = NULL;
2241 }
2242 }
2243 return cpu;
2244 }
2245
2246 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2247 {
2248 X86CPUDefinition *cpudef = data;
2249 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2250
2251 xcc->cpu_def = cpudef;
2252 }
2253
2254 static void x86_register_cpudef_type(X86CPUDefinition *def)
2255 {
2256 char *typename = x86_cpu_type_name(def->name);
2257 TypeInfo ti = {
2258 .name = typename,
2259 .parent = TYPE_X86_CPU,
2260 .class_init = x86_cpu_cpudef_class_init,
2261 .class_data = def,
2262 };
2263
2264 type_register(&ti);
2265 g_free(typename);
2266 }
2267
2268 #if !defined(CONFIG_USER_ONLY)
2269
2270 void cpu_clear_apic_feature(CPUX86State *env)
2271 {
2272 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2273 }
2274
2275 #endif /* !CONFIG_USER_ONLY */
2276
2277 void x86_cpudef_setup(void)
2278 {
2279 }
2280
2281 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2282 uint32_t *eax, uint32_t *ebx,
2283 uint32_t *ecx, uint32_t *edx)
2284 {
2285 X86CPU *cpu = x86_env_get_cpu(env);
2286 CPUState *cs = CPU(cpu);
2287
2288 /* test if maximum index reached */
2289 if (index & 0x80000000) {
2290 if (index > env->cpuid_xlevel) {
2291 if (env->cpuid_xlevel2 > 0) {
2292 /* Handle the Centaur's CPUID instruction. */
2293 if (index > env->cpuid_xlevel2) {
2294 index = env->cpuid_xlevel2;
2295 } else if (index < 0xC0000000) {
2296 index = env->cpuid_xlevel;
2297 }
2298 } else {
2299 /* Intel documentation states that invalid EAX input will
2300 * return the same information as EAX=cpuid_level
2301 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2302 */
2303 index = env->cpuid_level;
2304 }
2305 }
2306 } else {
2307 if (index > env->cpuid_level)
2308 index = env->cpuid_level;
2309 }
2310
2311 switch (index) {
2312 case 0:
2313 *eax = env->cpuid_level;
2314 *ebx = env->cpuid_vendor1;
2315 *edx = env->cpuid_vendor2;
2316 *ecx = env->cpuid_vendor3;
2317 break;
2318 case 1:
2319 *eax = env->cpuid_version;
2320 *ebx = (cpu->apic_id << 24) |
2321 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
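               /* (8 quadwords = a 64-byte cache line, matching L1D_LINE_SIZE.) */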
2322 *ecx = env->features[FEAT_1_ECX];
2323 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2324 *ecx |= CPUID_EXT_OSXSAVE;
2325 }
2326 *edx = env->features[FEAT_1_EDX];
2327 if (cs->nr_cores * cs->nr_threads > 1) {
2328 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2329 *edx |= CPUID_HT;
2330 }
2331 break;
2332 case 2:
2333 /* cache info: needed for Pentium Pro compatibility */
2334 if (cpu->cache_info_passthrough) {
2335 host_cpuid(index, 0, eax, ebx, ecx, edx);
2336 break;
2337 }
2338 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2339 *ebx = 0;
2340 *ecx = 0;
2341 *edx = (L1D_DESCRIPTOR << 16) | \
2342 (L1I_DESCRIPTOR << 8) | \
2343 (L2_DESCRIPTOR);
2344 break;
2345 case 4:
2346 /* cache info: needed for Core compatibility */
2347 if (cpu->cache_info_passthrough) {
2348 host_cpuid(index, count, eax, ebx, ecx, edx);
2349 *eax &= ~0xFC000000;
2350 } else {
2351 *eax = 0;
2352 switch (count) {
2353 case 0: /* L1 dcache info */
2354 *eax |= CPUID_4_TYPE_DCACHE | \
2355 CPUID_4_LEVEL(1) | \
2356 CPUID_4_SELF_INIT_LEVEL;
2357 *ebx = (L1D_LINE_SIZE - 1) | \
2358 ((L1D_PARTITIONS - 1) << 12) | \
2359 ((L1D_ASSOCIATIVITY - 1) << 22);
2360 *ecx = L1D_SETS - 1;
2361 *edx = CPUID_4_NO_INVD_SHARING;
2362 break;
2363 case 1: /* L1 icache info */
2364 *eax |= CPUID_4_TYPE_ICACHE | \
2365 CPUID_4_LEVEL(1) | \
2366 CPUID_4_SELF_INIT_LEVEL;
2367 *ebx = (L1I_LINE_SIZE - 1) | \
2368 ((L1I_PARTITIONS - 1) << 12) | \
2369 ((L1I_ASSOCIATIVITY - 1) << 22);
2370 *ecx = L1I_SETS - 1;
2371 *edx = CPUID_4_NO_INVD_SHARING;
2372 break;
2373 case 2: /* L2 cache info */
2374 *eax |= CPUID_4_TYPE_UNIFIED | \
2375 CPUID_4_LEVEL(2) | \
2376 CPUID_4_SELF_INIT_LEVEL;
2377 if (cs->nr_threads > 1) {
2378 *eax |= (cs->nr_threads - 1) << 14;
2379 }
2380 *ebx = (L2_LINE_SIZE - 1) | \
2381 ((L2_PARTITIONS - 1) << 12) | \
2382 ((L2_ASSOCIATIVITY - 1) << 22);
2383 *ecx = L2_SETS - 1;
2384 *edx = CPUID_4_NO_INVD_SHARING;
2385 break;
2386 default: /* end of info */
2387 *eax = 0;
2388 *ebx = 0;
2389 *ecx = 0;
2390 *edx = 0;
2391 break;
2392 }
2393 }
2394
2395 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
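        /* EAX bits 4:0 are the cache type; 0 means "no more caches", so only
         * patch the core count into leaves describing a real cache. */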
2396 if ((*eax & 31) && cs->nr_cores > 1) {
2397 *eax |= (cs->nr_cores - 1) << 26;
2398 }
2399 break;
2400 case 5:
2401 /* mwait info: needed for Core compatibility */
2402 *eax = 0; /* Smallest monitor-line size in bytes */
2403 *ebx = 0; /* Largest monitor-line size in bytes */
2404 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2405 *edx = 0;
2406 break;
2407 case 6:
2408 /* Thermal and Power Leaf */
2409 *eax = env->features[FEAT_6_EAX];
2410 *ebx = 0;
2411 *ecx = 0;
2412 *edx = 0;
2413 break;
2414 case 7:
2415 /* Structured Extended Feature Flags Enumeration Leaf */
2416 if (count == 0) {
2417 *eax = 0; /* Maximum ECX value for sub-leaves */
2418 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2419 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2420 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2421 *ecx |= CPUID_7_0_ECX_OSPKE;
2422 }
2423 *edx = 0; /* Reserved */
2424 } else {
2425 *eax = 0;
2426 *ebx = 0;
2427 *ecx = 0;
2428 *edx = 0;
2429 }
2430 break;
2431 case 9:
2432 /* Direct Cache Access Information Leaf */
2433 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2434 *ebx = 0;
2435 *ecx = 0;
2436 *edx = 0;
2437 break;
2438 case 0xA:
2439 /* Architectural Performance Monitoring Leaf */
2440 if (kvm_enabled() && cpu->enable_pmu) {
2441 KVMState *s = cs->kvm_state;
2442
2443 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2444 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2445 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2446 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2447 } else {
2448 *eax = 0;
2449 *ebx = 0;
2450 *ecx = 0;
2451 *edx = 0;
2452 }
2453 break;
2454 case 0xD: {
2455 KVMState *s = cs->kvm_state;
2456 uint64_t ena_mask;
2457 int i;
2458
2459 /* Processor Extended State */
2460 *eax = 0;
2461 *ebx = 0;
2462 *ecx = 0;
2463 *edx = 0;
2464 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2465 break;
2466 }
2467 if (kvm_enabled()) {
2468 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2469 ena_mask <<= 32;
2470 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2471 } else {
2472 ena_mask = -1;
2473 }
2474
2475 if (count == 0) {
2476 *ecx = 0x240;
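            /* 0x240 = 576 bytes: the legacy FXSAVE area (512) plus the XSAVE
             * header (64); the loop below extends it to cover any enabled
             * extended state areas. */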
2477 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2478 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2479 if ((env->features[esa->feature] & esa->bits) == esa->bits
2480 && ((ena_mask >> i) & 1) != 0) {
2481 if (i < 32) {
2482 *eax |= 1u << i;
2483 } else {
2484 *edx |= 1u << (i - 32);
2485 }
2486 *ecx = MAX(*ecx, esa->offset + esa->size);
2487 }
2488 }
2489 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2490 *ebx = *ecx;
2491 } else if (count == 1) {
2492 *eax = env->features[FEAT_XSAVE];
2493 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2494 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2495 if ((env->features[esa->feature] & esa->bits) == esa->bits
2496 && ((ena_mask >> count) & 1) != 0) {
2497 *eax = esa->size;
2498 *ebx = esa->offset;
2499 }
2500 }
2501 break;
2502 }
2503 case 0x80000000:
2504 *eax = env->cpuid_xlevel;
2505 *ebx = env->cpuid_vendor1;
2506 *edx = env->cpuid_vendor2;
2507 *ecx = env->cpuid_vendor3;
2508 break;
2509 case 0x80000001:
2510 *eax = env->cpuid_version;
2511 *ebx = 0;
2512 *ecx = env->features[FEAT_8000_0001_ECX];
2513 *edx = env->features[FEAT_8000_0001_EDX];
2514
2515 /* The Linux kernel checks for the CMPLegacy bit and
2516 * discards multiple thread information if it is set.
2517 * So don't set it here for Intel to make Linux guests happy.
2518 */
2519 if (cs->nr_cores * cs->nr_threads > 1) {
2520 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2521 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2522 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2523 *ecx |= 1 << 1; /* CmpLegacy bit */
2524 }
2525 }
2526 break;
2527 case 0x80000002:
2528 case 0x80000003:
2529 case 0x80000004:
2530 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2531 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2532 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2533 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2534 break;
2535 case 0x80000005:
2536 /* cache info (L1 cache) */
2537 if (cpu->cache_info_passthrough) {
2538 host_cpuid(index, 0, eax, ebx, ecx, edx);
2539 break;
2540 }
2541 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2542 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2543 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2544 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2545 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2546 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2547 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2548 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2549 break;
2550 case 0x80000006:
2551 /* cache info (L2 cache) */
2552 if (cpu->cache_info_passthrough) {
2553 host_cpuid(index, 0, eax, ebx, ecx, edx);
2554 break;
2555 }
2556 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2557 (L2_DTLB_2M_ENTRIES << 16) | \
2558 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2559 (L2_ITLB_2M_ENTRIES);
2560 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2561 (L2_DTLB_4K_ENTRIES << 16) | \
2562 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2563 (L2_ITLB_4K_ENTRIES);
2564 *ecx = (L2_SIZE_KB_AMD << 16) | \
2565 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2566 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2567 *edx = ((L3_SIZE_KB/512) << 18) | \
2568 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2569 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2570 break;
2571 case 0x80000007:
2572 *eax = 0;
2573 *ebx = 0;
2574 *ecx = 0;
2575 *edx = env->features[FEAT_8000_0007_EDX];
2576 break;
2577 case 0x80000008:
2578 /* virtual & phys address size in low 2 bytes. */
2579 /* XXX: This value must match the one used in the MMU code. */
2580 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2581 /* 64 bit processor */
2582 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2583 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2584 } else {
2585 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2586 *eax = 0x00000024; /* 36 bits physical */
2587 } else {
2588 *eax = 0x00000020; /* 32 bits physical */
2589 }
2590 }
2591 *ebx = 0;
2592 *ecx = 0;
2593 *edx = 0;
2594 if (cs->nr_cores * cs->nr_threads > 1) {
2595 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2596 }
2597 break;
2598 case 0x8000000A:
2599 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2600 *eax = 0x00000001; /* SVM Revision */
2601 *ebx = 0x00000010; /* nr of ASIDs */
2602 *ecx = 0;
2603 *edx = env->features[FEAT_SVM]; /* optional features */
2604 } else {
2605 *eax = 0;
2606 *ebx = 0;
2607 *ecx = 0;
2608 *edx = 0;
2609 }
2610 break;
2611 case 0xC0000000:
2612 *eax = env->cpuid_xlevel2;
2613 *ebx = 0;
2614 *ecx = 0;
2615 *edx = 0;
2616 break;
2617 case 0xC0000001:
2618 /* Support for VIA CPU's CPUID instruction */
2619 *eax = env->cpuid_version;
2620 *ebx = 0;
2621 *ecx = 0;
2622 *edx = env->features[FEAT_C000_0001_EDX];
2623 break;
2624 case 0xC0000002:
2625 case 0xC0000003:
2626 case 0xC0000004:
2627 /* Reserved for the future, and now filled with zero */
2628 *eax = 0;
2629 *ebx = 0;
2630 *ecx = 0;
2631 *edx = 0;
2632 break;
2633 default:
2634 /* reserved values: zero */
2635 *eax = 0;
2636 *ebx = 0;
2637 *ecx = 0;
2638 *edx = 0;
2639 break;
2640 }
2641 }
2642
2643 /* CPUClass::reset() */
2644 static void x86_cpu_reset(CPUState *s)
2645 {
2646 X86CPU *cpu = X86_CPU(s);
2647 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2648 CPUX86State *env = &cpu->env;
2649 target_ulong cr4;
2650 uint64_t xcr0;
2651 int i;
2652
2653 xcc->parent_reset(s);
2654
2655 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2656
2657 tlb_flush(s, 1);
2658
2659 env->old_exception = -1;
2660
2661 /* init to reset state */
2662
2663 #ifdef CONFIG_SOFTMMU
2664 env->hflags |= HF_SOFTMMU_MASK;
2665 #endif
2666 env->hflags2 |= HF2_GIF_MASK;
2667
2668 cpu_x86_update_cr0(env, 0x60000010);
2669 env->a20_mask = ~0x0;
2670 env->smbase = 0x30000;
2671
2672 env->idt.limit = 0xffff;
2673 env->gdt.limit = 0xffff;
2674 env->ldt.limit = 0xffff;
2675 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2676 env->tr.limit = 0xffff;
2677 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2678
2679 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2680 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2681 DESC_R_MASK | DESC_A_MASK);
2682 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2683 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2684 DESC_A_MASK);
2685 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2686 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2687 DESC_A_MASK);
2688 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2689 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2690 DESC_A_MASK);
2691 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2692 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2693 DESC_A_MASK);
2694 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2695 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2696 DESC_A_MASK);
2697
2698 env->eip = 0xfff0;
2699 env->regs[R_EDX] = env->cpuid_version;
2700
2701 env->eflags = 0x2;
2702
2703 /* FPU init */
2704 for (i = 0; i < 8; i++) {
2705 env->fptags[i] = 1;
2706 }
2707 cpu_set_fpuc(env, 0x37f);
2708
2709 env->mxcsr = 0x1f80;
2710 /* All units are in INIT state. */
2711 env->xstate_bv = 0;
2712
2713 env->pat = 0x0007040600070406ULL;
2714 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2715
2716 memset(env->dr, 0, sizeof(env->dr));
2717 env->dr[6] = DR6_FIXED_1;
2718 env->dr[7] = DR7_FIXED_1;
2719 cpu_breakpoint_remove_all(s, BP_CPU);
2720 cpu_watchpoint_remove_all(s, BP_CPU);
2721
2722 cr4 = 0;
2723 xcr0 = XSTATE_FP_MASK;
2724
2725 #ifdef CONFIG_USER_ONLY
2726 /* Enable all the features for user-mode. */
2727 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2728 xcr0 |= XSTATE_SSE_MASK;
2729 }
2730 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2731 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2732 if ((env->features[esa->feature] & esa->bits) == esa->bits) {
2733 xcr0 |= 1ull << i;
2734 }
2735 }
2736
2737 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2738 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2739 }
2740 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2741 cr4 |= CR4_FSGSBASE_MASK;
2742 }
2743 #endif
2744
2745 env->xcr0 = xcr0;
2746 cpu_x86_update_cr4(env, cr4);
2747
2748 /*
2749 * SDM 11.11.5 requires:
2750 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2751 * - IA32_MTRR_PHYSMASKn.V = 0
2752 * All other bits are undefined. For simplification, zero it all.
2753 */
2754 env->mtrr_deftype = 0;
2755 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2756 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2757
2758 #if !defined(CONFIG_USER_ONLY)
2759 /* We hard-wire the BSP to the first CPU. */
2760 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2761
2762 s->halted = !cpu_is_bsp(cpu);
2763
2764 if (kvm_enabled()) {
2765 kvm_arch_reset_vcpu(cpu);
2766 }
2767 #endif
2768 }
2769
2770 #ifndef CONFIG_USER_ONLY
2771 bool cpu_is_bsp(X86CPU *cpu)
2772 {
2773 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2774 }
2775
2776 /* TODO: remove me when reset over the QOM tree is implemented */
2777 static void x86_cpu_machine_reset_cb(void *opaque)
2778 {
2779 X86CPU *cpu = opaque;
2780 cpu_reset(CPU(cpu));
2781 }
2782 #endif
2783
2784 static void mce_init(X86CPU *cpu)
2785 {
2786 CPUX86State *cenv = &cpu->env;
2787 unsigned int bank;
2788
2789 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2790 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2791 (CPUID_MCE | CPUID_MCA)) {
2792 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2793 cenv->mcg_ctl = ~(uint64_t)0;
2794 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2795 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2796 }
2797 }
2798 }
2799
2800 #ifndef CONFIG_USER_ONLY
2801 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2802 {
2803 APICCommonState *apic;
2804 const char *apic_type = "apic";
2805
2806 if (kvm_apic_in_kernel()) {
2807 apic_type = "kvm-apic";
2808 } else if (xen_enabled()) {
2809 apic_type = "xen-apic";
2810 }
2811
2812 cpu->apic_state = DEVICE(object_new(apic_type));
2813
2814 object_property_add_child(OBJECT(cpu), "apic",
2815 OBJECT(cpu->apic_state), NULL);
2816 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2817 /* TODO: convert to link<> */
2818 apic = APIC_COMMON(cpu->apic_state);
2819 apic->cpu = cpu;
2820 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2821 }
2822
2823 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2824 {
2825 APICCommonState *apic;
2826 static bool apic_mmio_map_once;
2827
2828 if (cpu->apic_state == NULL) {
2829 return;
2830 }
2831 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2832 errp);
2833
2834 /* Map APIC MMIO area */
2835 apic = APIC_COMMON(cpu->apic_state);
2836 if (!apic_mmio_map_once) {
2837 memory_region_add_subregion_overlap(get_system_memory(),
2838 apic->apicbase &
2839 MSR_IA32_APICBASE_BASE,
2840 &apic->io_memory,
2841 0x1000);
2842 apic_mmio_map_once = true;
2843 }
2844 }
2845
2846 static void x86_cpu_machine_done(Notifier *n, void *unused)
2847 {
2848 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2849 MemoryRegion *smram =
2850 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2851
2852 if (smram) {
2853 cpu->smram = g_new(MemoryRegion, 1);
2854 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2855 smram, 0, 1ull << 32);
2856 memory_region_set_enabled(cpu->smram, false);
2857 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2858 }
2859 }
2860 #else
2861 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2862 {
2863 }
2864 #endif
2865
2866
2867 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2868 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2869 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2870 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2871 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2872 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2873 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2874 {
2875 CPUState *cs = CPU(dev);
2876 X86CPU *cpu = X86_CPU(dev);
2877 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2878 CPUX86State *env = &cpu->env;
2879 Error *local_err = NULL;
2880 static bool ht_warned;
2881
2882 if (cpu->apic_id < 0) {
2883 error_setg(errp, "apic-id property was not initialized properly");
2884 return;
2885 }
2886
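    /* CPUID leaf 7 features are only visible if cpuid_level reaches 7,
     * so bump it when any FEAT_7_0_EBX bit is set. */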
2887 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2888 env->cpuid_level = 7;
2889 }
2890
2891 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2892 error_setg(&local_err,
2893 kvm_enabled() ?
2894 "Host doesn't support requested features" :
2895 "TCG doesn't support requested features");
2896 goto out;
2897 }
2898
2899 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2900 * CPUID[1].EDX.
2901 */
2902 if (IS_AMD_CPU(env)) {
2903 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2904 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2905 & CPUID_EXT2_AMD_ALIASES);
2906 }
2907
2908
2909 #ifndef CONFIG_USER_ONLY
2910 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2911
2912 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2913 x86_cpu_apic_create(cpu, &local_err);
2914 if (local_err != NULL) {
2915 goto out;
2916 }
2917 }
2918 #endif
2919
2920 mce_init(cpu);
2921
2922 #ifndef CONFIG_USER_ONLY
2923 if (tcg_enabled()) {
2924 AddressSpace *newas = g_new(AddressSpace, 1);
2925
2926 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
2927 cpu->cpu_as_root = g_new(MemoryRegion, 1);
2928
2929 /* Outer container... */
2930 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
2931 memory_region_set_enabled(cpu->cpu_as_root, true);
2932
2933 /* ... with two regions inside: normal system memory with low
2934 * priority, and...
2935 */
2936 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
2937 get_system_memory(), 0, ~0ull);
2938 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
2939 memory_region_set_enabled(cpu->cpu_as_mem, true);
2940 address_space_init(newas, cpu->cpu_as_root, "CPU");
2941 cs->num_ases = 1;
2942 cpu_address_space_init(cs, newas, 0);
2943
2944 /* ... SMRAM with higher priority, linked from /machine/smram. */
2945 cpu->machine_done.notify = x86_cpu_machine_done;
2946 qemu_add_machine_init_done_notifier(&cpu->machine_done);
2947 }
2948 #endif
2949
2950 qemu_init_vcpu(cs);
2951
2952 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2953 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2954 * based on inputs (sockets, cores, threads), it is still better to give
2955 * users a warning.
2956 *
2957 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2958 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
2959 */
2960 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2961 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2962 " -smp options properly.");
2963 ht_warned = true;
2964 }
2965
2966 x86_cpu_apic_realize(cpu, &local_err);
2967 if (local_err != NULL) {
2968 goto out;
2969 }
2970 cpu_reset(cs);
2971
2972 xcc->parent_realize(dev, &local_err);
2973
2974 out:
2975 if (local_err != NULL) {
2976 error_propagate(errp, local_err);
2977 return;
2978 }
2979 }
2980
2981 typedef struct BitProperty {
2982 uint32_t *ptr;
2983 uint32_t mask;
2984 } BitProperty;
2985
2986 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
2987 void *opaque, Error **errp)
2988 {
2989 BitProperty *fp = opaque;
2990 bool value = (*fp->ptr & fp->mask) == fp->mask;
2991 visit_type_bool(v, name, &value, errp);
2992 }
2993
2994 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
2995 void *opaque, Error **errp)
2996 {
2997 DeviceState *dev = DEVICE(obj);
2998 BitProperty *fp = opaque;
2999 Error *local_err = NULL;
3000 bool value;
3001
3002 if (dev->realized) {
3003 qdev_prop_set_after_realize(dev, name, errp);
3004 return;
3005 }
3006
3007 visit_type_bool(v, name, &value, &local_err);
3008 if (local_err) {
3009 error_propagate(errp, local_err);
3010 return;
3011 }
3012
3013 if (value) {
3014 *fp->ptr |= fp->mask;
3015 } else {
3016 *fp->ptr &= ~fp->mask;
3017 }
3018 }
3019
3020 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3021 void *opaque)
3022 {
3023 BitProperty *prop = opaque;
3024 g_free(prop);
3025 }
3026
3027 /* Register a boolean property to get/set a single bit in a uint32_t field.
3028 *
3029 * The same property name can be registered multiple times to make it affect
3030 * multiple bits in the same FeatureWord. In that case, the getter will return
3031 * true only if all bits are set.
3032 */
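/* For example, if the same name is registered for bits 3 and 5 of a word,
 * reading the property returns true only when both bits are set, and writing
 * it sets or clears both bits together.
 */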
3033 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3034 const char *prop_name,
3035 uint32_t *field,
3036 int bitnr)
3037 {
3038 BitProperty *fp;
3039 ObjectProperty *op;
3040 uint32_t mask = (1UL << bitnr);
3041
3042 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3043 if (op) {
3044 fp = op->opaque;
3045 assert(fp->ptr == field);
3046 fp->mask |= mask;
3047 } else {
3048 fp = g_new0(BitProperty, 1);
3049 fp->ptr = field;
3050 fp->mask = mask;
3051 object_property_add(OBJECT(cpu), prop_name, "bool",
3052 x86_cpu_get_bit_prop,
3053 x86_cpu_set_bit_prop,
3054 x86_cpu_release_bit_prop, fp, &error_abort);
3055 }
3056 }
3057
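/* Register QOM bit properties for one bit of a feature word, using the names
 * from feat_names[]; '|'-separated alternatives after the first name are
 * added as aliases of the first.
 */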
3058 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3059 FeatureWord w,
3060 int bitnr)
3061 {
3062 Object *obj = OBJECT(cpu);
3063 int i;
3064 char **names;
3065 FeatureWordInfo *fi = &feature_word_info[w];
3066
3067 if (!fi->feat_names) {
3068 return;
3069 }
3070 if (!fi->feat_names[bitnr]) {
3071 return;
3072 }
3073
3074 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3075
3076 feat2prop(names[0]);
3077 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3078
3079 for (i = 1; names[i]; i++) {
3080 feat2prop(names[i]);
3081 object_property_add_alias(obj, names[i], obj, names[0],
3082 &error_abort);
3083 }
3084
3085 g_strfreev(names);
3086 }
3087
3088 static void x86_cpu_initfn(Object *obj)
3089 {
3090 CPUState *cs = CPU(obj);
3091 X86CPU *cpu = X86_CPU(obj);
3092 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3093 CPUX86State *env = &cpu->env;
3094 FeatureWord w;
3095 static int inited;
3096
3097 cs->env_ptr = env;
3098 cpu_exec_init(cs, &error_abort);
3099
3100 object_property_add(obj, "family", "int",
3101 x86_cpuid_version_get_family,
3102 x86_cpuid_version_set_family, NULL, NULL, NULL);
3103 object_property_add(obj, "model", "int",
3104 x86_cpuid_version_get_model,
3105 x86_cpuid_version_set_model, NULL, NULL, NULL);
3106 object_property_add(obj, "stepping", "int",
3107 x86_cpuid_version_get_stepping,
3108 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3109 object_property_add_str(obj, "vendor",
3110 x86_cpuid_get_vendor,
3111 x86_cpuid_set_vendor, NULL);
3112 object_property_add_str(obj, "model-id",
3113 x86_cpuid_get_model_id,
3114 x86_cpuid_set_model_id, NULL);
3115 object_property_add(obj, "tsc-frequency", "int",
3116 x86_cpuid_get_tsc_freq,
3117 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3118 object_property_add(obj, "apic-id", "int",
3119 x86_cpuid_get_apic_id,
3120 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3121 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3122 x86_cpu_get_feature_words,
3123 NULL, NULL, (void *)env->features, NULL);
3124 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3125 x86_cpu_get_feature_words,
3126 NULL, NULL, (void *)cpu->filtered_features, NULL);
3127
3128 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3129
3130 #ifndef CONFIG_USER_ONLY
3131 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3132 cpu->apic_id = -1;
3133 #endif
3134
3135 for (w = 0; w < FEATURE_WORDS; w++) {
3136 int bitnr;
3137
3138 for (bitnr = 0; bitnr < 32; bitnr++) {
3139 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3140 }
3141 }
3142
3143 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3144
3145 /* init various static tables used in TCG mode */
3146 if (tcg_enabled() && !inited) {
3147 inited = 1;
3148 tcg_x86_init();
3149 }
3150 }
3151
3152 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3153 {
3154 X86CPU *cpu = X86_CPU(cs);
3155
3156 return cpu->apic_id;
3157 }
3158
3159 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3160 {
3161 X86CPU *cpu = X86_CPU(cs);
3162
3163 return cpu->env.cr[0] & CR0_PG_MASK;
3164 }
3165
3166 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3167 {
3168 X86CPU *cpu = X86_CPU(cs);
3169
3170 cpu->env.eip = value;
3171 }
3172
3173 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3174 {
3175 X86CPU *cpu = X86_CPU(cs);
3176
3177 cpu->env.eip = tb->pc - tb->cs_base;
3178 }
3179
3180 static bool x86_cpu_has_work(CPUState *cs)
3181 {
3182 X86CPU *cpu = X86_CPU(cs);
3183 CPUX86State *env = &cpu->env;
3184
3185 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3186 CPU_INTERRUPT_POLL)) &&
3187 (env->eflags & IF_MASK)) ||
3188 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3189 CPU_INTERRUPT_INIT |
3190 CPU_INTERRUPT_SIPI |
3191 CPU_INTERRUPT_MCE)) ||
3192 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3193 !(env->hflags & HF_SMM_MASK));
3194 }
3195
3196 static Property x86_cpu_properties[] = {
3197 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3198 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3199 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3200 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3201 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3202 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3203 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3204 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3205 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3206 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3207 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3208 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3209 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3210 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3211 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3212 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3213 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3214 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3215 DEFINE_PROP_END_OF_LIST()
3216 };
3217
3218 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3219 {
3220 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3221 CPUClass *cc = CPU_CLASS(oc);
3222 DeviceClass *dc = DEVICE_CLASS(oc);
3223
3224 xcc->parent_realize = dc->realize;
3225 dc->realize = x86_cpu_realizefn;
3226 dc->props = x86_cpu_properties;
3227
3228 xcc->parent_reset = cc->reset;
3229 cc->reset = x86_cpu_reset;
3230 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3231
3232 cc->class_by_name = x86_cpu_class_by_name;
3233 cc->parse_features = x86_cpu_parse_featurestr;
3234 cc->has_work = x86_cpu_has_work;
3235 cc->do_interrupt = x86_cpu_do_interrupt;
3236 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3237 cc->dump_state = x86_cpu_dump_state;
3238 cc->set_pc = x86_cpu_set_pc;
3239 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3240 cc->gdb_read_register = x86_cpu_gdb_read_register;
3241 cc->gdb_write_register = x86_cpu_gdb_write_register;
3242 cc->get_arch_id = x86_cpu_get_arch_id;
3243 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3244 #ifdef CONFIG_USER_ONLY
3245 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3246 #else
3247 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3248 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3249 cc->write_elf64_note = x86_cpu_write_elf64_note;
3250 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3251 cc->write_elf32_note = x86_cpu_write_elf32_note;
3252 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3253 cc->vmsd = &vmstate_x86_cpu;
3254 #endif
3255 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3256 #ifndef CONFIG_USER_ONLY
3257 cc->debug_excp_handler = breakpoint_handler;
3258 #endif
3259 cc->cpu_exec_enter = x86_cpu_exec_enter;
3260 cc->cpu_exec_exit = x86_cpu_exec_exit;
3261
3262 /*
3263 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3264 * object in cpus -> dangling pointer after final object_unref().
3265 */
3266 dc->cannot_destroy_with_object_finalize_yet = true;
3267 }
3268
3269 static const TypeInfo x86_cpu_type_info = {
3270 .name = TYPE_X86_CPU,
3271 .parent = TYPE_CPU,
3272 .instance_size = sizeof(X86CPU),
3273 .instance_init = x86_cpu_initfn,
3274 .abstract = true,
3275 .class_size = sizeof(X86CPUClass),
3276 .class_init = x86_cpu_common_class_init,
3277 };
3278
3279 static void x86_cpu_register_types(void)
3280 {
3281 int i;
3282
3283 type_register_static(&x86_cpu_type_info);
3284 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3285 x86_register_cpudef_type(&builtin_x86_defs[i]);
3286 }
3287 #ifdef CONFIG_KVM
3288 type_register_static(&host_x86_cpu_type_info);
3289 #endif
3290 }
3291
3292 type_init(x86_cpu_register_types)