]> git.proxmox.com Git - mirror_qemu.git/blob - target/i386/cpu.c
i386: Improve query-cpu-model-expansion full mode
[mirror_qemu.git] / target / i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32 #include "qapi/qmp/qstring.h"
33 #include "qapi/qmp/qdict.h"
34 #include "qapi/qmp/qbool.h"
35 #include "qapi/qmp/qint.h"
36 #include "qapi/qmp/qfloat.h"
37
38 #include "qapi-types.h"
39 #include "qapi-visit.h"
40 #include "qapi/visitor.h"
41 #include "qom/qom-qobject.h"
42 #include "sysemu/arch_init.h"
43
44 #if defined(CONFIG_KVM)
45 #include <linux/kvm_para.h>
46 #endif
47
48 #include "sysemu/sysemu.h"
49 #include "hw/qdev-properties.h"
50 #include "hw/i386/topology.h"
51 #ifndef CONFIG_USER_ONLY
52 #include "exec/address-spaces.h"
53 #include "hw/hw.h"
54 #include "hw/xen/xen.h"
55 #include "hw/i386/apic_internal.h"
56 #endif
57
58
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * Maps a ways-of-associativity count to the 4-bit AMD encoding.
 * Unlisted values encode as 0 (invalid).
 *
 * Note: the argument is parenthesized so expression arguments are not
 * broken up by operator precedence inside the ternary chain; it is
 * still evaluated multiple times, so it must be side-effect free.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)   : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)


/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* Level 3 unified cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
#define L3_N_LINE_SIZE        64
#define L3_N_ASSOCIATIVITY    16
#define L3_N_SETS          16384
#define L3_N_PARTITIONS        1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG     1
#define L3_N_SIZE_KB_AMD   16384

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
177
178 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
179 uint32_t vendor2, uint32_t vendor3)
180 {
181 int i;
182 for (i = 0; i < 4; i++) {
183 dst[i] = vendor1 >> (8 * i);
184 dst[i + 4] = vendor2 >> (8 * i);
185 dst[i + 8] = vendor3 >> (8 * i);
186 }
187 dst[CPUID_VENDOR_SZ] = '\0';
188 }
189
/* Baseline feature-bit sets for the classic CPU models below; each later
 * model's set is built on top of the previous one. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* Feature bits the TCG (software emulation) backend can actually provide,
 * per feature word.  Bits outside these masks are filtered out when
 * running without KVM. */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

/* 64-bit-only bits are compiled out when the target is 32-bit x86 */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

/* CPUID[8000_0001].EDX mirrors many leaf-1 EDX bits on AMD, hence the
 * masking of TCG_FEATURES with CPUID_EXT2_AMD_ALIASES */
#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
255
/* Describes one 32-bit CPUID feature word: which CPUID leaf/register it
 * comes from, the property name of each bit, and which bits TCG supports
 * or (un)migratable status overrides. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];  /* NULL entries have no user-visible name */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
271
/* Per-feature-word metadata table, indexed by FeatureWord.  Bit i of each
 * word is named feat_names[i]; array rows below group bits 4 at a time. */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    /* VIA/Centaur PadLock features */
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    /* KVM paravirtual features; note "kvmclock" intentionally names both
     * bit 0 and bit 3 (the two clocksource feature bits). */
    [FEAT_KVM] = {
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    /* Hyper-V enlightenments (CPUID 0x40000003); bits are tracked but not
     * yet exposed as named properties. */
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        /* invtsc cannot survive migration (TSC frequency may differ) */
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    /* XSAVE component bitmap (CPUID[0xD,0].EAX/EDX); bits have no
     * user-visible names and are derived from other features. */
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};
534
/* Maps an R_* register index to its printable name and QAPI enum value. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* REGISTER(reg) expands to a designated initializer at index R_<reg>,
 * pairing the stringized name with the matching QAPI enum constant. */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
555
/* Describes one XSAVE state component: the feature bit that enables it,
 * and its offset/size within the standard-format XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;   /* feature word + bit(s) gating this component */
    uint32_t offset, size;    /* location in the standard XSAVE layout */
} ExtSaveArea;

/* Indexed by XSTATE_*_BIT (i.e. by XSAVE component number). */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
605
606 static uint32_t xsave_area_size(uint64_t mask)
607 {
608 int i;
609 uint64_t ret = 0;
610
611 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
612 const ExtSaveArea *esa = &x86_ext_save_areas[i];
613 if ((mask >> i) & 1) {
614 ret = MAX(ret, esa->offset + esa->size);
615 }
616 }
617 return ret;
618 }
619
620 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
621 {
622 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
623 cpu->env.features[FEAT_XSAVE_COMP_LO];
624 }
625
626 const char *get_register_name_32(unsigned int reg)
627 {
628 if (reg >= CPU_NB_REGS32) {
629 return NULL;
630 }
631 return x86_reg_info_32[reg].name;
632 }
633
634 /*
635 * Returns the set of feature flags that are supported and migratable by
636 * QEMU, for a given FeatureWord.
637 */
638 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
639 {
640 FeatureWordInfo *wi = &feature_word_info[w];
641 uint32_t r = 0;
642 int i;
643
644 for (i = 0; i < 32; i++) {
645 uint32_t f = 1U << i;
646
647 /* If the feature name is known, it is implicitly considered migratable,
648 * unless it is explicitly set in unmigratable_flags */
649 if ((wi->migratable_flags & f) ||
650 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
651 r |= f;
652 }
653 }
654 return r;
655 }
656
/* Execute the CPUID instruction on the host with the given leaf
 * (@function in EAX) and subleaf (@count in ECX), storing the result
 * registers through any non-NULL output pointers.  Aborts on non-x86
 * hosts. */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* pusha/popa keep all GPRs intact across CPUID; presumably this is
     * to avoid clobbering %ebx, which the compiler may reserve (e.g. for
     * PIC) — results go to memory via %esi instead of output operands. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    /* Each output pointer is optional; NULL means "don't care" */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
690
691 /* CPU class name definitions: */
692
693 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
694 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
695
696 /* Return type name for a given CPU model name
697 * Caller is responsible for freeing the returned string.
698 */
699 static char *x86_cpu_type_name(const char *model_name)
700 {
701 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
702 }
703
704 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
705 {
706 ObjectClass *oc;
707 char *typename;
708
709 if (cpu_model == NULL) {
710 return NULL;
711 }
712
713 typename = x86_cpu_type_name(cpu_model);
714 oc = object_class_by_name(typename);
715 g_free(typename);
716 return oc;
717 }
718
719 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
720 {
721 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
722 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
723 return g_strndup(class_name,
724 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
725 }
726
/* Static description of a built-in CPU model (see builtin_x86_defs). */
struct X86CPUDefinition {
    const char *name;      /* model name, e.g. "qemu64" */
    uint32_t level;        /* maximum basic CPUID leaf */
    uint32_t xlevel;       /* maximum extended CPUID leaf (0x8000xxxx) */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;  /* per-FeatureWord default feature bits */
    char model_id[48];     /* brand string reported to the guest */
};
739
740 static X86CPUDefinition builtin_x86_defs[] = {
741 {
742 .name = "qemu64",
743 .level = 0xd,
744 .vendor = CPUID_VENDOR_AMD,
745 .family = 6,
746 .model = 6,
747 .stepping = 3,
748 .features[FEAT_1_EDX] =
749 PPRO_FEATURES |
750 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
751 CPUID_PSE36,
752 .features[FEAT_1_ECX] =
753 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
754 .features[FEAT_8000_0001_EDX] =
755 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
756 .features[FEAT_8000_0001_ECX] =
757 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
758 .xlevel = 0x8000000A,
759 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
760 },
761 {
762 .name = "phenom",
763 .level = 5,
764 .vendor = CPUID_VENDOR_AMD,
765 .family = 16,
766 .model = 2,
767 .stepping = 3,
768 /* Missing: CPUID_HT */
769 .features[FEAT_1_EDX] =
770 PPRO_FEATURES |
771 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
772 CPUID_PSE36 | CPUID_VME,
773 .features[FEAT_1_ECX] =
774 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
775 CPUID_EXT_POPCNT,
776 .features[FEAT_8000_0001_EDX] =
777 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
778 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
779 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
780 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
781 CPUID_EXT3_CR8LEG,
782 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
783 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
784 .features[FEAT_8000_0001_ECX] =
785 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
786 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
787 /* Missing: CPUID_SVM_LBRV */
788 .features[FEAT_SVM] =
789 CPUID_SVM_NPT,
790 .xlevel = 0x8000001A,
791 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
792 },
793 {
794 .name = "core2duo",
795 .level = 10,
796 .vendor = CPUID_VENDOR_INTEL,
797 .family = 6,
798 .model = 15,
799 .stepping = 11,
800 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
801 .features[FEAT_1_EDX] =
802 PPRO_FEATURES |
803 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
804 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
805 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
806 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
807 .features[FEAT_1_ECX] =
808 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
809 CPUID_EXT_CX16,
810 .features[FEAT_8000_0001_EDX] =
811 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
812 .features[FEAT_8000_0001_ECX] =
813 CPUID_EXT3_LAHF_LM,
814 .xlevel = 0x80000008,
815 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
816 },
817 {
818 .name = "kvm64",
819 .level = 0xd,
820 .vendor = CPUID_VENDOR_INTEL,
821 .family = 15,
822 .model = 6,
823 .stepping = 1,
824 /* Missing: CPUID_HT */
825 .features[FEAT_1_EDX] =
826 PPRO_FEATURES | CPUID_VME |
827 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
828 CPUID_PSE36,
829 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
830 .features[FEAT_1_ECX] =
831 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
832 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
833 .features[FEAT_8000_0001_EDX] =
834 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
835 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
836 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
837 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
838 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
839 .features[FEAT_8000_0001_ECX] =
840 0,
841 .xlevel = 0x80000008,
842 .model_id = "Common KVM processor"
843 },
844 {
845 .name = "qemu32",
846 .level = 4,
847 .vendor = CPUID_VENDOR_INTEL,
848 .family = 6,
849 .model = 6,
850 .stepping = 3,
851 .features[FEAT_1_EDX] =
852 PPRO_FEATURES,
853 .features[FEAT_1_ECX] =
854 CPUID_EXT_SSE3,
855 .xlevel = 0x80000004,
856 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
857 },
858 {
859 .name = "kvm32",
860 .level = 5,
861 .vendor = CPUID_VENDOR_INTEL,
862 .family = 15,
863 .model = 6,
864 .stepping = 1,
865 .features[FEAT_1_EDX] =
866 PPRO_FEATURES | CPUID_VME |
867 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
868 .features[FEAT_1_ECX] =
869 CPUID_EXT_SSE3,
870 .features[FEAT_8000_0001_ECX] =
871 0,
872 .xlevel = 0x80000008,
873 .model_id = "Common 32-bit KVM processor"
874 },
875 {
876 .name = "coreduo",
877 .level = 10,
878 .vendor = CPUID_VENDOR_INTEL,
879 .family = 6,
880 .model = 14,
881 .stepping = 8,
882 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
883 .features[FEAT_1_EDX] =
884 PPRO_FEATURES | CPUID_VME |
885 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
886 CPUID_SS,
887 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
888 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
889 .features[FEAT_1_ECX] =
890 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
891 .features[FEAT_8000_0001_EDX] =
892 CPUID_EXT2_NX,
893 .xlevel = 0x80000008,
894 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
895 },
896 {
897 .name = "486",
898 .level = 1,
899 .vendor = CPUID_VENDOR_INTEL,
900 .family = 4,
901 .model = 8,
902 .stepping = 0,
903 .features[FEAT_1_EDX] =
904 I486_FEATURES,
905 .xlevel = 0,
906 },
907 {
908 .name = "pentium",
909 .level = 1,
910 .vendor = CPUID_VENDOR_INTEL,
911 .family = 5,
912 .model = 4,
913 .stepping = 3,
914 .features[FEAT_1_EDX] =
915 PENTIUM_FEATURES,
916 .xlevel = 0,
917 },
918 {
919 .name = "pentium2",
920 .level = 2,
921 .vendor = CPUID_VENDOR_INTEL,
922 .family = 6,
923 .model = 5,
924 .stepping = 2,
925 .features[FEAT_1_EDX] =
926 PENTIUM2_FEATURES,
927 .xlevel = 0,
928 },
929 {
930 .name = "pentium3",
931 .level = 3,
932 .vendor = CPUID_VENDOR_INTEL,
933 .family = 6,
934 .model = 7,
935 .stepping = 3,
936 .features[FEAT_1_EDX] =
937 PENTIUM3_FEATURES,
938 .xlevel = 0,
939 },
940 {
941 .name = "athlon",
942 .level = 2,
943 .vendor = CPUID_VENDOR_AMD,
944 .family = 6,
945 .model = 2,
946 .stepping = 3,
947 .features[FEAT_1_EDX] =
948 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
949 CPUID_MCA,
950 .features[FEAT_8000_0001_EDX] =
951 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
952 .xlevel = 0x80000008,
953 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
954 },
955 {
956 .name = "n270",
957 .level = 10,
958 .vendor = CPUID_VENDOR_INTEL,
959 .family = 6,
960 .model = 28,
961 .stepping = 2,
962 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
963 .features[FEAT_1_EDX] =
964 PPRO_FEATURES |
965 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
966 CPUID_ACPI | CPUID_SS,
967 /* Some CPUs got no CPUID_SEP */
968 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
969 * CPUID_EXT_XTPR */
970 .features[FEAT_1_ECX] =
971 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
972 CPUID_EXT_MOVBE,
973 .features[FEAT_8000_0001_EDX] =
974 CPUID_EXT2_NX,
975 .features[FEAT_8000_0001_ECX] =
976 CPUID_EXT3_LAHF_LM,
977 .xlevel = 0x80000008,
978 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
979 },
980 {
981 .name = "Conroe",
982 .level = 10,
983 .vendor = CPUID_VENDOR_INTEL,
984 .family = 6,
985 .model = 15,
986 .stepping = 3,
987 .features[FEAT_1_EDX] =
988 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
989 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
990 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
991 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
992 CPUID_DE | CPUID_FP87,
993 .features[FEAT_1_ECX] =
994 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
995 .features[FEAT_8000_0001_EDX] =
996 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
997 .features[FEAT_8000_0001_ECX] =
998 CPUID_EXT3_LAHF_LM,
999 .xlevel = 0x80000008,
1000 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1001 },
1002 {
1003 .name = "Penryn",
1004 .level = 10,
1005 .vendor = CPUID_VENDOR_INTEL,
1006 .family = 6,
1007 .model = 23,
1008 .stepping = 3,
1009 .features[FEAT_1_EDX] =
1010 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1011 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1012 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1013 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1014 CPUID_DE | CPUID_FP87,
1015 .features[FEAT_1_ECX] =
1016 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1017 CPUID_EXT_SSE3,
1018 .features[FEAT_8000_0001_EDX] =
1019 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1020 .features[FEAT_8000_0001_ECX] =
1021 CPUID_EXT3_LAHF_LM,
1022 .xlevel = 0x80000008,
1023 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1024 },
1025 {
1026 .name = "Nehalem",
1027 .level = 11,
1028 .vendor = CPUID_VENDOR_INTEL,
1029 .family = 6,
1030 .model = 26,
1031 .stepping = 3,
1032 .features[FEAT_1_EDX] =
1033 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1034 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1035 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1036 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1037 CPUID_DE | CPUID_FP87,
1038 .features[FEAT_1_ECX] =
1039 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1040 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1041 .features[FEAT_8000_0001_EDX] =
1042 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1043 .features[FEAT_8000_0001_ECX] =
1044 CPUID_EXT3_LAHF_LM,
1045 .xlevel = 0x80000008,
1046 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1047 },
1048 {
1049 .name = "Westmere",
1050 .level = 11,
1051 .vendor = CPUID_VENDOR_INTEL,
1052 .family = 6,
1053 .model = 44,
1054 .stepping = 1,
1055 .features[FEAT_1_EDX] =
1056 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1057 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1058 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1059 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1060 CPUID_DE | CPUID_FP87,
1061 .features[FEAT_1_ECX] =
1062 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1063 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1064 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1065 .features[FEAT_8000_0001_EDX] =
1066 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1067 .features[FEAT_8000_0001_ECX] =
1068 CPUID_EXT3_LAHF_LM,
1069 .features[FEAT_6_EAX] =
1070 CPUID_6_EAX_ARAT,
1071 .xlevel = 0x80000008,
1072 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1073 },
1074 {
1075 .name = "SandyBridge",
1076 .level = 0xd,
1077 .vendor = CPUID_VENDOR_INTEL,
1078 .family = 6,
1079 .model = 42,
1080 .stepping = 1,
1081 .features[FEAT_1_EDX] =
1082 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1083 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1084 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1085 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1086 CPUID_DE | CPUID_FP87,
1087 .features[FEAT_1_ECX] =
1088 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1089 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1090 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1091 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1092 CPUID_EXT_SSE3,
1093 .features[FEAT_8000_0001_EDX] =
1094 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1095 CPUID_EXT2_SYSCALL,
1096 .features[FEAT_8000_0001_ECX] =
1097 CPUID_EXT3_LAHF_LM,
1098 .features[FEAT_XSAVE] =
1099 CPUID_XSAVE_XSAVEOPT,
1100 .features[FEAT_6_EAX] =
1101 CPUID_6_EAX_ARAT,
1102 .xlevel = 0x80000008,
1103 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1104 },
1105 {
1106 .name = "IvyBridge",
1107 .level = 0xd,
1108 .vendor = CPUID_VENDOR_INTEL,
1109 .family = 6,
1110 .model = 58,
1111 .stepping = 9,
1112 .features[FEAT_1_EDX] =
1113 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1114 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1115 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1116 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1117 CPUID_DE | CPUID_FP87,
1118 .features[FEAT_1_ECX] =
1119 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1120 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1121 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1122 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1123 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1124 .features[FEAT_7_0_EBX] =
1125 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1126 CPUID_7_0_EBX_ERMS,
1127 .features[FEAT_8000_0001_EDX] =
1128 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1129 CPUID_EXT2_SYSCALL,
1130 .features[FEAT_8000_0001_ECX] =
1131 CPUID_EXT3_LAHF_LM,
1132 .features[FEAT_XSAVE] =
1133 CPUID_XSAVE_XSAVEOPT,
1134 .features[FEAT_6_EAX] =
1135 CPUID_6_EAX_ARAT,
1136 .xlevel = 0x80000008,
1137 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1138 },
1139 {
1140 .name = "Haswell-noTSX",
1141 .level = 0xd,
1142 .vendor = CPUID_VENDOR_INTEL,
1143 .family = 6,
1144 .model = 60,
1145 .stepping = 1,
1146 .features[FEAT_1_EDX] =
1147 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1148 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1149 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1150 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1151 CPUID_DE | CPUID_FP87,
1152 .features[FEAT_1_ECX] =
1153 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1154 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1155 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1156 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1157 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1158 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1159 .features[FEAT_8000_0001_EDX] =
1160 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1161 CPUID_EXT2_SYSCALL,
1162 .features[FEAT_8000_0001_ECX] =
1163 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1164 .features[FEAT_7_0_EBX] =
1165 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1166 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1167 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1168 .features[FEAT_XSAVE] =
1169 CPUID_XSAVE_XSAVEOPT,
1170 .features[FEAT_6_EAX] =
1171 CPUID_6_EAX_ARAT,
1172 .xlevel = 0x80000008,
1173 .model_id = "Intel Core Processor (Haswell, no TSX)",
1174 }, {
1175 .name = "Haswell",
1176 .level = 0xd,
1177 .vendor = CPUID_VENDOR_INTEL,
1178 .family = 6,
1179 .model = 60,
1180 .stepping = 1,
1181 .features[FEAT_1_EDX] =
1182 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1183 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1184 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1185 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1186 CPUID_DE | CPUID_FP87,
1187 .features[FEAT_1_ECX] =
1188 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1189 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1190 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1191 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1192 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1193 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1194 .features[FEAT_8000_0001_EDX] =
1195 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1196 CPUID_EXT2_SYSCALL,
1197 .features[FEAT_8000_0001_ECX] =
1198 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1199 .features[FEAT_7_0_EBX] =
1200 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1201 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1202 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1203 CPUID_7_0_EBX_RTM,
1204 .features[FEAT_XSAVE] =
1205 CPUID_XSAVE_XSAVEOPT,
1206 .features[FEAT_6_EAX] =
1207 CPUID_6_EAX_ARAT,
1208 .xlevel = 0x80000008,
1209 .model_id = "Intel Core Processor (Haswell)",
1210 },
1211 {
1212 .name = "Broadwell-noTSX",
1213 .level = 0xd,
1214 .vendor = CPUID_VENDOR_INTEL,
1215 .family = 6,
1216 .model = 61,
1217 .stepping = 2,
1218 .features[FEAT_1_EDX] =
1219 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1220 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1221 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1222 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1223 CPUID_DE | CPUID_FP87,
1224 .features[FEAT_1_ECX] =
1225 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1226 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1227 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1228 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1229 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1230 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1231 .features[FEAT_8000_0001_EDX] =
1232 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1233 CPUID_EXT2_SYSCALL,
1234 .features[FEAT_8000_0001_ECX] =
1235 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1236 .features[FEAT_7_0_EBX] =
1237 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1238 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1239 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1240 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1241 CPUID_7_0_EBX_SMAP,
1242 .features[FEAT_XSAVE] =
1243 CPUID_XSAVE_XSAVEOPT,
1244 .features[FEAT_6_EAX] =
1245 CPUID_6_EAX_ARAT,
1246 .xlevel = 0x80000008,
1247 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1248 },
1249 {
1250 .name = "Broadwell",
1251 .level = 0xd,
1252 .vendor = CPUID_VENDOR_INTEL,
1253 .family = 6,
1254 .model = 61,
1255 .stepping = 2,
1256 .features[FEAT_1_EDX] =
1257 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1258 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1259 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1260 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1261 CPUID_DE | CPUID_FP87,
1262 .features[FEAT_1_ECX] =
1263 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1264 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1265 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1266 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1267 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1268 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1269 .features[FEAT_8000_0001_EDX] =
1270 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1271 CPUID_EXT2_SYSCALL,
1272 .features[FEAT_8000_0001_ECX] =
1273 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1274 .features[FEAT_7_0_EBX] =
1275 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1276 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1277 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1278 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1279 CPUID_7_0_EBX_SMAP,
1280 .features[FEAT_XSAVE] =
1281 CPUID_XSAVE_XSAVEOPT,
1282 .features[FEAT_6_EAX] =
1283 CPUID_6_EAX_ARAT,
1284 .xlevel = 0x80000008,
1285 .model_id = "Intel Core Processor (Broadwell)",
1286 },
1287 {
1288 .name = "Skylake-Client",
1289 .level = 0xd,
1290 .vendor = CPUID_VENDOR_INTEL,
1291 .family = 6,
1292 .model = 94,
1293 .stepping = 3,
1294 .features[FEAT_1_EDX] =
1295 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1296 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1297 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1298 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1299 CPUID_DE | CPUID_FP87,
1300 .features[FEAT_1_ECX] =
1301 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1302 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1303 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1304 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1305 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1306 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1307 .features[FEAT_8000_0001_EDX] =
1308 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1309 CPUID_EXT2_SYSCALL,
1310 .features[FEAT_8000_0001_ECX] =
1311 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1312 .features[FEAT_7_0_EBX] =
1313 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1314 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1315 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1316 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1317 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1318 /* Missing: XSAVES (not supported by some Linux versions,
1319 * including v4.1 to v4.6).
1320 * KVM doesn't yet expose any XSAVES state save component,
1321 * and the only one defined in Skylake (processor tracing)
1322 * probably will block migration anyway.
1323 */
1324 .features[FEAT_XSAVE] =
1325 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1326 CPUID_XSAVE_XGETBV1,
1327 .features[FEAT_6_EAX] =
1328 CPUID_6_EAX_ARAT,
1329 .xlevel = 0x80000008,
1330 .model_id = "Intel Core Processor (Skylake)",
1331 },
1332 {
1333 .name = "Opteron_G1",
1334 .level = 5,
1335 .vendor = CPUID_VENDOR_AMD,
1336 .family = 15,
1337 .model = 6,
1338 .stepping = 1,
1339 .features[FEAT_1_EDX] =
1340 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1341 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1342 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1343 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1344 CPUID_DE | CPUID_FP87,
1345 .features[FEAT_1_ECX] =
1346 CPUID_EXT_SSE3,
1347 .features[FEAT_8000_0001_EDX] =
1348 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1349 .xlevel = 0x80000008,
1350 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1351 },
1352 {
1353 .name = "Opteron_G2",
1354 .level = 5,
1355 .vendor = CPUID_VENDOR_AMD,
1356 .family = 15,
1357 .model = 6,
1358 .stepping = 1,
1359 .features[FEAT_1_EDX] =
1360 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1361 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1362 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1363 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1364 CPUID_DE | CPUID_FP87,
1365 .features[FEAT_1_ECX] =
1366 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1367 /* Missing: CPUID_EXT2_RDTSCP */
1368 .features[FEAT_8000_0001_EDX] =
1369 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1370 .features[FEAT_8000_0001_ECX] =
1371 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1372 .xlevel = 0x80000008,
1373 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1374 },
1375 {
1376 .name = "Opteron_G3",
1377 .level = 5,
1378 .vendor = CPUID_VENDOR_AMD,
1379 .family = 16,
1380 .model = 2,
1381 .stepping = 3,
1382 .features[FEAT_1_EDX] =
1383 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1384 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1385 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1386 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1387 CPUID_DE | CPUID_FP87,
1388 .features[FEAT_1_ECX] =
1389 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1390 CPUID_EXT_SSE3,
1391 /* Missing: CPUID_EXT2_RDTSCP */
1392 .features[FEAT_8000_0001_EDX] =
1393 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1394 .features[FEAT_8000_0001_ECX] =
1395 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1396 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1397 .xlevel = 0x80000008,
1398 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1399 },
1400 {
1401 .name = "Opteron_G4",
1402 .level = 0xd,
1403 .vendor = CPUID_VENDOR_AMD,
1404 .family = 21,
1405 .model = 1,
1406 .stepping = 2,
1407 .features[FEAT_1_EDX] =
1408 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1409 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1410 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1411 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1412 CPUID_DE | CPUID_FP87,
1413 .features[FEAT_1_ECX] =
1414 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1415 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1416 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1417 CPUID_EXT_SSE3,
1418 /* Missing: CPUID_EXT2_RDTSCP */
1419 .features[FEAT_8000_0001_EDX] =
1420 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1421 CPUID_EXT2_SYSCALL,
1422 .features[FEAT_8000_0001_ECX] =
1423 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1424 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1425 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1426 CPUID_EXT3_LAHF_LM,
1427 /* no xsaveopt! */
1428 .xlevel = 0x8000001A,
1429 .model_id = "AMD Opteron 62xx class CPU",
1430 },
1431 {
1432 .name = "Opteron_G5",
1433 .level = 0xd,
1434 .vendor = CPUID_VENDOR_AMD,
1435 .family = 21,
1436 .model = 2,
1437 .stepping = 0,
1438 .features[FEAT_1_EDX] =
1439 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1440 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1441 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1442 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1443 CPUID_DE | CPUID_FP87,
1444 .features[FEAT_1_ECX] =
1445 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1446 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1447 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1448 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1449 /* Missing: CPUID_EXT2_RDTSCP */
1450 .features[FEAT_8000_0001_EDX] =
1451 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1452 CPUID_EXT2_SYSCALL,
1453 .features[FEAT_8000_0001_ECX] =
1454 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1455 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1456 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1457 CPUID_EXT3_LAHF_LM,
1458 /* no xsaveopt! */
1459 .xlevel = 0x8000001A,
1460 .model_id = "AMD Opteron 63xx class CPU",
1461 },
1462 };
1463
/* A (property name, property value) pair; used for NULL-terminated tables
 * of accelerator-specific CPU property defaults.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1467
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * Entries may be overridden at runtime via x86_cpu_change_kvm_default().
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },  /* end-of-table marker */
};
1484
/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },  /* end-of-table marker */
};
1491
1492
1493 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1494 {
1495 PropValue *pv;
1496 for (pv = kvm_default_props; pv->prop; pv++) {
1497 if (!strcmp(pv->prop, prop)) {
1498 pv->value = value;
1499 break;
1500 }
1501 }
1502
1503 /* It is valid to call this function only for properties that
1504 * are already present in the kvm_default_props table.
1505 */
1506 assert(pv->prop);
1507 }
1508
1509 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1510 bool migratable_only);
1511
1512 static bool lmce_supported(void)
1513 {
1514 uint64_t mce_cap = 0;
1515
1516 #ifdef CONFIG_KVM
1517 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1518 return false;
1519 }
1520 #endif
1521
1522 return !!(mce_cap & MCG_LMCE_P);
1523 }
1524
/* Fill @str (at least 48 bytes) with the host CPU's brand string,
 * read from CPUID leaves 0x80000002..0x80000004 (16 bytes per leaf,
 * registers copied in EAX/EBX/ECX/EDX order).  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* The four registers are laid out contiguously in the string */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
1539
/* Properties specific to the "max" CPU model */
static Property max_x86_cpu_properties[] = {
    /* When true, only migration-safe features are enabled */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* Maps to cache_info_passthrough (host cache CPUID info) */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1545
1546 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
1547 {
1548 DeviceClass *dc = DEVICE_CLASS(oc);
1549 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1550
1551 xcc->ordering = 9;
1552
1553 xcc->model_description =
1554 "Enables all features supported by the accelerator in the current host";
1555
1556 dc->props = max_x86_cpu_properties;
1557 }
1558
1559 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
1560
/* Instance initializer for the "max" CPU model.
 *
 * Under KVM, the model is seeded from the host CPU's identification
 * (vendor/family/model/stepping/brand string) and KVM's supported CPUID
 * levels.  Under TCG, a fixed generic identification is used instead.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (kvm_enabled()) {
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* Vendor string comes from CPUID leaf 0 (EBX, EDX, ECX order) */
        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        /* Decode family/model/stepping from CPUID leaf 1 EAX, including
         * the extended family/model fields.
         */
        host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
        host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
        host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
        host_cpudef.stepping = eax & 0x0F;

        cpu_x86_fill_model_id(host_cpudef.model_id);

        x86_cpu_load_def(cpu, &host_cpudef, &error_abort);

        /* Record the leaf limits KVM reports as the CPU's minimum levels */
        env->cpuid_min_level =
            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_min_xlevel =
            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_min_xlevel2 =
            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: fixed generic identification, independent of the host */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1611
/* QOM type registration for the "max" CPU model */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
1618
1619 #ifdef CONFIG_KVM
1620
1621 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1622 {
1623 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1624
1625 xcc->kvm_required = true;
1626 xcc->ordering = 8;
1627
1628 xcc->model_description =
1629 "KVM processor with all supported host features "
1630 "(only available in KVM mode)";
1631 }
1632
/* QOM type registration for the "host" CPU model; inherits from "max" */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
1638
1639 #endif
1640
1641 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1642 {
1643 FeatureWordInfo *f = &feature_word_info[w];
1644 int i;
1645
1646 for (i = 0; i < 32; ++i) {
1647 if ((1UL << i) & mask) {
1648 const char *reg = get_register_name_32(f->cpuid_reg);
1649 assert(reg);
1650 fprintf(stderr, "warning: %s doesn't support requested feature: "
1651 "CPUID.%02XH:%s%s%s [bit %d]\n",
1652 kvm_enabled() ? "host" : "TCG",
1653 f->cpuid_eax, reg,
1654 f->feat_names[i] ? "." : "",
1655 f->feat_names[i] ? f->feat_names[i] : "", i);
1656 }
1657 }
1658 }
1659
1660 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1661 const char *name, void *opaque,
1662 Error **errp)
1663 {
1664 X86CPU *cpu = X86_CPU(obj);
1665 CPUX86State *env = &cpu->env;
1666 int64_t value;
1667
1668 value = (env->cpuid_version >> 8) & 0xf;
1669 if (value == 0xf) {
1670 value += (env->cpuid_version >> 20) & 0xff;
1671 }
1672 visit_type_int(v, name, &value, errp);
1673 }
1674
1675 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1676 const char *name, void *opaque,
1677 Error **errp)
1678 {
1679 X86CPU *cpu = X86_CPU(obj);
1680 CPUX86State *env = &cpu->env;
1681 const int64_t min = 0;
1682 const int64_t max = 0xff + 0xf;
1683 Error *local_err = NULL;
1684 int64_t value;
1685
1686 visit_type_int(v, name, &value, &local_err);
1687 if (local_err) {
1688 error_propagate(errp, local_err);
1689 return;
1690 }
1691 if (value < min || value > max) {
1692 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1693 name ? name : "null", value, min, max);
1694 return;
1695 }
1696
1697 env->cpuid_version &= ~0xff00f00;
1698 if (value > 0x0f) {
1699 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1700 } else {
1701 env->cpuid_version |= value << 8;
1702 }
1703 }
1704
1705 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1706 const char *name, void *opaque,
1707 Error **errp)
1708 {
1709 X86CPU *cpu = X86_CPU(obj);
1710 CPUX86State *env = &cpu->env;
1711 int64_t value;
1712
1713 value = (env->cpuid_version >> 4) & 0xf;
1714 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1715 visit_type_int(v, name, &value, errp);
1716 }
1717
1718 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1719 const char *name, void *opaque,
1720 Error **errp)
1721 {
1722 X86CPU *cpu = X86_CPU(obj);
1723 CPUX86State *env = &cpu->env;
1724 const int64_t min = 0;
1725 const int64_t max = 0xff;
1726 Error *local_err = NULL;
1727 int64_t value;
1728
1729 visit_type_int(v, name, &value, &local_err);
1730 if (local_err) {
1731 error_propagate(errp, local_err);
1732 return;
1733 }
1734 if (value < min || value > max) {
1735 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1736 name ? name : "null", value, min, max);
1737 return;
1738 }
1739
1740 env->cpuid_version &= ~0xf00f0;
1741 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1742 }
1743
1744 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1745 const char *name, void *opaque,
1746 Error **errp)
1747 {
1748 X86CPU *cpu = X86_CPU(obj);
1749 CPUX86State *env = &cpu->env;
1750 int64_t value;
1751
1752 value = env->cpuid_version & 0xf;
1753 visit_type_int(v, name, &value, errp);
1754 }
1755
1756 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1757 const char *name, void *opaque,
1758 Error **errp)
1759 {
1760 X86CPU *cpu = X86_CPU(obj);
1761 CPUX86State *env = &cpu->env;
1762 const int64_t min = 0;
1763 const int64_t max = 0xf;
1764 Error *local_err = NULL;
1765 int64_t value;
1766
1767 visit_type_int(v, name, &value, &local_err);
1768 if (local_err) {
1769 error_propagate(errp, local_err);
1770 return;
1771 }
1772 if (value < min || value > max) {
1773 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1774 name ? name : "null", value, min, max);
1775 return;
1776 }
1777
1778 env->cpuid_version &= ~0xf;
1779 env->cpuid_version |= value & 0xf;
1780 }
1781
1782 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1783 {
1784 X86CPU *cpu = X86_CPU(obj);
1785 CPUX86State *env = &cpu->env;
1786 char *value;
1787
1788 value = g_malloc(CPUID_VENDOR_SZ + 1);
1789 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1790 env->cpuid_vendor3);
1791 return value;
1792 }
1793
1794 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1795 Error **errp)
1796 {
1797 X86CPU *cpu = X86_CPU(obj);
1798 CPUX86State *env = &cpu->env;
1799 int i;
1800
1801 if (strlen(value) != CPUID_VENDOR_SZ) {
1802 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1803 return;
1804 }
1805
1806 env->cpuid_vendor1 = 0;
1807 env->cpuid_vendor2 = 0;
1808 env->cpuid_vendor3 = 0;
1809 for (i = 0; i < 4; i++) {
1810 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1811 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1812 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1813 }
1814 }
1815
1816 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1817 {
1818 X86CPU *cpu = X86_CPU(obj);
1819 CPUX86State *env = &cpu->env;
1820 char *value;
1821 int i;
1822
1823 value = g_malloc(48 + 1);
1824 for (i = 0; i < 48; i++) {
1825 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1826 }
1827 value[48] = '\0';
1828 return value;
1829 }
1830
1831 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1832 Error **errp)
1833 {
1834 X86CPU *cpu = X86_CPU(obj);
1835 CPUX86State *env = &cpu->env;
1836 int c, len, i;
1837
1838 if (model_id == NULL) {
1839 model_id = "";
1840 }
1841 len = strlen(model_id);
1842 memset(env->cpuid_model, 0, 48);
1843 for (i = 0; i < 48; i++) {
1844 if (i >= len) {
1845 c = '\0';
1846 } else {
1847 c = (uint8_t)model_id[i];
1848 }
1849 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1850 }
1851 }
1852
1853 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1854 void *opaque, Error **errp)
1855 {
1856 X86CPU *cpu = X86_CPU(obj);
1857 int64_t value;
1858
1859 value = cpu->env.tsc_khz * 1000;
1860 visit_type_int(v, name, &value, errp);
1861 }
1862
1863 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1864 void *opaque, Error **errp)
1865 {
1866 X86CPU *cpu = X86_CPU(obj);
1867 const int64_t min = 0;
1868 const int64_t max = INT64_MAX;
1869 Error *local_err = NULL;
1870 int64_t value;
1871
1872 visit_type_int(v, name, &value, &local_err);
1873 if (local_err) {
1874 error_propagate(errp, local_err);
1875 return;
1876 }
1877 if (value < min || value > max) {
1878 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1879 name ? name : "null", value, min, max);
1880 return;
1881 }
1882
1883 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1884 }
1885
/* Generic getter for "feature-words" and "filtered-features" properties.
 *
 * @opaque points to a FEATURE_WORDS-sized uint32_t array holding the
 * feature-word values to report.  Builds a stack-allocated QAPI list of
 * X86CPUFeatureWordInfo (CPUID leaf coordinates + value per word) and
 * hands it to the visitor; nothing is heap-allocated.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        /* Describe where this feature word lives in CPUID space */
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
1914
1915 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1916 void *opaque, Error **errp)
1917 {
1918 X86CPU *cpu = X86_CPU(obj);
1919 int64_t value = cpu->hyperv_spinlock_attempts;
1920
1921 visit_type_int(v, name, &value, errp);
1922 }
1923
1924 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1925 void *opaque, Error **errp)
1926 {
1927 const int64_t min = 0xFFF;
1928 const int64_t max = UINT_MAX;
1929 X86CPU *cpu = X86_CPU(obj);
1930 Error *err = NULL;
1931 int64_t value;
1932
1933 visit_type_int(v, name, &value, &err);
1934 if (err) {
1935 error_propagate(errp, err);
1936 return;
1937 }
1938
1939 if (value < min || value > max) {
1940 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1941 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1942 object_get_typename(obj), name ? name : "null",
1943 value, min, max);
1944 return;
1945 }
1946 cpu->hyperv_spinlock_attempts = value;
1947 }
1948
/* qdev property type backing "hv-spinlocks" */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1954
/* Normalize a feature option name in place: every '_' becomes '-', so
 * the name conforms to the QOM property naming rule, which uses '-'
 * instead of '_'.
 */
static inline void feat2prop(char *s)
{
    for (; *s; s++) {
        if (*s == '_') {
            *s = '-';
        }
    }
}
1964
1965 /* Return the feature property name for a feature flag bit */
1966 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
1967 {
1968 /* XSAVE components are automatically enabled by other features,
1969 * so return the original feature name instead
1970 */
1971 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
1972 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
1973
1974 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
1975 x86_ext_save_areas[comp].bits) {
1976 w = x86_ext_save_areas[comp].feature;
1977 bitnr = ctz32(x86_ext_save_areas[comp].bits);
1978 }
1979 }
1980
1981 assert(bitnr < 32);
1982 assert(w < FEATURE_WORDS);
1983 return feature_word_info[w].feat_names[bitnr];
1984 }
1985
/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
1991 static GList *plus_features, *minus_features;
1992
1993 static gint compare_string(gconstpointer a, gconstpointer b)
1994 {
1995 return g_strcmp0(a, b);
1996 }
1997
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Each option is registered as a global property on @typename, so it is
 * applied to every CPU of that type created later.  Errors (other than
 * ambiguity warnings) are reported through @errp.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    /* Guard so the global properties are only registered once */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    /* NOTE(review): strtok() keeps hidden static state and mutates
     * @features in place; this assumes single-threaded use during
     * startup — confirm against the callers.
     */
    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: bare "+feat"/"-feat" names are only
         * collected here; they override any "feat=on|off" seen in the
         * same string (see plus_features/minus_features).
         */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "key=value"; a bare "feat" is shorthand for "feat=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        /* Convert '_' to '-' so the key matches QOM property naming */
        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature also appeared as "+feat"/"-feat" */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"+%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"-%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" accepts metric suffixes and maps to
         * the "tsc-frequency" property.
         */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        /* Register as a global property for all CPUs of @typename */
        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        error_report("warning: Compatibility of ambiguous CPU model "
                     "strings won't be kept on future QEMU versions");
    }
}
2088
2089 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2090 static int x86_cpu_filter_features(X86CPU *cpu);
2091
2092 /* Check for missing features that may prevent the CPU class from
2093 * running using the current machine and accelerator.
2094 */
2095 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2096 strList **missing_feats)
2097 {
2098 X86CPU *xc;
2099 FeatureWord w;
2100 Error *err = NULL;
2101 strList **next = missing_feats;
2102
2103 if (xcc->kvm_required && !kvm_enabled()) {
2104 strList *new = g_new0(strList, 1);
2105 new->value = g_strdup("kvm");;
2106 *missing_feats = new;
2107 return;
2108 }
2109
2110 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2111
2112 x86_cpu_expand_features(xc, &err);
2113 if (err) {
2114 /* Errors at x86_cpu_expand_features should never happen,
2115 * but in case it does, just report the model as not
2116 * runnable at all using the "type" property.
2117 */
2118 strList *new = g_new0(strList, 1);
2119 new->value = g_strdup("type");
2120 *next = new;
2121 next = &new->next;
2122 }
2123
2124 x86_cpu_filter_features(xc);
2125
2126 for (w = 0; w < FEATURE_WORDS; w++) {
2127 uint32_t filtered = xc->filtered_features[w];
2128 int i;
2129 for (i = 0; i < 32; i++) {
2130 if (filtered & (1UL << i)) {
2131 strList *new = g_new0(strList, 1);
2132 new->value = g_strdup(x86_cpu_feature_name(w, i));
2133 *next = new;
2134 next = &new->next;
2135 }
2136 }
2137 }
2138
2139 object_unref(OBJECT(xc));
2140 }
2141
2142 /* Print all cpuid feature names in featureset
2143 */
2144 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2145 {
2146 int bit;
2147 bool first = true;
2148
2149 for (bit = 0; bit < 32; bit++) {
2150 if (featureset[bit]) {
2151 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2152 first = false;
2153 }
2154 }
2155 }
2156
2157 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2158 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2159 {
2160 ObjectClass *class_a = (ObjectClass *)a;
2161 ObjectClass *class_b = (ObjectClass *)b;
2162 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2163 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2164 const char *name_a, *name_b;
2165
2166 if (cc_a->ordering != cc_b->ordering) {
2167 return cc_a->ordering - cc_b->ordering;
2168 } else {
2169 name_a = object_class_get_name(class_a);
2170 name_b = object_class_get_name(class_b);
2171 return strcmp(name_a, name_b);
2172 }
2173 }
2174
2175 static GSList *get_sorted_cpu_model_list(void)
2176 {
2177 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2178 list = g_slist_sort(list, x86_cpu_list_compare);
2179 return list;
2180 }
2181
2182 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2183 {
2184 ObjectClass *oc = data;
2185 X86CPUClass *cc = X86_CPU_CLASS(oc);
2186 CPUListState *s = user_data;
2187 char *name = x86_cpu_class_get_model_name(cc);
2188 const char *desc = cc->model_description;
2189 if (!desc && cc->cpu_def) {
2190 desc = cc->cpu_def->model_id;
2191 }
2192
2193 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2194 name, desc);
2195 g_free(name);
2196 }
2197
2198 /* list available CPU models and flags */
2199 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2200 {
2201 int i;
2202 CPUListState s = {
2203 .file = f,
2204 .cpu_fprintf = cpu_fprintf,
2205 };
2206 GSList *list;
2207
2208 (*cpu_fprintf)(f, "Available CPUs:\n");
2209 list = get_sorted_cpu_model_list();
2210 g_slist_foreach(list, x86_cpu_list_entry, &s);
2211 g_slist_free(list);
2212
2213 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2214 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2215 FeatureWordInfo *fw = &feature_word_info[i];
2216
2217 (*cpu_fprintf)(f, " ");
2218 listflags(f, cpu_fprintf, fw->feat_names);
2219 (*cpu_fprintf)(f, "\n");
2220 }
2221 }
2222
2223 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2224 {
2225 ObjectClass *oc = data;
2226 X86CPUClass *cc = X86_CPU_CLASS(oc);
2227 CpuDefinitionInfoList **cpu_list = user_data;
2228 CpuDefinitionInfoList *entry;
2229 CpuDefinitionInfo *info;
2230
2231 info = g_malloc0(sizeof(*info));
2232 info->name = x86_cpu_class_get_model_name(cc);
2233 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2234 info->has_unavailable_features = true;
2235 info->q_typename = g_strdup(object_class_get_name(oc));
2236 info->migration_safe = cc->migration_safe;
2237 info->has_migration_safe = true;
2238 info->q_static = cc->static_model;
2239
2240 entry = g_malloc0(sizeof(*entry));
2241 entry->value = info;
2242 entry->next = *cpu_list;
2243 *cpu_list = entry;
2244 }
2245
2246 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2247 {
2248 CpuDefinitionInfoList *cpu_list = NULL;
2249 GSList *list = get_sorted_cpu_model_list();
2250 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2251 g_slist_free(list);
2252 return cpu_list;
2253 }
2254
2255 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2256 bool migratable_only)
2257 {
2258 FeatureWordInfo *wi = &feature_word_info[w];
2259 uint32_t r;
2260
2261 if (kvm_enabled()) {
2262 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2263 wi->cpuid_ecx,
2264 wi->cpuid_reg);
2265 } else if (tcg_enabled()) {
2266 r = wi->tcg_features;
2267 } else {
2268 return ~0;
2269 }
2270 if (migratable_only) {
2271 r &= x86_cpu_get_migratable_flags(w);
2272 }
2273 return r;
2274 }
2275
/* Warn (via report_unavailable_features()) about every feature bit
 * the accelerator filtered out of @cpu.
 */
static void x86_cpu_report_filtered_features(X86CPU *cpu)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        report_unavailable_features(w, cpu->filtered_features[w]);
    }
}
2284
2285 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2286 {
2287 PropValue *pv;
2288 for (pv = props; pv->prop; pv++) {
2289 if (!pv->value) {
2290 continue;
2291 }
2292 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2293 &error_abort);
2294 }
2295 }
2296
/* Load data from X86CPUDefinition into a X86CPU object
 *
 * Errors from the individual property sets are accumulated in @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Feature words are copied wholesale, not set via properties */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise that we are running under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2354
2355 /* Return a QDict containing keys for all properties that can be included
2356 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2357 * must be included in the dictionary.
2358 */
2359 static QDict *x86_cpu_static_props(void)
2360 {
2361 FeatureWord w;
2362 int i;
2363 static const char *props[] = {
2364 "min-level",
2365 "min-xlevel",
2366 "family",
2367 "model",
2368 "stepping",
2369 "model-id",
2370 "vendor",
2371 "lmce",
2372 NULL,
2373 };
2374 static QDict *d;
2375
2376 if (d) {
2377 return d;
2378 }
2379
2380 d = qdict_new();
2381 for (i = 0; props[i]; i++) {
2382 qdict_put_obj(d, props[i], qnull());
2383 }
2384
2385 for (w = 0; w < FEATURE_WORDS; w++) {
2386 FeatureWordInfo *fi = &feature_word_info[w];
2387 int bit;
2388 for (bit = 0; bit < 32; bit++) {
2389 if (!fi->feat_names[bit]) {
2390 continue;
2391 }
2392 qdict_put_obj(d, fi->feat_names[bit], qnull());
2393 }
2394 }
2395
2396 return d;
2397 }
2398
2399 /* Add an entry to @props dict, with the value for property. */
2400 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
2401 {
2402 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
2403 &error_abort);
2404
2405 qdict_put_obj(props, prop, value);
2406 }
2407
2408 /* Convert CPU model data from X86CPU object to a property dictionary
2409 * that can recreate exactly the same CPU model.
2410 */
2411 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
2412 {
2413 QDict *sprops = x86_cpu_static_props();
2414 const QDictEntry *e;
2415
2416 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
2417 const char *prop = qdict_entry_key(e);
2418 x86_cpu_expand_prop(cpu, props, prop);
2419 }
2420 }
2421
2422 /* Convert CPU model data from X86CPU object to a property dictionary
2423 * that can recreate exactly the same CPU model, including every
2424 * writeable QOM property.
2425 */
2426 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
2427 {
2428 ObjectPropertyIterator iter;
2429 ObjectProperty *prop;
2430
2431 object_property_iter_init(&iter, OBJECT(cpu));
2432 while ((prop = object_property_iter_next(&iter))) {
2433 /* skip read-only or write-only properties */
2434 if (!prop->get || !prop->set) {
2435 continue;
2436 }
2437
2438 /* "hotplugged" is the only property that is configurable
2439 * on the command-line but will be set differently on CPUs
2440 * created using "-cpu ... -smp ..." and by CPUs created
2441 * on the fly by x86_cpu_from_model() for querying. Skip it.
2442 */
2443 if (!strcmp(prop->name, "hotplugged")) {
2444 continue;
2445 }
2446 x86_cpu_expand_prop(cpu, props, prop->name);
2447 }
2448 }
2449
2450 static void object_apply_props(Object *obj, QDict *props, Error **errp)
2451 {
2452 const QDictEntry *prop;
2453 Error *err = NULL;
2454
2455 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
2456 object_property_set_qobject(obj, qdict_entry_value(prop),
2457 qdict_entry_key(prop), &err);
2458 if (err) {
2459 break;
2460 }
2461 }
2462
2463 error_propagate(errp, err);
2464 }
2465
/* Create X86CPU object according to model+props specification
 *
 * Returns a new, unrealized X86CPU with its features fully expanded,
 * or NULL (with @errp set) on failure.  The caller owns the reference.
 */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    /* Expand features so the returned object reflects the full model */
    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    if (err) {
        error_propagate(errp, err);
        /* NOTE(review): on the "model not found" path @xc is still NULL
         * here, so this relies on object_unref() tolerating a NULL
         * argument — confirm against qom/object.c.
         */
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
2500
2501 CpuModelExpansionInfo *
2502 arch_query_cpu_model_expansion(CpuModelExpansionType type,
2503 CpuModelInfo *model,
2504 Error **errp)
2505 {
2506 X86CPU *xc = NULL;
2507 Error *err = NULL;
2508 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
2509 QDict *props = NULL;
2510 const char *base_name;
2511
2512 xc = x86_cpu_from_model(model->name,
2513 model->has_props ?
2514 qobject_to_qdict(model->props) :
2515 NULL, &err);
2516 if (err) {
2517 goto out;
2518 }
2519
2520 props = qdict_new();
2521
2522 switch (type) {
2523 case CPU_MODEL_EXPANSION_TYPE_STATIC:
2524 /* Static expansion will be based on "base" only */
2525 base_name = "base";
2526 x86_cpu_to_dict(xc, props);
2527 break;
2528 case CPU_MODEL_EXPANSION_TYPE_FULL:
2529 /* As we don't return every single property, full expansion needs
2530 * to keep the original model name+props, and add extra
2531 * properties on top of that.
2532 */
2533 base_name = model->name;
2534 x86_cpu_to_dict_full(xc, props);
2535 break;
2536 default:
2537 error_setg(&err, "Unsupportted expansion type");
2538 goto out;
2539 }
2540
2541 if (!props) {
2542 props = qdict_new();
2543 }
2544 x86_cpu_to_dict(xc, props);
2545
2546 ret->model = g_new0(CpuModelInfo, 1);
2547 ret->model->name = g_strdup(base_name);
2548 ret->model->props = QOBJECT(props);
2549 ret->model->has_props = true;
2550
2551 out:
2552 object_unref(OBJECT(xc));
2553 if (err) {
2554 error_propagate(errp, err);
2555 qapi_free_CpuModelExpansionInfo(ret);
2556 ret = NULL;
2557 }
2558 return ret;
2559 }
2560
/* Convenience wrapper: create and realize an X86CPU from a -cpu style
 * model string.
 */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
2565
/* Class init for the per-model subclasses generated from the builtin
 * X86CPUDefinition table; @data is the table entry for this model.
 */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
    /* All builtin table models are flagged migration-safe */
    xcc->migration_safe = true;
}
2574
2575 static void x86_register_cpudef_type(X86CPUDefinition *def)
2576 {
2577 char *typename = x86_cpu_type_name(def->name);
2578 TypeInfo ti = {
2579 .name = typename,
2580 .parent = TYPE_X86_CPU,
2581 .class_init = x86_cpu_cpudef_class_init,
2582 .class_data = def,
2583 };
2584
2585 /* AMD aliases are handled at runtime based on CPUID vendor, so
2586 * they shouldn't be set on the CPU model table.
2587 */
2588 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
2589
2590 type_register(&ti);
2591 g_free(typename);
2592 }
2593
2594 #if !defined(CONFIG_USER_ONLY)
2595
/* Clear the APIC feature bit in CPUID[1].EDX for @env. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2600
2601 #endif /* !CONFIG_USER_ONLY */
2602
/* Compute the guest-visible CPUID result for leaf @index, subleaf
 * @count, returning it through *eax..*edx.
 *
 * Out-of-range leaves are first clamped to the configured maximum
 * basic/extended (and Centaur 0xC000xxxx) levels, mirroring hardware.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version/APIC ID and the basic feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE tracks guest CR4, it is not a configured feature */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR << 8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* Mask out the host's core-count bits (31..26) */
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE tracks guest CR4.PKE, like OSXSAVE in leaf 1 */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset, only for enabled components */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        /* Vendor string and maximum extended leaf */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1; /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
3000
/* CPUClass::reset()
 *
 * Put @s back into the architectural power-on state: clear everything
 * up to end_reset_fields, then rebuild the documented reset values.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Fields past end_reset_fields deliberately survive reset */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS base 0xffff0000 plus EIP 0xfff0 (below)
     * makes the first fetch happen at the reset vector 0xfffffff0.
     */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
3122
3123 #ifndef CONFIG_USER_ONLY
3124 bool cpu_is_bsp(X86CPU *cpu)
3125 {
3126 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3127 }
3128
3129 /* TODO: remove me, when reset over QOM tree is implemented */
3130 static void x86_cpu_machine_reset_cb(void *opaque)
3131 {
3132 X86CPU *cpu = opaque;
3133 cpu_reset(CPU(cpu));
3134 }
3135 #endif
3136
3137 static void mce_init(X86CPU *cpu)
3138 {
3139 CPUX86State *cenv = &cpu->env;
3140 unsigned int bank;
3141
3142 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3143 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3144 (CPUID_MCE | CPUID_MCA)) {
3145 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3146 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3147 cenv->mcg_ctl = ~(uint64_t)0;
3148 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3149 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3150 }
3151 }
3152 }
3153
3154 #ifndef CONFIG_USER_ONLY
3155 APICCommonClass *apic_get_class(void)
3156 {
3157 const char *apic_type = "apic";
3158
3159 if (kvm_apic_in_kernel()) {
3160 apic_type = "kvm-apic";
3161 } else if (xen_enabled()) {
3162 apic_type = "xen-apic";
3163 }
3164
3165 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3166 }
3167
/* Create (but do not realize) the local APIC device for @cpu and attach
 * it as the "lapic" QOM child.  The APIC class is chosen by
 * apic_get_class() based on the accelerator in use.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    /* The child property takes a reference; drop ours so the CPU owns
     * the APIC's lifetime.
     */
    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
3185
/* Realize the previously created local APIC (no-op if none was created)
 * and map its MMIO region into system memory exactly once, for the first
 * CPU realized; all CPUs share the same mapping.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    /* function-local static: ensures the MMIO region is mapped only once */
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
3208
/* Machine-init-done notifier: if the machine exposes /machine/smram,
 * create a per-CPU alias of it (initially disabled) and overlap it onto
 * the CPU's address-space root with higher priority than normal RAM.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        /* Alias covers the low 4GiB; enabled/disabled as SMM is entered */
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
3223 #else
/* User-mode emulation has no APIC device: nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
3227 #endif
3228
3229 /* Note: Only safe for use on x86(-64) hosts */
3230 static uint32_t x86_host_phys_bits(void)
3231 {
3232 uint32_t eax;
3233 uint32_t host_phys_bits;
3234
3235 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3236 if (eax >= 0x80000008) {
3237 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3238 /* Note: According to AMD doc 25481 rev 2.34 they have a field
3239 * at 23:16 that can specify a maximum physical address bits for
3240 * the guest that can override this value; but I've not seen
3241 * anything with that set.
3242 */
3243 host_phys_bits = eax & 0xff;
3244 } else {
3245 /* It's an odd 64 bit machine that doesn't have the leaf for
3246 * physical address bits; fall back to 36 that's most older
3247 * Intel.
3248 */
3249 host_phys_bits = 36;
3250 }
3251
3252 return host_phys_bits;
3253 }
3254
3255 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3256 {
3257 if (*min < value) {
3258 *min = value;
3259 }
3260 }
3261
3262 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3263 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3264 {
3265 CPUX86State *env = &cpu->env;
3266 FeatureWordInfo *fi = &feature_word_info[w];
3267 uint32_t eax = fi->cpuid_eax;
3268 uint32_t region = eax & 0xF0000000;
3269
3270 if (!env->features[w]) {
3271 return;
3272 }
3273
3274 switch (region) {
3275 case 0x00000000:
3276 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3277 break;
3278 case 0x80000000:
3279 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3280 break;
3281 case 0xC0000000:
3282 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3283 break;
3284 }
3285 }
3286
3287 /* Calculate XSAVE components based on the configured CPU feature flags */
3288 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3289 {
3290 CPUX86State *env = &cpu->env;
3291 int i;
3292 uint64_t mask;
3293
3294 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3295 return;
3296 }
3297
3298 mask = 0;
3299 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3300 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3301 if (env->features[esa->feature] & esa->bits) {
3302 mask |= (1ULL << i);
3303 }
3304 }
3305
3306 env->features[FEAT_XSAVE_COMP_LO] = mask;
3307 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3308 }
3309
3310 /***** Steps involved on loading and filtering CPUID data
3311 *
3312 * When initializing and realizing a CPU object, the steps
3313 * involved in setting up CPUID data are:
3314 *
3315 * 1) Loading CPU model definition (X86CPUDefinition). This is
3316 * implemented by x86_cpu_load_def() and should be completely
3317 * transparent, as it is done automatically by instance_init.
3318 * No code should need to look at X86CPUDefinition structs
3319 * outside instance_init.
3320 *
3321 * 2) CPU expansion. This is done by realize before CPUID
3322 * filtering, and will make sure host/accelerator data is
3323 * loaded for CPU models that depend on host capabilities
3324 * (e.g. "host"). Done by x86_cpu_expand_features().
3325 *
3326 * 3) CPUID filtering. This initializes extra data related to
3327 * CPUID, and checks if the host supports all capabilities
3328 * required by the CPU. Runnability of a CPU model is
3329 * determined at this step. Done by x86_cpu_filter_features().
3330 *
3331 * Some operations don't require all steps to be performed.
3332 * More precisely:
3333 *
3334 * - CPU instance creation (instance_init) will run only CPU
3335 * model loading. CPU expansion can't run at instance_init-time
3336 * because host/accelerator data may be not available yet.
3337 * - CPU realization will perform both CPU model expansion and CPUID
3338 * filtering, and return an error in case one of them fails.
3339 * - query-cpu-definitions needs to run all 3 steps. It needs
3340 * to run CPUID filtering, as the 'unavailable-features'
3341 * field is set based on the filtering results.
3342 * - The query-cpu-model-expansion QMP command only needs to run
3343 * CPU model loading and CPU expansion. It should not filter
3344 * any CPUID data based on host capabilities.
3345 */
3346
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 *
 * On error, @errp is set via the accumulated local_err; the function
 * stops at the first failing "+feat"/"-feat" property assignment.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: cpu->max_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Apply "+feature" requests as QOM boolean property writes */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* Apply "-feature" requests; minus takes effect after plus */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are hidden unless running on KVM with
     * expose_kvm enabled.
     */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    /* UINT32_MAX is the property default, meaning "not set by the user" */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
3426
3427 /*
3428 * Finishes initialization of CPUID data, filters CPU feature
3429 * words based on host availability of each feature.
3430 *
3431 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
3432 */
3433 static int x86_cpu_filter_features(X86CPU *cpu)
3434 {
3435 CPUX86State *env = &cpu->env;
3436 FeatureWord w;
3437 int rv = 0;
3438
3439 for (w = 0; w < FEATURE_WORDS; w++) {
3440 uint32_t host_feat =
3441 x86_cpu_get_supported_feature_word(w, false);
3442 uint32_t requested_features = env->features[w];
3443 env->features[w] &= host_feat;
3444 cpu->filtered_features[w] = requested_features & ~env->features[w];
3445 if (cpu->filtered_features[w]) {
3446 rv = 1;
3447 }
3448 }
3449
3450 return rv;
3451 }
3452
/* Vendor checks: the CPUID vendor string is stored as three 32-bit words
 * (EBX, EDX, ECX of CPUID leaf 0); all three must match.
 */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for x86 CPUs.
 *
 * Order matters throughout: feature expansion must precede filtering;
 * qemu_init_vcpu() must precede the hyperthreading check (it populates
 * cs->nr_threads); cpu_reset() runs only after the APIC is realized.
 *
 * NOTE(review): some early failures use error_setg(errp) + return while
 * the rest use local_err + goto out — inconsistent but functionally
 * equivalent here since no cleanup happens at "out".
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Filtering is fatal only with enforce_cpuid; with check_cpuid it
     * just reports the dropped features.
     */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                                 " does not match phys-bits property (%u)",
                                 host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed if the CPU advertises one or if SMP is in use */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3651
3652 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3653 {
3654 X86CPU *cpu = X86_CPU(dev);
3655 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3656 Error *local_err = NULL;
3657
3658 #ifndef CONFIG_USER_ONLY
3659 cpu_remove_sync(CPU(dev));
3660 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3661 #endif
3662
3663 if (cpu->apic_state) {
3664 object_unparent(OBJECT(cpu->apic_state));
3665 cpu->apic_state = NULL;
3666 }
3667
3668 xcc->parent_unrealize(dev, &local_err);
3669 if (local_err != NULL) {
3670 error_propagate(errp, local_err);
3671 return;
3672 }
3673 }
3674
/* Opaque state for a bit-level QOM property: the target feature word and
 * the mask of bit(s) within it that the property reads/writes.
 */
typedef struct BitProperty {
    uint32_t *ptr;  /* feature word this property operates on */
    uint32_t mask;  /* bit(s) controlled by the property */
} BitProperty;
3679
3680 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3681 void *opaque, Error **errp)
3682 {
3683 BitProperty *fp = opaque;
3684 bool value = (*fp->ptr & fp->mask) == fp->mask;
3685 visit_type_bool(v, name, &value, errp);
3686 }
3687
3688 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3689 void *opaque, Error **errp)
3690 {
3691 DeviceState *dev = DEVICE(obj);
3692 BitProperty *fp = opaque;
3693 Error *local_err = NULL;
3694 bool value;
3695
3696 if (dev->realized) {
3697 qdev_prop_set_after_realize(dev, name, errp);
3698 return;
3699 }
3700
3701 visit_type_bool(v, name, &value, &local_err);
3702 if (local_err) {
3703 error_propagate(errp, local_err);
3704 return;
3705 }
3706
3707 if (value) {
3708 *fp->ptr |= fp->mask;
3709 } else {
3710 *fp->ptr &= ~fp->mask;
3711 }
3712 }
3713
3714 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3715 void *opaque)
3716 {
3717 BitProperty *prop = opaque;
3718 g_free(prop);
3719 }
3720
3721 /* Register a boolean property to get/set a single bit in a uint32_t field.
3722 *
3723 * The same property name can be registered multiple times to make it affect
3724 * multiple bits in the same FeatureWord. In that case, the getter will return
3725 * true only if all bits are set.
3726 */
3727 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3728 const char *prop_name,
3729 uint32_t *field,
3730 int bitnr)
3731 {
3732 BitProperty *fp;
3733 ObjectProperty *op;
3734 uint32_t mask = (1UL << bitnr);
3735
3736 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3737 if (op) {
3738 fp = op->opaque;
3739 assert(fp->ptr == field);
3740 fp->mask |= mask;
3741 } else {
3742 fp = g_new0(BitProperty, 1);
3743 fp->ptr = field;
3744 fp->mask = mask;
3745 object_property_add(OBJECT(cpu), prop_name, "bool",
3746 x86_cpu_get_bit_prop,
3747 x86_cpu_set_bit_prop,
3748 x86_cpu_release_bit_prop, fp, &error_abort);
3749 }
3750 }
3751
3752 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3753 FeatureWord w,
3754 int bitnr)
3755 {
3756 FeatureWordInfo *fi = &feature_word_info[w];
3757 const char *name = fi->feat_names[bitnr];
3758
3759 if (!name) {
3760 return;
3761 }
3762
3763 /* Property names should use "-" instead of "_".
3764 * Old names containing underscores are registered as aliases
3765 * using object_property_add_alias()
3766 */
3767 assert(!strchr(name, '_'));
3768 /* aliases don't use "|" delimiters anymore, they are registered
3769 * manually using object_property_add_alias() */
3770 assert(!strchr(name, '|'));
3771 x86_cpu_register_bit_prop(cpu, name, &cpu->env.features[w], bitnr);
3772 }
3773
/* Build a GuestPanicInformation object from the Hyper-V crash MSRs, or
 * return NULL when the guest does not expose the crash MSR feature.
 * The caller owns the result and frees it with
 * qapi_free_GuestPanicInformation().
 */
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_X64_GUEST_CRASH_MSR_AVAILABLE) {
        GuestPanicInformationHyperV *panic_info_hv =
            g_malloc0(sizeof(GuestPanicInformationHyperV));
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_KIND_HYPER_V;
        panic_info->u.hyper_v.data = panic_info_hv;

        /* the five args below require at least five crash-param MSRs */
        assert(HV_X64_MSR_CRASH_PARAMS >= 5);
        panic_info_hv->arg1 = env->msr_hv_crash_params[0];
        panic_info_hv->arg2 = env->msr_hv_crash_params[1];
        panic_info_hv->arg3 = env->msr_hv_crash_params[2];
        panic_info_hv->arg4 = env->msr_hv_crash_params[3];
        panic_info_hv->arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}
3798 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
3799 const char *name, void *opaque,
3800 Error **errp)
3801 {
3802 CPUState *cs = CPU(obj);
3803 GuestPanicInformation *panic_info;
3804
3805 if (!cs->crash_occurred) {
3806 error_setg(errp, "No crash occured");
3807 return;
3808 }
3809
3810 panic_info = x86_cpu_get_crash_info(cs);
3811 if (panic_info == NULL) {
3812 error_setg(errp, "No crash information");
3813 return;
3814 }
3815
3816 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
3817 errp);
3818 qapi_free_GuestPanicInformation(panic_info);
3819 }
3820
/* TypeInfo::instance_init for x86 CPUs.
 *
 * Registers all per-instance QOM properties (versioning fields, vendor,
 * model-id, tsc frequency, feature-word introspection, crash info, one
 * boolean property per named feature bit plus legacy-name aliases) and
 * finally loads the class's CPU model definition, if any.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Alternate spellings of feature names */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Legacy underscore-style names kept for compatibility */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
3903
3904 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3905 {
3906 X86CPU *cpu = X86_CPU(cs);
3907
3908 return cpu->apic_id;
3909 }
3910
3911 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3912 {
3913 X86CPU *cpu = X86_CPU(cs);
3914
3915 return cpu->env.cr[0] & CR0_PG_MASK;
3916 }
3917
3918 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3919 {
3920 X86CPU *cpu = X86_CPU(cs);
3921
3922 cpu->env.eip = value;
3923 }
3924
3925 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3926 {
3927 X86CPU *cpu = X86_CPU(cs);
3928
3929 cpu->env.eip = tb->pc - tb->cs_base;
3930 }
3931
/* CPUClass::has_work: the vCPU has work pending when
 *  - a HARD/POLL interrupt is pending and IF is set in EFLAGS, or
 *  - an NMI/INIT/SIPI/MCE is pending (these ignore IF), or
 *  - an SMI is pending and the CPU is not already in SMM.
 */
static bool x86_cpu_has_work(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
                                      CPU_INTERRUPT_POLL)) &&
            (env->eflags & IF_MASK)) ||
           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
                                     CPU_INTERRUPT_INIT |
                                     CPU_INTERRUPT_SIPI |
                                     CPU_INTERRUPT_MCE)) ||
           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK));
}
3947
/* qdev properties common to all x86 CPU models. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenment knobs */
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* Feature-filtering behaviour: warn (check) or fail (enforce) */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "derive from min-*level", see x86_cpu_expand_features */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_END_OF_LIST()
};
3992
/* Class init for the abstract TYPE_X86_CPU: chain up the Device
 * realize/unrealize and CPU reset hooks, then install all x86-specific
 * CPUClass callbacks.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Save parent implementations so our overrides can chain to them */
    xcc->parent_realize = dc->realize;
    xcc->parent_unrealize = dc->unrealize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    /* CPU_NB_REGS * 2 = general regs + xmm regs
     * 25 = eip, eflags, 6 seg regs, st[0-7], fctrl,...,fop, mxcsr.
     */
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    dc->cannot_instantiate_with_device_add_yet = false;
}
4045
4046 static const TypeInfo x86_cpu_type_info = {
4047 .name = TYPE_X86_CPU,
4048 .parent = TYPE_CPU,
4049 .instance_size = sizeof(X86CPU),
4050 .instance_init = x86_cpu_initfn,
4051 .abstract = true,
4052 .class_size = sizeof(X86CPUClass),
4053 .class_init = x86_cpu_common_class_init,
4054 };
4055
4056
4057 /* "base" CPU model, used by query-cpu-model-expansion */
4058 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4059 {
4060 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4061
4062 xcc->static_model = true;
4063 xcc->migration_safe = true;
4064 xcc->model_description = "base CPU model type with no features enabled";
4065 xcc->ordering = 8;
4066 }
4067
4068 static const TypeInfo x86_base_cpu_type_info = {
4069 .name = X86_CPU_TYPE_NAME("base"),
4070 .parent = TYPE_X86_CPU,
4071 .class_init = x86_cpu_base_class_init,
4072 };
4073
4074 static void x86_cpu_register_types(void)
4075 {
4076 int i;
4077
4078 type_register_static(&x86_cpu_type_info);
4079 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4080 x86_register_cpudef_type(&builtin_x86_defs[i]);
4081 }
4082 type_register_static(&max_x86_cpu_type_info);
4083 type_register_static(&x86_base_cpu_type_info);
4084 #ifdef CONFIG_KVM
4085 type_register_static(&host_x86_cpu_type_info);
4086 #endif
4087 }
4088
/* Run x86_cpu_register_types() during QEMU's type-registration module init. */
type_init(x86_cpu_register_types)