]> git.proxmox.com Git - mirror_qemu.git/blob - target/i386/cpu.c
i386: Implement query-cpu-model-expansion QMP command
[mirror_qemu.git] / target / i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32 #include "qapi/qmp/qstring.h"
33 #include "qapi/qmp/qdict.h"
34 #include "qapi/qmp/qbool.h"
35 #include "qapi/qmp/qint.h"
36 #include "qapi/qmp/qfloat.h"
37
38 #include "qapi-types.h"
39 #include "qapi-visit.h"
40 #include "qapi/visitor.h"
41 #include "qom/qom-qobject.h"
42 #include "sysemu/arch_init.h"
43
44 #if defined(CONFIG_KVM)
45 #include <linux/kvm_para.h>
46 #endif
47
48 #include "sysemu/sysemu.h"
49 #include "hw/qdev-properties.h"
50 #include "hw/i386/topology.h"
51 #ifndef CONFIG_USER_ONLY
52 #include "exec/address-spaces.h"
53 #include "hw/hw.h"
54 #include "hw/xen/xen.h"
55 #include "hw/i386/apic_internal.h"
56 #endif
57
58
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B 0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE 1
#define CPUID_4_TYPE_ICACHE 2
#define CPUID_4_TYPE_UNIFIED 3

/* Cache level is reported in EAX bits 7:5 */
#define CPUID_4_LEVEL(l) ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE (1 << 1)
#define CPUID_4_COMPLEX_IDX (1 << 2)

/* Value reported for a fully associative cache on the AMD leaves */
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a ways-of-associativity count to the 4-bit encoded field value;
 * counts with no defined encoding map to 0 (invalid value).
 */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)


/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE 64
#define L1D_ASSOCIATIVITY 8
#define L1D_SETS 64
#define L1D_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG 1
#define L1D_SIZE_KB_AMD 64
#define L1D_ASSOCIATIVITY_AMD 2

/* L1 instruction cache: */
#define L1I_LINE_SIZE 64
#define L1I_ASSOCIATIVITY 8
#define L1I_SETS 64
#define L1I_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG 1
#define L1I_SIZE_KB_AMD 64
#define L1I_ASSOCIATIVITY_AMD 2

/* Level 2 unified cache: */
#define L2_LINE_SIZE 64
#define L2_ASSOCIATIVITY 16
#define L2_SETS 4096
#define L2_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG 1
#define L2_SIZE_KB_AMD 512

/* Level 3 unified cache: */
/* The plain L3_* constants below describe a disabled L3 cache; the
 * L3_N_* constants describe an enabled 16MiB 16-way L3 configuration
 * (NOTE(review): which models use which set is decided by the callers —
 * not visible in this chunk). */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
#define L3_N_LINE_SIZE         64
#define L3_N_ASSOCIATIVITY     16
#define L3_N_SETS           16384
#define L3_N_PARTITIONS         1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG      1
#define L3_N_SIZE_KB_AMD    16384

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
176
177
178 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
179 uint32_t vendor2, uint32_t vendor3)
180 {
181 int i;
182 for (i = 0; i < 4; i++) {
183 dst[i] = vendor1 >> (8 * i);
184 dst[i + 4] = vendor2 >> (8 * i);
185 dst[i + 8] = vendor3 >> (8 * i);
186 }
187 dst[CPUID_VENDOR_SZ] = '\0';
188 }
189
/* Cumulative feature-flag sets for the classic built-in CPU models,
 * plus the TCG_* masks describing which flags the TCG emulator can
 * actually provide (used to filter guest-visible features). */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* CPUID[1].EDX flags TCG can emulate */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */

/* CPUID[1].ECX flags TCG can emulate */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

/* 64-bit-only CPUID[0x80000001].EDX flags */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
255
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];  /* one name per bit; NULL = unnamed bit */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;

/* Description of every feature word: which CPUID leaf/register it comes
 * from, the property name of each bit, and which bits TCG supports.
 * Indexed by the FeatureWord enum. */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    /* VIA/Centaur extended features */
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    /* KVM paravirt features.  Bits 0 and 3 are both named "kvmclock"
     * (NOTE(review): presumably the two kvmclock MSR interfaces — confirm
     * against kvm_para.h). */
    [FEAT_KVM] = {
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    /* Hyper-V enlightenments (no settable names; bits are informational) */
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    /* XSAVE component bitmap, low and high 32 bits (CPUID[0xD,0].EAX/EDX).
     * These words carry no named bits; migratable_flags marks the
     * components QEMU knows how to migrate. */
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};
534
/* Mapping from an R_* 32-bit register index to its printable name and
 * the corresponding QAPI X86CPURegister32 enum value. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Helper to build one table entry from the register's short name */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
555
/* Per-component description of one XSAVE state area: which CPUID
 * feature enables it, and its offset/size within the XSAVE buffer. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;   /* enabling FeatureWord and bit mask */
    uint32_t offset, size;    /* location within X86XSaveArea */
} ExtSaveArea;

/* XSAVE state components, indexed by XSTATE_*_BIT */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
605
606 static uint32_t xsave_area_size(uint64_t mask)
607 {
608 int i;
609 uint64_t ret = 0;
610
611 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
612 const ExtSaveArea *esa = &x86_ext_save_areas[i];
613 if ((mask >> i) & 1) {
614 ret = MAX(ret, esa->offset + esa->size);
615 }
616 }
617 return ret;
618 }
619
620 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
621 {
622 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
623 cpu->env.features[FEAT_XSAVE_COMP_LO];
624 }
625
626 const char *get_register_name_32(unsigned int reg)
627 {
628 if (reg >= CPU_NB_REGS32) {
629 return NULL;
630 }
631 return x86_reg_info_32[reg].name;
632 }
633
634 /*
635 * Returns the set of feature flags that are supported and migratable by
636 * QEMU, for a given FeatureWord.
637 */
638 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
639 {
640 FeatureWordInfo *wi = &feature_word_info[w];
641 uint32_t r = 0;
642 int i;
643
644 for (i = 0; i < 32; i++) {
645 uint32_t f = 1U << i;
646
647 /* If the feature name is known, it is implicitly considered migratable,
648 * unless it is explicitly set in unmigratable_flags */
649 if ((wi->migratable_flags & f) ||
650 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
651 r |= f;
652 }
653 }
654 return r;
655 }
656
657 void host_cpuid(uint32_t function, uint32_t count,
658 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
659 {
660 uint32_t vec[4];
661
662 #ifdef __x86_64__
663 asm volatile("cpuid"
664 : "=a"(vec[0]), "=b"(vec[1]),
665 "=c"(vec[2]), "=d"(vec[3])
666 : "0"(function), "c"(count) : "cc");
667 #elif defined(__i386__)
668 asm volatile("pusha \n\t"
669 "cpuid \n\t"
670 "mov %%eax, 0(%2) \n\t"
671 "mov %%ebx, 4(%2) \n\t"
672 "mov %%ecx, 8(%2) \n\t"
673 "mov %%edx, 12(%2) \n\t"
674 "popa"
675 : : "a"(function), "c"(count), "S"(vec)
676 : "memory", "cc");
677 #else
678 abort();
679 #endif
680
681 if (eax)
682 *eax = vec[0];
683 if (ebx)
684 *ebx = vec[1];
685 if (ecx)
686 *ecx = vec[2];
687 if (edx)
688 *edx = vec[3];
689 }
690
/* CPU class name definitions: */

/* QOM type names for CPU models are "<model>-" TYPE_X86_CPU */
#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
703
704 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
705 {
706 ObjectClass *oc;
707 char *typename;
708
709 if (cpu_model == NULL) {
710 return NULL;
711 }
712
713 typename = x86_cpu_type_name(cpu_model);
714 oc = object_class_by_name(typename);
715 g_free(typename);
716 return oc;
717 }
718
719 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
720 {
721 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
722 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
723 return g_strndup(class_name,
724 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
725 }
726
/* Static definition of one built-in CPU model */
struct X86CPUDefinition {
    const char *name;        /* CPU model name, e.g. "qemu64" */
    uint32_t level;          /* CPUID level */
    uint32_t xlevel;         /* CPUID extended level (0x8000xxxx leaves) */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;   /* CPUID feature bits per FeatureWord */
    char model_id[48];           /* model-id / brand string */
};
739
740 static X86CPUDefinition builtin_x86_defs[] = {
741 {
742 .name = "qemu64",
743 .level = 0xd,
744 .vendor = CPUID_VENDOR_AMD,
745 .family = 6,
746 .model = 6,
747 .stepping = 3,
748 .features[FEAT_1_EDX] =
749 PPRO_FEATURES |
750 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
751 CPUID_PSE36,
752 .features[FEAT_1_ECX] =
753 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
754 .features[FEAT_8000_0001_EDX] =
755 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
756 .features[FEAT_8000_0001_ECX] =
757 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
758 .xlevel = 0x8000000A,
759 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
760 },
761 {
762 .name = "phenom",
763 .level = 5,
764 .vendor = CPUID_VENDOR_AMD,
765 .family = 16,
766 .model = 2,
767 .stepping = 3,
768 /* Missing: CPUID_HT */
769 .features[FEAT_1_EDX] =
770 PPRO_FEATURES |
771 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
772 CPUID_PSE36 | CPUID_VME,
773 .features[FEAT_1_ECX] =
774 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
775 CPUID_EXT_POPCNT,
776 .features[FEAT_8000_0001_EDX] =
777 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
778 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
779 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
780 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
781 CPUID_EXT3_CR8LEG,
782 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
783 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
784 .features[FEAT_8000_0001_ECX] =
785 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
786 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
787 /* Missing: CPUID_SVM_LBRV */
788 .features[FEAT_SVM] =
789 CPUID_SVM_NPT,
790 .xlevel = 0x8000001A,
791 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
792 },
793 {
794 .name = "core2duo",
795 .level = 10,
796 .vendor = CPUID_VENDOR_INTEL,
797 .family = 6,
798 .model = 15,
799 .stepping = 11,
800 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
801 .features[FEAT_1_EDX] =
802 PPRO_FEATURES |
803 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
804 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
805 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
806 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
807 .features[FEAT_1_ECX] =
808 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
809 CPUID_EXT_CX16,
810 .features[FEAT_8000_0001_EDX] =
811 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
812 .features[FEAT_8000_0001_ECX] =
813 CPUID_EXT3_LAHF_LM,
814 .xlevel = 0x80000008,
815 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
816 },
817 {
818 .name = "kvm64",
819 .level = 0xd,
820 .vendor = CPUID_VENDOR_INTEL,
821 .family = 15,
822 .model = 6,
823 .stepping = 1,
824 /* Missing: CPUID_HT */
825 .features[FEAT_1_EDX] =
826 PPRO_FEATURES | CPUID_VME |
827 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
828 CPUID_PSE36,
829 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
830 .features[FEAT_1_ECX] =
831 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
832 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
833 .features[FEAT_8000_0001_EDX] =
834 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
835 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
836 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
837 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
838 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
839 .features[FEAT_8000_0001_ECX] =
840 0,
841 .xlevel = 0x80000008,
842 .model_id = "Common KVM processor"
843 },
844 {
845 .name = "qemu32",
846 .level = 4,
847 .vendor = CPUID_VENDOR_INTEL,
848 .family = 6,
849 .model = 6,
850 .stepping = 3,
851 .features[FEAT_1_EDX] =
852 PPRO_FEATURES,
853 .features[FEAT_1_ECX] =
854 CPUID_EXT_SSE3,
855 .xlevel = 0x80000004,
856 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
857 },
858 {
859 .name = "kvm32",
860 .level = 5,
861 .vendor = CPUID_VENDOR_INTEL,
862 .family = 15,
863 .model = 6,
864 .stepping = 1,
865 .features[FEAT_1_EDX] =
866 PPRO_FEATURES | CPUID_VME |
867 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
868 .features[FEAT_1_ECX] =
869 CPUID_EXT_SSE3,
870 .features[FEAT_8000_0001_ECX] =
871 0,
872 .xlevel = 0x80000008,
873 .model_id = "Common 32-bit KVM processor"
874 },
875 {
876 .name = "coreduo",
877 .level = 10,
878 .vendor = CPUID_VENDOR_INTEL,
879 .family = 6,
880 .model = 14,
881 .stepping = 8,
882 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
883 .features[FEAT_1_EDX] =
884 PPRO_FEATURES | CPUID_VME |
885 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
886 CPUID_SS,
887 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
888 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
889 .features[FEAT_1_ECX] =
890 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
891 .features[FEAT_8000_0001_EDX] =
892 CPUID_EXT2_NX,
893 .xlevel = 0x80000008,
894 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
895 },
896 {
897 .name = "486",
898 .level = 1,
899 .vendor = CPUID_VENDOR_INTEL,
900 .family = 4,
901 .model = 8,
902 .stepping = 0,
903 .features[FEAT_1_EDX] =
904 I486_FEATURES,
905 .xlevel = 0,
906 },
907 {
908 .name = "pentium",
909 .level = 1,
910 .vendor = CPUID_VENDOR_INTEL,
911 .family = 5,
912 .model = 4,
913 .stepping = 3,
914 .features[FEAT_1_EDX] =
915 PENTIUM_FEATURES,
916 .xlevel = 0,
917 },
918 {
919 .name = "pentium2",
920 .level = 2,
921 .vendor = CPUID_VENDOR_INTEL,
922 .family = 6,
923 .model = 5,
924 .stepping = 2,
925 .features[FEAT_1_EDX] =
926 PENTIUM2_FEATURES,
927 .xlevel = 0,
928 },
929 {
930 .name = "pentium3",
931 .level = 3,
932 .vendor = CPUID_VENDOR_INTEL,
933 .family = 6,
934 .model = 7,
935 .stepping = 3,
936 .features[FEAT_1_EDX] =
937 PENTIUM3_FEATURES,
938 .xlevel = 0,
939 },
940 {
941 .name = "athlon",
942 .level = 2,
943 .vendor = CPUID_VENDOR_AMD,
944 .family = 6,
945 .model = 2,
946 .stepping = 3,
947 .features[FEAT_1_EDX] =
948 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
949 CPUID_MCA,
950 .features[FEAT_8000_0001_EDX] =
951 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
952 .xlevel = 0x80000008,
953 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
954 },
955 {
956 .name = "n270",
957 .level = 10,
958 .vendor = CPUID_VENDOR_INTEL,
959 .family = 6,
960 .model = 28,
961 .stepping = 2,
962 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
963 .features[FEAT_1_EDX] =
964 PPRO_FEATURES |
965 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
966 CPUID_ACPI | CPUID_SS,
967 /* Some CPUs got no CPUID_SEP */
968 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
969 * CPUID_EXT_XTPR */
970 .features[FEAT_1_ECX] =
971 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
972 CPUID_EXT_MOVBE,
973 .features[FEAT_8000_0001_EDX] =
974 CPUID_EXT2_NX,
975 .features[FEAT_8000_0001_ECX] =
976 CPUID_EXT3_LAHF_LM,
977 .xlevel = 0x80000008,
978 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
979 },
980 {
981 .name = "Conroe",
982 .level = 10,
983 .vendor = CPUID_VENDOR_INTEL,
984 .family = 6,
985 .model = 15,
986 .stepping = 3,
987 .features[FEAT_1_EDX] =
988 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
989 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
990 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
991 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
992 CPUID_DE | CPUID_FP87,
993 .features[FEAT_1_ECX] =
994 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
995 .features[FEAT_8000_0001_EDX] =
996 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
997 .features[FEAT_8000_0001_ECX] =
998 CPUID_EXT3_LAHF_LM,
999 .xlevel = 0x80000008,
1000 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1001 },
1002 {
1003 .name = "Penryn",
1004 .level = 10,
1005 .vendor = CPUID_VENDOR_INTEL,
1006 .family = 6,
1007 .model = 23,
1008 .stepping = 3,
1009 .features[FEAT_1_EDX] =
1010 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1011 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1012 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1013 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1014 CPUID_DE | CPUID_FP87,
1015 .features[FEAT_1_ECX] =
1016 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1017 CPUID_EXT_SSE3,
1018 .features[FEAT_8000_0001_EDX] =
1019 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1020 .features[FEAT_8000_0001_ECX] =
1021 CPUID_EXT3_LAHF_LM,
1022 .xlevel = 0x80000008,
1023 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1024 },
1025 {
1026 .name = "Nehalem",
1027 .level = 11,
1028 .vendor = CPUID_VENDOR_INTEL,
1029 .family = 6,
1030 .model = 26,
1031 .stepping = 3,
1032 .features[FEAT_1_EDX] =
1033 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1034 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1035 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1036 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1037 CPUID_DE | CPUID_FP87,
1038 .features[FEAT_1_ECX] =
1039 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1040 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1041 .features[FEAT_8000_0001_EDX] =
1042 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1043 .features[FEAT_8000_0001_ECX] =
1044 CPUID_EXT3_LAHF_LM,
1045 .xlevel = 0x80000008,
1046 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1047 },
1048 {
1049 .name = "Westmere",
1050 .level = 11,
1051 .vendor = CPUID_VENDOR_INTEL,
1052 .family = 6,
1053 .model = 44,
1054 .stepping = 1,
1055 .features[FEAT_1_EDX] =
1056 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1057 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1058 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1059 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1060 CPUID_DE | CPUID_FP87,
1061 .features[FEAT_1_ECX] =
1062 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1063 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1064 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1065 .features[FEAT_8000_0001_EDX] =
1066 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1067 .features[FEAT_8000_0001_ECX] =
1068 CPUID_EXT3_LAHF_LM,
1069 .features[FEAT_6_EAX] =
1070 CPUID_6_EAX_ARAT,
1071 .xlevel = 0x80000008,
1072 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1073 },
1074 {
1075 .name = "SandyBridge",
1076 .level = 0xd,
1077 .vendor = CPUID_VENDOR_INTEL,
1078 .family = 6,
1079 .model = 42,
1080 .stepping = 1,
1081 .features[FEAT_1_EDX] =
1082 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1083 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1084 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1085 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1086 CPUID_DE | CPUID_FP87,
1087 .features[FEAT_1_ECX] =
1088 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1089 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1090 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1091 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1092 CPUID_EXT_SSE3,
1093 .features[FEAT_8000_0001_EDX] =
1094 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1095 CPUID_EXT2_SYSCALL,
1096 .features[FEAT_8000_0001_ECX] =
1097 CPUID_EXT3_LAHF_LM,
1098 .features[FEAT_XSAVE] =
1099 CPUID_XSAVE_XSAVEOPT,
1100 .features[FEAT_6_EAX] =
1101 CPUID_6_EAX_ARAT,
1102 .xlevel = 0x80000008,
1103 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1104 },
1105 {
1106 .name = "IvyBridge",
1107 .level = 0xd,
1108 .vendor = CPUID_VENDOR_INTEL,
1109 .family = 6,
1110 .model = 58,
1111 .stepping = 9,
1112 .features[FEAT_1_EDX] =
1113 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1114 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1115 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1116 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1117 CPUID_DE | CPUID_FP87,
1118 .features[FEAT_1_ECX] =
1119 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1120 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1121 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1122 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1123 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1124 .features[FEAT_7_0_EBX] =
1125 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1126 CPUID_7_0_EBX_ERMS,
1127 .features[FEAT_8000_0001_EDX] =
1128 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1129 CPUID_EXT2_SYSCALL,
1130 .features[FEAT_8000_0001_ECX] =
1131 CPUID_EXT3_LAHF_LM,
1132 .features[FEAT_XSAVE] =
1133 CPUID_XSAVE_XSAVEOPT,
1134 .features[FEAT_6_EAX] =
1135 CPUID_6_EAX_ARAT,
1136 .xlevel = 0x80000008,
1137 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1138 },
1139 {
1140 .name = "Haswell-noTSX",
1141 .level = 0xd,
1142 .vendor = CPUID_VENDOR_INTEL,
1143 .family = 6,
1144 .model = 60,
1145 .stepping = 1,
1146 .features[FEAT_1_EDX] =
1147 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1148 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1149 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1150 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1151 CPUID_DE | CPUID_FP87,
1152 .features[FEAT_1_ECX] =
1153 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1154 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1155 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1156 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1157 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1158 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1159 .features[FEAT_8000_0001_EDX] =
1160 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1161 CPUID_EXT2_SYSCALL,
1162 .features[FEAT_8000_0001_ECX] =
1163 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1164 .features[FEAT_7_0_EBX] =
1165 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1166 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1167 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1168 .features[FEAT_XSAVE] =
1169 CPUID_XSAVE_XSAVEOPT,
1170 .features[FEAT_6_EAX] =
1171 CPUID_6_EAX_ARAT,
1172 .xlevel = 0x80000008,
1173 .model_id = "Intel Core Processor (Haswell, no TSX)",
1174 }, {
1175 .name = "Haswell",
1176 .level = 0xd,
1177 .vendor = CPUID_VENDOR_INTEL,
1178 .family = 6,
1179 .model = 60,
1180 .stepping = 1,
1181 .features[FEAT_1_EDX] =
1182 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1183 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1184 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1185 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1186 CPUID_DE | CPUID_FP87,
1187 .features[FEAT_1_ECX] =
1188 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1189 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1190 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1191 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1192 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1193 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1194 .features[FEAT_8000_0001_EDX] =
1195 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1196 CPUID_EXT2_SYSCALL,
1197 .features[FEAT_8000_0001_ECX] =
1198 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1199 .features[FEAT_7_0_EBX] =
1200 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1201 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1202 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1203 CPUID_7_0_EBX_RTM,
1204 .features[FEAT_XSAVE] =
1205 CPUID_XSAVE_XSAVEOPT,
1206 .features[FEAT_6_EAX] =
1207 CPUID_6_EAX_ARAT,
1208 .xlevel = 0x80000008,
1209 .model_id = "Intel Core Processor (Haswell)",
1210 },
1211 {
1212 .name = "Broadwell-noTSX",
1213 .level = 0xd,
1214 .vendor = CPUID_VENDOR_INTEL,
1215 .family = 6,
1216 .model = 61,
1217 .stepping = 2,
1218 .features[FEAT_1_EDX] =
1219 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1220 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1221 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1222 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1223 CPUID_DE | CPUID_FP87,
1224 .features[FEAT_1_ECX] =
1225 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1226 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1227 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1228 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1229 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1230 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1231 .features[FEAT_8000_0001_EDX] =
1232 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1233 CPUID_EXT2_SYSCALL,
1234 .features[FEAT_8000_0001_ECX] =
1235 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1236 .features[FEAT_7_0_EBX] =
1237 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1238 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1239 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1240 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1241 CPUID_7_0_EBX_SMAP,
1242 .features[FEAT_XSAVE] =
1243 CPUID_XSAVE_XSAVEOPT,
1244 .features[FEAT_6_EAX] =
1245 CPUID_6_EAX_ARAT,
1246 .xlevel = 0x80000008,
1247 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1248 },
1249 {
1250 .name = "Broadwell",
1251 .level = 0xd,
1252 .vendor = CPUID_VENDOR_INTEL,
1253 .family = 6,
1254 .model = 61,
1255 .stepping = 2,
1256 .features[FEAT_1_EDX] =
1257 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1258 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1259 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1260 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1261 CPUID_DE | CPUID_FP87,
1262 .features[FEAT_1_ECX] =
1263 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1264 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1265 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1266 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1267 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1268 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1269 .features[FEAT_8000_0001_EDX] =
1270 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1271 CPUID_EXT2_SYSCALL,
1272 .features[FEAT_8000_0001_ECX] =
1273 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1274 .features[FEAT_7_0_EBX] =
1275 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1276 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1277 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1278 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1279 CPUID_7_0_EBX_SMAP,
1280 .features[FEAT_XSAVE] =
1281 CPUID_XSAVE_XSAVEOPT,
1282 .features[FEAT_6_EAX] =
1283 CPUID_6_EAX_ARAT,
1284 .xlevel = 0x80000008,
1285 .model_id = "Intel Core Processor (Broadwell)",
1286 },
1287 {
1288 .name = "Skylake-Client",
1289 .level = 0xd,
1290 .vendor = CPUID_VENDOR_INTEL,
1291 .family = 6,
1292 .model = 94,
1293 .stepping = 3,
1294 .features[FEAT_1_EDX] =
1295 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1296 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1297 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1298 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1299 CPUID_DE | CPUID_FP87,
1300 .features[FEAT_1_ECX] =
1301 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1302 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1303 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1304 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1305 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1306 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1307 .features[FEAT_8000_0001_EDX] =
1308 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1309 CPUID_EXT2_SYSCALL,
1310 .features[FEAT_8000_0001_ECX] =
1311 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1312 .features[FEAT_7_0_EBX] =
1313 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1314 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1315 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1316 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1317 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1318 /* Missing: XSAVES (not supported by some Linux versions,
1319 * including v4.1 to v4.6).
1320 * KVM doesn't yet expose any XSAVES state save component,
1321 * and the only one defined in Skylake (processor tracing)
1322 * probably will block migration anyway.
1323 */
1324 .features[FEAT_XSAVE] =
1325 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1326 CPUID_XSAVE_XGETBV1,
1327 .features[FEAT_6_EAX] =
1328 CPUID_6_EAX_ARAT,
1329 .xlevel = 0x80000008,
1330 .model_id = "Intel Core Processor (Skylake)",
1331 },
1332 {
1333 .name = "Opteron_G1",
1334 .level = 5,
1335 .vendor = CPUID_VENDOR_AMD,
1336 .family = 15,
1337 .model = 6,
1338 .stepping = 1,
1339 .features[FEAT_1_EDX] =
1340 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1341 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1342 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1343 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1344 CPUID_DE | CPUID_FP87,
1345 .features[FEAT_1_ECX] =
1346 CPUID_EXT_SSE3,
1347 .features[FEAT_8000_0001_EDX] =
1348 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1349 .xlevel = 0x80000008,
1350 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1351 },
1352 {
1353 .name = "Opteron_G2",
1354 .level = 5,
1355 .vendor = CPUID_VENDOR_AMD,
1356 .family = 15,
1357 .model = 6,
1358 .stepping = 1,
1359 .features[FEAT_1_EDX] =
1360 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1361 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1362 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1363 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1364 CPUID_DE | CPUID_FP87,
1365 .features[FEAT_1_ECX] =
1366 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1367 /* Missing: CPUID_EXT2_RDTSCP */
1368 .features[FEAT_8000_0001_EDX] =
1369 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1370 .features[FEAT_8000_0001_ECX] =
1371 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1372 .xlevel = 0x80000008,
1373 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1374 },
1375 {
1376 .name = "Opteron_G3",
1377 .level = 5,
1378 .vendor = CPUID_VENDOR_AMD,
1379 .family = 16,
1380 .model = 2,
1381 .stepping = 3,
1382 .features[FEAT_1_EDX] =
1383 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1384 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1385 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1386 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1387 CPUID_DE | CPUID_FP87,
1388 .features[FEAT_1_ECX] =
1389 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1390 CPUID_EXT_SSE3,
1391 /* Missing: CPUID_EXT2_RDTSCP */
1392 .features[FEAT_8000_0001_EDX] =
1393 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1394 .features[FEAT_8000_0001_ECX] =
1395 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1396 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1397 .xlevel = 0x80000008,
1398 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1399 },
1400 {
1401 .name = "Opteron_G4",
1402 .level = 0xd,
1403 .vendor = CPUID_VENDOR_AMD,
1404 .family = 21,
1405 .model = 1,
1406 .stepping = 2,
1407 .features[FEAT_1_EDX] =
1408 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1409 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1410 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1411 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1412 CPUID_DE | CPUID_FP87,
1413 .features[FEAT_1_ECX] =
1414 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1415 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1416 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1417 CPUID_EXT_SSE3,
1418 /* Missing: CPUID_EXT2_RDTSCP */
1419 .features[FEAT_8000_0001_EDX] =
1420 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1421 CPUID_EXT2_SYSCALL,
1422 .features[FEAT_8000_0001_ECX] =
1423 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1424 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1425 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1426 CPUID_EXT3_LAHF_LM,
1427 /* no xsaveopt! */
1428 .xlevel = 0x8000001A,
1429 .model_id = "AMD Opteron 62xx class CPU",
1430 },
1431 {
1432 .name = "Opteron_G5",
1433 .level = 0xd,
1434 .vendor = CPUID_VENDOR_AMD,
1435 .family = 21,
1436 .model = 2,
1437 .stepping = 0,
1438 .features[FEAT_1_EDX] =
1439 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1440 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1441 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1442 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1443 CPUID_DE | CPUID_FP87,
1444 .features[FEAT_1_ECX] =
1445 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1446 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1447 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1448 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1449 /* Missing: CPUID_EXT2_RDTSCP */
1450 .features[FEAT_8000_0001_EDX] =
1451 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1452 CPUID_EXT2_SYSCALL,
1453 .features[FEAT_8000_0001_ECX] =
1454 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1455 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1456 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1457 CPUID_EXT3_LAHF_LM,
1458 /* no xsaveopt! */
1459 .xlevel = 0x8000001A,
1460 .model_id = "AMD Opteron 63xx class CPU",
1461 },
1462 };
1463
/* A (property name, value) pair, used for tables of accelerator-specific
 * global property defaults applied to CPU models (see kvm_default_props
 * and tcg_default_props below). Tables are terminated by a {NULL, NULL}
 * entry.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1467
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 * Non-const: x86_cpu_change_kvm_default() may rewrite the value column
 * at runtime. Terminated by a {NULL, NULL} sentinel.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
1484
/* TCG-specific defaults that override all CPU models when using TCG.
 * Terminated by a {NULL, NULL} sentinel.
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
1491
1492
1493 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1494 {
1495 PropValue *pv;
1496 for (pv = kvm_default_props; pv->prop; pv++) {
1497 if (!strcmp(pv->prop, prop)) {
1498 pv->value = value;
1499 break;
1500 }
1501 }
1502
1503 /* It is valid to call this function only for properties that
1504 * are already present in the kvm_default_props table.
1505 */
1506 assert(pv->prop);
1507 }
1508
1509 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1510 bool migratable_only);
1511
1512 static bool lmce_supported(void)
1513 {
1514 uint64_t mce_cap = 0;
1515
1516 #ifdef CONFIG_KVM
1517 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1518 return false;
1519 }
1520 #endif
1521
1522 return !!(mce_cap & MCG_LMCE_P);
1523 }
1524
/* Fill @str (at least 48 bytes) with the host's CPU brand string,
 * read from CPUID leaves 0x80000002..0x80000004 (16 bytes per leaf,
 * EAX/EBX/ECX/EDX in order). Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        uint32_t regs[4] = { 0, 0, 0, 0 };

        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* regs[] is contiguous EAX,EBX,ECX,EDX — copy all 16 bytes at once */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
1539
/* qdev properties specific to the "max" CPU model (and its "host"
 * subclass): "migratable" defaults to on, "host-cache-info" to off.
 */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1545
1546 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
1547 {
1548 DeviceClass *dc = DEVICE_CLASS(oc);
1549 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1550
1551 xcc->ordering = 9;
1552
1553 xcc->model_description =
1554 "Enables all features supported by the accelerator in the current host";
1555
1556 dc->props = max_x86_cpu_properties;
1557 }
1558
1559 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
1560
/* Instance init for the "max" CPU model.
 *
 * Under KVM, the CPU identity (vendor/family/model/stepping/model-id) is
 * copied from the host via CPUID, and the minimum CPUID levels are taken
 * from what KVM supports. Under TCG, a fixed generic identity is used.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (kvm_enabled()) {
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* Vendor string: CPUID leaf 0 returns it in EBX, EDX, ECX order */
        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        /* Decode family/model/stepping from the host's CPUID leaf 1 EAX,
         * folding in the extended family/model fields.
         */
        host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
        host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
        host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
        host_cpudef.stepping = eax & 0x0F;

        cpu_x86_fill_model_id(host_cpudef.model_id);

        x86_cpu_load_def(cpu, &host_cpudef, &error_abort);

        /* Record the CPUID levels KVM reports for the basic, extended,
         * and Centaur leaf ranges.
         */
        env->cpuid_min_level =
            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_min_xlevel =
            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_min_xlevel2 =
            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: fixed generic identity instead of the host's */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1611
/* QOM registration for the "max" CPU model; its instance_init probes the
 * host under KVM or selects a fixed TCG identity.
 */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
1618
1619 #ifdef CONFIG_KVM
1620
1621 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1622 {
1623 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1624
1625 xcc->kvm_required = true;
1626 xcc->ordering = 8;
1627
1628 xcc->model_description =
1629 "KVM processor with all supported host features "
1630 "(only available in KVM mode)";
1631 }
1632
/* QOM registration for the "host" CPU model: a subclass of "max" that
 * inherits its instance_init and overrides only class-level data.
 */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
1638
1639 #endif
1640
1641 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1642 {
1643 FeatureWordInfo *f = &feature_word_info[w];
1644 int i;
1645
1646 for (i = 0; i < 32; ++i) {
1647 if ((1UL << i) & mask) {
1648 const char *reg = get_register_name_32(f->cpuid_reg);
1649 assert(reg);
1650 fprintf(stderr, "warning: %s doesn't support requested feature: "
1651 "CPUID.%02XH:%s%s%s [bit %d]\n",
1652 kvm_enabled() ? "host" : "TCG",
1653 f->cpuid_eax, reg,
1654 f->feat_names[i] ? "." : "",
1655 f->feat_names[i] ? f->feat_names[i] : "", i);
1656 }
1657 }
1658 }
1659
1660 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1661 const char *name, void *opaque,
1662 Error **errp)
1663 {
1664 X86CPU *cpu = X86_CPU(obj);
1665 CPUX86State *env = &cpu->env;
1666 int64_t value;
1667
1668 value = (env->cpuid_version >> 8) & 0xf;
1669 if (value == 0xf) {
1670 value += (env->cpuid_version >> 20) & 0xff;
1671 }
1672 visit_type_int(v, name, &value, errp);
1673 }
1674
1675 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1676 const char *name, void *opaque,
1677 Error **errp)
1678 {
1679 X86CPU *cpu = X86_CPU(obj);
1680 CPUX86State *env = &cpu->env;
1681 const int64_t min = 0;
1682 const int64_t max = 0xff + 0xf;
1683 Error *local_err = NULL;
1684 int64_t value;
1685
1686 visit_type_int(v, name, &value, &local_err);
1687 if (local_err) {
1688 error_propagate(errp, local_err);
1689 return;
1690 }
1691 if (value < min || value > max) {
1692 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1693 name ? name : "null", value, min, max);
1694 return;
1695 }
1696
1697 env->cpuid_version &= ~0xff00f00;
1698 if (value > 0x0f) {
1699 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1700 } else {
1701 env->cpuid_version |= value << 8;
1702 }
1703 }
1704
1705 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1706 const char *name, void *opaque,
1707 Error **errp)
1708 {
1709 X86CPU *cpu = X86_CPU(obj);
1710 CPUX86State *env = &cpu->env;
1711 int64_t value;
1712
1713 value = (env->cpuid_version >> 4) & 0xf;
1714 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1715 visit_type_int(v, name, &value, errp);
1716 }
1717
1718 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1719 const char *name, void *opaque,
1720 Error **errp)
1721 {
1722 X86CPU *cpu = X86_CPU(obj);
1723 CPUX86State *env = &cpu->env;
1724 const int64_t min = 0;
1725 const int64_t max = 0xff;
1726 Error *local_err = NULL;
1727 int64_t value;
1728
1729 visit_type_int(v, name, &value, &local_err);
1730 if (local_err) {
1731 error_propagate(errp, local_err);
1732 return;
1733 }
1734 if (value < min || value > max) {
1735 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1736 name ? name : "null", value, min, max);
1737 return;
1738 }
1739
1740 env->cpuid_version &= ~0xf00f0;
1741 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1742 }
1743
1744 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1745 const char *name, void *opaque,
1746 Error **errp)
1747 {
1748 X86CPU *cpu = X86_CPU(obj);
1749 CPUX86State *env = &cpu->env;
1750 int64_t value;
1751
1752 value = env->cpuid_version & 0xf;
1753 visit_type_int(v, name, &value, errp);
1754 }
1755
1756 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1757 const char *name, void *opaque,
1758 Error **errp)
1759 {
1760 X86CPU *cpu = X86_CPU(obj);
1761 CPUX86State *env = &cpu->env;
1762 const int64_t min = 0;
1763 const int64_t max = 0xf;
1764 Error *local_err = NULL;
1765 int64_t value;
1766
1767 visit_type_int(v, name, &value, &local_err);
1768 if (local_err) {
1769 error_propagate(errp, local_err);
1770 return;
1771 }
1772 if (value < min || value > max) {
1773 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1774 name ? name : "null", value, min, max);
1775 return;
1776 }
1777
1778 env->cpuid_version &= ~0xf;
1779 env->cpuid_version |= value & 0xf;
1780 }
1781
1782 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1783 {
1784 X86CPU *cpu = X86_CPU(obj);
1785 CPUX86State *env = &cpu->env;
1786 char *value;
1787
1788 value = g_malloc(CPUID_VENDOR_SZ + 1);
1789 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1790 env->cpuid_vendor3);
1791 return value;
1792 }
1793
1794 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1795 Error **errp)
1796 {
1797 X86CPU *cpu = X86_CPU(obj);
1798 CPUX86State *env = &cpu->env;
1799 int i;
1800
1801 if (strlen(value) != CPUID_VENDOR_SZ) {
1802 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1803 return;
1804 }
1805
1806 env->cpuid_vendor1 = 0;
1807 env->cpuid_vendor2 = 0;
1808 env->cpuid_vendor3 = 0;
1809 for (i = 0; i < 4; i++) {
1810 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1811 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1812 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1813 }
1814 }
1815
1816 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1817 {
1818 X86CPU *cpu = X86_CPU(obj);
1819 CPUX86State *env = &cpu->env;
1820 char *value;
1821 int i;
1822
1823 value = g_malloc(48 + 1);
1824 for (i = 0; i < 48; i++) {
1825 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1826 }
1827 value[48] = '\0';
1828 return value;
1829 }
1830
1831 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1832 Error **errp)
1833 {
1834 X86CPU *cpu = X86_CPU(obj);
1835 CPUX86State *env = &cpu->env;
1836 int c, len, i;
1837
1838 if (model_id == NULL) {
1839 model_id = "";
1840 }
1841 len = strlen(model_id);
1842 memset(env->cpuid_model, 0, 48);
1843 for (i = 0; i < 48; i++) {
1844 if (i >= len) {
1845 c = '\0';
1846 } else {
1847 c = (uint8_t)model_id[i];
1848 }
1849 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1850 }
1851 }
1852
1853 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1854 void *opaque, Error **errp)
1855 {
1856 X86CPU *cpu = X86_CPU(obj);
1857 int64_t value;
1858
1859 value = cpu->env.tsc_khz * 1000;
1860 visit_type_int(v, name, &value, errp);
1861 }
1862
1863 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1864 void *opaque, Error **errp)
1865 {
1866 X86CPU *cpu = X86_CPU(obj);
1867 const int64_t min = 0;
1868 const int64_t max = INT64_MAX;
1869 Error *local_err = NULL;
1870 int64_t value;
1871
1872 visit_type_int(v, name, &value, &local_err);
1873 if (local_err) {
1874 error_propagate(errp, local_err);
1875 return;
1876 }
1877 if (value < min || value > max) {
1878 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1879 name ? name : "null", value, min, max);
1880 return;
1881 }
1882
1883 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1884 }
1885
/* Generic getter for "feature-words" and "filtered-features" properties.
 *
 * @opaque points at the per-CPU array of feature words (uint32_t, one
 * per FeatureWord) to report. The QAPI list nodes are allocated on the
 * stack and linked together; the visitor consumes them before this
 * function returns, so no heap allocation is needed.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        /* Translate the internal FeatureWordInfo into the QAPI struct */
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
1914
1915 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1916 void *opaque, Error **errp)
1917 {
1918 X86CPU *cpu = X86_CPU(obj);
1919 int64_t value = cpu->hyperv_spinlock_attempts;
1920
1921 visit_type_int(v, name, &value, errp);
1922 }
1923
1924 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1925 void *opaque, Error **errp)
1926 {
1927 const int64_t min = 0xFFF;
1928 const int64_t max = UINT_MAX;
1929 X86CPU *cpu = X86_CPU(obj);
1930 Error *err = NULL;
1931 int64_t value;
1932
1933 visit_type_int(v, name, &value, &err);
1934 if (err) {
1935 error_propagate(errp, err);
1936 return;
1937 }
1938
1939 if (value < min || value > max) {
1940 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1941 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1942 object_get_typename(obj), name ? name : "null",
1943 value, min, max);
1944 return;
1945 }
1946 cpu->hyperv_spinlock_attempts = value;
1947 }
1948
/* qdev property type used for "hv-spinlocks": an int with the custom
 * range-checked getter/setter above.
 */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1954
/* Convert all '_' in a feature string option name to '-', in place, to
 * make the feature name conform to the QOM property naming rule, which
 * uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    for (char *p = strchr(s, '_'); p != NULL; p = strchr(p, '_')) {
        *p = '-';
    }
}
1964
/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* Component index in the combined 64-bit XSAVE state bitmap */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            /* Redirect to the feature word/bit that controls this
             * save area; ctz32 picks its lowest controlling bit. */
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
1985
/* Compatibility hack to maintain legacy +-feat semantics,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
/* Names collected from legacy "+feat"/"-feat" options; consulted later
 * to warn when the same feature also appears as "feat=on|off". */
static GList *plus_features, *minus_features;
1992
1993 static gint compare_string(gconstpointer a, gconstpointer b)
1994 {
1995 return g_strcmp0(a, b);
1996 }
1997
1998 /* Parse "+feature,-feature,feature=foo" CPU feature string
1999 */
/* Parse "+feature,-feature,feature=foo" CPU feature string
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Feature parsing registers global properties, which must only
     * happen once per process; later invocations are no-ops. */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    /* NOTE(review): strtok() mutates @features and keeps static state;
     * presumably safe here because this runs once, single-threaded. */
    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "feat=val"; a bare "feat" means "feat=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature was also given as +feat/-feat;
         * the legacy +-feat form wins (see comment above the lists). */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"+%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"-%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }

        /* Special case: */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            /* Accepts metric suffixes (e.g. "2G"); rewritten to the
             * plain-integer "tsc-frequency" property. */
            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        error_report("warning: Compatibility of ambiguous CPU model "
                     "strings won't be kept on future QEMU versions");
    }
}
2088
2089 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2090 static int x86_cpu_filter_features(X86CPU *cpu);
2091
2092 /* Check for missing features that may prevent the CPU class from
2093 * running using the current machine and accelerator.
2094 */
2095 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2096 strList **missing_feats)
2097 {
2098 X86CPU *xc;
2099 FeatureWord w;
2100 Error *err = NULL;
2101 strList **next = missing_feats;
2102
2103 if (xcc->kvm_required && !kvm_enabled()) {
2104 strList *new = g_new0(strList, 1);
2105 new->value = g_strdup("kvm");;
2106 *missing_feats = new;
2107 return;
2108 }
2109
2110 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2111
2112 x86_cpu_expand_features(xc, &err);
2113 if (err) {
2114 /* Errors at x86_cpu_expand_features should never happen,
2115 * but in case it does, just report the model as not
2116 * runnable at all using the "type" property.
2117 */
2118 strList *new = g_new0(strList, 1);
2119 new->value = g_strdup("type");
2120 *next = new;
2121 next = &new->next;
2122 }
2123
2124 x86_cpu_filter_features(xc);
2125
2126 for (w = 0; w < FEATURE_WORDS; w++) {
2127 uint32_t filtered = xc->filtered_features[w];
2128 int i;
2129 for (i = 0; i < 32; i++) {
2130 if (filtered & (1UL << i)) {
2131 strList *new = g_new0(strList, 1);
2132 new->value = g_strdup(x86_cpu_feature_name(w, i));
2133 *next = new;
2134 next = &new->next;
2135 }
2136 }
2137 }
2138
2139 object_unref(OBJECT(xc));
2140 }
2141
2142 /* Print all cpuid feature names in featureset
2143 */
2144 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2145 {
2146 int bit;
2147 bool first = true;
2148
2149 for (bit = 0; bit < 32; bit++) {
2150 if (featureset[bit]) {
2151 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2152 first = false;
2153 }
2154 }
2155 }
2156
2157 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2158 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2159 {
2160 ObjectClass *class_a = (ObjectClass *)a;
2161 ObjectClass *class_b = (ObjectClass *)b;
2162 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2163 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2164 const char *name_a, *name_b;
2165
2166 if (cc_a->ordering != cc_b->ordering) {
2167 return cc_a->ordering - cc_b->ordering;
2168 } else {
2169 name_a = object_class_get_name(class_a);
2170 name_b = object_class_get_name(class_b);
2171 return strcmp(name_a, name_b);
2172 }
2173 }
2174
2175 static GSList *get_sorted_cpu_model_list(void)
2176 {
2177 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2178 list = g_slist_sort(list, x86_cpu_list_compare);
2179 return list;
2180 }
2181
2182 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2183 {
2184 ObjectClass *oc = data;
2185 X86CPUClass *cc = X86_CPU_CLASS(oc);
2186 CPUListState *s = user_data;
2187 char *name = x86_cpu_class_get_model_name(cc);
2188 const char *desc = cc->model_description;
2189 if (!desc && cc->cpu_def) {
2190 desc = cc->cpu_def->model_id;
2191 }
2192
2193 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2194 name, desc);
2195 g_free(name);
2196 }
2197
2198 /* list available CPU models and flags */
2199 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2200 {
2201 int i;
2202 CPUListState s = {
2203 .file = f,
2204 .cpu_fprintf = cpu_fprintf,
2205 };
2206 GSList *list;
2207
2208 (*cpu_fprintf)(f, "Available CPUs:\n");
2209 list = get_sorted_cpu_model_list();
2210 g_slist_foreach(list, x86_cpu_list_entry, &s);
2211 g_slist_free(list);
2212
2213 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2214 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2215 FeatureWordInfo *fw = &feature_word_info[i];
2216
2217 (*cpu_fprintf)(f, " ");
2218 listflags(f, cpu_fprintf, fw->feat_names);
2219 (*cpu_fprintf)(f, "\n");
2220 }
2221 }
2222
2223 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2224 {
2225 ObjectClass *oc = data;
2226 X86CPUClass *cc = X86_CPU_CLASS(oc);
2227 CpuDefinitionInfoList **cpu_list = user_data;
2228 CpuDefinitionInfoList *entry;
2229 CpuDefinitionInfo *info;
2230
2231 info = g_malloc0(sizeof(*info));
2232 info->name = x86_cpu_class_get_model_name(cc);
2233 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2234 info->has_unavailable_features = true;
2235 info->q_typename = g_strdup(object_class_get_name(oc));
2236 info->migration_safe = cc->migration_safe;
2237 info->has_migration_safe = true;
2238 info->q_static = cc->static_model;
2239
2240 entry = g_malloc0(sizeof(*entry));
2241 entry->value = info;
2242 entry->next = *cpu_list;
2243 *cpu_list = entry;
2244 }
2245
2246 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2247 {
2248 CpuDefinitionInfoList *cpu_list = NULL;
2249 GSList *list = get_sorted_cpu_model_list();
2250 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2251 g_slist_free(list);
2252 return cpu_list;
2253 }
2254
/* Return the accelerator-supported bits for feature word @w: KVM asks
 * the kernel, TCG uses the static table, and with no accelerator
 * (e.g. qtest) everything is reported as supported.  With
 * @migratable_only, non-migratable bits are masked out. */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
2275
2276 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2277 {
2278 FeatureWord w;
2279
2280 for (w = 0; w < FEATURE_WORDS; w++) {
2281 report_unavailable_features(w, cpu->filtered_features[w]);
2282 }
2283 }
2284
2285 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2286 {
2287 PropValue *pv;
2288 for (pv = props; pv->prop; pv++) {
2289 if (!pv->value) {
2290 continue;
2291 }
2292 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2293 &error_abort);
2294 }
2295 }
2296
/* Load data from X86CPUDefinition into a X86CPU object
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Copy the feature-word bitmaps straight from the model table */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            /* Userspace irqchip cannot provide x2apic */
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Guests always see the hypervisor bit */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2354
/* Return a QDict containing keys for all properties that can be included
 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
 * must be included in the dictionary.
 */
static QDict *x86_cpu_static_props(void)
{
    FeatureWord w;
    int i;
    static const char *props[] = {
        "min-level",
        "min-xlevel",
        "family",
        "model",
        "stepping",
        "model-id",
        "vendor",
        "lmce",
        NULL,
    };
    static QDict *d;

    /* Cached: the set of static property names never changes at runtime */
    if (d) {
        return d;
    }

    d = qdict_new();
    for (i = 0; props[i]; i++) {
        qdict_put_obj(d, props[i], qnull());
    }

    /* Every named feature flag is also a static property */
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *fi = &feature_word_info[w];
        int bit;
        for (bit = 0; bit < 32; bit++) {
            if (!fi->feat_names[bit]) {
                continue;
            }
            qdict_put_obj(d, fi->feat_names[bit], qnull());
        }
    }

    return d;
}
2398
2399 /* Add an entry to @props dict, with the value for property. */
2400 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
2401 {
2402 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
2403 &error_abort);
2404
2405 qdict_put_obj(props, prop, value);
2406 }
2407
2408 /* Convert CPU model data from X86CPU object to a property dictionary
2409 * that can recreate exactly the same CPU model.
2410 */
2411 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
2412 {
2413 QDict *sprops = x86_cpu_static_props();
2414 const QDictEntry *e;
2415
2416 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
2417 const char *prop = qdict_entry_key(e);
2418 x86_cpu_expand_prop(cpu, props, prop);
2419 }
2420 }
2421
2422 static void object_apply_props(Object *obj, QDict *props, Error **errp)
2423 {
2424 const QDictEntry *prop;
2425 Error *err = NULL;
2426
2427 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
2428 object_property_set_qobject(obj, qdict_entry_value(prop),
2429 qdict_entry_key(prop), &err);
2430 if (err) {
2431 break;
2432 }
2433 }
2434
2435 error_propagate(errp, err);
2436 }
2437
/* Create X86CPU object according to model+props specification */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    /* Apply caller-supplied property overrides, if any */
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    /* On any failure: report the error, drop the partially built CPU,
     * and return NULL.  On success the caller owns the reference. */
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
2472
2473 CpuModelExpansionInfo *
2474 arch_query_cpu_model_expansion(CpuModelExpansionType type,
2475 CpuModelInfo *model,
2476 Error **errp)
2477 {
2478 X86CPU *xc = NULL;
2479 Error *err = NULL;
2480 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
2481 QDict *props = NULL;
2482 const char *base_name;
2483
2484 xc = x86_cpu_from_model(model->name,
2485 model->has_props ?
2486 qobject_to_qdict(model->props) :
2487 NULL, &err);
2488 if (err) {
2489 goto out;
2490 }
2491
2492
2493 switch (type) {
2494 case CPU_MODEL_EXPANSION_TYPE_STATIC:
2495 /* Static expansion will be based on "base" only */
2496 base_name = "base";
2497 break;
2498 case CPU_MODEL_EXPANSION_TYPE_FULL:
2499 /* As we don't return every single property, full expansion needs
2500 * to keep the original model name+props, and add extra
2501 * properties on top of that.
2502 */
2503 base_name = model->name;
2504 if (model->has_props && model->props) {
2505 props = qdict_clone_shallow(qobject_to_qdict(model->props));
2506 }
2507 break;
2508 default:
2509 error_setg(&err, "Unsupportted expansion type");
2510 goto out;
2511 }
2512
2513 if (!props) {
2514 props = qdict_new();
2515 }
2516 x86_cpu_to_dict(xc, props);
2517
2518 ret->model = g_new0(CpuModelInfo, 1);
2519 ret->model->name = g_strdup(base_name);
2520 ret->model->props = QOBJECT(props);
2521 ret->model->has_props = true;
2522
2523 out:
2524 object_unref(OBJECT(xc));
2525 if (err) {
2526 error_propagate(errp, err);
2527 qapi_free_CpuModelExpansionInfo(ret);
2528 ret = NULL;
2529 }
2530 return ret;
2531 }
2532
2533 X86CPU *cpu_x86_init(const char *cpu_model)
2534 {
2535 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2536 }
2537
2538 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2539 {
2540 X86CPUDefinition *cpudef = data;
2541 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2542
2543 xcc->cpu_def = cpudef;
2544 xcc->migration_safe = true;
2545 }
2546
/* Register a QOM type for one table-defined CPU model (@def) */
static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));

    type_register(&ti);
    g_free(typename);
}
2565
2566 #if !defined(CONFIG_USER_ONLY)
2567
/* Hide the APIC from CPUID (clears CPUID[1].EDX bit) — used when the
 * machine is configured without a local APIC. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2572
2573 #endif /* !CONFIG_USER_ONLY */
2574
/* Emulate the CPUID instruction: fill @eax..@edx for leaf @index,
 * subleaf @count, from the configured feature words and topology. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string is returned in EBX:EDX:ECX order */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects guest CR4, not a static feature bit */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE reflects guest CR4.PKE, like OSXSAVE above */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            /* EAX:EDX = supported XCR0 bits; EBX/ECX = XSAVE area sizes */
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset for each enabled save area */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Model-name string, 16 bytes per leaf across three leaves */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if  (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2972
/* CPUClass::reset() */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Clear everything up to (but not including) the persistent fields */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Segment registers: CS bases at 0xffff0000 so CS:IP points at the
     * reset vector; data segments are flat 16-bit real-mode segments. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
3094
#ifndef CONFIG_USER_ONLY
/* Return true if @cpu is the bootstrap processor (BSP), as indicated by
 * the BSP bit of its APIC base MSR.
 */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}

/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
#endif
3108
3109 static void mce_init(X86CPU *cpu)
3110 {
3111 CPUX86State *cenv = &cpu->env;
3112 unsigned int bank;
3113
3114 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3115 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3116 (CPUID_MCE | CPUID_MCA)) {
3117 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3118 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3119 cenv->mcg_ctl = ~(uint64_t)0;
3120 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3121 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3122 }
3123 }
3124 }
3125
3126 #ifndef CONFIG_USER_ONLY
3127 APICCommonClass *apic_get_class(void)
3128 {
3129 const char *apic_type = "apic";
3130
3131 if (kvm_apic_in_kernel()) {
3132 apic_type = "kvm-apic";
3133 } else if (xen_enabled()) {
3134 apic_type = "xen-apic";
3135 }
3136
3137 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3138 }
3139
/* Create the local APIC device for @cpu and attach it as the "lapic"
 * child property.  The APIC is not realized here; that happens later in
 * x86_cpu_apic_realize().
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    /* The child property keeps the only long-lived reference; drop the
     * one returned by object_new() so unparenting frees the APIC.
     */
    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
3157
3158 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3159 {
3160 APICCommonState *apic;
3161 static bool apic_mmio_map_once;
3162
3163 if (cpu->apic_state == NULL) {
3164 return;
3165 }
3166 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
3167 errp);
3168
3169 /* Map APIC MMIO area */
3170 apic = APIC_COMMON(cpu->apic_state);
3171 if (!apic_mmio_map_once) {
3172 memory_region_add_subregion_overlap(get_system_memory(),
3173 apic->apicbase &
3174 MSR_IA32_APICBASE_BASE,
3175 &apic->io_memory,
3176 0x1000);
3177 apic_mmio_map_once = true;
3178 }
3179 }
3180
/* machine-init-done notifier: if the machine exposes /machine/smram,
 * alias it into this CPU's address space (disabled by default; SMM code
 * enables it when entering SMM).
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        /* Alias the first 4GiB of SMRAM; priority 1 puts it above RAM */
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
3195 #else
/* User-mode emulation has no APIC device, so realizing it is a no-op. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
3199 #endif
3200
3201 /* Note: Only safe for use on x86(-64) hosts */
3202 static uint32_t x86_host_phys_bits(void)
3203 {
3204 uint32_t eax;
3205 uint32_t host_phys_bits;
3206
3207 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3208 if (eax >= 0x80000008) {
3209 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3210 /* Note: According to AMD doc 25481 rev 2.34 they have a field
3211 * at 23:16 that can specify a maximum physical address bits for
3212 * the guest that can override this value; but I've not seen
3213 * anything with that set.
3214 */
3215 host_phys_bits = eax & 0xff;
3216 } else {
3217 /* It's an odd 64 bit machine that doesn't have the leaf for
3218 * physical address bits; fall back to 36 that's most older
3219 * Intel.
3220 */
3221 host_phys_bits = 36;
3222 }
3223
3224 return host_phys_bits;
3225 }
3226
3227 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3228 {
3229 if (*min < value) {
3230 *min = value;
3231 }
3232 }
3233
3234 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3235 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3236 {
3237 CPUX86State *env = &cpu->env;
3238 FeatureWordInfo *fi = &feature_word_info[w];
3239 uint32_t eax = fi->cpuid_eax;
3240 uint32_t region = eax & 0xF0000000;
3241
3242 if (!env->features[w]) {
3243 return;
3244 }
3245
3246 switch (region) {
3247 case 0x00000000:
3248 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3249 break;
3250 case 0x80000000:
3251 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3252 break;
3253 case 0xC0000000:
3254 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3255 break;
3256 }
3257 }
3258
3259 /* Calculate XSAVE components based on the configured CPU feature flags */
3260 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3261 {
3262 CPUX86State *env = &cpu->env;
3263 int i;
3264 uint64_t mask;
3265
3266 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3267 return;
3268 }
3269
3270 mask = 0;
3271 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3272 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3273 if (env->features[esa->feature] & esa->bits) {
3274 mask |= (1ULL << i);
3275 }
3276 }
3277
3278 env->features[FEAT_XSAVE_COMP_LO] = mask;
3279 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3280 }
3281
/***** Steps involved in loading and filtering CPUID data
3283 *
3284 * When initializing and realizing a CPU object, the steps
3285 * involved in setting up CPUID data are:
3286 *
3287 * 1) Loading CPU model definition (X86CPUDefinition). This is
3288 * implemented by x86_cpu_load_def() and should be completely
3289 * transparent, as it is done automatically by instance_init.
3290 * No code should need to look at X86CPUDefinition structs
3291 * outside instance_init.
3292 *
3293 * 2) CPU expansion. This is done by realize before CPUID
3294 * filtering, and will make sure host/accelerator data is
3295 * loaded for CPU models that depend on host capabilities
3296 * (e.g. "host"). Done by x86_cpu_expand_features().
3297 *
3298 * 3) CPUID filtering. This initializes extra data related to
3299 * CPUID, and checks if the host supports all capabilities
3300 * required by the CPU. Runnability of a CPU model is
3301 * determined at this step. Done by x86_cpu_filter_features().
3302 *
3303 * Some operations don't require all steps to be performed.
3304 * More precisely:
3305 *
3306 * - CPU instance creation (instance_init) will run only CPU
3307 * model loading. CPU expansion can't run at instance_init-time
3308 * because host/accelerator data may be not available yet.
3309 * - CPU realization will perform both CPU model expansion and CPUID
3310 * filtering, and return an error in case one of them fails.
3311 * - query-cpu-definitions needs to run all 3 steps. It needs
3312 * to run CPUID filtering, as the 'unavailable-features'
3313 * field is set based on the filtering results.
3314 * - The query-cpu-model-expansion QMP command only needs to run
3315 * CPU model loading and CPU expansion. It should not filter
3316 * any CPUID data based on host capabilities.
3317 */
3318
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: cpu->max_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        /* "-cpu max"/"host": start from everything the accelerator supports */
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Apply explicit "+feat"/"feat=on" requests from the command line */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* Apply explicit "-feat"/"feat=off" requests from the command line */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are only exposed under KVM with "kvm=on" */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
3398
3399 /*
3400 * Finishes initialization of CPUID data, filters CPU feature
3401 * words based on host availability of each feature.
3402 *
3403 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
3404 */
3405 static int x86_cpu_filter_features(X86CPU *cpu)
3406 {
3407 CPUX86State *env = &cpu->env;
3408 FeatureWord w;
3409 int rv = 0;
3410
3411 for (w = 0; w < FEATURE_WORDS; w++) {
3412 uint32_t host_feat =
3413 x86_cpu_get_supported_feature_word(w, false);
3414 uint32_t requested_features = env->features[w];
3415 env->features[w] &= host_feat;
3416 cpu->filtered_features[w] = requested_features & ~env->features[w];
3417 if (cpu->filtered_features[w]) {
3418 rv = 1;
3419 }
3420 }
3421
3422 return rv;
3423 }
3424
/* Match the 12-byte CPUID vendor string stored across the three
 * cpuid_vendor registers against the Intel / AMD vendor IDs.
 */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* Realize an X86CPU: expand and filter CPUID data, fix up phys-bits,
 * create/realize the APIC, set up the per-CPU address space (TCG), and
 * finally reset the CPU and chain to the parent realize.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /* Step 2 of CPUID setup: expansion (see comment block above) */
    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Step 3: filtering against host capabilities */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       kvm_enabled() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                                 " does not match phys-bits property (%u)",
                                 host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            /* TCG hard-codes the physical address width */
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed even without CPUID_APIC when SMP is configured */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3623
3624 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3625 {
3626 X86CPU *cpu = X86_CPU(dev);
3627 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3628 Error *local_err = NULL;
3629
3630 #ifndef CONFIG_USER_ONLY
3631 cpu_remove_sync(CPU(dev));
3632 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3633 #endif
3634
3635 if (cpu->apic_state) {
3636 object_unparent(OBJECT(cpu->apic_state));
3637 cpu->apic_state = NULL;
3638 }
3639
3640 xcc->parent_unrealize(dev, &local_err);
3641 if (local_err != NULL) {
3642 error_propagate(errp, local_err);
3643 return;
3644 }
3645 }
3646
/* Backing state for a QOM bool property controlling bit(s) of a
 * feature word (see x86_cpu_register_bit_prop()).
 */
typedef struct BitProperty {
    uint32_t *ptr;  /* field holding the controlled bits */
    uint32_t mask;  /* which bit(s) of *ptr this property covers */
} BitProperty;
3651
3652 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3653 void *opaque, Error **errp)
3654 {
3655 BitProperty *fp = opaque;
3656 bool value = (*fp->ptr & fp->mask) == fp->mask;
3657 visit_type_bool(v, name, &value, errp);
3658 }
3659
3660 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3661 void *opaque, Error **errp)
3662 {
3663 DeviceState *dev = DEVICE(obj);
3664 BitProperty *fp = opaque;
3665 Error *local_err = NULL;
3666 bool value;
3667
3668 if (dev->realized) {
3669 qdev_prop_set_after_realize(dev, name, errp);
3670 return;
3671 }
3672
3673 visit_type_bool(v, name, &value, &local_err);
3674 if (local_err) {
3675 error_propagate(errp, local_err);
3676 return;
3677 }
3678
3679 if (value) {
3680 *fp->ptr |= fp->mask;
3681 } else {
3682 *fp->ptr &= ~fp->mask;
3683 }
3684 }
3685
3686 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3687 void *opaque)
3688 {
3689 BitProperty *prop = opaque;
3690 g_free(prop);
3691 }
3692
3693 /* Register a boolean property to get/set a single bit in a uint32_t field.
3694 *
3695 * The same property name can be registered multiple times to make it affect
3696 * multiple bits in the same FeatureWord. In that case, the getter will return
3697 * true only if all bits are set.
3698 */
3699 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3700 const char *prop_name,
3701 uint32_t *field,
3702 int bitnr)
3703 {
3704 BitProperty *fp;
3705 ObjectProperty *op;
3706 uint32_t mask = (1UL << bitnr);
3707
3708 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3709 if (op) {
3710 fp = op->opaque;
3711 assert(fp->ptr == field);
3712 fp->mask |= mask;
3713 } else {
3714 fp = g_new0(BitProperty, 1);
3715 fp->ptr = field;
3716 fp->mask = mask;
3717 object_property_add(OBJECT(cpu), prop_name, "bool",
3718 x86_cpu_get_bit_prop,
3719 x86_cpu_set_bit_prop,
3720 x86_cpu_release_bit_prop, fp, &error_abort);
3721 }
3722 }
3723
3724 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3725 FeatureWord w,
3726 int bitnr)
3727 {
3728 FeatureWordInfo *fi = &feature_word_info[w];
3729 const char *name = fi->feat_names[bitnr];
3730
3731 if (!name) {
3732 return;
3733 }
3734
3735 /* Property names should use "-" instead of "_".
3736 * Old names containing underscores are registered as aliases
3737 * using object_property_add_alias()
3738 */
3739 assert(!strchr(name, '_'));
3740 /* aliases don't use "|" delimiters anymore, they are registered
3741 * manually using object_property_add_alias() */
3742 assert(!strchr(name, '|'));
3743 x86_cpu_register_bit_prop(cpu, name, &cpu->env.features[w], bitnr);
3744 }
3745
3746 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
3747 {
3748 X86CPU *cpu = X86_CPU(cs);
3749 CPUX86State *env = &cpu->env;
3750 GuestPanicInformation *panic_info = NULL;
3751
3752 if (env->features[FEAT_HYPERV_EDX] & HV_X64_GUEST_CRASH_MSR_AVAILABLE) {
3753 GuestPanicInformationHyperV *panic_info_hv =
3754 g_malloc0(sizeof(GuestPanicInformationHyperV));
3755 panic_info = g_malloc0(sizeof(GuestPanicInformation));
3756
3757 panic_info->type = GUEST_PANIC_INFORMATION_KIND_HYPER_V;
3758 panic_info->u.hyper_v.data = panic_info_hv;
3759
3760 assert(HV_X64_MSR_CRASH_PARAMS >= 5);
3761 panic_info_hv->arg1 = env->msr_hv_crash_params[0];
3762 panic_info_hv->arg2 = env->msr_hv_crash_params[1];
3763 panic_info_hv->arg3 = env->msr_hv_crash_params[2];
3764 panic_info_hv->arg4 = env->msr_hv_crash_params[3];
3765 panic_info_hv->arg5 = env->msr_hv_crash_params[4];
3766 }
3767
3768 return panic_info;
3769 }
3770 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
3771 const char *name, void *opaque,
3772 Error **errp)
3773 {
3774 CPUState *cs = CPU(obj);
3775 GuestPanicInformation *panic_info;
3776
3777 if (!cs->crash_occurred) {
3778 error_setg(errp, "No crash occured");
3779 return;
3780 }
3781
3782 panic_info = x86_cpu_get_crash_info(cs);
3783 if (panic_info == NULL) {
3784 error_setg(errp, "No crash information");
3785 return;
3786 }
3787
3788 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
3789 errp);
3790 qapi_free_GuestPanicInformation(panic_info);
3791 }
3792
/* instance_init for X86CPU: register the QOM properties (versioned
 * fields, feature words, per-bit feature flags and their legacy-name
 * aliases) and load the CPU model definition, if any.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* CPUID version fields and identification strings */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only views of the enabled and host-filtered feature words */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named feature bit */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Aliases for alternate spellings of feature names */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Legacy underscore spellings kept for compatibility */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    /* Abstract base type has no cpu_def; concrete models load theirs */
    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
3875
3876 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3877 {
3878 X86CPU *cpu = X86_CPU(cs);
3879
3880 return cpu->apic_id;
3881 }
3882
3883 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3884 {
3885 X86CPU *cpu = X86_CPU(cs);
3886
3887 return cpu->env.cr[0] & CR0_PG_MASK;
3888 }
3889
3890 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3891 {
3892 X86CPU *cpu = X86_CPU(cs);
3893
3894 cpu->env.eip = value;
3895 }
3896
3897 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3898 {
3899 X86CPU *cpu = X86_CPU(cs);
3900
3901 cpu->env.eip = tb->pc - tb->cs_base;
3902 }
3903
3904 static bool x86_cpu_has_work(CPUState *cs)
3905 {
3906 X86CPU *cpu = X86_CPU(cs);
3907 CPUX86State *env = &cpu->env;
3908
3909 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3910 CPU_INTERRUPT_POLL)) &&
3911 (env->eflags & IF_MASK)) ||
3912 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3913 CPU_INTERRUPT_INIT |
3914 CPU_INTERRUPT_SIPI |
3915 CPU_INTERRUPT_MCE)) ||
3916 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3917 !(env->hflags & HF_SMM_MASK));
3918 }
3919
/* qdev properties common to all X86CPU subclasses */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenment knobs */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* CPUID checking/enforcement against host capabilities */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "derive from cpuid_min_*" (see expand_features) */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_END_OF_LIST()
};
3964
/* class_init for the abstract TYPE_X86_CPU: wire up the CPUClass and
 * DeviceClass hooks shared by every x86 CPU model.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Save the parent hooks so our realize/unrealize can chain to them */
    xcc->parent_realize = dc->realize;
    xcc->parent_unrealize = dc->unrealize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    /* CPU_NB_REGS * 2 = general regs + xmm regs
     * 25 = eip, eflags, 6 seg regs, st[0-7], fctrl,...,fop, mxcsr.
     */
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    /* x86 CPUs can be hot-plugged via device_add */
    dc->cannot_instantiate_with_device_add_yet = false;
}
4017
/* Abstract base type; concrete CPU models are registered as subtypes
 * in x86_cpu_register_types().
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
4027
4028
4029 /* "base" CPU model, used by query-cpu-model-expansion */
4030 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4031 {
4032 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4033
4034 xcc->static_model = true;
4035 xcc->migration_safe = true;
4036 xcc->model_description = "base CPU model type with no features enabled";
4037 xcc->ordering = 8;
4038 }
4039
/* Type registration for the featureless "base" CPU model */
static const TypeInfo x86_base_cpu_type_info = {
        .name = X86_CPU_TYPE_NAME("base"),
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_base_class_init,
};
4045
/* Register the abstract base type, every built-in CPU model, and the
 * special "max"/"base"/"host" types.  The base type must be registered
 * before its subtypes.
 */
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_type(&builtin_x86_defs[i]);
    }
    type_register_static(&max_x86_cpu_type_info);
    type_register_static(&x86_base_cpu_type_info);
#ifdef CONFIG_KVM
    /* "host" passes through host CPUID and therefore requires KVM */
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)