1/*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19#include "qemu/osdep.h"
20#include "qemu/cutils.h"
21
22#include "cpu.h"
23#include "exec/exec-all.h"
24#include "sysemu/kvm.h"
25#include "sysemu/hvf.h"
26#include "sysemu/cpus.h"
27#include "kvm_i386.h"
28
29#include "qemu/error-report.h"
30#include "qemu/option.h"
31#include "qemu/config-file.h"
32#include "qapi/qmp/qerror.h"
33#include "qapi/qmp/types.h"
34
35#include "qapi-types.h"
36#include "qapi-visit.h"
37#include "qapi/visitor.h"
38#include "qom/qom-qobject.h"
39#include "sysemu/arch_init.h"
40
41#if defined(CONFIG_KVM)
42#include <linux/kvm_para.h>
43#endif
44
45#include "sysemu/sysemu.h"
46#include "hw/qdev-properties.h"
47#include "hw/i386/topology.h"
48#ifndef CONFIG_USER_ONLY
49#include "exec/address-spaces.h"
50#include "hw/hw.h"
51#include "hw/xen/xen.h"
52#include "hw/i386/apic_internal.h"
53#endif
54
55#include "disas/capstone.h"
56
57
58/* Cache topology CPUID constants: */
59
60/* CPUID Leaf 2 Descriptors */
61
62#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
63#define CPUID_2_L1I_32KB_8WAY_64B 0x30
64#define CPUID_2_L2_2MB_8WAY_64B 0x7d
65#define CPUID_2_L3_16MB_16WAY_64B 0x4d
66
67
68/* CPUID Leaf 4 constants: */
69
70/* EAX: */
71#define CPUID_4_TYPE_DCACHE 1
72#define CPUID_4_TYPE_ICACHE 2
73#define CPUID_4_TYPE_UNIFIED 3
74
75#define CPUID_4_LEVEL(l) ((l) << 5)
76
77#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
78#define CPUID_4_FULLY_ASSOC (1 << 9)
79
80/* EDX: */
81#define CPUID_4_NO_INVD_SHARING (1 << 0)
82#define CPUID_4_INCLUSIVE (1 << 1)
83#define CPUID_4_COMPLEX_IDX (1 << 2)
84
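/*
 * Illustrative sketch (not part of the original file): a CPUID leaf 4 EAX
 * word for a level-1 data cache is composed from the encodings above as
 *
 *     uint32_t eax = CPUID_4_TYPE_DCACHE | CPUID_4_LEVEL(1) |
 *                    CPUID_4_SELF_INIT_LEVEL;
 *
 * i.e. cache type in bits 4:0, cache level in bits 7:5, and the
 * self-initializing flag in bit 8.
 */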
85#define ASSOC_FULL 0xFF
86
87/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
88#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
89 a == 2 ? 0x2 : \
90 a == 4 ? 0x4 : \
91 a == 8 ? 0x6 : \
92 a == 16 ? 0x8 : \
93 a == 32 ? 0xA : \
94 a == 48 ? 0xB : \
95 a == 64 ? 0xC : \
96 a == 96 ? 0xD : \
97 a == 128 ? 0xE : \
98 a == ASSOC_FULL ? 0xF : \
99 0 /* invalid value */)
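/*
 * Worked example: with the 16-way L2/L3 values defined below,
 * AMD_ENC_ASSOC(16) evaluates to 0x8, the 4-bit associativity code AMD
 * uses in CPUID leaf 0x80000006; AMD_ENC_ASSOC(ASSOC_FULL) yields 0xF
 * (fully associative) and any unlisted value maps to 0 (invalid).
 */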
100
101
102/* Definitions of the hardcoded cache entries we expose: */
103
104/* L1 data cache: */
105#define L1D_LINE_SIZE 64
106#define L1D_ASSOCIATIVITY 8
107#define L1D_SETS 64
108#define L1D_PARTITIONS 1
109/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
110#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
111/* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
112#define L1D_LINES_PER_TAG 1
113#define L1D_SIZE_KB_AMD 64
114#define L1D_ASSOCIATIVITY_AMD 2
115
116/* L1 instruction cache: */
117#define L1I_LINE_SIZE 64
118#define L1I_ASSOCIATIVITY 8
119#define L1I_SETS 64
120#define L1I_PARTITIONS 1
121/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
122#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
123/* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
124#define L1I_LINES_PER_TAG 1
125#define L1I_SIZE_KB_AMD 64
126#define L1I_ASSOCIATIVITY_AMD 2
127
128/* Level 2 unified cache: */
129#define L2_LINE_SIZE 64
130#define L2_ASSOCIATIVITY 16
131#define L2_SETS 4096
132#define L2_PARTITIONS 1
133/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
134/* FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
135#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
136/* FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
137#define L2_LINES_PER_TAG 1
138#define L2_SIZE_KB_AMD 512
139
140/* Level 3 unified cache: */
141#define L3_SIZE_KB 0 /* disabled */
142#define L3_ASSOCIATIVITY 0 /* disabled */
143#define L3_LINES_PER_TAG 0 /* disabled */
144#define L3_LINE_SIZE 0 /* disabled */
145#define L3_N_LINE_SIZE 64
146#define L3_N_ASSOCIATIVITY 16
147#define L3_N_SETS 16384
148#define L3_N_PARTITIONS 1
149#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
150#define L3_N_LINES_PER_TAG 1
151#define L3_N_SIZE_KB_AMD 16384
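/*
 * Sanity check on the sizes implied above
 * (LINE_SIZE * ASSOCIATIVITY * SETS * PARTITIONS):
 *   L1D/L1I:          64 B *  8 *    64 * 1 = 32 KiB
 *   L2:               64 B * 16 *  4096 * 1 = 4 MiB (the leaf 2 descriptor
 *                     still advertises 2 MB, see the FIXME above)
 *   L3 (L3_N_* case): 64 B * 16 * 16384 * 1 = 16 MiB
 */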
152
153/* TLB definitions: */
154
155#define L1_DTLB_2M_ASSOC 1
156#define L1_DTLB_2M_ENTRIES 255
157#define L1_DTLB_4K_ASSOC 1
158#define L1_DTLB_4K_ENTRIES 255
159
160#define L1_ITLB_2M_ASSOC 1
161#define L1_ITLB_2M_ENTRIES 255
162#define L1_ITLB_4K_ASSOC 1
163#define L1_ITLB_4K_ENTRIES 255
164
165#define L2_DTLB_2M_ASSOC 0 /* disabled */
166#define L2_DTLB_2M_ENTRIES 0 /* disabled */
167#define L2_DTLB_4K_ASSOC 4
168#define L2_DTLB_4K_ENTRIES 512
169
170#define L2_ITLB_2M_ASSOC 0 /* disabled */
171#define L2_ITLB_2M_ENTRIES 0 /* disabled */
172#define L2_ITLB_4K_ASSOC 4
173#define L2_ITLB_4K_ENTRIES 512
174
175
176
177static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
178 uint32_t vendor2, uint32_t vendor3)
179{
180 int i;
181 for (i = 0; i < 4; i++) {
182 dst[i] = vendor1 >> (8 * i);
183 dst[i + 4] = vendor2 >> (8 * i);
184 dst[i + 8] = vendor3 >> (8 * i);
185 }
186 dst[CPUID_VENDOR_SZ] = '\0';
187}
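/*
 * Example with well-known CPUID values (for illustration only): on an Intel
 * host, CPUID leaf 0 returns EBX=0x756e6547, EDX=0x49656e69, ECX=0x6c65746e,
 * and x86_cpu_vendor_words2str(dst, ebx, edx, ecx) fills dst with the
 * 12-character string "GenuineIntel" plus a trailing '\0'.
 */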
188
189#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
190#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
191 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
192#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
193 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
194 CPUID_PSE36 | CPUID_FXSR)
195#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
196#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
197 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
198 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
199 CPUID_PAE | CPUID_SEP | CPUID_APIC)
200
201#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
202 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
203 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
204 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
205 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
206 /* partly implemented:
207 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
208 /* missing:
209 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
210#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
211 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
212 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
213 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
214 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
215 /* missing:
216 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
217 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
218 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
219 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
220 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
221
222#ifdef TARGET_X86_64
223#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
224#else
225#define TCG_EXT2_X86_64_FEATURES 0
226#endif
227
228#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
229 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
230 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
231 TCG_EXT2_X86_64_FEATURES)
232#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
233 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
234#define TCG_EXT4_FEATURES 0
235#define TCG_SVM_FEATURES 0
236#define TCG_KVM_FEATURES 0
237#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
238 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
239 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
240 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
241 CPUID_7_0_EBX_ERMS)
242 /* missing:
243 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
244 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
245 CPUID_7_0_EBX_RDSEED */
246#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
247 CPUID_7_0_ECX_LA57)
248#define TCG_7_0_EDX_FEATURES 0
249#define TCG_APM_FEATURES 0
250#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
251#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
252 /* missing:
253 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
254
255typedef struct FeatureWordInfo {
256 /* Feature flag names are taken from "Intel Processor Identification and
257 * the CPUID Instruction" and AMD's "CPUID Specification".
258 * In cases of disagreement between feature naming conventions,
259 * aliases may be added.
260 */
261 const char *feat_names[32];
262 uint32_t cpuid_eax; /* Input EAX for CPUID */
263 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
264 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
265 int cpuid_reg; /* output register (R_* constant) */
266 uint32_t tcg_features; /* Feature flags supported by TCG */
267 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
268 uint32_t migratable_flags; /* Feature flags known to be migratable */
269} FeatureWordInfo;
270
271static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
272 [FEAT_1_EDX] = {
273 .feat_names = {
274 "fpu", "vme", "de", "pse",
275 "tsc", "msr", "pae", "mce",
276 "cx8", "apic", NULL, "sep",
277 "mtrr", "pge", "mca", "cmov",
278 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
279 NULL, "ds" /* Intel dts */, "acpi", "mmx",
280 "fxsr", "sse", "sse2", "ss",
281 "ht" /* Intel htt */, "tm", "ia64", "pbe",
282 },
283 .cpuid_eax = 1, .cpuid_reg = R_EDX,
284 .tcg_features = TCG_FEATURES,
285 },
286 [FEAT_1_ECX] = {
287 .feat_names = {
288 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
289 "ds-cpl", "vmx", "smx", "est",
290 "tm2", "ssse3", "cid", NULL,
291 "fma", "cx16", "xtpr", "pdcm",
292 NULL, "pcid", "dca", "sse4.1",
293 "sse4.2", "x2apic", "movbe", "popcnt",
294 "tsc-deadline", "aes", "xsave", "osxsave",
295 "avx", "f16c", "rdrand", "hypervisor",
296 },
297 .cpuid_eax = 1, .cpuid_reg = R_ECX,
298 .tcg_features = TCG_EXT_FEATURES,
299 },
300 /* Feature flags that already have names in FEAT_1_EDX above, but are
301 * also reported in CPUID[8000_0001].EDX on AMD CPUs, do not have
302 * names in feat_names below. They are copied automatically
303 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
304 */
305 [FEAT_8000_0001_EDX] = {
306 .feat_names = {
307 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
308 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
309 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
310 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
311 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
312 "nx", NULL, "mmxext", NULL /* mmx */,
313 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
314 NULL, "lm", "3dnowext", "3dnow",
315 },
316 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
317 .tcg_features = TCG_EXT2_FEATURES,
318 },
319 [FEAT_8000_0001_ECX] = {
320 .feat_names = {
321 "lahf-lm", "cmp-legacy", "svm", "extapic",
322 "cr8legacy", "abm", "sse4a", "misalignsse",
323 "3dnowprefetch", "osvw", "ibs", "xop",
324 "skinit", "wdt", NULL, "lwp",
325 "fma4", "tce", NULL, "nodeid-msr",
326 NULL, "tbm", "topoext", "perfctr-core",
327 "perfctr-nb", NULL, NULL, NULL,
328 NULL, NULL, NULL, NULL,
329 },
330 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
331 .tcg_features = TCG_EXT3_FEATURES,
332 },
333 [FEAT_C000_0001_EDX] = {
334 .feat_names = {
335 NULL, NULL, "xstore", "xstore-en",
336 NULL, NULL, "xcrypt", "xcrypt-en",
337 "ace2", "ace2-en", "phe", "phe-en",
338 "pmm", "pmm-en", NULL, NULL,
339 NULL, NULL, NULL, NULL,
340 NULL, NULL, NULL, NULL,
341 NULL, NULL, NULL, NULL,
342 NULL, NULL, NULL, NULL,
343 },
344 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
345 .tcg_features = TCG_EXT4_FEATURES,
346 },
347 [FEAT_KVM] = {
348 .feat_names = {
349 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
350 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
351 NULL, "kvm-pv-tlb-flush", NULL, NULL,
352 NULL, NULL, NULL, NULL,
353 NULL, NULL, NULL, NULL,
354 NULL, NULL, NULL, NULL,
355 "kvmclock-stable-bit", NULL, NULL, NULL,
356 NULL, NULL, NULL, NULL,
357 },
358 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
359 .tcg_features = TCG_KVM_FEATURES,
360 },
361 [FEAT_HYPERV_EAX] = {
362 .feat_names = {
363 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
364 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
365 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
366 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
367 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
368 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
369 NULL, NULL, NULL, NULL,
370 NULL, NULL, NULL, NULL,
371 NULL, NULL, NULL, NULL,
372 NULL, NULL, NULL, NULL,
373 NULL, NULL, NULL, NULL,
374 },
375 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
376 },
377 [FEAT_HYPERV_EBX] = {
378 .feat_names = {
379 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
380 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
381 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
382 NULL /* hv_create_port */, NULL /* hv_connect_port */,
383 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
384 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
385 NULL, NULL,
386 NULL, NULL, NULL, NULL,
387 NULL, NULL, NULL, NULL,
388 NULL, NULL, NULL, NULL,
389 NULL, NULL, NULL, NULL,
390 },
391 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
392 },
393 [FEAT_HYPERV_EDX] = {
394 .feat_names = {
395 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
396 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
397 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
398 NULL, NULL,
399 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
400 NULL, NULL, NULL, NULL,
401 NULL, NULL, NULL, NULL,
402 NULL, NULL, NULL, NULL,
403 NULL, NULL, NULL, NULL,
404 NULL, NULL, NULL, NULL,
405 },
406 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
407 },
408 [FEAT_SVM] = {
409 .feat_names = {
410 "npt", "lbrv", "svm-lock", "nrip-save",
411 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
412 NULL, NULL, "pause-filter", NULL,
413 "pfthreshold", NULL, NULL, NULL,
414 NULL, NULL, NULL, NULL,
415 NULL, NULL, NULL, NULL,
416 NULL, NULL, NULL, NULL,
417 NULL, NULL, NULL, NULL,
418 },
419 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
420 .tcg_features = TCG_SVM_FEATURES,
421 },
422 [FEAT_7_0_EBX] = {
423 .feat_names = {
424 "fsgsbase", "tsc-adjust", NULL, "bmi1",
425 "hle", "avx2", NULL, "smep",
426 "bmi2", "erms", "invpcid", "rtm",
427 NULL, NULL, "mpx", NULL,
428 "avx512f", "avx512dq", "rdseed", "adx",
429 "smap", "avx512ifma", "pcommit", "clflushopt",
430 "clwb", NULL, "avx512pf", "avx512er",
431 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
432 },
433 .cpuid_eax = 7,
434 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
435 .cpuid_reg = R_EBX,
436 .tcg_features = TCG_7_0_EBX_FEATURES,
437 },
438 [FEAT_7_0_ECX] = {
439 .feat_names = {
440 NULL, "avx512vbmi", "umip", "pku",
441 "ospke", NULL, "avx512vbmi2", NULL,
442 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
443 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
444 "la57", NULL, NULL, NULL,
445 NULL, NULL, "rdpid", NULL,
446 NULL, NULL, NULL, NULL,
447 NULL, NULL, NULL, NULL,
448 },
449 .cpuid_eax = 7,
450 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
451 .cpuid_reg = R_ECX,
452 .tcg_features = TCG_7_0_ECX_FEATURES,
453 },
454 [FEAT_7_0_EDX] = {
455 .feat_names = {
456 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
457 NULL, NULL, NULL, NULL,
458 NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL,
461 NULL, NULL, NULL, NULL,
462 NULL, NULL, NULL, NULL,
463 NULL, NULL, NULL, NULL,
464 },
465 .cpuid_eax = 7,
466 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
467 .cpuid_reg = R_EDX,
468 .tcg_features = TCG_7_0_EDX_FEATURES,
469 },
470 [FEAT_8000_0007_EDX] = {
471 .feat_names = {
472 NULL, NULL, NULL, NULL,
473 NULL, NULL, NULL, NULL,
474 "invtsc", NULL, NULL, NULL,
475 NULL, NULL, NULL, NULL,
476 NULL, NULL, NULL, NULL,
477 NULL, NULL, NULL, NULL,
478 NULL, NULL, NULL, NULL,
479 NULL, NULL, NULL, NULL,
480 },
481 .cpuid_eax = 0x80000007,
482 .cpuid_reg = R_EDX,
483 .tcg_features = TCG_APM_FEATURES,
484 .unmigratable_flags = CPUID_APM_INVTSC,
485 },
486 [FEAT_XSAVE] = {
487 .feat_names = {
488 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
489 NULL, NULL, NULL, NULL,
490 NULL, NULL, NULL, NULL,
491 NULL, NULL, NULL, NULL,
492 NULL, NULL, NULL, NULL,
493 NULL, NULL, NULL, NULL,
494 NULL, NULL, NULL, NULL,
495 NULL, NULL, NULL, NULL,
496 },
497 .cpuid_eax = 0xd,
498 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
499 .cpuid_reg = R_EAX,
500 .tcg_features = TCG_XSAVE_FEATURES,
501 },
502 [FEAT_6_EAX] = {
503 .feat_names = {
504 NULL, NULL, "arat", NULL,
505 NULL, NULL, NULL, NULL,
506 NULL, NULL, NULL, NULL,
507 NULL, NULL, NULL, NULL,
508 NULL, NULL, NULL, NULL,
509 NULL, NULL, NULL, NULL,
510 NULL, NULL, NULL, NULL,
511 NULL, NULL, NULL, NULL,
512 },
513 .cpuid_eax = 6, .cpuid_reg = R_EAX,
514 .tcg_features = TCG_6_EAX_FEATURES,
515 },
516 [FEAT_XSAVE_COMP_LO] = {
517 .cpuid_eax = 0xD,
518 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
519 .cpuid_reg = R_EAX,
520 .tcg_features = ~0U,
521 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
522 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
523 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
524 XSTATE_PKRU_MASK,
525 },
526 [FEAT_XSAVE_COMP_HI] = {
527 .cpuid_eax = 0xD,
528 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
529 .cpuid_reg = R_EDX,
530 .tcg_features = ~0U,
531 },
532};
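/*
 * A minimal sketch (an assumption mirroring how this table is consumed
 * elsewhere in this file, not a definitive API) of reading the host bits
 * behind one feature word:
 *
 *     FeatureWordInfo *wi = &feature_word_info[FEAT_7_0_EBX];
 *     uint32_t regs[4];
 *     host_cpuid(wi->cpuid_eax, wi->cpuid_needs_ecx ? wi->cpuid_ecx : 0,
 *                &regs[R_EAX], &regs[R_EBX], &regs[R_ECX], &regs[R_EDX]);
 *     uint32_t host_bits = regs[wi->cpuid_reg];
 *
 * Each entry names the CPUID leaf/subleaf and output register holding the
 * 32 flags described by feat_names[].
 */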
533
534typedef struct X86RegisterInfo32 {
535 /* Name of register */
536 const char *name;
537 /* QAPI enum value register */
538 X86CPURegister32 qapi_enum;
539} X86RegisterInfo32;
540
541#define REGISTER(reg) \
542 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
543static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
544 REGISTER(EAX),
545 REGISTER(ECX),
546 REGISTER(EDX),
547 REGISTER(EBX),
548 REGISTER(ESP),
549 REGISTER(EBP),
550 REGISTER(ESI),
551 REGISTER(EDI),
552};
553#undef REGISTER
554
555typedef struct ExtSaveArea {
556 uint32_t feature, bits;
557 uint32_t offset, size;
558} ExtSaveArea;
559
560static const ExtSaveArea x86_ext_save_areas[] = {
561 [XSTATE_FP_BIT] = {
562 /* x87 FP state component is always enabled if XSAVE is supported */
563 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
564 /* x87 state is in the legacy region of the XSAVE area */
565 .offset = 0,
566 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
567 },
568 [XSTATE_SSE_BIT] = {
569 /* SSE state component is always enabled if XSAVE is supported */
570 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
571 /* SSE state is in the legacy region of the XSAVE area */
572 .offset = 0,
573 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
574 },
575 [XSTATE_YMM_BIT] =
576 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
577 .offset = offsetof(X86XSaveArea, avx_state),
578 .size = sizeof(XSaveAVX) },
579 [XSTATE_BNDREGS_BIT] =
580 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
581 .offset = offsetof(X86XSaveArea, bndreg_state),
582 .size = sizeof(XSaveBNDREG) },
583 [XSTATE_BNDCSR_BIT] =
584 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
585 .offset = offsetof(X86XSaveArea, bndcsr_state),
586 .size = sizeof(XSaveBNDCSR) },
587 [XSTATE_OPMASK_BIT] =
588 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
589 .offset = offsetof(X86XSaveArea, opmask_state),
590 .size = sizeof(XSaveOpmask) },
591 [XSTATE_ZMM_Hi256_BIT] =
592 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
593 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
594 .size = sizeof(XSaveZMM_Hi256) },
595 [XSTATE_Hi16_ZMM_BIT] =
596 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
597 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
598 .size = sizeof(XSaveHi16_ZMM) },
599 [XSTATE_PKRU_BIT] =
600 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
601 .offset = offsetof(X86XSaveArea, pkru_state),
602 .size = sizeof(XSavePKRU) },
603};
604
605static uint32_t xsave_area_size(uint64_t mask)
606{
607 int i;
608 uint64_t ret = 0;
609
610 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
611 const ExtSaveArea *esa = &x86_ext_save_areas[i];
612 if ((mask >> i) & 1) {
613 ret = MAX(ret, esa->offset + esa->size);
614 }
615 }
616 return ret;
617}
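/*
 * Worked example: for mask = XSTATE_FP_MASK | XSTATE_SSE_MASK |
 * XSTATE_YMM_MASK the loop returns
 * offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX), i.e. the 576-byte
 * legacy+header region plus the 256-byte AVX component, 832 bytes in total
 * (assuming the usual fixed XSAVE component layout).
 */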
618
619static inline bool accel_uses_host_cpuid(void)
620{
621 return kvm_enabled() || hvf_enabled();
622}
623
624static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
625{
626 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
627 cpu->env.features[FEAT_XSAVE_COMP_LO];
628}
629
630const char *get_register_name_32(unsigned int reg)
631{
632 if (reg >= CPU_NB_REGS32) {
633 return NULL;
634 }
635 return x86_reg_info_32[reg].name;
636}
637
638/*
639 * Returns the set of feature flags that are supported and migratable by
640 * QEMU, for a given FeatureWord.
641 */
642static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
643{
644 FeatureWordInfo *wi = &feature_word_info[w];
645 uint32_t r = 0;
646 int i;
647
648 for (i = 0; i < 32; i++) {
649 uint32_t f = 1U << i;
650
651 /* If the feature name is known, it is implicitly considered migratable,
652 * unless it is explicitly set in unmigratable_flags */
653 if ((wi->migratable_flags & f) ||
654 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
655 r |= f;
656 }
657 }
658 return r;
659}
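/*
 * Example taken from the table above: FEAT_8000_0007_EDX names "invtsc"
 * but also lists CPUID_APM_INVTSC in unmigratable_flags, so
 * x86_cpu_get_migratable_flags(FEAT_8000_0007_EDX) leaves that bit clear
 * even though the flag has a name.
 */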
660
661void host_cpuid(uint32_t function, uint32_t count,
662 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
663{
664 uint32_t vec[4];
665
666#ifdef __x86_64__
667 asm volatile("cpuid"
668 : "=a"(vec[0]), "=b"(vec[1]),
669 "=c"(vec[2]), "=d"(vec[3])
670 : "0"(function), "c"(count) : "cc");
671#elif defined(__i386__)
672 asm volatile("pusha \n\t"
673 "cpuid \n\t"
674 "mov %%eax, 0(%2) \n\t"
675 "mov %%ebx, 4(%2) \n\t"
676 "mov %%ecx, 8(%2) \n\t"
677 "mov %%edx, 12(%2) \n\t"
678 "popa"
679 : : "a"(function), "c"(count), "S"(vec)
680 : "memory", "cc");
681#else
682 abort();
683#endif
684
685 if (eax)
686 *eax = vec[0];
687 if (ebx)
688 *ebx = vec[1];
689 if (ecx)
690 *ecx = vec[2];
691 if (edx)
692 *edx = vec[3];
693}
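/*
 * Minimal usage sketch (illustration only): reading the host's maximum
 * basic CPUID leaf and vendor registers looks like
 *
 *     uint32_t max_leaf, vb, vc, vd;
 *     host_cpuid(0, 0, &max_leaf, &vb, &vc, &vd);
 *
 * Any of the output pointers may be NULL when the corresponding register
 * is not needed, as the checks above show.
 */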
694
695void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
696{
697 uint32_t eax, ebx, ecx, edx;
698
699 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
700 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
701
702 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
703 if (family) {
704 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
705 }
706 if (model) {
707 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
708 }
709 if (stepping) {
710 *stepping = eax & 0x0F;
711 }
712}
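/*
 * Worked example of the decoding above: EAX = 0x000306C3 (a Haswell part)
 * yields family = 0x6 + 0x00 = 6, model = 0xC | 0x30 = 0x3C = 60,
 * stepping = 3. Note that this helper always folds in the extended
 * family/model fields, not only when the base family is 0xF.
 */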
713
714/* CPU class name definitions: */
715
716/* Return type name for a given CPU model name
717 * Caller is responsible for freeing the returned string.
718 */
719static char *x86_cpu_type_name(const char *model_name)
720{
721 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
722}
723
724static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
725{
726 ObjectClass *oc;
727 char *typename;
728
729 if (cpu_model == NULL) {
730 return NULL;
731 }
732
733 typename = x86_cpu_type_name(cpu_model);
734 oc = object_class_by_name(typename);
735 g_free(typename);
736 return oc;
737}
738
739static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
740{
741 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
742 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
743 return g_strndup(class_name,
744 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
745}
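/*
 * Illustration (assuming an x86_64 target, where X86_CPU_TYPE_SUFFIX is
 * "-x86_64-cpu"): x86_cpu_type_name("Haswell") returns the QOM type name
 * "Haswell-x86_64-cpu", and x86_cpu_class_get_model_name() performs the
 * reverse mapping back to "Haswell".
 */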
746
747struct X86CPUDefinition {
748 const char *name;
749 uint32_t level;
750 uint32_t xlevel;
751 /* vendor is zero-terminated, 12 character ASCII string */
752 char vendor[CPUID_VENDOR_SZ + 1];
753 int family;
754 int model;
755 int stepping;
756 FeatureWordArray features;
757 char model_id[48];
758};
759
760static X86CPUDefinition builtin_x86_defs[] = {
761 {
762 .name = "qemu64",
763 .level = 0xd,
764 .vendor = CPUID_VENDOR_AMD,
765 .family = 6,
766 .model = 6,
767 .stepping = 3,
768 .features[FEAT_1_EDX] =
769 PPRO_FEATURES |
770 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
771 CPUID_PSE36,
772 .features[FEAT_1_ECX] =
773 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
774 .features[FEAT_8000_0001_EDX] =
775 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
776 .features[FEAT_8000_0001_ECX] =
777 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
778 .xlevel = 0x8000000A,
779 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
780 },
781 {
782 .name = "phenom",
783 .level = 5,
784 .vendor = CPUID_VENDOR_AMD,
785 .family = 16,
786 .model = 2,
787 .stepping = 3,
788 /* Missing: CPUID_HT */
789 .features[FEAT_1_EDX] =
790 PPRO_FEATURES |
791 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
792 CPUID_PSE36 | CPUID_VME,
793 .features[FEAT_1_ECX] =
794 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
795 CPUID_EXT_POPCNT,
796 .features[FEAT_8000_0001_EDX] =
797 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
798 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
799 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
800 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
801 CPUID_EXT3_CR8LEG,
802 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
803 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
804 .features[FEAT_8000_0001_ECX] =
805 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
806 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
807 /* Missing: CPUID_SVM_LBRV */
808 .features[FEAT_SVM] =
809 CPUID_SVM_NPT,
810 .xlevel = 0x8000001A,
811 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
812 },
813 {
814 .name = "core2duo",
815 .level = 10,
816 .vendor = CPUID_VENDOR_INTEL,
817 .family = 6,
818 .model = 15,
819 .stepping = 11,
820 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
821 .features[FEAT_1_EDX] =
822 PPRO_FEATURES |
823 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
824 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
825 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
826 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
827 .features[FEAT_1_ECX] =
828 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
829 CPUID_EXT_CX16,
830 .features[FEAT_8000_0001_EDX] =
831 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
832 .features[FEAT_8000_0001_ECX] =
833 CPUID_EXT3_LAHF_LM,
834 .xlevel = 0x80000008,
835 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
836 },
837 {
838 .name = "kvm64",
839 .level = 0xd,
840 .vendor = CPUID_VENDOR_INTEL,
841 .family = 15,
842 .model = 6,
843 .stepping = 1,
844 /* Missing: CPUID_HT */
845 .features[FEAT_1_EDX] =
846 PPRO_FEATURES | CPUID_VME |
847 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
848 CPUID_PSE36,
849 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
850 .features[FEAT_1_ECX] =
851 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
852 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
853 .features[FEAT_8000_0001_EDX] =
854 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
855 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
856 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
857 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
858 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
859 .features[FEAT_8000_0001_ECX] =
860 0,
861 .xlevel = 0x80000008,
862 .model_id = "Common KVM processor"
863 },
864 {
865 .name = "qemu32",
866 .level = 4,
867 .vendor = CPUID_VENDOR_INTEL,
868 .family = 6,
869 .model = 6,
870 .stepping = 3,
871 .features[FEAT_1_EDX] =
872 PPRO_FEATURES,
873 .features[FEAT_1_ECX] =
874 CPUID_EXT_SSE3,
875 .xlevel = 0x80000004,
876 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
877 },
878 {
879 .name = "kvm32",
880 .level = 5,
881 .vendor = CPUID_VENDOR_INTEL,
882 .family = 15,
883 .model = 6,
884 .stepping = 1,
885 .features[FEAT_1_EDX] =
886 PPRO_FEATURES | CPUID_VME |
887 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
888 .features[FEAT_1_ECX] =
889 CPUID_EXT_SSE3,
890 .features[FEAT_8000_0001_ECX] =
891 0,
892 .xlevel = 0x80000008,
893 .model_id = "Common 32-bit KVM processor"
894 },
895 {
896 .name = "coreduo",
897 .level = 10,
898 .vendor = CPUID_VENDOR_INTEL,
899 .family = 6,
900 .model = 14,
901 .stepping = 8,
902 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
903 .features[FEAT_1_EDX] =
904 PPRO_FEATURES | CPUID_VME |
905 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
906 CPUID_SS,
907 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
908 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
909 .features[FEAT_1_ECX] =
910 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
911 .features[FEAT_8000_0001_EDX] =
912 CPUID_EXT2_NX,
913 .xlevel = 0x80000008,
914 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
915 },
916 {
917 .name = "486",
918 .level = 1,
919 .vendor = CPUID_VENDOR_INTEL,
920 .family = 4,
921 .model = 8,
922 .stepping = 0,
923 .features[FEAT_1_EDX] =
924 I486_FEATURES,
925 .xlevel = 0,
926 },
927 {
928 .name = "pentium",
929 .level = 1,
930 .vendor = CPUID_VENDOR_INTEL,
931 .family = 5,
932 .model = 4,
933 .stepping = 3,
934 .features[FEAT_1_EDX] =
935 PENTIUM_FEATURES,
936 .xlevel = 0,
937 },
938 {
939 .name = "pentium2",
940 .level = 2,
941 .vendor = CPUID_VENDOR_INTEL,
942 .family = 6,
943 .model = 5,
944 .stepping = 2,
945 .features[FEAT_1_EDX] =
946 PENTIUM2_FEATURES,
947 .xlevel = 0,
948 },
949 {
950 .name = "pentium3",
951 .level = 3,
952 .vendor = CPUID_VENDOR_INTEL,
953 .family = 6,
954 .model = 7,
955 .stepping = 3,
956 .features[FEAT_1_EDX] =
957 PENTIUM3_FEATURES,
958 .xlevel = 0,
959 },
960 {
961 .name = "athlon",
962 .level = 2,
963 .vendor = CPUID_VENDOR_AMD,
964 .family = 6,
965 .model = 2,
966 .stepping = 3,
967 .features[FEAT_1_EDX] =
968 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
969 CPUID_MCA,
970 .features[FEAT_8000_0001_EDX] =
971 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
972 .xlevel = 0x80000008,
973 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
974 },
975 {
976 .name = "n270",
977 .level = 10,
978 .vendor = CPUID_VENDOR_INTEL,
979 .family = 6,
980 .model = 28,
981 .stepping = 2,
982 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
983 .features[FEAT_1_EDX] =
984 PPRO_FEATURES |
985 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
986 CPUID_ACPI | CPUID_SS,
987 /* Some CPUs have no CPUID_SEP */
988 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
989 * CPUID_EXT_XTPR */
990 .features[FEAT_1_ECX] =
991 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
992 CPUID_EXT_MOVBE,
993 .features[FEAT_8000_0001_EDX] =
994 CPUID_EXT2_NX,
995 .features[FEAT_8000_0001_ECX] =
996 CPUID_EXT3_LAHF_LM,
997 .xlevel = 0x80000008,
998 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
999 },
1000 {
1001 .name = "Conroe",
1002 .level = 10,
1003 .vendor = CPUID_VENDOR_INTEL,
1004 .family = 6,
1005 .model = 15,
1006 .stepping = 3,
1007 .features[FEAT_1_EDX] =
1008 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1009 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1010 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1011 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1012 CPUID_DE | CPUID_FP87,
1013 .features[FEAT_1_ECX] =
1014 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1015 .features[FEAT_8000_0001_EDX] =
1016 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1017 .features[FEAT_8000_0001_ECX] =
1018 CPUID_EXT3_LAHF_LM,
1019 .xlevel = 0x80000008,
1020 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1021 },
1022 {
1023 .name = "Penryn",
1024 .level = 10,
1025 .vendor = CPUID_VENDOR_INTEL,
1026 .family = 6,
1027 .model = 23,
1028 .stepping = 3,
1029 .features[FEAT_1_EDX] =
1030 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1031 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1032 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1033 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1034 CPUID_DE | CPUID_FP87,
1035 .features[FEAT_1_ECX] =
1036 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1037 CPUID_EXT_SSE3,
1038 .features[FEAT_8000_0001_EDX] =
1039 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1040 .features[FEAT_8000_0001_ECX] =
1041 CPUID_EXT3_LAHF_LM,
1042 .xlevel = 0x80000008,
1043 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1044 },
1045 {
1046 .name = "Nehalem",
1047 .level = 11,
1048 .vendor = CPUID_VENDOR_INTEL,
1049 .family = 6,
1050 .model = 26,
1051 .stepping = 3,
1052 .features[FEAT_1_EDX] =
1053 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1054 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1055 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1056 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1057 CPUID_DE | CPUID_FP87,
1058 .features[FEAT_1_ECX] =
1059 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1060 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1061 .features[FEAT_8000_0001_EDX] =
1062 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1063 .features[FEAT_8000_0001_ECX] =
1064 CPUID_EXT3_LAHF_LM,
1065 .xlevel = 0x80000008,
1066 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1067 },
1068 {
1069 .name = "Westmere",
1070 .level = 11,
1071 .vendor = CPUID_VENDOR_INTEL,
1072 .family = 6,
1073 .model = 44,
1074 .stepping = 1,
1075 .features[FEAT_1_EDX] =
1076 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1077 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1078 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1079 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1080 CPUID_DE | CPUID_FP87,
1081 .features[FEAT_1_ECX] =
1082 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1083 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1084 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1085 .features[FEAT_8000_0001_EDX] =
1086 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1087 .features[FEAT_8000_0001_ECX] =
1088 CPUID_EXT3_LAHF_LM,
1089 .features[FEAT_6_EAX] =
1090 CPUID_6_EAX_ARAT,
1091 .xlevel = 0x80000008,
1092 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1093 },
1094 {
1095 .name = "SandyBridge",
1096 .level = 0xd,
1097 .vendor = CPUID_VENDOR_INTEL,
1098 .family = 6,
1099 .model = 42,
1100 .stepping = 1,
1101 .features[FEAT_1_EDX] =
1102 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1103 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1104 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1105 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1106 CPUID_DE | CPUID_FP87,
1107 .features[FEAT_1_ECX] =
1108 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1109 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1110 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1111 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1112 CPUID_EXT_SSE3,
1113 .features[FEAT_8000_0001_EDX] =
1114 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1115 CPUID_EXT2_SYSCALL,
1116 .features[FEAT_8000_0001_ECX] =
1117 CPUID_EXT3_LAHF_LM,
1118 .features[FEAT_XSAVE] =
1119 CPUID_XSAVE_XSAVEOPT,
1120 .features[FEAT_6_EAX] =
1121 CPUID_6_EAX_ARAT,
1122 .xlevel = 0x80000008,
1123 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1124 },
1125 {
1126 .name = "IvyBridge",
1127 .level = 0xd,
1128 .vendor = CPUID_VENDOR_INTEL,
1129 .family = 6,
1130 .model = 58,
1131 .stepping = 9,
1132 .features[FEAT_1_EDX] =
1133 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1134 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1135 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1136 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1137 CPUID_DE | CPUID_FP87,
1138 .features[FEAT_1_ECX] =
1139 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1140 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1141 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1142 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1143 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1144 .features[FEAT_7_0_EBX] =
1145 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1146 CPUID_7_0_EBX_ERMS,
1147 .features[FEAT_8000_0001_EDX] =
1148 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1149 CPUID_EXT2_SYSCALL,
1150 .features[FEAT_8000_0001_ECX] =
1151 CPUID_EXT3_LAHF_LM,
1152 .features[FEAT_XSAVE] =
1153 CPUID_XSAVE_XSAVEOPT,
1154 .features[FEAT_6_EAX] =
1155 CPUID_6_EAX_ARAT,
1156 .xlevel = 0x80000008,
1157 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1158 },
1159 {
1160 .name = "Haswell-noTSX",
1161 .level = 0xd,
1162 .vendor = CPUID_VENDOR_INTEL,
1163 .family = 6,
1164 .model = 60,
1165 .stepping = 1,
1166 .features[FEAT_1_EDX] =
1167 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1168 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1169 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1170 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1171 CPUID_DE | CPUID_FP87,
1172 .features[FEAT_1_ECX] =
1173 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1174 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1175 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1176 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1177 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1178 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1179 .features[FEAT_8000_0001_EDX] =
1180 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1181 CPUID_EXT2_SYSCALL,
1182 .features[FEAT_8000_0001_ECX] =
1183 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1184 .features[FEAT_7_0_EBX] =
1185 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1186 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1187 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1188 .features[FEAT_XSAVE] =
1189 CPUID_XSAVE_XSAVEOPT,
1190 .features[FEAT_6_EAX] =
1191 CPUID_6_EAX_ARAT,
1192 .xlevel = 0x80000008,
1193 .model_id = "Intel Core Processor (Haswell, no TSX)",
1194 }, {
1195 .name = "Haswell",
1196 .level = 0xd,
1197 .vendor = CPUID_VENDOR_INTEL,
1198 .family = 6,
1199 .model = 60,
1200 .stepping = 4,
1201 .features[FEAT_1_EDX] =
1202 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1203 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1204 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1205 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1206 CPUID_DE | CPUID_FP87,
1207 .features[FEAT_1_ECX] =
1208 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1209 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1210 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1211 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1212 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1213 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1214 .features[FEAT_8000_0001_EDX] =
1215 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1216 CPUID_EXT2_SYSCALL,
1217 .features[FEAT_8000_0001_ECX] =
1218 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1219 .features[FEAT_7_0_EBX] =
1220 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1221 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1222 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1223 CPUID_7_0_EBX_RTM,
1224 .features[FEAT_XSAVE] =
1225 CPUID_XSAVE_XSAVEOPT,
1226 .features[FEAT_6_EAX] =
1227 CPUID_6_EAX_ARAT,
1228 .xlevel = 0x80000008,
1229 .model_id = "Intel Core Processor (Haswell)",
1230 },
1231 {
1232 .name = "Broadwell-noTSX",
1233 .level = 0xd,
1234 .vendor = CPUID_VENDOR_INTEL,
1235 .family = 6,
1236 .model = 61,
1237 .stepping = 2,
1238 .features[FEAT_1_EDX] =
1239 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1240 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1241 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1242 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1243 CPUID_DE | CPUID_FP87,
1244 .features[FEAT_1_ECX] =
1245 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1246 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1247 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1248 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1249 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1250 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1251 .features[FEAT_8000_0001_EDX] =
1252 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1253 CPUID_EXT2_SYSCALL,
1254 .features[FEAT_8000_0001_ECX] =
1255 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1256 .features[FEAT_7_0_EBX] =
1257 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1258 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1259 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1260 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1261 CPUID_7_0_EBX_SMAP,
1262 .features[FEAT_XSAVE] =
1263 CPUID_XSAVE_XSAVEOPT,
1264 .features[FEAT_6_EAX] =
1265 CPUID_6_EAX_ARAT,
1266 .xlevel = 0x80000008,
1267 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1268 },
1269 {
1270 .name = "Broadwell",
1271 .level = 0xd,
1272 .vendor = CPUID_VENDOR_INTEL,
1273 .family = 6,
1274 .model = 61,
1275 .stepping = 2,
1276 .features[FEAT_1_EDX] =
1277 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1278 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1279 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1280 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1281 CPUID_DE | CPUID_FP87,
1282 .features[FEAT_1_ECX] =
1283 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1284 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1285 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1286 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1287 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1288 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1289 .features[FEAT_8000_0001_EDX] =
1290 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1291 CPUID_EXT2_SYSCALL,
1292 .features[FEAT_8000_0001_ECX] =
1293 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1294 .features[FEAT_7_0_EBX] =
1295 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1296 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1297 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1298 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1299 CPUID_7_0_EBX_SMAP,
1300 .features[FEAT_XSAVE] =
1301 CPUID_XSAVE_XSAVEOPT,
1302 .features[FEAT_6_EAX] =
1303 CPUID_6_EAX_ARAT,
1304 .xlevel = 0x80000008,
1305 .model_id = "Intel Core Processor (Broadwell)",
1306 },
1307 {
1308 .name = "Skylake-Client",
1309 .level = 0xd,
1310 .vendor = CPUID_VENDOR_INTEL,
1311 .family = 6,
1312 .model = 94,
1313 .stepping = 3,
1314 .features[FEAT_1_EDX] =
1315 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1316 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1317 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1318 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1319 CPUID_DE | CPUID_FP87,
1320 .features[FEAT_1_ECX] =
1321 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1322 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1323 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1324 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1325 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1326 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1327 .features[FEAT_8000_0001_EDX] =
1328 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1329 CPUID_EXT2_SYSCALL,
1330 .features[FEAT_8000_0001_ECX] =
1331 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1332 .features[FEAT_7_0_EBX] =
1333 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1334 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1335 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1336 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1337 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1338 /* Missing: XSAVES (not supported by some Linux versions,
1339 * including v4.1 to v4.12).
1340 * KVM doesn't yet expose any XSAVES state save component,
1341 * and the only one defined in Skylake (processor tracing)
1342 * probably will block migration anyway.
1343 */
1344 .features[FEAT_XSAVE] =
1345 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1346 CPUID_XSAVE_XGETBV1,
1347 .features[FEAT_6_EAX] =
1348 CPUID_6_EAX_ARAT,
1349 .xlevel = 0x80000008,
1350 .model_id = "Intel Core Processor (Skylake)",
1351 },
1352 {
1353 .name = "Skylake-Server",
1354 .level = 0xd,
1355 .vendor = CPUID_VENDOR_INTEL,
1356 .family = 6,
1357 .model = 85,
1358 .stepping = 4,
1359 .features[FEAT_1_EDX] =
1360 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1361 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1362 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1363 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1364 CPUID_DE | CPUID_FP87,
1365 .features[FEAT_1_ECX] =
1366 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1367 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1368 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1369 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1370 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1371 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1372 .features[FEAT_8000_0001_EDX] =
1373 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1374 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1375 .features[FEAT_8000_0001_ECX] =
1376 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1377 .features[FEAT_7_0_EBX] =
1378 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1379 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1380 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1381 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1382 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1383 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1384 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1385 CPUID_7_0_EBX_AVX512VL,
1386 /* Missing: XSAVES (not supported by some Linux versions,
1387 * including v4.1 to v4.12).
1388 * KVM doesn't yet expose any XSAVES state save component,
1389 * and the only one defined in Skylake (processor tracing)
1390 * probably will block migration anyway.
1391 */
1392 .features[FEAT_XSAVE] =
1393 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1394 CPUID_XSAVE_XGETBV1,
1395 .features[FEAT_6_EAX] =
1396 CPUID_6_EAX_ARAT,
1397 .xlevel = 0x80000008,
1398 .model_id = "Intel Xeon Processor (Skylake)",
1399 },
1400 {
1401 .name = "Opteron_G1",
1402 .level = 5,
1403 .vendor = CPUID_VENDOR_AMD,
1404 .family = 15,
1405 .model = 6,
1406 .stepping = 1,
1407 .features[FEAT_1_EDX] =
1408 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1409 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1410 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1411 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1412 CPUID_DE | CPUID_FP87,
1413 .features[FEAT_1_ECX] =
1414 CPUID_EXT_SSE3,
1415 .features[FEAT_8000_0001_EDX] =
1416 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1417 .xlevel = 0x80000008,
1418 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1419 },
1420 {
1421 .name = "Opteron_G2",
1422 .level = 5,
1423 .vendor = CPUID_VENDOR_AMD,
1424 .family = 15,
1425 .model = 6,
1426 .stepping = 1,
1427 .features[FEAT_1_EDX] =
1428 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1429 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1430 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1431 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1432 CPUID_DE | CPUID_FP87,
1433 .features[FEAT_1_ECX] =
1434 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1435 /* Missing: CPUID_EXT2_RDTSCP */
1436 .features[FEAT_8000_0001_EDX] =
1437 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1438 .features[FEAT_8000_0001_ECX] =
1439 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1440 .xlevel = 0x80000008,
1441 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1442 },
1443 {
1444 .name = "Opteron_G3",
1445 .level = 5,
1446 .vendor = CPUID_VENDOR_AMD,
1447 .family = 16,
1448 .model = 2,
1449 .stepping = 3,
1450 .features[FEAT_1_EDX] =
1451 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1452 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1453 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1454 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1455 CPUID_DE | CPUID_FP87,
1456 .features[FEAT_1_ECX] =
1457 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1458 CPUID_EXT_SSE3,
1459 /* Missing: CPUID_EXT2_RDTSCP */
1460 .features[FEAT_8000_0001_EDX] =
1461 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1462 .features[FEAT_8000_0001_ECX] =
1463 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1464 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1465 .xlevel = 0x80000008,
1466 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1467 },
1468 {
1469 .name = "Opteron_G4",
1470 .level = 0xd,
1471 .vendor = CPUID_VENDOR_AMD,
1472 .family = 21,
1473 .model = 1,
1474 .stepping = 2,
1475 .features[FEAT_1_EDX] =
1476 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1477 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1478 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1479 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1480 CPUID_DE | CPUID_FP87,
1481 .features[FEAT_1_ECX] =
1482 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1483 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1484 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1485 CPUID_EXT_SSE3,
1486 /* Missing: CPUID_EXT2_RDTSCP */
1487 .features[FEAT_8000_0001_EDX] =
1488 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1489 CPUID_EXT2_SYSCALL,
1490 .features[FEAT_8000_0001_ECX] =
1491 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1492 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1493 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1494 CPUID_EXT3_LAHF_LM,
1495 /* no xsaveopt! */
1496 .xlevel = 0x8000001A,
1497 .model_id = "AMD Opteron 62xx class CPU",
1498 },
1499 {
1500 .name = "Opteron_G5",
1501 .level = 0xd,
1502 .vendor = CPUID_VENDOR_AMD,
1503 .family = 21,
1504 .model = 2,
1505 .stepping = 0,
1506 .features[FEAT_1_EDX] =
1507 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1508 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1509 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1510 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1511 CPUID_DE | CPUID_FP87,
1512 .features[FEAT_1_ECX] =
1513 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1514 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1515 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1516 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1517 /* Missing: CPUID_EXT2_RDTSCP */
1518 .features[FEAT_8000_0001_EDX] =
1519 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1520 CPUID_EXT2_SYSCALL,
1521 .features[FEAT_8000_0001_ECX] =
1522 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1523 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1524 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1525 CPUID_EXT3_LAHF_LM,
1526 /* no xsaveopt! */
1527 .xlevel = 0x8000001A,
1528 .model_id = "AMD Opteron 63xx class CPU",
1529 },
1530 {
1531 .name = "EPYC",
1532 .level = 0xd,
1533 .vendor = CPUID_VENDOR_AMD,
1534 .family = 23,
1535 .model = 1,
1536 .stepping = 2,
1537 .features[FEAT_1_EDX] =
1538 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
1539 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
1540 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
1541 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
1542 CPUID_VME | CPUID_FP87,
1543 .features[FEAT_1_ECX] =
1544 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
1545 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
1546 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1547 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
1548 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1549 .features[FEAT_8000_0001_EDX] =
1550 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
1551 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
1552 CPUID_EXT2_SYSCALL,
1553 .features[FEAT_8000_0001_ECX] =
1554 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
1555 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
1556 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1557 .features[FEAT_7_0_EBX] =
1558 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
1559 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
1560 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
1561 CPUID_7_0_EBX_SHA_NI,
1562 /* Missing: XSAVES (not supported by some Linux versions,
1563 * including v4.1 to v4.12).
1564 * KVM doesn't yet expose any XSAVES state save component.
1565 */
1566 .features[FEAT_XSAVE] =
1567 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1568 CPUID_XSAVE_XGETBV1,
1569 .features[FEAT_6_EAX] =
1570 CPUID_6_EAX_ARAT,
1571 .xlevel = 0x8000000A,
1572 .model_id = "AMD EPYC Processor",
1573 },
1574};
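/*
 * Each entry above becomes a "-cpu <name>" model; individual flags can then
 * be overridden per VM, e.g. (illustrative command line, not taken from
 * this file):
 *
 *     qemu-system-x86_64 -cpu Haswell,rtm=off,invtsc=on
 *
 * Flags the accelerator cannot provide are reported by
 * report_unavailable_features() further below.
 */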
1575
1576typedef struct PropValue {
1577 const char *prop, *value;
1578} PropValue;
1579
1580/* KVM-specific features that are automatically added/removed
1581 * from all CPU models when KVM is enabled.
1582 */
1583static PropValue kvm_default_props[] = {
1584 { "kvmclock", "on" },
1585 { "kvm-nopiodelay", "on" },
1586 { "kvm-asyncpf", "on" },
1587 { "kvm-steal-time", "on" },
1588 { "kvm-pv-eoi", "on" },
1589 { "kvmclock-stable-bit", "on" },
1590 { "x2apic", "on" },
1591 { "acpi", "off" },
1592 { "monitor", "off" },
1593 { "svm", "off" },
1594 { NULL, NULL },
1595};
1596
1597/* TCG-specific defaults that override all CPU models when using TCG
1598 */
1599static PropValue tcg_default_props[] = {
1600 { "vme", "off" },
1601 { NULL, NULL },
1602};
1603
1604
1605void x86_cpu_change_kvm_default(const char *prop, const char *value)
1606{
1607 PropValue *pv;
1608 for (pv = kvm_default_props; pv->prop; pv++) {
1609 if (!strcmp(pv->prop, prop)) {
1610 pv->value = value;
1611 break;
1612 }
1613 }
1614
1615 /* It is valid to call this function only for properties that
1616 * are already present in the kvm_default_props table.
1617 */
1618 assert(pv->prop);
1619}
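/*
 * Usage sketch (hypothetical caller): machine-type code that wants KVM
 * guests to default to a different value for one of the listed properties
 * would call, e.g.,
 *
 *     x86_cpu_change_kvm_default("svm", "on");
 *
 * The assert() above documents that only properties already present in
 * kvm_default_props may be overridden this way.
 */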
1620
1621static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1622 bool migratable_only);
1623
1624static bool lmce_supported(void)
1625{
1626 uint64_t mce_cap = 0;
1627
1628#ifdef CONFIG_KVM
1629 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1630 return false;
1631 }
1632#endif
1633
1634 return !!(mce_cap & MCG_LMCE_P);
1635}
1636
1637#define CPUID_MODEL_ID_SZ 48
1638
1639/**
1640 * cpu_x86_fill_model_id:
1641 * Get CPUID model ID string from host CPU.
1642 *
1643 * @str should have at least CPUID_MODEL_ID_SZ bytes
1644 *
1645 * The function does NOT add a null terminator to the string
1646 * automatically.
1647 */
1648static int cpu_x86_fill_model_id(char *str)
1649{
1650 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1651 int i;
1652
1653 for (i = 0; i < 3; i++) {
1654 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1655 memcpy(str + i * 16 + 0, &eax, 4);
1656 memcpy(str + i * 16 + 4, &ebx, 4);
1657 memcpy(str + i * 16 + 8, &ecx, 4);
1658 memcpy(str + i * 16 + 12, &edx, 4);
1659 }
1660 return 0;
1661}
1662
1663static Property max_x86_cpu_properties[] = {
1664 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1665 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1666 DEFINE_PROP_END_OF_LIST()
1667};
1668
1669static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
1670{
1671 DeviceClass *dc = DEVICE_CLASS(oc);
1672 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1673
1674 xcc->ordering = 9;
1675
1676 xcc->model_description =
1677 "Enables all features supported by the accelerator in the current host";
1678
1679 dc->props = max_x86_cpu_properties;
1680}
1681
1682static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
1683
1684static void max_x86_cpu_initfn(Object *obj)
1685{
1686 X86CPU *cpu = X86_CPU(obj);
1687 CPUX86State *env = &cpu->env;
1688 KVMState *s = kvm_state;
1689
1690 /* We can't fill the features array here because we don't know yet if
1691 * "migratable" is true or false.
1692 */
1693 cpu->max_features = true;
1694
1695 if (accel_uses_host_cpuid()) {
1696 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
1697 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
1698 int family, model, stepping;
1699 X86CPUDefinition host_cpudef = { };
1700 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1701
1702 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1703 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1704
1705 host_vendor_fms(vendor, &family, &model, &stepping);
1706
1707 cpu_x86_fill_model_id(model_id);
1708
1709 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
1710 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
1711 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
1712 object_property_set_int(OBJECT(cpu), stepping, "stepping",
1713 &error_abort);
1714 object_property_set_str(OBJECT(cpu), model_id, "model-id",
1715 &error_abort);
1716
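 /* Probe the accelerator for the highest supported CPUID leaf in each
 * range and use the results as the minimum level/xlevel/xlevel2 values.
 */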
1717 if (kvm_enabled()) {
1718 env->cpuid_min_level =
1719 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1720 env->cpuid_min_xlevel =
1721 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1722 env->cpuid_min_xlevel2 =
1723 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1724 } else {
1725 env->cpuid_min_level =
1726 hvf_get_supported_cpuid(0x0, 0, R_EAX);
1727 env->cpuid_min_xlevel =
1728 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
1729 env->cpuid_min_xlevel2 =
1730 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
1731 }
1732
1733 if (lmce_supported()) {
1734 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1735 }
1736 } else {
1737 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
1738 "vendor", &error_abort);
1739 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
1740 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
1741 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
1742 object_property_set_str(OBJECT(cpu),
1743 "QEMU TCG CPU version " QEMU_HW_VERSION,
1744 "model-id", &error_abort);
1745 }
1746
1747 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1748}
1749
1750static const TypeInfo max_x86_cpu_type_info = {
1751 .name = X86_CPU_TYPE_NAME("max"),
1752 .parent = TYPE_X86_CPU,
1753 .instance_init = max_x86_cpu_initfn,
1754 .class_init = max_x86_cpu_class_init,
1755};
1756
1757#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
1758static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1759{
1760 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1761
1762 xcc->host_cpuid_required = true;
1763 xcc->ordering = 8;
1764
1765 if (kvm_enabled()) {
1766 xcc->model_description =
1767 "KVM processor with all supported host features ";
1768 } else if (hvf_enabled()) {
1769 xcc->model_description =
1770 "HVF processor with all supported host features ";
1771 }
1772}
1773
1774static const TypeInfo host_x86_cpu_type_info = {
1775 .name = X86_CPU_TYPE_NAME("host"),
1776 .parent = X86_CPU_TYPE_NAME("max"),
1777 .class_init = host_x86_cpu_class_init,
1778};
1779
1780#endif
1781
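/* Warn about each feature bit in @mask that the current accelerator
 * cannot provide.
 */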
1782static void report_unavailable_features(FeatureWord w, uint32_t mask)
1783{
1784 FeatureWordInfo *f = &feature_word_info[w];
1785 int i;
1786
1787 for (i = 0; i < 32; ++i) {
1788 if ((1UL << i) & mask) {
1789 const char *reg = get_register_name_32(f->cpuid_reg);
1790 assert(reg);
1791 warn_report("%s doesn't support requested feature: "
1792 "CPUID.%02XH:%s%s%s [bit %d]",
1793 accel_uses_host_cpuid() ? "host" : "TCG",
1794 f->cpuid_eax, reg,
1795 f->feat_names[i] ? "." : "",
1796 f->feat_names[i] ? f->feat_names[i] : "", i);
1797 }
1798 }
1799}
1800
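/* The "family" property combines the base family field (CPUID[1].EAX
 * bits 11:8) with the extended family field (bits 27:20) when the base
 * family is 0xF.
 */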
1801static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1802 const char *name, void *opaque,
1803 Error **errp)
1804{
1805 X86CPU *cpu = X86_CPU(obj);
1806 CPUX86State *env = &cpu->env;
1807 int64_t value;
1808
1809 value = (env->cpuid_version >> 8) & 0xf;
1810 if (value == 0xf) {
1811 value += (env->cpuid_version >> 20) & 0xff;
1812 }
1813 visit_type_int(v, name, &value, errp);
1814}
1815
1816static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1817 const char *name, void *opaque,
1818 Error **errp)
1819{
1820 X86CPU *cpu = X86_CPU(obj);
1821 CPUX86State *env = &cpu->env;
1822 const int64_t min = 0;
1823 const int64_t max = 0xff + 0xf;
1824 Error *local_err = NULL;
1825 int64_t value;
1826
1827 visit_type_int(v, name, &value, &local_err);
1828 if (local_err) {
1829 error_propagate(errp, local_err);
1830 return;
1831 }
1832 if (value < min || value > max) {
1833 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1834 name ? name : "null", value, min, max);
1835 return;
1836 }
1837
1838 env->cpuid_version &= ~0xff00f00;
1839 if (value > 0x0f) {
1840 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1841 } else {
1842 env->cpuid_version |= value << 8;
1843 }
1844}
1845
1846static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1847 const char *name, void *opaque,
1848 Error **errp)
1849{
1850 X86CPU *cpu = X86_CPU(obj);
1851 CPUX86State *env = &cpu->env;
1852 int64_t value;
1853
1854 value = (env->cpuid_version >> 4) & 0xf;
1855 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1856 visit_type_int(v, name, &value, errp);
1857}
1858
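/* The "model" property is split between CPUID[1].EAX bits 7:4 (low
 * nibble) and the extended model field in bits 19:16 (high nibble).
 */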
1859static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1860 const char *name, void *opaque,
1861 Error **errp)
1862{
1863 X86CPU *cpu = X86_CPU(obj);
1864 CPUX86State *env = &cpu->env;
1865 const int64_t min = 0;
1866 const int64_t max = 0xff;
1867 Error *local_err = NULL;
1868 int64_t value;
1869
1870 visit_type_int(v, name, &value, &local_err);
1871 if (local_err) {
1872 error_propagate(errp, local_err);
1873 return;
1874 }
1875 if (value < min || value > max) {
1876 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1877 name ? name : "null", value, min, max);
1878 return;
1879 }
1880
1881 env->cpuid_version &= ~0xf00f0;
1882 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1883}
1884
1885static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1886 const char *name, void *opaque,
1887 Error **errp)
1888{
1889 X86CPU *cpu = X86_CPU(obj);
1890 CPUX86State *env = &cpu->env;
1891 int64_t value;
1892
1893 value = env->cpuid_version & 0xf;
1894 visit_type_int(v, name, &value, errp);
1895}
1896
1897static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1898 const char *name, void *opaque,
1899 Error **errp)
1900{
1901 X86CPU *cpu = X86_CPU(obj);
1902 CPUX86State *env = &cpu->env;
1903 const int64_t min = 0;
1904 const int64_t max = 0xf;
1905 Error *local_err = NULL;
1906 int64_t value;
1907
1908 visit_type_int(v, name, &value, &local_err);
1909 if (local_err) {
1910 error_propagate(errp, local_err);
1911 return;
1912 }
1913 if (value < min || value > max) {
1914 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1915 name ? name : "null", value, min, max);
1916 return;
1917 }
1918
1919 env->cpuid_version &= ~0xf;
1920 env->cpuid_version |= value & 0xf;
1921}
1922
1923static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1924{
1925 X86CPU *cpu = X86_CPU(obj);
1926 CPUX86State *env = &cpu->env;
1927 char *value;
1928
1929 value = g_malloc(CPUID_VENDOR_SZ + 1);
1930 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1931 env->cpuid_vendor3);
1932 return value;
1933}
1934
1935static void x86_cpuid_set_vendor(Object *obj, const char *value,
1936 Error **errp)
1937{
1938 X86CPU *cpu = X86_CPU(obj);
1939 CPUX86State *env = &cpu->env;
1940 int i;
1941
1942 if (strlen(value) != CPUID_VENDOR_SZ) {
1943 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1944 return;
1945 }
1946
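 /* Pack the 12-character vendor string into the three CPUID vendor
 * registers (EBX, EDX, ECX), four bytes each, least significant byte
 * first.
 */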
1947 env->cpuid_vendor1 = 0;
1948 env->cpuid_vendor2 = 0;
1949 env->cpuid_vendor3 = 0;
1950 for (i = 0; i < 4; i++) {
1951 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1952 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1953 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1954 }
1955}
1956
1957static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1958{
1959 X86CPU *cpu = X86_CPU(obj);
1960 CPUX86State *env = &cpu->env;
1961 char *value;
1962 int i;
1963
1964 value = g_malloc(48 + 1);
1965 for (i = 0; i < 48; i++) {
1966 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1967 }
1968 value[48] = '\0';
1969 return value;
1970}
1971
1972static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1973 Error **errp)
1974{
1975 X86CPU *cpu = X86_CPU(obj);
1976 CPUX86State *env = &cpu->env;
1977 int c, len, i;
1978
1979 if (model_id == NULL) {
1980 model_id = "";
1981 }
1982 len = strlen(model_id);
1983 memset(env->cpuid_model, 0, 48);
1984 for (i = 0; i < 48; i++) {
1985 if (i >= len) {
1986 c = '\0';
1987 } else {
1988 c = (uint8_t)model_id[i];
1989 }
1990 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1991 }
1992}
1993
1994static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1995 void *opaque, Error **errp)
1996{
1997 X86CPU *cpu = X86_CPU(obj);
1998 int64_t value;
1999
2000 value = cpu->env.tsc_khz * 1000;
2001 visit_type_int(v, name, &value, errp);
2002}
2003
2004static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2005 void *opaque, Error **errp)
2006{
2007 X86CPU *cpu = X86_CPU(obj);
2008 const int64_t min = 0;
2009 const int64_t max = INT64_MAX;
2010 Error *local_err = NULL;
2011 int64_t value;
2012
2013 visit_type_int(v, name, &value, &local_err);
2014 if (local_err) {
2015 error_propagate(errp, local_err);
2016 return;
2017 }
2018 if (value < min || value > max) {
2019 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2020 name ? name : "null", value, min, max);
2021 return;
2022 }
2023
2024 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2025}
2026
2027/* Generic getter for "feature-words" and "filtered-features" properties */
2028static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2029 const char *name, void *opaque,
2030 Error **errp)
2031{
2032 uint32_t *array = (uint32_t *)opaque;
2033 FeatureWord w;
2034 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2035 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2036 X86CPUFeatureWordInfoList *list = NULL;
2037
2038 for (w = 0; w < FEATURE_WORDS; w++) {
2039 FeatureWordInfo *wi = &feature_word_info[w];
2040 X86CPUFeatureWordInfo *qwi = &word_infos[w];
2041 qwi->cpuid_input_eax = wi->cpuid_eax;
2042 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2043 qwi->cpuid_input_ecx = wi->cpuid_ecx;
2044 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2045 qwi->features = array[w];
2046
2047 /* List will be in reverse order, but order shouldn't matter */
2048 list_entries[w].next = list;
2049 list_entries[w].value = &word_infos[w];
2050 list = &list_entries[w];
2051 }
2052
2053 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
2054}
2055
2056static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2057 void *opaque, Error **errp)
2058{
2059 X86CPU *cpu = X86_CPU(obj);
2060 int64_t value = cpu->hyperv_spinlock_attempts;
2061
2062 visit_type_int(v, name, &value, errp);
2063}
2064
2065static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2066 void *opaque, Error **errp)
2067{
2068 const int64_t min = 0xFFF;
2069 const int64_t max = UINT_MAX;
2070 X86CPU *cpu = X86_CPU(obj);
2071 Error *err = NULL;
2072 int64_t value;
2073
2074 visit_type_int(v, name, &value, &err);
2075 if (err) {
2076 error_propagate(errp, err);
2077 return;
2078 }
2079
2080 if (value < min || value > max) {
2081 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2082 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2083 object_get_typename(obj), name ? name : "null",
2084 value, min, max);
2085 return;
2086 }
2087 cpu->hyperv_spinlock_attempts = value;
2088}
2089
2090static const PropertyInfo qdev_prop_spinlocks = {
2091 .name = "int",
2092 .get = x86_get_hv_spinlocks,
2093 .set = x86_set_hv_spinlocks,
2094};
2095
2096/* Convert all '_' in a feature string option name to '-', to make feature
2097 * name conform to QOM property naming rule, which uses '-' instead of '_'.
2098 */
2099static inline void feat2prop(char *s)
2100{
2101 while ((s = strchr(s, '_'))) {
2102 *s = '-';
2103 }
2104}
2105
2106/* Return the feature property name for a feature flag bit */
2107static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2108{
2109 /* XSAVE components are automatically enabled by other features,
2110 * so return the original feature name instead
2111 */
2112 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2113 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2114
2115 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2116 x86_ext_save_areas[comp].bits) {
2117 w = x86_ext_save_areas[comp].feature;
2118 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2119 }
2120 }
2121
2122 assert(bitnr < 32);
2123 assert(w < FEATURE_WORDS);
2124 return feature_word_info[w].feat_names[bitnr];
2125}
2126
2127 /* Compatibility hack to maintain the legacy +-feat semantics,
2128 * where +-feat overrides any feature set by
2129 * feat=on|feat even if the latter is parsed after +-feat
2130 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2131 */
2132static GList *plus_features, *minus_features;
2133
2134static gint compare_string(gconstpointer a, gconstpointer b)
2135{
2136 return g_strcmp0(a, b);
2137}
2138
2139/* Parse "+feature,-feature,feature=foo" CPU feature string
2140 */
2141static void x86_cpu_parse_featurestr(const char *typename, char *features,
2142 Error **errp)
2143{
2144 char *featurestr; /* Single "key=value" string being parsed */
2145 static bool cpu_globals_initialized;
2146 bool ambiguous = false;
2147
2148 if (cpu_globals_initialized) {
2149 return;
2150 }
2151 cpu_globals_initialized = true;
2152
2153 if (!features) {
2154 return;
2155 }
2156
2157 for (featurestr = strtok(features, ",");
2158 featurestr;
2159 featurestr = strtok(NULL, ",")) {
2160 const char *name;
2161 const char *val = NULL;
2162 char *eq = NULL;
2163 char num[32];
2164 GlobalProperty *prop;
2165
2166 /* Compatibility syntax: */
2167 if (featurestr[0] == '+') {
2168 plus_features = g_list_append(plus_features,
2169 g_strdup(featurestr + 1));
2170 continue;
2171 } else if (featurestr[0] == '-') {
2172 minus_features = g_list_append(minus_features,
2173 g_strdup(featurestr + 1));
2174 continue;
2175 }
2176
2177 eq = strchr(featurestr, '=');
2178 if (eq) {
2179 *eq++ = 0;
2180 val = eq;
2181 } else {
2182 val = "on";
2183 }
2184
2185 feat2prop(featurestr);
2186 name = featurestr;
2187
2188 if (g_list_find_custom(plus_features, name, compare_string)) {
2189 warn_report("Ambiguous CPU model string. "
2190 "Don't mix both \"+%s\" and \"%s=%s\"",
2191 name, name, val);
2192 ambiguous = true;
2193 }
2194 if (g_list_find_custom(minus_features, name, compare_string)) {
2195 warn_report("Ambiguous CPU model string. "
2196 "Don't mix both \"-%s\" and \"%s=%s\"",
2197 name, name, val);
2198 ambiguous = true;
2199 }
2200
2201 /* Special case: */
2202 if (!strcmp(name, "tsc-freq")) {
2203 int ret;
2204 uint64_t tsc_freq;
2205
2206 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
2207 if (ret < 0 || tsc_freq > INT64_MAX) {
2208 error_setg(errp, "bad numerical value %s", val);
2209 return;
2210 }
2211 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2212 val = num;
2213 name = "tsc-frequency";
2214 }
2215
2216 prop = g_new0(typeof(*prop), 1);
2217 prop->driver = typename;
2218 prop->property = g_strdup(name);
2219 prop->value = g_strdup(val);
2220 prop->errp = &error_fatal;
2221 qdev_prop_register_global(prop);
2222 }
2223
2224 if (ambiguous) {
2225 warn_report("Compatibility of ambiguous CPU model "
2226 "strings won't be kept in future QEMU versions");
2227 }
2228}
2229
2230static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2231static int x86_cpu_filter_features(X86CPU *cpu);
2232
2233/* Check for missing features that may prevent the CPU class from
2234 * running using the current machine and accelerator.
2235 */
2236static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2237 strList **missing_feats)
2238{
2239 X86CPU *xc;
2240 FeatureWord w;
2241 Error *err = NULL;
2242 strList **next = missing_feats;
2243
2244 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
2245 strList *new = g_new0(strList, 1);
2246 new->value = g_strdup("kvm");
2247 *missing_feats = new;
2248 return;
2249 }
2250
2251 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2252
2253 x86_cpu_expand_features(xc, &err);
2254 if (err) {
2255 /* Errors at x86_cpu_expand_features should never happen,
2256 * but in case they do, just report the model as not
2257 * runnable at all using the "type" property.
2258 */
2259 strList *new = g_new0(strList, 1);
2260 new->value = g_strdup("type");
2261 *next = new;
2262 next = &new->next;
2263 }
2264
2265 x86_cpu_filter_features(xc);
2266
2267 for (w = 0; w < FEATURE_WORDS; w++) {
2268 uint32_t filtered = xc->filtered_features[w];
2269 int i;
2270 for (i = 0; i < 32; i++) {
2271 if (filtered & (1UL << i)) {
2272 strList *new = g_new0(strList, 1);
2273 new->value = g_strdup(x86_cpu_feature_name(w, i));
2274 *next = new;
2275 next = &new->next;
2276 }
2277 }
2278 }
2279
2280 object_unref(OBJECT(xc));
2281}
2282
2283/* Print all cpuid feature names in featureset
2284 */
2285static void listflags(FILE *f, fprintf_function print, const char **featureset)
2286{
2287 int bit;
2288 bool first = true;
2289
2290 for (bit = 0; bit < 32; bit++) {
2291 if (featureset[bit]) {
2292 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2293 first = false;
2294 }
2295 }
2296}
2297
2298/* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2299static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2300{
2301 ObjectClass *class_a = (ObjectClass *)a;
2302 ObjectClass *class_b = (ObjectClass *)b;
2303 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2304 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2305 const char *name_a, *name_b;
2306
2307 if (cc_a->ordering != cc_b->ordering) {
2308 return cc_a->ordering - cc_b->ordering;
2309 } else {
2310 name_a = object_class_get_name(class_a);
2311 name_b = object_class_get_name(class_b);
2312 return strcmp(name_a, name_b);
2313 }
2314}
2315
2316static GSList *get_sorted_cpu_model_list(void)
2317{
2318 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2319 list = g_slist_sort(list, x86_cpu_list_compare);
2320 return list;
2321}
2322
2323static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2324{
2325 ObjectClass *oc = data;
2326 X86CPUClass *cc = X86_CPU_CLASS(oc);
2327 CPUListState *s = user_data;
2328 char *name = x86_cpu_class_get_model_name(cc);
2329 const char *desc = cc->model_description;
2330 if (!desc && cc->cpu_def) {
2331 desc = cc->cpu_def->model_id;
2332 }
2333
2334 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2335 name, desc);
2336 g_free(name);
2337}
2338
2339/* list available CPU models and flags */
2340void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2341{
2342 int i;
2343 CPUListState s = {
2344 .file = f,
2345 .cpu_fprintf = cpu_fprintf,
2346 };
2347 GSList *list;
2348
2349 (*cpu_fprintf)(f, "Available CPUs:\n");
2350 list = get_sorted_cpu_model_list();
2351 g_slist_foreach(list, x86_cpu_list_entry, &s);
2352 g_slist_free(list);
2353
2354 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2355 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2356 FeatureWordInfo *fw = &feature_word_info[i];
2357
2358 (*cpu_fprintf)(f, " ");
2359 listflags(f, cpu_fprintf, fw->feat_names);
2360 (*cpu_fprintf)(f, "\n");
2361 }
2362}
2363
2364static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2365{
2366 ObjectClass *oc = data;
2367 X86CPUClass *cc = X86_CPU_CLASS(oc);
2368 CpuDefinitionInfoList **cpu_list = user_data;
2369 CpuDefinitionInfoList *entry;
2370 CpuDefinitionInfo *info;
2371
2372 info = g_malloc0(sizeof(*info));
2373 info->name = x86_cpu_class_get_model_name(cc);
2374 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2375 info->has_unavailable_features = true;
2376 info->q_typename = g_strdup(object_class_get_name(oc));
2377 info->migration_safe = cc->migration_safe;
2378 info->has_migration_safe = true;
2379 info->q_static = cc->static_model;
2380
2381 entry = g_malloc0(sizeof(*entry));
2382 entry->value = info;
2383 entry->next = *cpu_list;
2384 *cpu_list = entry;
2385}
2386
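/* Build the list of CPU model definitions returned by the
 * query-cpu-definitions QMP command.
 */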
2387CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2388{
2389 CpuDefinitionInfoList *cpu_list = NULL;
2390 GSList *list = get_sorted_cpu_model_list();
2391 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2392 g_slist_free(list);
2393 return cpu_list;
2394}
2395
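/* Return the bits of feature word @w that the current accelerator can
 * support, optionally restricted to migratable bits only.
 */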
2396static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2397 bool migratable_only)
2398{
2399 FeatureWordInfo *wi = &feature_word_info[w];
2400 uint32_t r;
2401
2402 if (kvm_enabled()) {
2403 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2404 wi->cpuid_ecx,
2405 wi->cpuid_reg);
2406 } else if (hvf_enabled()) {
2407 r = hvf_get_supported_cpuid(wi->cpuid_eax,
2408 wi->cpuid_ecx,
2409 wi->cpuid_reg);
2410 } else if (tcg_enabled()) {
2411 r = wi->tcg_features;
2412 } else {
2413 return ~0;
2414 }
2415 if (migratable_only) {
2416 r &= x86_cpu_get_migratable_flags(w);
2417 }
2418 return r;
2419}
2420
2421static void x86_cpu_report_filtered_features(X86CPU *cpu)
2422{
2423 FeatureWord w;
2424
2425 for (w = 0; w < FEATURE_WORDS; w++) {
2426 report_unavailable_features(w, cpu->filtered_features[w]);
2427 }
2428}
2429
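/* Apply a NULL-terminated array of property/value pairs to @cpu,
 * skipping entries with a NULL value.
 */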
2430static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2431{
2432 PropValue *pv;
2433 for (pv = props; pv->prop; pv++) {
2434 if (!pv->value) {
2435 continue;
2436 }
2437 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2438 &error_abort);
2439 }
2440}
2441
2442/* Load data from X86CPUDefinition into a X86CPU object
2443 */
2444static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2445{
2446 CPUX86State *env = &cpu->env;
2447 const char *vendor;
2448 char host_vendor[CPUID_VENDOR_SZ + 1];
2449 FeatureWord w;
2450
2451 /*NOTE: any property set by this function should be returned by
2452 * x86_cpu_static_props(), so static expansion of
2453 * query-cpu-model-expansion is always complete.
2454 */
2455
2456 /* CPU models only set _minimum_ values for level/xlevel: */
2457 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
2458 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2459
2460 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2461 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2462 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2463 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2464 for (w = 0; w < FEATURE_WORDS; w++) {
2465 env->features[w] = def->features[w];
2466 }
2467
2468 /* Special cases not set in the X86CPUDefinition structs: */
2469 /* TODO: in-kernel irqchip for hvf */
2470 if (kvm_enabled()) {
2471 if (!kvm_irqchip_in_kernel()) {
2472 x86_cpu_change_kvm_default("x2apic", "off");
2473 }
2474
2475 x86_cpu_apply_props(cpu, kvm_default_props);
2476 } else if (tcg_enabled()) {
2477 x86_cpu_apply_props(cpu, tcg_default_props);
2478 }
2479
2480 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2481
2482 /* sysenter isn't supported in compatibility mode on AMD,
2483 * syscall isn't supported in compatibility mode on Intel.
2484 * Normally we advertise the actual CPU vendor, but you can
2485 * override this using the 'vendor' property if you want to use
2486 * KVM's sysenter/syscall emulation in compatibility mode and
2487 * when doing cross vendor migration
2488 */
2489 vendor = def->vendor;
2490 if (accel_uses_host_cpuid()) {
2491 uint32_t ebx = 0, ecx = 0, edx = 0;
2492 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2493 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2494 vendor = host_vendor;
2495 }
2496
2497 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2498
2499}
2500
2501/* Return a QDict containing keys for all properties that can be included
2502 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2503 * must be included in the dictionary.
2504 */
2505static QDict *x86_cpu_static_props(void)
2506{
2507 FeatureWord w;
2508 int i;
2509 static const char *props[] = {
2510 "min-level",
2511 "min-xlevel",
2512 "family",
2513 "model",
2514 "stepping",
2515 "model-id",
2516 "vendor",
2517 "lmce",
2518 NULL,
2519 };
2520 static QDict *d;
2521
2522 if (d) {
2523 return d;
2524 }
2525
2526 d = qdict_new();
2527 for (i = 0; props[i]; i++) {
2528 qdict_put_null(d, props[i]);
2529 }
2530
2531 for (w = 0; w < FEATURE_WORDS; w++) {
2532 FeatureWordInfo *fi = &feature_word_info[w];
2533 int bit;
2534 for (bit = 0; bit < 32; bit++) {
2535 if (!fi->feat_names[bit]) {
2536 continue;
2537 }
2538 qdict_put_null(d, fi->feat_names[bit]);
2539 }
2540 }
2541
2542 return d;
2543}
2544
2545 /* Add an entry to the @props dict with the current value of property @prop. */
2546static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
2547{
2548 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
2549 &error_abort);
2550
2551 qdict_put_obj(props, prop, value);
2552}
2553
2554/* Convert CPU model data from X86CPU object to a property dictionary
2555 * that can recreate exactly the same CPU model.
2556 */
2557static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
2558{
2559 QDict *sprops = x86_cpu_static_props();
2560 const QDictEntry *e;
2561
2562 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
2563 const char *prop = qdict_entry_key(e);
2564 x86_cpu_expand_prop(cpu, props, prop);
2565 }
2566}
2567
2568/* Convert CPU model data from X86CPU object to a property dictionary
2569 * that can recreate exactly the same CPU model, including every
2570 * writeable QOM property.
2571 */
2572static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
2573{
2574 ObjectPropertyIterator iter;
2575 ObjectProperty *prop;
2576
2577 object_property_iter_init(&iter, OBJECT(cpu));
2578 while ((prop = object_property_iter_next(&iter))) {
2579 /* skip read-only or write-only properties */
2580 if (!prop->get || !prop->set) {
2581 continue;
2582 }
2583
2584 /* "hotplugged" is the only property that is configurable
2585 * on the command-line but will be set differently on CPUs
2586 * created using "-cpu ... -smp ..." and by CPUs created
2587 * on the fly by x86_cpu_from_model() for querying. Skip it.
2588 */
2589 if (!strcmp(prop->name, "hotplugged")) {
2590 continue;
2591 }
2592 x86_cpu_expand_prop(cpu, props, prop->name);
2593 }
2594}
2595
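/* Set every property listed in @props on @obj, stopping at the first error */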
2596static void object_apply_props(Object *obj, QDict *props, Error **errp)
2597{
2598 const QDictEntry *prop;
2599 Error *err = NULL;
2600
2601 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
2602 object_property_set_qobject(obj, qdict_entry_value(prop),
2603 qdict_entry_key(prop), &err);
2604 if (err) {
2605 break;
2606 }
2607 }
2608
2609 error_propagate(errp, err);
2610}
2611
2612/* Create X86CPU object according to model+props specification */
2613static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
2614{
2615 X86CPU *xc = NULL;
2616 X86CPUClass *xcc;
2617 Error *err = NULL;
2618
2619 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
2620 if (xcc == NULL) {
2621 error_setg(&err, "CPU model '%s' not found", model);
2622 goto out;
2623 }
2624
2625 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2626 if (props) {
2627 object_apply_props(OBJECT(xc), props, &err);
2628 if (err) {
2629 goto out;
2630 }
2631 }
2632
2633 x86_cpu_expand_features(xc, &err);
2634 if (err) {
2635 goto out;
2636 }
2637
2638out:
2639 if (err) {
2640 error_propagate(errp, err);
2641 object_unref(OBJECT(xc));
2642 xc = NULL;
2643 }
2644 return xc;
2645}
2646
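/* Implement the query-cpu-model-expansion QMP command for x86: static
 * expansion is expressed as the "base" model plus a set of static
 * properties, while full expansion keeps the original model name and
 * adds every writable property.
 */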
2647CpuModelExpansionInfo *
2648arch_query_cpu_model_expansion(CpuModelExpansionType type,
2649 CpuModelInfo *model,
2650 Error **errp)
2651{
2652 X86CPU *xc = NULL;
2653 Error *err = NULL;
2654 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
2655 QDict *props = NULL;
2656 const char *base_name;
2657
2658 xc = x86_cpu_from_model(model->name,
2659 model->has_props ?
2660 qobject_to_qdict(model->props) :
2661 NULL, &err);
2662 if (err) {
2663 goto out;
2664 }
2665
2666 props = qdict_new();
2667
2668 switch (type) {
2669 case CPU_MODEL_EXPANSION_TYPE_STATIC:
2670 /* Static expansion will be based on "base" only */
2671 base_name = "base";
2672 x86_cpu_to_dict(xc, props);
2673 break;
2674 case CPU_MODEL_EXPANSION_TYPE_FULL:
2675 /* As we don't return every single property, full expansion needs
2676 * to keep the original model name+props, and add extra
2677 * properties on top of that.
2678 */
2679 base_name = model->name;
2680 x86_cpu_to_dict_full(xc, props);
2681 break;
2682 default:
2683 error_setg(&err, "Unsupported expansion type");
2684 goto out;
2685 }
2686
2687 if (!props) {
2688 props = qdict_new();
2689 }
2690 x86_cpu_to_dict(xc, props);
2691
2692 ret->model = g_new0(CpuModelInfo, 1);
2693 ret->model->name = g_strdup(base_name);
2694 ret->model->props = QOBJECT(props);
2695 ret->model->has_props = true;
2696
2697out:
2698 object_unref(OBJECT(xc));
2699 if (err) {
2700 error_propagate(errp, err);
2701 qapi_free_CpuModelExpansionInfo(ret);
2702 ret = NULL;
2703 }
2704 return ret;
2705}
2706
2707static gchar *x86_gdb_arch_name(CPUState *cs)
2708{
2709#ifdef TARGET_X86_64
2710 return g_strdup("i386:x86-64");
2711#else
2712 return g_strdup("i386");
2713#endif
2714}
2715
2716static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2717{
2718 X86CPUDefinition *cpudef = data;
2719 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2720
2721 xcc->cpu_def = cpudef;
2722 xcc->migration_safe = true;
2723}
2724
2725static void x86_register_cpudef_type(X86CPUDefinition *def)
2726{
2727 char *typename = x86_cpu_type_name(def->name);
2728 TypeInfo ti = {
2729 .name = typename,
2730 .parent = TYPE_X86_CPU,
2731 .class_init = x86_cpu_cpudef_class_init,
2732 .class_data = def,
2733 };
2734
2735 /* AMD aliases are handled at runtime based on CPUID vendor, so
2736 * they shouldn't be set on the CPU model table.
2737 */
2738 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
2739
2740 type_register(&ti);
2741 g_free(typename);
2742}
2743
2744#if !defined(CONFIG_USER_ONLY)
2745
2746void cpu_clear_apic_feature(CPUX86State *env)
2747{
2748 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2749}
2750
2751#endif /* !CONFIG_USER_ONLY */
2752
2753void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2754 uint32_t *eax, uint32_t *ebx,
2755 uint32_t *ecx, uint32_t *edx)
2756{
2757 X86CPU *cpu = x86_env_get_cpu(env);
2758 CPUState *cs = CPU(cpu);
2759 uint32_t pkg_offset;
2760 uint32_t limit;
2761 uint32_t signature[3];
2762
2763 /* Calculate & apply limits for different index ranges */
2764 if (index >= 0xC0000000) {
2765 limit = env->cpuid_xlevel2;
2766 } else if (index >= 0x80000000) {
2767 limit = env->cpuid_xlevel;
2768 } else if (index >= 0x40000000) {
2769 limit = 0x40000001;
2770 } else {
2771 limit = env->cpuid_level;
2772 }
2773
2774 if (index > limit) {
2775 /* Intel documentation states that invalid EAX input will
2776 * return the same information as EAX=cpuid_level
2777 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2778 */
2779 index = env->cpuid_level;
2780 }
2781
2782 switch (index) {
2783 case 0:
2784 *eax = env->cpuid_level;
2785 *ebx = env->cpuid_vendor1;
2786 *edx = env->cpuid_vendor2;
2787 *ecx = env->cpuid_vendor3;
2788 break;
2789 case 1:
2790 *eax = env->cpuid_version;
2791 *ebx = (cpu->apic_id << 24) |
2792 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2793 *ecx = env->features[FEAT_1_ECX];
2794 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2795 *ecx |= CPUID_EXT_OSXSAVE;
2796 }
2797 *edx = env->features[FEAT_1_EDX];
2798 if (cs->nr_cores * cs->nr_threads > 1) {
2799 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2800 *edx |= CPUID_HT;
2801 }
2802 break;
2803 case 2:
2804 /* cache info: needed for Pentium Pro compatibility */
2805 if (cpu->cache_info_passthrough) {
2806 host_cpuid(index, 0, eax, ebx, ecx, edx);
2807 break;
2808 }
2809 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2810 *ebx = 0;
2811 if (!cpu->enable_l3_cache) {
2812 *ecx = 0;
2813 } else {
2814 *ecx = L3_N_DESCRIPTOR;
2815 }
2816 *edx = (L1D_DESCRIPTOR << 16) | \
2817 (L1I_DESCRIPTOR << 8) | \
2818 (L2_DESCRIPTOR);
2819 break;
2820 case 4:
2821 /* cache info: needed for Core compatibility */
2822 if (cpu->cache_info_passthrough) {
2823 host_cpuid(index, count, eax, ebx, ecx, edx);
2824 *eax &= ~0xFC000000;
2825 } else {
2826 *eax = 0;
2827 switch (count) {
2828 case 0: /* L1 dcache info */
2829 *eax |= CPUID_4_TYPE_DCACHE | \
2830 CPUID_4_LEVEL(1) | \
2831 CPUID_4_SELF_INIT_LEVEL;
2832 *ebx = (L1D_LINE_SIZE - 1) | \
2833 ((L1D_PARTITIONS - 1) << 12) | \
2834 ((L1D_ASSOCIATIVITY - 1) << 22);
2835 *ecx = L1D_SETS - 1;
2836 *edx = CPUID_4_NO_INVD_SHARING;
2837 break;
2838 case 1: /* L1 icache info */
2839 *eax |= CPUID_4_TYPE_ICACHE | \
2840 CPUID_4_LEVEL(1) | \
2841 CPUID_4_SELF_INIT_LEVEL;
2842 *ebx = (L1I_LINE_SIZE - 1) | \
2843 ((L1I_PARTITIONS - 1) << 12) | \
2844 ((L1I_ASSOCIATIVITY - 1) << 22);
2845 *ecx = L1I_SETS - 1;
2846 *edx = CPUID_4_NO_INVD_SHARING;
2847 break;
2848 case 2: /* L2 cache info */
2849 *eax |= CPUID_4_TYPE_UNIFIED | \
2850 CPUID_4_LEVEL(2) | \
2851 CPUID_4_SELF_INIT_LEVEL;
2852 if (cs->nr_threads > 1) {
2853 *eax |= (cs->nr_threads - 1) << 14;
2854 }
2855 *ebx = (L2_LINE_SIZE - 1) | \
2856 ((L2_PARTITIONS - 1) << 12) | \
2857 ((L2_ASSOCIATIVITY - 1) << 22);
2858 *ecx = L2_SETS - 1;
2859 *edx = CPUID_4_NO_INVD_SHARING;
2860 break;
2861 case 3: /* L3 cache info */
2862 if (!cpu->enable_l3_cache) {
2863 *eax = 0;
2864 *ebx = 0;
2865 *ecx = 0;
2866 *edx = 0;
2867 break;
2868 }
2869 *eax |= CPUID_4_TYPE_UNIFIED | \
2870 CPUID_4_LEVEL(3) | \
2871 CPUID_4_SELF_INIT_LEVEL;
2872 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2873 *eax |= ((1 << pkg_offset) - 1) << 14;
2874 *ebx = (L3_N_LINE_SIZE - 1) | \
2875 ((L3_N_PARTITIONS - 1) << 12) | \
2876 ((L3_N_ASSOCIATIVITY - 1) << 22);
2877 *ecx = L3_N_SETS - 1;
2878 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
2879 break;
2880 default: /* end of info */
2881 *eax = 0;
2882 *ebx = 0;
2883 *ecx = 0;
2884 *edx = 0;
2885 break;
2886 }
2887 }
2888
2889 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2890 if ((*eax & 31) && cs->nr_cores > 1) {
2891 *eax |= (cs->nr_cores - 1) << 26;
2892 }
2893 break;
2894 case 5:
2895 /* mwait info: needed for Core compatibility */
2896 *eax = 0; /* Smallest monitor-line size in bytes */
2897 *ebx = 0; /* Largest monitor-line size in bytes */
2898 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2899 *edx = 0;
2900 break;
2901 case 6:
2902 /* Thermal and Power Leaf */
2903 *eax = env->features[FEAT_6_EAX];
2904 *ebx = 0;
2905 *ecx = 0;
2906 *edx = 0;
2907 break;
2908 case 7:
2909 /* Structured Extended Feature Flags Enumeration Leaf */
2910 if (count == 0) {
2911 *eax = 0; /* Maximum ECX value for sub-leaves */
2912 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2913 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2914 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2915 *ecx |= CPUID_7_0_ECX_OSPKE;
2916 }
2917 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
2918 } else {
2919 *eax = 0;
2920 *ebx = 0;
2921 *ecx = 0;
2922 *edx = 0;
2923 }
2924 break;
2925 case 9:
2926 /* Direct Cache Access Information Leaf */
2927 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2928 *ebx = 0;
2929 *ecx = 0;
2930 *edx = 0;
2931 break;
2932 case 0xA:
2933 /* Architectural Performance Monitoring Leaf */
2934 if (kvm_enabled() && cpu->enable_pmu) {
2935 KVMState *s = cs->kvm_state;
2936
2937 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2938 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2939 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2940 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2941 } else if (hvf_enabled() && cpu->enable_pmu) {
2942 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
2943 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
2944 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
2945 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
2946 } else {
2947 *eax = 0;
2948 *ebx = 0;
2949 *ecx = 0;
2950 *edx = 0;
2951 }
2952 break;
2953 case 0xB:
2954 /* Extended Topology Enumeration Leaf */
2955 if (!cpu->enable_cpuid_0xb) {
2956 *eax = *ebx = *ecx = *edx = 0;
2957 break;
2958 }
2959
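 /* ECX[7:0] echoes the requested sub-leaf; EDX reports the APIC ID */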
2960 *ecx = count & 0xff;
2961 *edx = cpu->apic_id;
2962
2963 switch (count) {
2964 case 0:
2965 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
2966 *ebx = cs->nr_threads;
2967 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2968 break;
2969 case 1:
2970 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2971 *ebx = cs->nr_cores * cs->nr_threads;
2972 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2973 break;
2974 default:
2975 *eax = 0;
2976 *ebx = 0;
2977 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2978 }
2979
2980 assert(!(*eax & ~0x1f));
2981 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2982 break;
2983 case 0xD: {
2984 /* Processor Extended State */
2985 *eax = 0;
2986 *ebx = 0;
2987 *ecx = 0;
2988 *edx = 0;
2989 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2990 break;
2991 }
2992
2993 if (count == 0) {
2994 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
2995 *eax = env->features[FEAT_XSAVE_COMP_LO];
2996 *edx = env->features[FEAT_XSAVE_COMP_HI];
2997 *ebx = *ecx;
2998 } else if (count == 1) {
2999 *eax = env->features[FEAT_XSAVE];
3000 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3001 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3002 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3003 *eax = esa->size;
3004 *ebx = esa->offset;
3005 }
3006 }
3007 break;
3008 }
3009 case 0x40000000:
3010 /*
3011 * CPUID code in kvm_arch_init_vcpu() ignores anything
3012 * set here, but we restrict this leaf to TCG nonetheless.
3013 */
3014 if (tcg_enabled() && cpu->expose_tcg) {
3015 memcpy(signature, "TCGTCGTCGTCG", 12);
3016 *eax = 0x40000001;
3017 *ebx = signature[0];
3018 *ecx = signature[1];
3019 *edx = signature[2];
3020 } else {
3021 *eax = 0;
3022 *ebx = 0;
3023 *ecx = 0;
3024 *edx = 0;
3025 }
3026 break;
3027 case 0x40000001:
3028 *eax = 0;
3029 *ebx = 0;
3030 *ecx = 0;
3031 *edx = 0;
3032 break;
3033 case 0x80000000:
3034 *eax = env->cpuid_xlevel;
3035 *ebx = env->cpuid_vendor1;
3036 *edx = env->cpuid_vendor2;
3037 *ecx = env->cpuid_vendor3;
3038 break;
3039 case 0x80000001:
3040 *eax = env->cpuid_version;
3041 *ebx = 0;
3042 *ecx = env->features[FEAT_8000_0001_ECX];
3043 *edx = env->features[FEAT_8000_0001_EDX];
3044
3045 /* The Linux kernel checks for the CMPLegacy bit and
3046 * discards multiple thread information if it is set.
3047 * So don't set it here for Intel to make Linux guests happy.
3048 */
3049 if (cs->nr_cores * cs->nr_threads > 1) {
3050 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3051 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3052 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3053 *ecx |= 1 << 1; /* CmpLegacy bit */
3054 }
3055 }
3056 break;
3057 case 0x80000002:
3058 case 0x80000003:
3059 case 0x80000004:
3060 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3061 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3062 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3063 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3064 break;
3065 case 0x80000005:
3066 /* cache info (L1 cache) */
3067 if (cpu->cache_info_passthrough) {
3068 host_cpuid(index, 0, eax, ebx, ecx, edx);
3069 break;
3070 }
3071 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3072 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3073 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3074 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3075 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
3076 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
3077 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
3078 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
3079 break;
3080 case 0x80000006:
3081 /* cache info (L2 cache) */
3082 if (cpu->cache_info_passthrough) {
3083 host_cpuid(index, 0, eax, ebx, ecx, edx);
3084 break;
3085 }
3086 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3087 (L2_DTLB_2M_ENTRIES << 16) | \
3088 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3089 (L2_ITLB_2M_ENTRIES);
3090 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3091 (L2_DTLB_4K_ENTRIES << 16) | \
3092 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3093 (L2_ITLB_4K_ENTRIES);
3094 *ecx = (L2_SIZE_KB_AMD << 16) | \
3095 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
3096 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
3097 if (!cpu->enable_l3_cache) {
3098 *edx = ((L3_SIZE_KB / 512) << 18) | \
3099 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
3100 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
3101 } else {
3102 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
3103 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
3104 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
3105 }
3106 break;
3107 case 0x80000007:
3108 *eax = 0;
3109 *ebx = 0;
3110 *ecx = 0;
3111 *edx = env->features[FEAT_8000_0007_EDX];
3112 break;
3113 case 0x80000008:
3114 /* virtual & phys address size in low 2 bytes. */
3115 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3116 /* 64 bit processor */
3117 *eax = cpu->phys_bits; /* configurable physical bits */
3118 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3119 *eax |= 0x00003900; /* 57 bits virtual */
3120 } else {
3121 *eax |= 0x00003000; /* 48 bits virtual */
3122 }
3123 } else {
3124 *eax = cpu->phys_bits;
3125 }
3126 *ebx = 0;
3127 *ecx = 0;
3128 *edx = 0;
3129 if (cs->nr_cores * cs->nr_threads > 1) {
3130 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
3131 }
3132 break;
3133 case 0x8000000A:
3134 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3135 *eax = 0x00000001; /* SVM Revision */
3136 *ebx = 0x00000010; /* nr of ASIDs */
3137 *ecx = 0;
3138 *edx = env->features[FEAT_SVM]; /* optional features */
3139 } else {
3140 *eax = 0;
3141 *ebx = 0;
3142 *ecx = 0;
3143 *edx = 0;
3144 }
3145 break;
3146 case 0xC0000000:
3147 *eax = env->cpuid_xlevel2;
3148 *ebx = 0;
3149 *ecx = 0;
3150 *edx = 0;
3151 break;
3152 case 0xC0000001:
3153 /* Support for the CPUID instruction on VIA CPUs */
3154 *eax = env->cpuid_version;
3155 *ebx = 0;
3156 *ecx = 0;
3157 *edx = env->features[FEAT_C000_0001_EDX];
3158 break;
3159 case 0xC0000002:
3160 case 0xC0000003:
3161 case 0xC0000004:
3162 /* Reserved for future use; currently filled with zeros */
3163 *eax = 0;
3164 *ebx = 0;
3165 *ecx = 0;
3166 *edx = 0;
3167 break;
3168 default:
3169 /* reserved values: zero */
3170 *eax = 0;
3171 *ebx = 0;
3172 *ecx = 0;
3173 *edx = 0;
3174 break;
3175 }
3176}
3177
3178/* CPUClass::reset() */
3179static void x86_cpu_reset(CPUState *s)
3180{
3181 X86CPU *cpu = X86_CPU(s);
3182 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
3183 CPUX86State *env = &cpu->env;
3184 target_ulong cr4;
3185 uint64_t xcr0;
3186 int i;
3187
3188 xcc->parent_reset(s);
3189
3190 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
3191
3192 env->old_exception = -1;
3193
3194 /* init to reset state */
3195
3196 env->hflags2 |= HF2_GIF_MASK;
3197
3198 cpu_x86_update_cr0(env, 0x60000010);
3199 env->a20_mask = ~0x0;
3200 env->smbase = 0x30000;
3201
3202 env->idt.limit = 0xffff;
3203 env->gdt.limit = 0xffff;
3204 env->ldt.limit = 0xffff;
3205 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
3206 env->tr.limit = 0xffff;
3207 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
3208
3209 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
3210 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
3211 DESC_R_MASK | DESC_A_MASK);
3212 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
3213 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3214 DESC_A_MASK);
3215 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
3216 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3217 DESC_A_MASK);
3218 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
3219 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3220 DESC_A_MASK);
3221 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
3222 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3223 DESC_A_MASK);
3224 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
3225 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3226 DESC_A_MASK);
3227
3228 env->eip = 0xfff0;
3229 env->regs[R_EDX] = env->cpuid_version;
3230
3231 env->eflags = 0x2;
3232
3233 /* FPU init */
3234 for (i = 0; i < 8; i++) {
3235 env->fptags[i] = 1;
3236 }
3237 cpu_set_fpuc(env, 0x37f);
3238
3239 env->mxcsr = 0x1f80;
3240 /* All units are in INIT state. */
3241 env->xstate_bv = 0;
3242
3243 env->pat = 0x0007040600070406ULL;
3244 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
3245
3246 memset(env->dr, 0, sizeof(env->dr));
3247 env->dr[6] = DR6_FIXED_1;
3248 env->dr[7] = DR7_FIXED_1;
3249 cpu_breakpoint_remove_all(s, BP_CPU);
3250 cpu_watchpoint_remove_all(s, BP_CPU);
3251
3252 cr4 = 0;
3253 xcr0 = XSTATE_FP_MASK;
3254
3255#ifdef CONFIG_USER_ONLY
3256 /* Enable all the features for user-mode. */
3257 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
3258 xcr0 |= XSTATE_SSE_MASK;
3259 }
3260 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3261 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3262 if (env->features[esa->feature] & esa->bits) {
3263 xcr0 |= 1ull << i;
3264 }
3265 }
3266
3267 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
3268 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
3269 }
3270 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
3271 cr4 |= CR4_FSGSBASE_MASK;
3272 }
3273#endif
3274
3275 env->xcr0 = xcr0;
3276 cpu_x86_update_cr4(env, cr4);
3277
3278 /*
3279 * SDM 11.11.5 requires:
3280 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3281 * - IA32_MTRR_PHYSMASKn.V = 0
3282 * All other bits are undefined. For simplification, zero it all.
3283 */
3284 env->mtrr_deftype = 0;
3285 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
3286 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
3287
3288 env->interrupt_injected = -1;
3289 env->exception_injected = -1;
3290 env->nmi_injected = false;
3291#if !defined(CONFIG_USER_ONLY)
3292 /* We hard-wire the BSP to the first CPU. */
3293 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
3294
3295 s->halted = !cpu_is_bsp(cpu);
3296
3297 if (kvm_enabled()) {
3298 kvm_arch_reset_vcpu(cpu);
3299 }
3300 else if (hvf_enabled()) {
3301 hvf_reset_vcpu(s);
3302 }
3303#endif
3304}
3305
3306#ifndef CONFIG_USER_ONLY
3307bool cpu_is_bsp(X86CPU *cpu)
3308{
3309 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3310}
3311
3312/* TODO: remove me, when reset over QOM tree is implemented */
3313static void x86_cpu_machine_reset_cb(void *opaque)
3314{
3315 X86CPU *cpu = opaque;
3316 cpu_reset(CPU(cpu));
3317}
3318#endif
3319
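/* Set up the MCE banks; only enabled when the base family is >= 6 and
 * both the MCE and MCA CPUID bits are set.
 */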
3320static void mce_init(X86CPU *cpu)
3321{
3322 CPUX86State *cenv = &cpu->env;
3323 unsigned int bank;
3324
3325 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3326 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3327 (CPUID_MCE | CPUID_MCA)) {
3328 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3329 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3330 cenv->mcg_ctl = ~(uint64_t)0;
3331 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3332 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3333 }
3334 }
3335}
3336
3337#ifndef CONFIG_USER_ONLY
3338APICCommonClass *apic_get_class(void)
3339{
3340 const char *apic_type = "apic";
3341
3342 /* TODO: in-kernel irqchip for hvf */
3343 if (kvm_apic_in_kernel()) {
3344 apic_type = "kvm-apic";
3345 } else if (xen_enabled()) {
3346 apic_type = "xen-apic";
3347 }
3348
3349 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3350}
3351
3352static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
3353{
3354 APICCommonState *apic;
3355 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
3356
3357 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
3358
3359 object_property_add_child(OBJECT(cpu), "lapic",
3360 OBJECT(cpu->apic_state), &error_abort);
3361 object_unref(OBJECT(cpu->apic_state));
3362
3363 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
3364 /* TODO: convert to link<> */
3365 apic = APIC_COMMON(cpu->apic_state);
3366 apic->cpu = cpu;
3367 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
3368}
3369
3370static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3371{
3372 APICCommonState *apic;
3373 static bool apic_mmio_map_once;
3374
3375 if (cpu->apic_state == NULL) {
3376 return;
3377 }
3378 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
3379 errp);
3380
3381 /* Map APIC MMIO area */
3382 apic = APIC_COMMON(cpu->apic_state);
3383 if (!apic_mmio_map_once) {
3384 memory_region_add_subregion_overlap(get_system_memory(),
3385 apic->apicbase &
3386 MSR_IA32_APICBASE_BASE,
3387 &apic->io_memory,
3388 0x1000);
3389 apic_mmio_map_once = true;
3390 }
3391}
3392
3393static void x86_cpu_machine_done(Notifier *n, void *unused)
3394{
3395 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3396 MemoryRegion *smram =
3397 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3398
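 /* If the machine provides an SMRAM region, map an alias of it into
 * this CPU's address space root with higher priority.
 */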
3399 if (smram) {
3400 cpu->smram = g_new(MemoryRegion, 1);
3401 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3402 smram, 0, 1ull << 32);
3403 memory_region_set_enabled(cpu->smram, true);
3404 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3405 }
3406}
3407#else
3408static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3409{
3410}
3411#endif
3412
3413/* Note: Only safe for use on x86(-64) hosts */
3414static uint32_t x86_host_phys_bits(void)
3415{
3416 uint32_t eax;
3417 uint32_t host_phys_bits;
3418
3419 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3420 if (eax >= 0x80000008) {
3421 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3422 /* Note: According to AMD doc 25481 rev 2.34 there is a field
3423 * at 23:16 that can specify a maximum number of physical address
3424 * bits for the guest, overriding this value; but I've not seen
3425 * anything with that set.
3426 */
3427 host_phys_bits = eax & 0xff;
3428 } else {
3429 /* It's an odd 64-bit machine that doesn't have the leaf for
3430 * physical address bits; fall back to 36, which matches most
3431 * older Intel CPUs.
3432 */
3433 host_phys_bits = 36;
3434 }
3435
3436 return host_phys_bits;
3437}
3438
3439static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3440{
3441 if (*min < value) {
3442 *min = value;
3443 }
3444}
3445
3446/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3447static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3448{
3449 CPUX86State *env = &cpu->env;
3450 FeatureWordInfo *fi = &feature_word_info[w];
3451 uint32_t eax = fi->cpuid_eax;
3452 uint32_t region = eax & 0xF0000000;
3453
3454 if (!env->features[w]) {
3455 return;
3456 }
3457
3458 switch (region) {
3459 case 0x00000000:
3460 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3461 break;
3462 case 0x80000000:
3463 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3464 break;
3465 case 0xC0000000:
3466 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3467 break;
3468 }
3469}
3470
3471/* Calculate XSAVE components based on the configured CPU feature flags */
3472static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3473{
3474 CPUX86State *env = &cpu->env;
3475 int i;
3476 uint64_t mask;
3477
3478 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3479 return;
3480 }
3481
3482 mask = 0;
3483 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3484 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3485 if (env->features[esa->feature] & esa->bits) {
3486 mask |= (1ULL << i);
3487 }
3488 }
3489
3490 env->features[FEAT_XSAVE_COMP_LO] = mask;
3491 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3492}
3493
3494/***** Steps involved in loading and filtering CPUID data
3495 *
3496 * When initializing and realizing a CPU object, the steps
3497 * involved in setting up CPUID data are:
3498 *
3499 * 1) Loading CPU model definition (X86CPUDefinition). This is
3500 * implemented by x86_cpu_load_def() and should be completely
3501 * transparent, as it is done automatically by instance_init.
3502 * No code should need to look at X86CPUDefinition structs
3503 * outside instance_init.
3504 *
3505 * 2) CPU expansion. This is done by realize before CPUID
3506 * filtering, and will make sure host/accelerator data is
3507 * loaded for CPU models that depend on host capabilities
3508 * (e.g. "host"). Done by x86_cpu_expand_features().
3509 *
3510 * 3) CPUID filtering. This initializes extra data related to
3511 * CPUID, and checks if the host supports all capabilities
3512 * required by the CPU. Runnability of a CPU model is
3513 * determined at this step. Done by x86_cpu_filter_features().
3514 *
3515 * Some operations don't require all steps to be performed.
3516 * More precisely:
3517 *
3518 * - CPU instance creation (instance_init) will run only CPU
3519 * model loading. CPU expansion can't run at instance_init-time
3520 * because host/accelerator data may not be available yet.
3521 * - CPU realization will perform both CPU model expansion and CPUID
3522 * filtering, and return an error in case one of them fails.
3523 * - query-cpu-definitions needs to run all 3 steps. It needs
3524 * to run CPUID filtering, as the 'unavailable-features'
3525 * field is set based on the filtering results.
3526 * - The query-cpu-model-expansion QMP command only needs to run
3527 * CPU model loading and CPU expansion. It should not filter
3528 * any CPUID data based on host capabilities.
3529 */
3530
3531/* Expand CPU configuration data, based on configured features
3532 * and host/accelerator capabilities when appropriate.
3533 */
3534static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
3535{
3536 CPUX86State *env = &cpu->env;
3537 FeatureWord w;
3538 GList *l;
3539 Error *local_err = NULL;
3540
3541 /*TODO: Now cpu->max_features doesn't overwrite features
3542 * set using QOM properties, and we can convert
3543 * plus_features & minus_features to global properties
3544 * inside x86_cpu_parse_featurestr() too.
3545 */
3546 if (cpu->max_features) {
3547 for (w = 0; w < FEATURE_WORDS; w++) {
3548 /* Override only features that weren't set explicitly
3549 * by the user.
3550 */
3551 env->features[w] |=
3552 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
3553 ~env->user_features[w];
3554 }
3555 }
3556
3557 for (l = plus_features; l; l = l->next) {
3558 const char *prop = l->data;
3559 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
3560 if (local_err) {
3561 goto out;
3562 }
3563 }
3564
3565 for (l = minus_features; l; l = l->next) {
3566 const char *prop = l->data;
3567 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
3568 if (local_err) {
3569 goto out;
3570 }
3571 }
3572
3573 if (!kvm_enabled() || !cpu->expose_kvm) {
3574 env->features[FEAT_KVM] = 0;
3575 }
3576
3577 x86_cpu_enable_xsave_components(cpu);
3578
3579 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
3580 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
3581 if (cpu->full_cpuid_auto_level) {
3582 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
3583 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
3584 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
3585 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
3586 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
3587 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
3588 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
3589 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
3590 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
3591 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
3592 /* SVM requires CPUID[0x8000000A] */
3593 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3594 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
3595 }
3596 }
3597
3598 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
3599 if (env->cpuid_level == UINT32_MAX) {
3600 env->cpuid_level = env->cpuid_min_level;
3601 }
3602 if (env->cpuid_xlevel == UINT32_MAX) {
3603 env->cpuid_xlevel = env->cpuid_min_xlevel;
3604 }
3605 if (env->cpuid_xlevel2 == UINT32_MAX) {
3606 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
3607 }
3608
3609out:
3610 if (local_err != NULL) {
3611 error_propagate(errp, local_err);
3612 }
3613}
3614
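/*
 * Illustrative example (a sketch, not part of the code path above): with
 * "-cpu max,avx=off" the "avx" property setter records the bit in
 * env->user_features, so the "& ~env->user_features[w]" mask in
 * x86_cpu_expand_features() keeps AVX off even though cpu->max_features
 * would otherwise enable every bit the accelerator supports.
 */
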
3615/*
3616 * Finishes initialization of CPUID data by filtering CPU feature
3617 * words based on host availability of each feature.
3618 *
3619 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
3620 */
3621static int x86_cpu_filter_features(X86CPU *cpu)
3622{
3623 CPUX86State *env = &cpu->env;
3624 FeatureWord w;
3625 int rv = 0;
3626
3627 for (w = 0; w < FEATURE_WORDS; w++) {
3628 uint32_t host_feat =
3629 x86_cpu_get_supported_feature_word(w, false);
3630 uint32_t requested_features = env->features[w];
3631 env->features[w] &= host_feat;
3632 cpu->filtered_features[w] = requested_features & ~env->features[w];
3633 if (cpu->filtered_features[w]) {
3634 rv = 1;
3635 }
3636 }
3637
3638 return rv;
3639}
3640
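#if 0
/* Hedged sketch (never built): the relation between the requested and the
 * host-supported bits of one FeatureWord, mirroring the logic of
 * x86_cpu_filter_features() above. The function name is illustrative only.
 */
static bool example_word_was_filtered(uint32_t requested, uint32_t host)
{
    uint32_t kept = requested & host;       /* what the guest will see */
    uint32_t filtered = requested & ~kept;  /* what gets silently dropped */

    return filtered != 0;                   /* corresponds to rv = 1 above */
}
#endif
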
3641#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
3642 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
3643 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
3644#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
3645 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
3646 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
3647static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
3648{
3649 CPUState *cs = CPU(dev);
3650 X86CPU *cpu = X86_CPU(dev);
3651 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3652 CPUX86State *env = &cpu->env;
3653 Error *local_err = NULL;
3654 static bool ht_warned;
3655
3656 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3657 char *name = x86_cpu_class_get_model_name(xcc);
3658 error_setg(&local_err, "CPU model '%s' requires KVM", name);
3659 g_free(name);
3660 goto out;
3661 }
3662
3663 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
3664 error_setg(errp, "apic-id property was not initialized properly");
3665 return;
3666 }
3667
3668 x86_cpu_expand_features(cpu, &local_err);
3669 if (local_err) {
3670 goto out;
3671 }
3672
3673 if (x86_cpu_filter_features(cpu) &&
3674 (cpu->check_cpuid || cpu->enforce_cpuid)) {
3675 x86_cpu_report_filtered_features(cpu);
3676 if (cpu->enforce_cpuid) {
3677 error_setg(&local_err,
3678 accel_uses_host_cpuid() ?
3679 "Host doesn't support requested features" :
3680 "TCG doesn't support requested features");
3681 goto out;
3682 }
3683 }
3684
3685 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
3686 * CPUID[1].EDX.
3687 */
3688 if (IS_AMD_CPU(env)) {
3689 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
3690 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
3691 & CPUID_EXT2_AMD_ALIASES);
3692 }
3693
3694 /* For 64-bit systems, think about the number of physical bits to present.
3695 * Ideally this should be the same as the host; anything other than matching
3696 * the host can cause incorrect guest behaviour.
3697 * QEMU used to pick the magic value of 40 bits, which matches consumer
3698 * AMD devices but little else.
3699 */
3700 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3701 if (accel_uses_host_cpuid()) {
3702 uint32_t host_phys_bits = x86_host_phys_bits();
3703 static bool warned;
3704
3705 if (cpu->host_phys_bits) {
3706 /* The user asked for us to use the host physical bits */
3707 cpu->phys_bits = host_phys_bits;
3708 }
3709
3710 /* Print a warning if the user set it to a value that's not the
3711 * host value.
3712 */
3713 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
3714 !warned) {
3715 warn_report("Host physical bits (%u)"
3716 " do not match phys-bits property (%u)",
3717 host_phys_bits, cpu->phys_bits);
3718 warned = true;
3719 }
3720
3721 if (cpu->phys_bits &&
3722 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3723 cpu->phys_bits < 32)) {
3724 error_setg(errp, "phys-bits should be between 32 and %u"
3725 " (but is %u)",
3726 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
3727 return;
3728 }
3729 } else {
3730 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3731 error_setg(errp, "TCG only supports phys-bits=%u",
3732 TCG_PHYS_ADDR_BITS);
3733 return;
3734 }
3735 }
3736 /* 0 means it was not explicitly set by the user (or by machine
3737 * compat_props or by the host code above). In this case, the default
3738 * is the value used by TCG (40).
3739 */
3740 if (cpu->phys_bits == 0) {
3741 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3742 }
3743 } else {
3744 /* For 32-bit systems, don't use the user-set value, but keep
3745 * phys_bits consistent with what we tell the guest.
3746 */
3747 if (cpu->phys_bits != 0) {
3748 error_setg(errp, "phys-bits is not user-configurable in 32-bit mode");
3749 return;
3750 }
3751
3752 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3753 cpu->phys_bits = 36;
3754 } else {
3755 cpu->phys_bits = 32;
3756 }
3757 }
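
    /*
     * Illustrative command lines (sketches; exact behaviour depends on the
     * accelerator and on the host CPU):
     *
     *   -cpu Haswell,phys-bits=46      explicit width, checked against the
     *                                  32..TARGET_PHYS_ADDR_SPACE_BITS range
     *   -cpu host,host-phys-bits=on    copy the width reported by host CPUID
     *
     * With neither option, the TCG default of TCG_PHYS_ADDR_BITS (40) is
     * used, as described above.
     */
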
3758 cpu_exec_realizefn(cs, &local_err);
3759 if (local_err != NULL) {
3760 error_propagate(errp, local_err);
3761 return;
3762 }
3763
3764#ifndef CONFIG_USER_ONLY
3765 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3766
3767 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3768 x86_cpu_apic_create(cpu, &local_err);
3769 if (local_err != NULL) {
3770 goto out;
3771 }
3772 }
3773#endif
3774
3775 mce_init(cpu);
3776
3777#ifndef CONFIG_USER_ONLY
3778 if (tcg_enabled()) {
3779 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3780 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3781
3782 /* Outer container... */
3783 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3784 memory_region_set_enabled(cpu->cpu_as_root, true);
3785
3786 /* ... with two regions inside: normal system memory with low
3787 * priority, and...
3788 */
3789 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3790 get_system_memory(), 0, ~0ull);
3791 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3792 memory_region_set_enabled(cpu->cpu_as_mem, true);
3793
3794 cs->num_ases = 2;
3795 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
3796 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
3797
3798 /* ... SMRAM with higher priority, linked from /machine/smram. */
3799 cpu->machine_done.notify = x86_cpu_machine_done;
3800 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3801 }
3802#endif
3803
3804 qemu_init_vcpu(cs);
3805
3806 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3807 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3808 * based on inputs (sockets, cores, threads), it is still better to give
3809 * users a warning.
3810 *
3811 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3812 * cs->nr_threads hasn't been populated yet and the check is incorrect.
3813 */
3814 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3815 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3816 " -smp options properly.");
3817 ht_warned = true;
3818 }
3819
3820 x86_cpu_apic_realize(cpu, &local_err);
3821 if (local_err != NULL) {
3822 goto out;
3823 }
3824 cpu_reset(cs);
3825
3826 xcc->parent_realize(dev, &local_err);
3827
3828out:
3829 if (local_err != NULL) {
3830 error_propagate(errp, local_err);
3831 return;
3832 }
3833}
3834
3835static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3836{
3837 X86CPU *cpu = X86_CPU(dev);
3838 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3839 Error *local_err = NULL;
3840
3841#ifndef CONFIG_USER_ONLY
3842 cpu_remove_sync(CPU(dev));
3843 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3844#endif
3845
3846 if (cpu->apic_state) {
3847 object_unparent(OBJECT(cpu->apic_state));
3848 cpu->apic_state = NULL;
3849 }
3850
3851 xcc->parent_unrealize(dev, &local_err);
3852 if (local_err != NULL) {
3853 error_propagate(errp, local_err);
3854 return;
3855 }
3856}
3857
3858typedef struct BitProperty {
3859 FeatureWord w;
3860 uint32_t mask;
3861} BitProperty;
3862
3863static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3864 void *opaque, Error **errp)
3865{
3866 X86CPU *cpu = X86_CPU(obj);
3867 BitProperty *fp = opaque;
3868 uint32_t f = cpu->env.features[fp->w];
3869 bool value = (f & fp->mask) == fp->mask;
3870 visit_type_bool(v, name, &value, errp);
3871}
3872
3873static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3874 void *opaque, Error **errp)
3875{
3876 DeviceState *dev = DEVICE(obj);
3877 X86CPU *cpu = X86_CPU(obj);
3878 BitProperty *fp = opaque;
3879 Error *local_err = NULL;
3880 bool value;
3881
3882 if (dev->realized) {
3883 qdev_prop_set_after_realize(dev, name, errp);
3884 return;
3885 }
3886
3887 visit_type_bool(v, name, &value, &local_err);
3888 if (local_err) {
3889 error_propagate(errp, local_err);
3890 return;
3891 }
3892
3893 if (value) {
3894 cpu->env.features[fp->w] |= fp->mask;
3895 } else {
3896 cpu->env.features[fp->w] &= ~fp->mask;
3897 }
3898 cpu->env.user_features[fp->w] |= fp->mask;
3899}
3900
3901static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3902 void *opaque)
3903{
3904 BitProperty *prop = opaque;
3905 g_free(prop);
3906}
3907
3908/* Register a boolean property to get/set a single bit in a uint32_t field.
3909 *
3910 * The same property name can be registered multiple times to make it affect
3911 * multiple bits in the same FeatureWord. In that case, the getter will return
3912 * true only if all bits are set.
3913 */
3914static void x86_cpu_register_bit_prop(X86CPU *cpu,
3915 const char *prop_name,
3916 FeatureWord w,
3917 int bitnr)
3918{
3919 BitProperty *fp;
3920 ObjectProperty *op;
3921 uint32_t mask = (1UL << bitnr);
3922
3923 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3924 if (op) {
3925 fp = op->opaque;
3926 assert(fp->w == w);
3927 fp->mask |= mask;
3928 } else {
3929 fp = g_new0(BitProperty, 1);
3930 fp->w = w;
3931 fp->mask = mask;
3932 object_property_add(OBJECT(cpu), prop_name, "bool",
3933 x86_cpu_get_bit_prop,
3934 x86_cpu_set_bit_prop,
3935 x86_cpu_release_bit_prop, fp, &error_abort);
3936 }
3937}
3938
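/*
 * Hedged example: if a (hypothetical) property name were registered for both
 * bit 3 and bit 5 of the same FeatureWord, fp->mask would accumulate
 * (1 << 3) | (1 << 5), and x86_cpu_get_bit_prop() would report "true" only
 * when both bits are set in env->features[fp->w].
 */
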
3939static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3940 FeatureWord w,
3941 int bitnr)
3942{
3943 FeatureWordInfo *fi = &feature_word_info[w];
3944 const char *name = fi->feat_names[bitnr];
3945
3946 if (!name) {
3947 return;
3948 }
3949
3950 /* Property names should use "-" instead of "_".
3951 * Old names containing underscores are registered as aliases
3952 * using object_property_add_alias()
3953 */
3954 assert(!strchr(name, '_'));
3955 /* Aliases don't use "|" delimiters anymore; they are registered
3956 * manually using object_property_add_alias(). */
3957 assert(!strchr(name, '|'));
3958 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
3959}
3960
3961static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
3962{
3963 X86CPU *cpu = X86_CPU(cs);
3964 CPUX86State *env = &cpu->env;
3965 GuestPanicInformation *panic_info = NULL;
3966
3967 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
3968 panic_info = g_malloc0(sizeof(GuestPanicInformation));
3969
3970 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
3971
3972 assert(HV_CRASH_PARAMS >= 5);
3973 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
3974 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
3975 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
3976 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
3977 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
3978 }
3979
3980 return panic_info;
3981}
3982static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
3983 const char *name, void *opaque,
3984 Error **errp)
3985{
3986 CPUState *cs = CPU(obj);
3987 GuestPanicInformation *panic_info;
3988
3989 if (!cs->crash_occurred) {
3990 error_setg(errp, "No crash occurred");
3991 return;
3992 }
3993
3994 panic_info = x86_cpu_get_crash_info(cs);
3995 if (panic_info == NULL) {
3996 error_setg(errp, "No crash information");
3997 return;
3998 }
3999
4000 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
4001 errp);
4002 qapi_free_GuestPanicInformation(panic_info);
4003}
4004
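/*
 * Illustrative QOM access (a sketch; the device path is an assumption and
 * differs between machine types):
 *
 *   -> { "execute": "qom-get",
 *        "arguments": { "path": "/machine/unattached/device[0]",
 *                       "property": "crash-information" } }
 *
 * The query fails with "No crash occurred" until the guest has reported a
 * crash through the Hyper-V crash MSRs.
 */
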
4005static void x86_cpu_initfn(Object *obj)
4006{
4007 CPUState *cs = CPU(obj);
4008 X86CPU *cpu = X86_CPU(obj);
4009 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
4010 CPUX86State *env = &cpu->env;
4011 FeatureWord w;
4012
4013 cs->env_ptr = env;
4014
4015 object_property_add(obj, "family", "int",
4016 x86_cpuid_version_get_family,
4017 x86_cpuid_version_set_family, NULL, NULL, NULL);
4018 object_property_add(obj, "model", "int",
4019 x86_cpuid_version_get_model,
4020 x86_cpuid_version_set_model, NULL, NULL, NULL);
4021 object_property_add(obj, "stepping", "int",
4022 x86_cpuid_version_get_stepping,
4023 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
4024 object_property_add_str(obj, "vendor",
4025 x86_cpuid_get_vendor,
4026 x86_cpuid_set_vendor, NULL);
4027 object_property_add_str(obj, "model-id",
4028 x86_cpuid_get_model_id,
4029 x86_cpuid_set_model_id, NULL);
4030 object_property_add(obj, "tsc-frequency", "int",
4031 x86_cpuid_get_tsc_freq,
4032 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4033 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4034 x86_cpu_get_feature_words,
4035 NULL, NULL, (void *)env->features, NULL);
4036 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4037 x86_cpu_get_feature_words,
4038 NULL, NULL, (void *)cpu->filtered_features, NULL);
4039
4040 object_property_add(obj, "crash-information", "GuestPanicInformation",
4041 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4042
4043 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4044
4045 for (w = 0; w < FEATURE_WORDS; w++) {
4046 int bitnr;
4047
4048 for (bitnr = 0; bitnr < 32; bitnr++) {
4049 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4050 }
4051 }
4052
4053 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4054 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4055 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4056 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4057 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4058 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4059 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4060
4061 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4062 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
4063 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
4064 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
4065 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
4066 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
4067 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
4068 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
4069 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
4070 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
4071 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
4072 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
4073 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
4074 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
4075 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
4076 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
4077 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
4078 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
4079 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
4080 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
4081 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
4082
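    /*
     * Illustrative effect of the aliases above (a sketch):
     * "-cpu qemu64,sse4_1=on" and "-cpu qemu64,sse4.1=on" toggle the same
     * underlying feature bit, since "sse4_1" is only an alias for the
     * canonical "sse4.1" property.
     */
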
4083 if (xcc->cpu_def) {
4084 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
4085 }
4086}
4087
4088static int64_t x86_cpu_get_arch_id(CPUState *cs)
4089{
4090 X86CPU *cpu = X86_CPU(cs);
4091
4092 return cpu->apic_id;
4093}
4094
4095static bool x86_cpu_get_paging_enabled(const CPUState *cs)
4096{
4097 X86CPU *cpu = X86_CPU(cs);
4098
4099 return cpu->env.cr[0] & CR0_PG_MASK;
4100}
4101
4102static void x86_cpu_set_pc(CPUState *cs, vaddr value)
4103{
4104 X86CPU *cpu = X86_CPU(cs);
4105
4106 cpu->env.eip = value;
4107}
4108
4109static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
4110{
4111 X86CPU *cpu = X86_CPU(cs);
4112
4113 cpu->env.eip = tb->pc - tb->cs_base;
4114}
4115
4116static bool x86_cpu_has_work(CPUState *cs)
4117{
4118 X86CPU *cpu = X86_CPU(cs);
4119 CPUX86State *env = &cpu->env;
4120
4121 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
4122 CPU_INTERRUPT_POLL)) &&
4123 (env->eflags & IF_MASK)) ||
4124 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
4125 CPU_INTERRUPT_INIT |
4126 CPU_INTERRUPT_SIPI |
4127 CPU_INTERRUPT_MCE)) ||
4128 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
4129 !(env->hflags & HF_SMM_MASK));
4130}
4131
4132static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
4133{
4134 X86CPU *cpu = X86_CPU(cs);
4135 CPUX86State *env = &cpu->env;
4136
4137 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
4138 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
4139 : bfd_mach_i386_i8086);
4140 info->print_insn = print_insn_i386;
4141
4142 info->cap_arch = CS_ARCH_X86;
4143 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
4144 : env->hflags & HF_CS32_MASK ? CS_MODE_32
4145 : CS_MODE_16);
4146 info->cap_insn_unit = 1;
4147 info->cap_insn_split = 8;
4148}
4149
4150void x86_update_hflags(CPUX86State *env)
4151{
4152 uint32_t hflags;
4153#define HFLAG_COPY_MASK \
4154 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
4155 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
4156 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
4157 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
4158
4159 hflags = env->hflags & HFLAG_COPY_MASK;
4160 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
4161 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
4162 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
4163 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
4164 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
4165
4166 if (env->cr[4] & CR4_OSFXSR_MASK) {
4167 hflags |= HF_OSFXSR_MASK;
4168 }
4169
4170 if (env->efer & MSR_EFER_LMA) {
4171 hflags |= HF_LMA_MASK;
4172 }
4173
4174 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
4175 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
4176 } else {
4177 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
4178 (DESC_B_SHIFT - HF_CS32_SHIFT);
4179 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
4180 (DESC_B_SHIFT - HF_SS32_SHIFT);
4181 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
4182 !(hflags & HF_CS32_MASK)) {
4183 hflags |= HF_ADDSEG_MASK;
4184 } else {
4185 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
4186 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
4187 }
4188 }
4189 env->hflags = hflags;
4190}
4191
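/*
 * Worked example (a sketch): in long mode (EFER.LMA set) with a 64-bit code
 * segment (CS.L set), the first branch above forces HF_CS32_MASK,
 * HF_SS32_MASK and HF_CS64_MASK regardless of the descriptors' B bits, and
 * HF_ADDSEG_MASK stays clear.
 */
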
4192static Property x86_cpu_properties[] = {
4193#ifdef CONFIG_USER_ONLY
4194 /* apic_id = 0 by default for *-user, see commit 9886e834 */
4195 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
4196 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
4197 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
4198 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
4199#else
4200 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
4201 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
4202 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
4203 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
4204#endif
4205 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
4206 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
4207 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
4208 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
4209 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
4210 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
4211 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
4212 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
4213 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
4214 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
4215 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
4216 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
4217 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
4218 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
4219 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
4220 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
4221 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
4222 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
4223 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
4224 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
4225 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
4226 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
4227 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
4228 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
4229 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
4230 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
4231 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
4232 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
4233 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
4234 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
4235 false),
4236 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
4237 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
4238
4239 /*
4240 * From "Requirements for Implementing the Microsoft
4241 * Hypervisor Interface":
4242 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
4243 *
4244 * "Starting with Windows Server 2012 and Windows 8, if
4245 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
4246 * the hypervisor imposes no specific limit to the number of VPs.
4247 * In this case, Windows Server 2012 guest VMs may use more than
4248 * 64 VPs, up to the maximum supported number of processors applicable
4249 * to the specific Windows version being used."
4250 */
4251 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
4252 DEFINE_PROP_END_OF_LIST()
4253};
4254
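/*
 * Illustrative use of the properties above (a sketch):
 *
 *   -cpu Skylake-Client,hv-relaxed,hv-vapic,hv-time,hv-spinlocks=0x1fff
 *
 * enables a common set of Hyper-V enlightenments, while something like
 * "-cpu host,enforce=on" turns the CPUID filtering in x86_cpu_realizefn()
 * into a hard error instead of a silent downgrade.
 */
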
4255static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
4256{
4257 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4258 CPUClass *cc = CPU_CLASS(oc);
4259 DeviceClass *dc = DEVICE_CLASS(oc);
4260
4261 xcc->parent_realize = dc->realize;
4262 xcc->parent_unrealize = dc->unrealize;
4263 dc->realize = x86_cpu_realizefn;
4264 dc->unrealize = x86_cpu_unrealizefn;
4265 dc->props = x86_cpu_properties;
4266
4267 xcc->parent_reset = cc->reset;
4268 cc->reset = x86_cpu_reset;
4269 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
4270
4271 cc->class_by_name = x86_cpu_class_by_name;
4272 cc->parse_features = x86_cpu_parse_featurestr;
4273 cc->has_work = x86_cpu_has_work;
4274#ifdef CONFIG_TCG
4275 cc->do_interrupt = x86_cpu_do_interrupt;
4276 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
4277#endif
4278 cc->dump_state = x86_cpu_dump_state;
4279 cc->get_crash_info = x86_cpu_get_crash_info;
4280 cc->set_pc = x86_cpu_set_pc;
4281 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
4282 cc->gdb_read_register = x86_cpu_gdb_read_register;
4283 cc->gdb_write_register = x86_cpu_gdb_write_register;
4284 cc->get_arch_id = x86_cpu_get_arch_id;
4285 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
4286#ifdef CONFIG_USER_ONLY
4287 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
4288#else
4289 cc->asidx_from_attrs = x86_asidx_from_attrs;
4290 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
4291 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
4292 cc->write_elf64_note = x86_cpu_write_elf64_note;
4293 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
4294 cc->write_elf32_note = x86_cpu_write_elf32_note;
4295 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
4296 cc->vmsd = &vmstate_x86_cpu;
4297#endif
4298 cc->gdb_arch_name = x86_gdb_arch_name;
4299#ifdef TARGET_X86_64
4300 cc->gdb_core_xml_file = "i386-64bit.xml";
4301 cc->gdb_num_core_regs = 57;
4302#else
4303 cc->gdb_core_xml_file = "i386-32bit.xml";
4304 cc->gdb_num_core_regs = 41;
4305#endif
4306#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
4307 cc->debug_excp_handler = breakpoint_handler;
4308#endif
4309 cc->cpu_exec_enter = x86_cpu_exec_enter;
4310 cc->cpu_exec_exit = x86_cpu_exec_exit;
4311#ifdef CONFIG_TCG
4312 cc->tcg_initialize = tcg_x86_init;
4313#endif
4314 cc->disas_set_info = x86_disas_set_info;
4315
4316 dc->user_creatable = true;
4317}
4318
4319static const TypeInfo x86_cpu_type_info = {
4320 .name = TYPE_X86_CPU,
4321 .parent = TYPE_CPU,
4322 .instance_size = sizeof(X86CPU),
4323 .instance_init = x86_cpu_initfn,
4324 .abstract = true,
4325 .class_size = sizeof(X86CPUClass),
4326 .class_init = x86_cpu_common_class_init,
4327};
4328
4329
4330/* "base" CPU model, used by query-cpu-model-expansion */
4331static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4332{
4333 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4334
4335 xcc->static_model = true;
4336 xcc->migration_safe = true;
4337 xcc->model_description = "base CPU model type with no features enabled";
4338 xcc->ordering = 8;
4339}
4340
4341static const TypeInfo x86_base_cpu_type_info = {
4342 .name = X86_CPU_TYPE_NAME("base"),
4343 .parent = TYPE_X86_CPU,
4344 .class_init = x86_cpu_base_class_init,
4345};
4346
4347static void x86_cpu_register_types(void)
4348{
4349 int i;
4350
4351 type_register_static(&x86_cpu_type_info);
4352 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4353 x86_register_cpudef_type(&builtin_x86_defs[i]);
4354 }
4355 type_register_static(&max_x86_cpu_type_info);
4356 type_register_static(&x86_base_cpu_type_info);
4357#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
4358 type_register_static(&host_x86_cpu_type_info);
4359#endif
4360}
4361
4362type_init(x86_cpu_register_types)