1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22
23 #include "cpu.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
28 #include "kvm_i386.h"
29 #include "sev_i386.h"
30
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "qemu/config-file.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-visit-misc.h"
36 #include "qapi/qapi-visit-run-state.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qerror.h"
39 #include "qapi/visitor.h"
40 #include "qom/qom-qobject.h"
41 #include "sysemu/arch_init.h"
42
43 #if defined(CONFIG_KVM)
44 #include <linux/kvm_para.h>
45 #endif
46
47 #include "sysemu/sysemu.h"
48 #include "hw/qdev-properties.h"
49 #include "hw/i386/topology.h"
50 #ifndef CONFIG_USER_ONLY
51 #include "exec/address-spaces.h"
52 #include "hw/hw.h"
53 #include "hw/xen/xen.h"
54 #include "hw/i386/apic_internal.h"
55 #endif
56
57 #include "disas/capstone.h"
58
59
60 /* Cache topology CPUID constants: */
61
62 /* CPUID Leaf 2 Descriptors */
63
64 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
65 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
66 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
67 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
68
69
70 /* CPUID Leaf 4 constants: */
71
72 /* EAX: */
73 #define CPUID_4_TYPE_DCACHE 1
74 #define CPUID_4_TYPE_ICACHE 2
75 #define CPUID_4_TYPE_UNIFIED 3
76
77 #define CPUID_4_LEVEL(l) ((l) << 5)
78
79 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
80 #define CPUID_4_FULLY_ASSOC (1 << 9)
81
82 /* EDX: */
83 #define CPUID_4_NO_INVD_SHARING (1 << 0)
84 #define CPUID_4_INCLUSIVE (1 << 1)
85 #define CPUID_4_COMPLEX_IDX (1 << 2)
86
87 #define ASSOC_FULL 0xFF
88
89 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
90 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
91 a == 2 ? 0x2 : \
92 a == 4 ? 0x4 : \
93 a == 8 ? 0x6 : \
94 a == 16 ? 0x8 : \
95 a == 32 ? 0xA : \
96 a == 48 ? 0xB : \
97 a == 64 ? 0xC : \
98 a == 96 ? 0xD : \
99 a == 128 ? 0xE : \
100 a == ASSOC_FULL ? 0xF : \
101 0 /* invalid value */)
102
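/*
 * Illustrative sketch (not an additional definition): the encoding macro
 * above is consumed when the AMD cache leaves are filled in; for example,
 * CPUID[0x80000006].ECX for the L2 cache is built later in this file from
 * the constants defined below roughly as
 *
 *     (L2_SIZE_KB_AMD << 16) |
 *     (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) |
 *     (L2_LINES_PER_TAG << 8) |
 *      L2_LINE_SIZE
 *
 * AMD_ENC_ASSOC(16) evaluates to 0x8, so the 512 KB, 16-way, 64-byte-line
 * L2 works out to 0x02008140.
 */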
103
104 /* Definitions of the hardcoded cache entries we expose: */
105
106 /* L1 data cache: */
107 #define L1D_LINE_SIZE 64
108 #define L1D_ASSOCIATIVITY 8
109 #define L1D_SETS 64
110 #define L1D_PARTITIONS 1
111 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
112 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
113 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
114 #define L1D_LINES_PER_TAG 1
115 #define L1D_SIZE_KB_AMD 64
116 #define L1D_ASSOCIATIVITY_AMD 2
117
118 /* L1 instruction cache: */
119 #define L1I_LINE_SIZE 64
120 #define L1I_ASSOCIATIVITY 8
121 #define L1I_SETS 64
122 #define L1I_PARTITIONS 1
123 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
124 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
125 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
126 #define L1I_LINES_PER_TAG 1
127 #define L1I_SIZE_KB_AMD 64
128 #define L1I_ASSOCIATIVITY_AMD 2
129
130 /* Level 2 unified cache: */
131 #define L2_LINE_SIZE 64
132 #define L2_ASSOCIATIVITY 16
133 #define L2_SETS 4096
134 #define L2_PARTITIONS 1
135 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
136 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
137 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
138 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
139 #define L2_LINES_PER_TAG 1
140 #define L2_SIZE_KB_AMD 512
141
142 /* Level 3 unified cache: */
143 #define L3_SIZE_KB 0 /* disabled */
144 #define L3_ASSOCIATIVITY 0 /* disabled */
145 #define L3_LINES_PER_TAG 0 /* disabled */
146 #define L3_LINE_SIZE 0 /* disabled */
147 #define L3_N_LINE_SIZE 64
148 #define L3_N_ASSOCIATIVITY 16
149 #define L3_N_SETS 16384
150 #define L3_N_PARTITIONS 1
151 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
152 #define L3_N_LINES_PER_TAG 1
153 #define L3_N_SIZE_KB_AMD 16384
154
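/*
 * Worked example of the size formula quoted in the comments above:
 *   L1D/L1I:  64 B/line * 8 ways * 64 sets * 1 partition = 32 KiB.
 *   L2:       64 B/line * 16 ways * 4096 sets * 1 partition = 4 MiB, while
 *             the leaf 2 descriptor claims 2 MiB and leaf 0x80000006 reports
 *             L2_SIZE_KB_AMD = 512 KiB -- hence the FIXME notes about the
 *             leaves being inconsistent with each other.
 *   L3 (_N_): 64 B/line * 16 ways * 16384 sets * 1 partition = 16 MiB, which
 *             matches both L3_N_SIZE_KB_AMD (16384 KiB) and the 0x4d
 *             (16 MB, 16-way, 64 B) leaf 2 descriptor.
 */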
155 /* TLB definitions: */
156
157 #define L1_DTLB_2M_ASSOC 1
158 #define L1_DTLB_2M_ENTRIES 255
159 #define L1_DTLB_4K_ASSOC 1
160 #define L1_DTLB_4K_ENTRIES 255
161
162 #define L1_ITLB_2M_ASSOC 1
163 #define L1_ITLB_2M_ENTRIES 255
164 #define L1_ITLB_4K_ASSOC 1
165 #define L1_ITLB_4K_ENTRIES 255
166
167 #define L2_DTLB_2M_ASSOC 0 /* disabled */
168 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
169 #define L2_DTLB_4K_ASSOC 4
170 #define L2_DTLB_4K_ENTRIES 512
171
172 #define L2_ITLB_2M_ASSOC 0 /* disabled */
173 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
174 #define L2_ITLB_4K_ASSOC 4
175 #define L2_ITLB_4K_ENTRIES 512
176
177 /* CPUID Leaf 0x14 constants: */
178 #define INTEL_PT_MAX_SUBLEAF 0x1
179 /*
180 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
181 * MSR can be accessed;
182 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
183 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
184 * of Intel PT MSRs across warm reset;
185 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
186 */
187 #define INTEL_PT_MINIMAL_EBX 0xf
188 /*
189 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
190 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
191 * accessed;
192 * bit[01]: ToPA tables can hold any number of output entries, up to the
193 * maximum allowed by the MaskOrTableOffset field of
194 * IA32_RTIT_OUTPUT_MASK_PTRS;
195 * bit[02]: Support Single-Range Output scheme;
196 */
197 #define INTEL_PT_MINIMAL_ECX 0x7
198 /* generated packets which contain IP payloads have LIP values */
199 #define INTEL_PT_IP_LIP (1 << 31)
200 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
201 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
202 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
203 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
204 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
205
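/*
 * Quick decode of the bitmaps above (register split per the leaf 0x14,
 * sub-leaf 1 layout these shifts assume):
 *   INTEL_PT_MTC_BITMAP   = 0x0249 << 16: 0x249 is binary 001001001001, i.e.
 *                           MTC period encodings 0, 3, 6 and 9 (ART ratios).
 *   INTEL_PT_CYCLE_BITMAP = 0x1fff: cycle-threshold encodings 0..12
 *                           (value 0 plus 2^0..2^11).
 *   INTEL_PT_PSB_BITMAP   = 0x003f << 16: PSB frequency encodings 0..5,
 *                           i.e. 2K, 4K, 8K, 16K, 32K and 64K.
 */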
206 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
207 uint32_t vendor2, uint32_t vendor3)
208 {
209 int i;
210 for (i = 0; i < 4; i++) {
211 dst[i] = vendor1 >> (8 * i);
212 dst[i + 4] = vendor2 >> (8 * i);
213 dst[i + 8] = vendor3 >> (8 * i);
214 }
215 dst[CPUID_VENDOR_SZ] = '\0';
216 }
217
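/*
 * Example (an illustration, not a call made here): with the register values
 * CPUID leaf 0 returns on an Intel part,
 *     ebx = 0x756e6547, edx = 0x49656e69, ecx = 0x6c65746e,
 * x86_cpu_vendor_words2str(dst, ebx, edx, ecx) emits the bytes of each word
 * in little-endian order and produces dst = "GenuineIntel".
 */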
218 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
219 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
220 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
221 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
222 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
223 CPUID_PSE36 | CPUID_FXSR)
224 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
225 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
226 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
227 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
228 CPUID_PAE | CPUID_SEP | CPUID_APIC)
229
230 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
231 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
232 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
233 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
234 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
235 /* partly implemented:
236 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
237 /* missing:
238 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
239 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
240 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
241 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
242 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
243 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
244 /* missing:
245 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
246 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
247 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
248 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
249 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
250
251 #ifdef TARGET_X86_64
252 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
253 #else
254 #define TCG_EXT2_X86_64_FEATURES 0
255 #endif
256
257 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
258 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
259 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
260 TCG_EXT2_X86_64_FEATURES)
261 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
262 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
263 #define TCG_EXT4_FEATURES 0
264 #define TCG_SVM_FEATURES 0
265 #define TCG_KVM_FEATURES 0
266 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
267 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
268 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
269 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
270 CPUID_7_0_EBX_ERMS)
271 /* missing:
272 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
273 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
274 CPUID_7_0_EBX_RDSEED */
275 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
276 CPUID_7_0_ECX_LA57)
277 #define TCG_7_0_EDX_FEATURES 0
278 #define TCG_APM_FEATURES 0
279 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
280 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
281 /* missing:
282 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
283
284 typedef struct FeatureWordInfo {
285 /* feature flag names are taken from "Intel Processor Identification and
286 * the CPUID Instruction" and AMD's "CPUID Specification".
287 * In cases of disagreement between feature naming conventions,
288 * aliases may be added.
289 */
290 const char *feat_names[32];
291 uint32_t cpuid_eax; /* Input EAX for CPUID */
292 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
293 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
294 int cpuid_reg; /* output register (R_* constant) */
295 uint32_t tcg_features; /* Feature flags supported by TCG */
296 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
297 uint32_t migratable_flags; /* Feature flags known to be migratable */
298 /* Features that shouldn't be auto-enabled by "-cpu host" */
299 uint32_t no_autoenable_flags;
300 } FeatureWordInfo;
301
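/*
 * How to read the table below (an example, not additional definitions): an
 * entry such as FEAT_7_0_EBX with .cpuid_eax = 7, .cpuid_needs_ecx = true,
 * .cpuid_ecx = 0 and .cpuid_reg = R_EBX means "bit i of
 * CPUID.(EAX=7,ECX=0):EBX is the feature named feat_names[i]";
 * tcg_features is the subset of those bits TCG can emulate.
 */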
302 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
303 [FEAT_1_EDX] = {
304 .feat_names = {
305 "fpu", "vme", "de", "pse",
306 "tsc", "msr", "pae", "mce",
307 "cx8", "apic", NULL, "sep",
308 "mtrr", "pge", "mca", "cmov",
309 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
310 NULL, "ds" /* Intel dts */, "acpi", "mmx",
311 "fxsr", "sse", "sse2", "ss",
312 "ht" /* Intel htt */, "tm", "ia64", "pbe",
313 },
314 .cpuid_eax = 1, .cpuid_reg = R_EDX,
315 .tcg_features = TCG_FEATURES,
316 },
317 [FEAT_1_ECX] = {
318 .feat_names = {
319 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
320 "ds-cpl", "vmx", "smx", "est",
321 "tm2", "ssse3", "cid", NULL,
322 "fma", "cx16", "xtpr", "pdcm",
323 NULL, "pcid", "dca", "sse4.1",
324 "sse4.2", "x2apic", "movbe", "popcnt",
325 "tsc-deadline", "aes", "xsave", "osxsave",
326 "avx", "f16c", "rdrand", "hypervisor",
327 },
328 .cpuid_eax = 1, .cpuid_reg = R_ECX,
329 .tcg_features = TCG_EXT_FEATURES,
330 },
331 /* Feature names that are already listed for FEAT_1_EDX above and are
332 * also set in CPUID[8000_0001].EDX on AMD CPUs are not repeated in
333 * feat_names below. They are copied automatically
334 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
335 */
336 [FEAT_8000_0001_EDX] = {
337 .feat_names = {
338 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
339 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
340 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
341 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
342 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
343 "nx", NULL, "mmxext", NULL /* mmx */,
344 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
345 NULL, "lm", "3dnowext", "3dnow",
346 },
347 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
348 .tcg_features = TCG_EXT2_FEATURES,
349 },
350 [FEAT_8000_0001_ECX] = {
351 .feat_names = {
352 "lahf-lm", "cmp-legacy", "svm", "extapic",
353 "cr8legacy", "abm", "sse4a", "misalignsse",
354 "3dnowprefetch", "osvw", "ibs", "xop",
355 "skinit", "wdt", NULL, "lwp",
356 "fma4", "tce", NULL, "nodeid-msr",
357 NULL, "tbm", "topoext", "perfctr-core",
358 "perfctr-nb", NULL, NULL, NULL,
359 NULL, NULL, NULL, NULL,
360 },
361 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
362 .tcg_features = TCG_EXT3_FEATURES,
363 },
364 [FEAT_C000_0001_EDX] = {
365 .feat_names = {
366 NULL, NULL, "xstore", "xstore-en",
367 NULL, NULL, "xcrypt", "xcrypt-en",
368 "ace2", "ace2-en", "phe", "phe-en",
369 "pmm", "pmm-en", NULL, NULL,
370 NULL, NULL, NULL, NULL,
371 NULL, NULL, NULL, NULL,
372 NULL, NULL, NULL, NULL,
373 NULL, NULL, NULL, NULL,
374 },
375 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
376 .tcg_features = TCG_EXT4_FEATURES,
377 },
378 [FEAT_KVM] = {
379 .feat_names = {
380 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
381 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
382 NULL, "kvm-pv-tlb-flush", NULL, NULL,
383 NULL, NULL, NULL, NULL,
384 NULL, NULL, NULL, NULL,
385 NULL, NULL, NULL, NULL,
386 "kvmclock-stable-bit", NULL, NULL, NULL,
387 NULL, NULL, NULL, NULL,
388 },
389 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
390 .tcg_features = TCG_KVM_FEATURES,
391 },
392 [FEAT_KVM_HINTS] = {
393 .feat_names = {
394 "kvm-hint-dedicated", NULL, NULL, NULL,
395 NULL, NULL, NULL, NULL,
396 NULL, NULL, NULL, NULL,
397 NULL, NULL, NULL, NULL,
398 NULL, NULL, NULL, NULL,
399 NULL, NULL, NULL, NULL,
400 NULL, NULL, NULL, NULL,
401 NULL, NULL, NULL, NULL,
402 },
403 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
404 .tcg_features = TCG_KVM_FEATURES,
405 /*
406 * KVM hints aren't auto-enabled by -cpu host; they need to be
407 * explicitly enabled on the command line.
408 */
409 .no_autoenable_flags = ~0U,
410 },
411 [FEAT_HYPERV_EAX] = {
412 .feat_names = {
413 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
414 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
415 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
416 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
417 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
418 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
419 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
420 NULL, NULL,
421 NULL, NULL, NULL, NULL,
422 NULL, NULL, NULL, NULL,
423 NULL, NULL, NULL, NULL,
424 NULL, NULL, NULL, NULL,
425 },
426 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
427 },
428 [FEAT_HYPERV_EBX] = {
429 .feat_names = {
430 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
431 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
432 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
433 NULL /* hv_create_port */, NULL /* hv_connect_port */,
434 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
435 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
436 NULL, NULL,
437 NULL, NULL, NULL, NULL,
438 NULL, NULL, NULL, NULL,
439 NULL, NULL, NULL, NULL,
440 NULL, NULL, NULL, NULL,
441 },
442 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
443 },
444 [FEAT_HYPERV_EDX] = {
445 .feat_names = {
446 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
447 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
448 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
449 NULL, NULL,
450 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
451 NULL, NULL, NULL, NULL,
452 NULL, NULL, NULL, NULL,
453 NULL, NULL, NULL, NULL,
454 NULL, NULL, NULL, NULL,
455 NULL, NULL, NULL, NULL,
456 },
457 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
458 },
459 [FEAT_SVM] = {
460 .feat_names = {
461 "npt", "lbrv", "svm-lock", "nrip-save",
462 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
463 NULL, NULL, "pause-filter", NULL,
464 "pfthreshold", NULL, NULL, NULL,
465 NULL, NULL, NULL, NULL,
466 NULL, NULL, NULL, NULL,
467 NULL, NULL, NULL, NULL,
468 NULL, NULL, NULL, NULL,
469 },
470 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
471 .tcg_features = TCG_SVM_FEATURES,
472 },
473 [FEAT_7_0_EBX] = {
474 .feat_names = {
475 "fsgsbase", "tsc-adjust", NULL, "bmi1",
476 "hle", "avx2", NULL, "smep",
477 "bmi2", "erms", "invpcid", "rtm",
478 NULL, NULL, "mpx", NULL,
479 "avx512f", "avx512dq", "rdseed", "adx",
480 "smap", "avx512ifma", "pcommit", "clflushopt",
481 "clwb", "intel-pt", "avx512pf", "avx512er",
482 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
483 },
484 .cpuid_eax = 7,
485 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
486 .cpuid_reg = R_EBX,
487 .tcg_features = TCG_7_0_EBX_FEATURES,
488 },
489 [FEAT_7_0_ECX] = {
490 .feat_names = {
491 NULL, "avx512vbmi", "umip", "pku",
492 "ospke", NULL, "avx512vbmi2", NULL,
493 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
494 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
495 "la57", NULL, NULL, NULL,
496 NULL, NULL, "rdpid", NULL,
497 NULL, NULL, NULL, NULL,
498 NULL, NULL, NULL, NULL,
499 },
500 .cpuid_eax = 7,
501 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
502 .cpuid_reg = R_ECX,
503 .tcg_features = TCG_7_0_ECX_FEATURES,
504 },
505 [FEAT_7_0_EDX] = {
506 .feat_names = {
507 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
508 NULL, NULL, NULL, NULL,
509 NULL, NULL, NULL, NULL,
510 NULL, NULL, NULL, NULL,
511 NULL, NULL, NULL, NULL,
512 NULL, NULL, NULL, NULL,
513 NULL, NULL, "spec-ctrl", NULL,
514 NULL, NULL, NULL, NULL,
515 },
516 .cpuid_eax = 7,
517 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
518 .cpuid_reg = R_EDX,
519 .tcg_features = TCG_7_0_EDX_FEATURES,
520 },
521 [FEAT_8000_0007_EDX] = {
522 .feat_names = {
523 NULL, NULL, NULL, NULL,
524 NULL, NULL, NULL, NULL,
525 "invtsc", NULL, NULL, NULL,
526 NULL, NULL, NULL, NULL,
527 NULL, NULL, NULL, NULL,
528 NULL, NULL, NULL, NULL,
529 NULL, NULL, NULL, NULL,
530 NULL, NULL, NULL, NULL,
531 },
532 .cpuid_eax = 0x80000007,
533 .cpuid_reg = R_EDX,
534 .tcg_features = TCG_APM_FEATURES,
535 .unmigratable_flags = CPUID_APM_INVTSC,
536 },
537 [FEAT_8000_0008_EBX] = {
538 .feat_names = {
539 NULL, NULL, NULL, NULL,
540 NULL, NULL, NULL, NULL,
541 NULL, NULL, NULL, NULL,
542 "ibpb", NULL, NULL, NULL,
543 NULL, NULL, NULL, NULL,
544 NULL, NULL, NULL, NULL,
545 NULL, NULL, NULL, NULL,
546 NULL, NULL, NULL, NULL,
547 },
548 .cpuid_eax = 0x80000008,
549 .cpuid_reg = R_EBX,
550 .tcg_features = 0,
551 .unmigratable_flags = 0,
552 },
553 [FEAT_XSAVE] = {
554 .feat_names = {
555 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
556 NULL, NULL, NULL, NULL,
557 NULL, NULL, NULL, NULL,
558 NULL, NULL, NULL, NULL,
559 NULL, NULL, NULL, NULL,
560 NULL, NULL, NULL, NULL,
561 NULL, NULL, NULL, NULL,
562 NULL, NULL, NULL, NULL,
563 },
564 .cpuid_eax = 0xd,
565 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
566 .cpuid_reg = R_EAX,
567 .tcg_features = TCG_XSAVE_FEATURES,
568 },
569 [FEAT_6_EAX] = {
570 .feat_names = {
571 NULL, NULL, "arat", NULL,
572 NULL, NULL, NULL, NULL,
573 NULL, NULL, NULL, NULL,
574 NULL, NULL, NULL, NULL,
575 NULL, NULL, NULL, NULL,
576 NULL, NULL, NULL, NULL,
577 NULL, NULL, NULL, NULL,
578 NULL, NULL, NULL, NULL,
579 },
580 .cpuid_eax = 6, .cpuid_reg = R_EAX,
581 .tcg_features = TCG_6_EAX_FEATURES,
582 },
583 [FEAT_XSAVE_COMP_LO] = {
584 .cpuid_eax = 0xD,
585 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
586 .cpuid_reg = R_EAX,
587 .tcg_features = ~0U,
588 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
589 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
590 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
591 XSTATE_PKRU_MASK,
592 },
593 [FEAT_XSAVE_COMP_HI] = {
594 .cpuid_eax = 0xD,
595 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
596 .cpuid_reg = R_EDX,
597 .tcg_features = ~0U,
598 },
599 };
600
601 typedef struct X86RegisterInfo32 {
602 /* Name of register */
603 const char *name;
604 /* QAPI enum value register */
605 X86CPURegister32 qapi_enum;
606 } X86RegisterInfo32;
607
608 #define REGISTER(reg) \
609 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
610 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
611 REGISTER(EAX),
612 REGISTER(ECX),
613 REGISTER(EDX),
614 REGISTER(EBX),
615 REGISTER(ESP),
616 REGISTER(EBP),
617 REGISTER(ESI),
618 REGISTER(EDI),
619 };
620 #undef REGISTER
621
622 typedef struct ExtSaveArea {
623 uint32_t feature, bits;
624 uint32_t offset, size;
625 } ExtSaveArea;
626
627 static const ExtSaveArea x86_ext_save_areas[] = {
628 [XSTATE_FP_BIT] = {
629 /* x87 FP state component is always enabled if XSAVE is supported */
630 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
631 /* x87 state is in the legacy region of the XSAVE area */
632 .offset = 0,
633 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
634 },
635 [XSTATE_SSE_BIT] = {
636 /* SSE state component is always enabled if XSAVE is supported */
637 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
638 /* SSE state is in the legacy region of the XSAVE area */
639 .offset = 0,
640 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
641 },
642 [XSTATE_YMM_BIT] =
643 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
644 .offset = offsetof(X86XSaveArea, avx_state),
645 .size = sizeof(XSaveAVX) },
646 [XSTATE_BNDREGS_BIT] =
647 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
648 .offset = offsetof(X86XSaveArea, bndreg_state),
649 .size = sizeof(XSaveBNDREG) },
650 [XSTATE_BNDCSR_BIT] =
651 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
652 .offset = offsetof(X86XSaveArea, bndcsr_state),
653 .size = sizeof(XSaveBNDCSR) },
654 [XSTATE_OPMASK_BIT] =
655 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
656 .offset = offsetof(X86XSaveArea, opmask_state),
657 .size = sizeof(XSaveOpmask) },
658 [XSTATE_ZMM_Hi256_BIT] =
659 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
660 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
661 .size = sizeof(XSaveZMM_Hi256) },
662 [XSTATE_Hi16_ZMM_BIT] =
663 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
664 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
665 .size = sizeof(XSaveHi16_ZMM) },
666 [XSTATE_PKRU_BIT] =
667 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
668 .offset = offsetof(X86XSaveArea, pkru_state),
669 .size = sizeof(XSavePKRU) },
670 };
671
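/*
 * Example reading of the table above: the XSTATE_YMM_BIT entry says the AVX
 * state component is only advertised when CPUID_EXT_AVX is set in FEAT_1_ECX,
 * and that its data occupies sizeof(XSaveAVX) bytes at
 * offsetof(X86XSaveArea, avx_state) -- the offset/size pair used when the
 * XSAVE-related CPUID sub-leaves are composed.
 */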
672 static uint32_t xsave_area_size(uint64_t mask)
673 {
674 int i;
675 uint64_t ret = 0;
676
677 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
678 const ExtSaveArea *esa = &x86_ext_save_areas[i];
679 if ((mask >> i) & 1) {
680 ret = MAX(ret, esa->offset + esa->size);
681 }
682 }
683 return ret;
684 }
685
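/*
 * Example (illustrative values): for a guest exposing x87, SSE and AVX state,
 * mask = XSTATE_FP_MASK | XSTATE_SSE_MASK | XSTATE_YMM_MASK and the loop
 * above returns offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX), i.e.
 * the legacy region plus XSAVE header followed by the AVX component -- the
 * largest offset + size among the enabled components.
 */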
686 static inline bool accel_uses_host_cpuid(void)
687 {
688 return kvm_enabled() || hvf_enabled();
689 }
690
691 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
692 {
693 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
694 cpu->env.features[FEAT_XSAVE_COMP_LO];
695 }
696
697 const char *get_register_name_32(unsigned int reg)
698 {
699 if (reg >= CPU_NB_REGS32) {
700 return NULL;
701 }
702 return x86_reg_info_32[reg].name;
703 }
704
705 /*
706 * Returns the set of feature flags that are supported and migratable by
707 * QEMU, for a given FeatureWord.
708 */
709 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
710 {
711 FeatureWordInfo *wi = &feature_word_info[w];
712 uint32_t r = 0;
713 int i;
714
715 for (i = 0; i < 32; i++) {
716 uint32_t f = 1U << i;
717
718 /* If the feature name is known, it is implicitly considered migratable,
719 * unless it is explicitly set in unmigratable_flags */
720 if ((wi->migratable_flags & f) ||
721 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
722 r |= f;
723 }
724 }
725 return r;
726 }
727
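/*
 * Example with the table above: for FEAT_8000_0007_EDX the name "invtsc"
 * (bit 8) is known, but the same bit is listed in unmigratable_flags, so
 * x86_cpu_get_migratable_flags(FEAT_8000_0007_EDX) leaves it out of the
 * returned mask; bits with no name and no migratable_flags entry are never
 * reported as migratable.
 */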
728 void host_cpuid(uint32_t function, uint32_t count,
729 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
730 {
731 uint32_t vec[4];
732
733 #ifdef __x86_64__
734 asm volatile("cpuid"
735 : "=a"(vec[0]), "=b"(vec[1]),
736 "=c"(vec[2]), "=d"(vec[3])
737 : "0"(function), "c"(count) : "cc");
738 #elif defined(__i386__)
739 asm volatile("pusha \n\t"
740 "cpuid \n\t"
741 "mov %%eax, 0(%2) \n\t"
742 "mov %%ebx, 4(%2) \n\t"
743 "mov %%ecx, 8(%2) \n\t"
744 "mov %%edx, 12(%2) \n\t"
745 "popa"
746 : : "a"(function), "c"(count), "S"(vec)
747 : "memory", "cc");
748 #else
749 abort();
750 #endif
751
752 if (eax)
753 *eax = vec[0];
754 if (ebx)
755 *ebx = vec[1];
756 if (ecx)
757 *ecx = vec[2];
758 if (edx)
759 *edx = vec[3];
760 }
761
762 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
763 {
764 uint32_t eax, ebx, ecx, edx;
765
766 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
767 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
768
769 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
770 if (family) {
771 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
772 }
773 if (model) {
774 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
775 }
776 if (stepping) {
777 *stepping = eax & 0x0F;
778 }
779 }
780
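/*
 * Worked example of the decoding above (an assumed raw value, not read from
 * any particular host): for eax = 0x000306a9 from CPUID leaf 1,
 *   stepping = 0x9;
 *   family   = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff)  = 6 + 0    = 6;
 *   model    = ((eax >> 4) & 0xf) | ((eax & 0xf0000) >> 12) = 0xa | 0x30 = 58;
 * i.e. family 6, model 58, stepping 9 -- the same triple the "IvyBridge"
 * definition below uses.
 */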
781 /* CPU class name definitions: */
782
783 /* Return type name for a given CPU model name
784 * Caller is responsible for freeing the returned string.
785 */
786 static char *x86_cpu_type_name(const char *model_name)
787 {
788 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
789 }
790
791 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
792 {
793 ObjectClass *oc;
794 char *typename = x86_cpu_type_name(cpu_model);
795 oc = object_class_by_name(typename);
796 g_free(typename);
797 return oc;
798 }
799
800 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
801 {
802 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
803 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
804 return g_strndup(class_name,
805 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
806 }
807
808 struct X86CPUDefinition {
809 const char *name;
810 uint32_t level;
811 uint32_t xlevel;
812 /* vendor is zero-terminated, 12 character ASCII string */
813 char vendor[CPUID_VENDOR_SZ + 1];
814 int family;
815 int model;
816 int stepping;
817 FeatureWordArray features;
818 const char *model_id;
819 };
820
821 static X86CPUDefinition builtin_x86_defs[] = {
822 {
823 .name = "qemu64",
824 .level = 0xd,
825 .vendor = CPUID_VENDOR_AMD,
826 .family = 6,
827 .model = 6,
828 .stepping = 3,
829 .features[FEAT_1_EDX] =
830 PPRO_FEATURES |
831 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
832 CPUID_PSE36,
833 .features[FEAT_1_ECX] =
834 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
835 .features[FEAT_8000_0001_EDX] =
836 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
837 .features[FEAT_8000_0001_ECX] =
838 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
839 .xlevel = 0x8000000A,
840 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
841 },
842 {
843 .name = "phenom",
844 .level = 5,
845 .vendor = CPUID_VENDOR_AMD,
846 .family = 16,
847 .model = 2,
848 .stepping = 3,
849 /* Missing: CPUID_HT */
850 .features[FEAT_1_EDX] =
851 PPRO_FEATURES |
852 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
853 CPUID_PSE36 | CPUID_VME,
854 .features[FEAT_1_ECX] =
855 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
856 CPUID_EXT_POPCNT,
857 .features[FEAT_8000_0001_EDX] =
858 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
859 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
860 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
861 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
862 CPUID_EXT3_CR8LEG,
863 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
864 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
865 .features[FEAT_8000_0001_ECX] =
866 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
867 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
868 /* Missing: CPUID_SVM_LBRV */
869 .features[FEAT_SVM] =
870 CPUID_SVM_NPT,
871 .xlevel = 0x8000001A,
872 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
873 },
874 {
875 .name = "core2duo",
876 .level = 10,
877 .vendor = CPUID_VENDOR_INTEL,
878 .family = 6,
879 .model = 15,
880 .stepping = 11,
881 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
882 .features[FEAT_1_EDX] =
883 PPRO_FEATURES |
884 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
885 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
886 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
887 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
888 .features[FEAT_1_ECX] =
889 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
890 CPUID_EXT_CX16,
891 .features[FEAT_8000_0001_EDX] =
892 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
893 .features[FEAT_8000_0001_ECX] =
894 CPUID_EXT3_LAHF_LM,
895 .xlevel = 0x80000008,
896 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
897 },
898 {
899 .name = "kvm64",
900 .level = 0xd,
901 .vendor = CPUID_VENDOR_INTEL,
902 .family = 15,
903 .model = 6,
904 .stepping = 1,
905 /* Missing: CPUID_HT */
906 .features[FEAT_1_EDX] =
907 PPRO_FEATURES | CPUID_VME |
908 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
909 CPUID_PSE36,
910 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
911 .features[FEAT_1_ECX] =
912 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
913 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
914 .features[FEAT_8000_0001_EDX] =
915 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
916 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
917 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
918 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
919 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
920 .features[FEAT_8000_0001_ECX] =
921 0,
922 .xlevel = 0x80000008,
923 .model_id = "Common KVM processor"
924 },
925 {
926 .name = "qemu32",
927 .level = 4,
928 .vendor = CPUID_VENDOR_INTEL,
929 .family = 6,
930 .model = 6,
931 .stepping = 3,
932 .features[FEAT_1_EDX] =
933 PPRO_FEATURES,
934 .features[FEAT_1_ECX] =
935 CPUID_EXT_SSE3,
936 .xlevel = 0x80000004,
937 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
938 },
939 {
940 .name = "kvm32",
941 .level = 5,
942 .vendor = CPUID_VENDOR_INTEL,
943 .family = 15,
944 .model = 6,
945 .stepping = 1,
946 .features[FEAT_1_EDX] =
947 PPRO_FEATURES | CPUID_VME |
948 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
949 .features[FEAT_1_ECX] =
950 CPUID_EXT_SSE3,
951 .features[FEAT_8000_0001_ECX] =
952 0,
953 .xlevel = 0x80000008,
954 .model_id = "Common 32-bit KVM processor"
955 },
956 {
957 .name = "coreduo",
958 .level = 10,
959 .vendor = CPUID_VENDOR_INTEL,
960 .family = 6,
961 .model = 14,
962 .stepping = 8,
963 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
964 .features[FEAT_1_EDX] =
965 PPRO_FEATURES | CPUID_VME |
966 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
967 CPUID_SS,
968 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
969 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
970 .features[FEAT_1_ECX] =
971 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
972 .features[FEAT_8000_0001_EDX] =
973 CPUID_EXT2_NX,
974 .xlevel = 0x80000008,
975 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
976 },
977 {
978 .name = "486",
979 .level = 1,
980 .vendor = CPUID_VENDOR_INTEL,
981 .family = 4,
982 .model = 8,
983 .stepping = 0,
984 .features[FEAT_1_EDX] =
985 I486_FEATURES,
986 .xlevel = 0,
987 .model_id = "",
988 },
989 {
990 .name = "pentium",
991 .level = 1,
992 .vendor = CPUID_VENDOR_INTEL,
993 .family = 5,
994 .model = 4,
995 .stepping = 3,
996 .features[FEAT_1_EDX] =
997 PENTIUM_FEATURES,
998 .xlevel = 0,
999 .model_id = "",
1000 },
1001 {
1002 .name = "pentium2",
1003 .level = 2,
1004 .vendor = CPUID_VENDOR_INTEL,
1005 .family = 6,
1006 .model = 5,
1007 .stepping = 2,
1008 .features[FEAT_1_EDX] =
1009 PENTIUM2_FEATURES,
1010 .xlevel = 0,
1011 .model_id = "",
1012 },
1013 {
1014 .name = "pentium3",
1015 .level = 3,
1016 .vendor = CPUID_VENDOR_INTEL,
1017 .family = 6,
1018 .model = 7,
1019 .stepping = 3,
1020 .features[FEAT_1_EDX] =
1021 PENTIUM3_FEATURES,
1022 .xlevel = 0,
1023 .model_id = "",
1024 },
1025 {
1026 .name = "athlon",
1027 .level = 2,
1028 .vendor = CPUID_VENDOR_AMD,
1029 .family = 6,
1030 .model = 2,
1031 .stepping = 3,
1032 .features[FEAT_1_EDX] =
1033 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1034 CPUID_MCA,
1035 .features[FEAT_8000_0001_EDX] =
1036 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1037 .xlevel = 0x80000008,
1038 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1039 },
1040 {
1041 .name = "n270",
1042 .level = 10,
1043 .vendor = CPUID_VENDOR_INTEL,
1044 .family = 6,
1045 .model = 28,
1046 .stepping = 2,
1047 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1048 .features[FEAT_1_EDX] =
1049 PPRO_FEATURES |
1050 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1051 CPUID_ACPI | CPUID_SS,
1052 /* Some CPUs lack CPUID_SEP */
1053 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1054 * CPUID_EXT_XTPR */
1055 .features[FEAT_1_ECX] =
1056 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1057 CPUID_EXT_MOVBE,
1058 .features[FEAT_8000_0001_EDX] =
1059 CPUID_EXT2_NX,
1060 .features[FEAT_8000_0001_ECX] =
1061 CPUID_EXT3_LAHF_LM,
1062 .xlevel = 0x80000008,
1063 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1064 },
1065 {
1066 .name = "Conroe",
1067 .level = 10,
1068 .vendor = CPUID_VENDOR_INTEL,
1069 .family = 6,
1070 .model = 15,
1071 .stepping = 3,
1072 .features[FEAT_1_EDX] =
1073 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1074 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1075 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1076 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1077 CPUID_DE | CPUID_FP87,
1078 .features[FEAT_1_ECX] =
1079 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1080 .features[FEAT_8000_0001_EDX] =
1081 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1082 .features[FEAT_8000_0001_ECX] =
1083 CPUID_EXT3_LAHF_LM,
1084 .xlevel = 0x80000008,
1085 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1086 },
1087 {
1088 .name = "Penryn",
1089 .level = 10,
1090 .vendor = CPUID_VENDOR_INTEL,
1091 .family = 6,
1092 .model = 23,
1093 .stepping = 3,
1094 .features[FEAT_1_EDX] =
1095 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1096 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1097 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1098 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1099 CPUID_DE | CPUID_FP87,
1100 .features[FEAT_1_ECX] =
1101 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1102 CPUID_EXT_SSE3,
1103 .features[FEAT_8000_0001_EDX] =
1104 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1105 .features[FEAT_8000_0001_ECX] =
1106 CPUID_EXT3_LAHF_LM,
1107 .xlevel = 0x80000008,
1108 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1109 },
1110 {
1111 .name = "Nehalem",
1112 .level = 11,
1113 .vendor = CPUID_VENDOR_INTEL,
1114 .family = 6,
1115 .model = 26,
1116 .stepping = 3,
1117 .features[FEAT_1_EDX] =
1118 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1119 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1120 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1121 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1122 CPUID_DE | CPUID_FP87,
1123 .features[FEAT_1_ECX] =
1124 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1125 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1126 .features[FEAT_8000_0001_EDX] =
1127 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1128 .features[FEAT_8000_0001_ECX] =
1129 CPUID_EXT3_LAHF_LM,
1130 .xlevel = 0x80000008,
1131 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1132 },
1133 {
1134 .name = "Nehalem-IBRS",
1135 .level = 11,
1136 .vendor = CPUID_VENDOR_INTEL,
1137 .family = 6,
1138 .model = 26,
1139 .stepping = 3,
1140 .features[FEAT_1_EDX] =
1141 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1142 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1143 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1144 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1145 CPUID_DE | CPUID_FP87,
1146 .features[FEAT_1_ECX] =
1147 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1148 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1149 .features[FEAT_7_0_EDX] =
1150 CPUID_7_0_EDX_SPEC_CTRL,
1151 .features[FEAT_8000_0001_EDX] =
1152 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1153 .features[FEAT_8000_0001_ECX] =
1154 CPUID_EXT3_LAHF_LM,
1155 .xlevel = 0x80000008,
1156 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1157 },
1158 {
1159 .name = "Westmere",
1160 .level = 11,
1161 .vendor = CPUID_VENDOR_INTEL,
1162 .family = 6,
1163 .model = 44,
1164 .stepping = 1,
1165 .features[FEAT_1_EDX] =
1166 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1167 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1168 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1169 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1170 CPUID_DE | CPUID_FP87,
1171 .features[FEAT_1_ECX] =
1172 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1173 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1174 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1175 .features[FEAT_8000_0001_EDX] =
1176 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1177 .features[FEAT_8000_0001_ECX] =
1178 CPUID_EXT3_LAHF_LM,
1179 .features[FEAT_6_EAX] =
1180 CPUID_6_EAX_ARAT,
1181 .xlevel = 0x80000008,
1182 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1183 },
1184 {
1185 .name = "Westmere-IBRS",
1186 .level = 11,
1187 .vendor = CPUID_VENDOR_INTEL,
1188 .family = 6,
1189 .model = 44,
1190 .stepping = 1,
1191 .features[FEAT_1_EDX] =
1192 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1193 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1194 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1195 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1196 CPUID_DE | CPUID_FP87,
1197 .features[FEAT_1_ECX] =
1198 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1199 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1200 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1201 .features[FEAT_8000_0001_EDX] =
1202 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1203 .features[FEAT_8000_0001_ECX] =
1204 CPUID_EXT3_LAHF_LM,
1205 .features[FEAT_7_0_EDX] =
1206 CPUID_7_0_EDX_SPEC_CTRL,
1207 .features[FEAT_6_EAX] =
1208 CPUID_6_EAX_ARAT,
1209 .xlevel = 0x80000008,
1210 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1211 },
1212 {
1213 .name = "SandyBridge",
1214 .level = 0xd,
1215 .vendor = CPUID_VENDOR_INTEL,
1216 .family = 6,
1217 .model = 42,
1218 .stepping = 1,
1219 .features[FEAT_1_EDX] =
1220 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1221 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1222 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1223 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1224 CPUID_DE | CPUID_FP87,
1225 .features[FEAT_1_ECX] =
1226 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1227 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1228 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1229 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1230 CPUID_EXT_SSE3,
1231 .features[FEAT_8000_0001_EDX] =
1232 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1233 CPUID_EXT2_SYSCALL,
1234 .features[FEAT_8000_0001_ECX] =
1235 CPUID_EXT3_LAHF_LM,
1236 .features[FEAT_XSAVE] =
1237 CPUID_XSAVE_XSAVEOPT,
1238 .features[FEAT_6_EAX] =
1239 CPUID_6_EAX_ARAT,
1240 .xlevel = 0x80000008,
1241 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1242 },
1243 {
1244 .name = "SandyBridge-IBRS",
1245 .level = 0xd,
1246 .vendor = CPUID_VENDOR_INTEL,
1247 .family = 6,
1248 .model = 42,
1249 .stepping = 1,
1250 .features[FEAT_1_EDX] =
1251 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1252 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1253 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1254 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1255 CPUID_DE | CPUID_FP87,
1256 .features[FEAT_1_ECX] =
1257 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1258 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1259 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1260 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1261 CPUID_EXT_SSE3,
1262 .features[FEAT_8000_0001_EDX] =
1263 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1264 CPUID_EXT2_SYSCALL,
1265 .features[FEAT_8000_0001_ECX] =
1266 CPUID_EXT3_LAHF_LM,
1267 .features[FEAT_7_0_EDX] =
1268 CPUID_7_0_EDX_SPEC_CTRL,
1269 .features[FEAT_XSAVE] =
1270 CPUID_XSAVE_XSAVEOPT,
1271 .features[FEAT_6_EAX] =
1272 CPUID_6_EAX_ARAT,
1273 .xlevel = 0x80000008,
1274 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1275 },
1276 {
1277 .name = "IvyBridge",
1278 .level = 0xd,
1279 .vendor = CPUID_VENDOR_INTEL,
1280 .family = 6,
1281 .model = 58,
1282 .stepping = 9,
1283 .features[FEAT_1_EDX] =
1284 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1285 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1286 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1287 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1288 CPUID_DE | CPUID_FP87,
1289 .features[FEAT_1_ECX] =
1290 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1291 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1292 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1293 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1294 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1295 .features[FEAT_7_0_EBX] =
1296 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1297 CPUID_7_0_EBX_ERMS,
1298 .features[FEAT_8000_0001_EDX] =
1299 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1300 CPUID_EXT2_SYSCALL,
1301 .features[FEAT_8000_0001_ECX] =
1302 CPUID_EXT3_LAHF_LM,
1303 .features[FEAT_XSAVE] =
1304 CPUID_XSAVE_XSAVEOPT,
1305 .features[FEAT_6_EAX] =
1306 CPUID_6_EAX_ARAT,
1307 .xlevel = 0x80000008,
1308 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1309 },
1310 {
1311 .name = "IvyBridge-IBRS",
1312 .level = 0xd,
1313 .vendor = CPUID_VENDOR_INTEL,
1314 .family = 6,
1315 .model = 58,
1316 .stepping = 9,
1317 .features[FEAT_1_EDX] =
1318 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1319 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1320 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1321 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1322 CPUID_DE | CPUID_FP87,
1323 .features[FEAT_1_ECX] =
1324 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1325 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1326 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1327 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1328 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1329 .features[FEAT_7_0_EBX] =
1330 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1331 CPUID_7_0_EBX_ERMS,
1332 .features[FEAT_8000_0001_EDX] =
1333 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1334 CPUID_EXT2_SYSCALL,
1335 .features[FEAT_8000_0001_ECX] =
1336 CPUID_EXT3_LAHF_LM,
1337 .features[FEAT_7_0_EDX] =
1338 CPUID_7_0_EDX_SPEC_CTRL,
1339 .features[FEAT_XSAVE] =
1340 CPUID_XSAVE_XSAVEOPT,
1341 .features[FEAT_6_EAX] =
1342 CPUID_6_EAX_ARAT,
1343 .xlevel = 0x80000008,
1344 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1345 },
1346 {
1347 .name = "Haswell-noTSX",
1348 .level = 0xd,
1349 .vendor = CPUID_VENDOR_INTEL,
1350 .family = 6,
1351 .model = 60,
1352 .stepping = 1,
1353 .features[FEAT_1_EDX] =
1354 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1355 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1356 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1357 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1358 CPUID_DE | CPUID_FP87,
1359 .features[FEAT_1_ECX] =
1360 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1361 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1362 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1363 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1364 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1365 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1366 .features[FEAT_8000_0001_EDX] =
1367 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1368 CPUID_EXT2_SYSCALL,
1369 .features[FEAT_8000_0001_ECX] =
1370 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1371 .features[FEAT_7_0_EBX] =
1372 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1373 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1374 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1375 .features[FEAT_XSAVE] =
1376 CPUID_XSAVE_XSAVEOPT,
1377 .features[FEAT_6_EAX] =
1378 CPUID_6_EAX_ARAT,
1379 .xlevel = 0x80000008,
1380 .model_id = "Intel Core Processor (Haswell, no TSX)",
1381 },
1382 {
1383 .name = "Haswell-noTSX-IBRS",
1384 .level = 0xd,
1385 .vendor = CPUID_VENDOR_INTEL,
1386 .family = 6,
1387 .model = 60,
1388 .stepping = 1,
1389 .features[FEAT_1_EDX] =
1390 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1391 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1392 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1393 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1394 CPUID_DE | CPUID_FP87,
1395 .features[FEAT_1_ECX] =
1396 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1397 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1398 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1399 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1400 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1401 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1402 .features[FEAT_8000_0001_EDX] =
1403 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1404 CPUID_EXT2_SYSCALL,
1405 .features[FEAT_8000_0001_ECX] =
1406 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1407 .features[FEAT_7_0_EDX] =
1408 CPUID_7_0_EDX_SPEC_CTRL,
1409 .features[FEAT_7_0_EBX] =
1410 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1411 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1412 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1413 .features[FEAT_XSAVE] =
1414 CPUID_XSAVE_XSAVEOPT,
1415 .features[FEAT_6_EAX] =
1416 CPUID_6_EAX_ARAT,
1417 .xlevel = 0x80000008,
1418 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1419 },
1420 {
1421 .name = "Haswell",
1422 .level = 0xd,
1423 .vendor = CPUID_VENDOR_INTEL,
1424 .family = 6,
1425 .model = 60,
1426 .stepping = 4,
1427 .features[FEAT_1_EDX] =
1428 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1429 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1430 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1431 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1432 CPUID_DE | CPUID_FP87,
1433 .features[FEAT_1_ECX] =
1434 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1435 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1436 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1437 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1438 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1439 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1440 .features[FEAT_8000_0001_EDX] =
1441 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1442 CPUID_EXT2_SYSCALL,
1443 .features[FEAT_8000_0001_ECX] =
1444 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1445 .features[FEAT_7_0_EBX] =
1446 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1447 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1448 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1449 CPUID_7_0_EBX_RTM,
1450 .features[FEAT_XSAVE] =
1451 CPUID_XSAVE_XSAVEOPT,
1452 .features[FEAT_6_EAX] =
1453 CPUID_6_EAX_ARAT,
1454 .xlevel = 0x80000008,
1455 .model_id = "Intel Core Processor (Haswell)",
1456 },
1457 {
1458 .name = "Haswell-IBRS",
1459 .level = 0xd,
1460 .vendor = CPUID_VENDOR_INTEL,
1461 .family = 6,
1462 .model = 60,
1463 .stepping = 4,
1464 .features[FEAT_1_EDX] =
1465 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1466 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1467 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1468 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1469 CPUID_DE | CPUID_FP87,
1470 .features[FEAT_1_ECX] =
1471 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1472 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1473 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1474 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1475 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1476 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1477 .features[FEAT_8000_0001_EDX] =
1478 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1479 CPUID_EXT2_SYSCALL,
1480 .features[FEAT_8000_0001_ECX] =
1481 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1482 .features[FEAT_7_0_EDX] =
1483 CPUID_7_0_EDX_SPEC_CTRL,
1484 .features[FEAT_7_0_EBX] =
1485 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1486 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1487 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1488 CPUID_7_0_EBX_RTM,
1489 .features[FEAT_XSAVE] =
1490 CPUID_XSAVE_XSAVEOPT,
1491 .features[FEAT_6_EAX] =
1492 CPUID_6_EAX_ARAT,
1493 .xlevel = 0x80000008,
1494 .model_id = "Intel Core Processor (Haswell, IBRS)",
1495 },
1496 {
1497 .name = "Broadwell-noTSX",
1498 .level = 0xd,
1499 .vendor = CPUID_VENDOR_INTEL,
1500 .family = 6,
1501 .model = 61,
1502 .stepping = 2,
1503 .features[FEAT_1_EDX] =
1504 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1505 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1506 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1507 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1508 CPUID_DE | CPUID_FP87,
1509 .features[FEAT_1_ECX] =
1510 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1511 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1512 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1513 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1514 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1515 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1516 .features[FEAT_8000_0001_EDX] =
1517 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1518 CPUID_EXT2_SYSCALL,
1519 .features[FEAT_8000_0001_ECX] =
1520 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1521 .features[FEAT_7_0_EBX] =
1522 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1523 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1524 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1525 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1526 CPUID_7_0_EBX_SMAP,
1527 .features[FEAT_XSAVE] =
1528 CPUID_XSAVE_XSAVEOPT,
1529 .features[FEAT_6_EAX] =
1530 CPUID_6_EAX_ARAT,
1531 .xlevel = 0x80000008,
1532 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1533 },
1534 {
1535 .name = "Broadwell-noTSX-IBRS",
1536 .level = 0xd,
1537 .vendor = CPUID_VENDOR_INTEL,
1538 .family = 6,
1539 .model = 61,
1540 .stepping = 2,
1541 .features[FEAT_1_EDX] =
1542 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1543 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1544 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1545 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1546 CPUID_DE | CPUID_FP87,
1547 .features[FEAT_1_ECX] =
1548 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1549 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1550 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1551 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1552 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1553 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1554 .features[FEAT_8000_0001_EDX] =
1555 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1556 CPUID_EXT2_SYSCALL,
1557 .features[FEAT_8000_0001_ECX] =
1558 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1559 .features[FEAT_7_0_EDX] =
1560 CPUID_7_0_EDX_SPEC_CTRL,
1561 .features[FEAT_7_0_EBX] =
1562 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1563 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1564 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1565 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1566 CPUID_7_0_EBX_SMAP,
1567 .features[FEAT_XSAVE] =
1568 CPUID_XSAVE_XSAVEOPT,
1569 .features[FEAT_6_EAX] =
1570 CPUID_6_EAX_ARAT,
1571 .xlevel = 0x80000008,
1572 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
1573 },
1574 {
1575 .name = "Broadwell",
1576 .level = 0xd,
1577 .vendor = CPUID_VENDOR_INTEL,
1578 .family = 6,
1579 .model = 61,
1580 .stepping = 2,
1581 .features[FEAT_1_EDX] =
1582 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1583 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1584 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1585 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1586 CPUID_DE | CPUID_FP87,
1587 .features[FEAT_1_ECX] =
1588 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1589 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1590 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1591 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1592 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1593 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1594 .features[FEAT_8000_0001_EDX] =
1595 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1596 CPUID_EXT2_SYSCALL,
1597 .features[FEAT_8000_0001_ECX] =
1598 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1599 .features[FEAT_7_0_EBX] =
1600 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1601 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1602 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1603 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1604 CPUID_7_0_EBX_SMAP,
1605 .features[FEAT_XSAVE] =
1606 CPUID_XSAVE_XSAVEOPT,
1607 .features[FEAT_6_EAX] =
1608 CPUID_6_EAX_ARAT,
1609 .xlevel = 0x80000008,
1610 .model_id = "Intel Core Processor (Broadwell)",
1611 },
1612 {
1613 .name = "Broadwell-IBRS",
1614 .level = 0xd,
1615 .vendor = CPUID_VENDOR_INTEL,
1616 .family = 6,
1617 .model = 61,
1618 .stepping = 2,
1619 .features[FEAT_1_EDX] =
1620 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1621 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1622 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1623 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1624 CPUID_DE | CPUID_FP87,
1625 .features[FEAT_1_ECX] =
1626 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1627 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1628 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1629 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1630 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1631 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1632 .features[FEAT_8000_0001_EDX] =
1633 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1634 CPUID_EXT2_SYSCALL,
1635 .features[FEAT_8000_0001_ECX] =
1636 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1637 .features[FEAT_7_0_EDX] =
1638 CPUID_7_0_EDX_SPEC_CTRL,
1639 .features[FEAT_7_0_EBX] =
1640 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1641 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1642 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1643 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1644 CPUID_7_0_EBX_SMAP,
1645 .features[FEAT_XSAVE] =
1646 CPUID_XSAVE_XSAVEOPT,
1647 .features[FEAT_6_EAX] =
1648 CPUID_6_EAX_ARAT,
1649 .xlevel = 0x80000008,
1650 .model_id = "Intel Core Processor (Broadwell, IBRS)",
1651 },
1652 {
1653 .name = "Skylake-Client",
1654 .level = 0xd,
1655 .vendor = CPUID_VENDOR_INTEL,
1656 .family = 6,
1657 .model = 94,
1658 .stepping = 3,
1659 .features[FEAT_1_EDX] =
1660 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1661 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1662 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1663 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1664 CPUID_DE | CPUID_FP87,
1665 .features[FEAT_1_ECX] =
1666 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1667 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1668 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1669 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1670 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1671 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1672 .features[FEAT_8000_0001_EDX] =
1673 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1674 CPUID_EXT2_SYSCALL,
1675 .features[FEAT_8000_0001_ECX] =
1676 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1677 .features[FEAT_7_0_EBX] =
1678 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1679 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1680 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1681 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1682 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1683 /* Missing: XSAVES (not supported by some Linux versions,
1684 * including v4.1 to v4.12).
1685 * KVM doesn't yet expose any XSAVES state save component,
1686 * and the only one defined in Skylake (processor tracing)
1687 * probably will block migration anyway.
1688 */
1689 .features[FEAT_XSAVE] =
1690 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1691 CPUID_XSAVE_XGETBV1,
1692 .features[FEAT_6_EAX] =
1693 CPUID_6_EAX_ARAT,
1694 .xlevel = 0x80000008,
1695 .model_id = "Intel Core Processor (Skylake)",
1696 },
1697 {
1698 .name = "Skylake-Client-IBRS",
1699 .level = 0xd,
1700 .vendor = CPUID_VENDOR_INTEL,
1701 .family = 6,
1702 .model = 94,
1703 .stepping = 3,
1704 .features[FEAT_1_EDX] =
1705 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1706 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1707 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1708 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1709 CPUID_DE | CPUID_FP87,
1710 .features[FEAT_1_ECX] =
1711 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1712 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1713 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1714 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1715 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1716 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1717 .features[FEAT_8000_0001_EDX] =
1718 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1719 CPUID_EXT2_SYSCALL,
1720 .features[FEAT_8000_0001_ECX] =
1721 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1722 .features[FEAT_7_0_EDX] =
1723 CPUID_7_0_EDX_SPEC_CTRL,
1724 .features[FEAT_7_0_EBX] =
1725 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1726 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1727 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1728 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1729 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1730 /* Missing: XSAVES (not supported by some Linux versions,
1731 * including v4.1 to v4.12).
1732 * KVM doesn't yet expose any XSAVES state save component,
1733 * and the only one defined in Skylake (processor tracing)
1734 * probably will block migration anyway.
1735 */
1736 .features[FEAT_XSAVE] =
1737 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1738 CPUID_XSAVE_XGETBV1,
1739 .features[FEAT_6_EAX] =
1740 CPUID_6_EAX_ARAT,
1741 .xlevel = 0x80000008,
1742 .model_id = "Intel Core Processor (Skylake, IBRS)",
1743 },
1744 {
1745 .name = "Skylake-Server",
1746 .level = 0xd,
1747 .vendor = CPUID_VENDOR_INTEL,
1748 .family = 6,
1749 .model = 85,
1750 .stepping = 4,
1751 .features[FEAT_1_EDX] =
1752 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1753 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1754 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1755 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1756 CPUID_DE | CPUID_FP87,
1757 .features[FEAT_1_ECX] =
1758 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1759 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1760 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1761 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1762 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1763 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1764 .features[FEAT_8000_0001_EDX] =
1765 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1766 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1767 .features[FEAT_8000_0001_ECX] =
1768 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1769 .features[FEAT_7_0_EBX] =
1770 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1771 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1772 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1773 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1774 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1775 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1776 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1777 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
1778 /* Missing: XSAVES (not supported by some Linux versions,
1779 * including v4.1 to v4.12).
1780 * KVM doesn't yet expose any XSAVES state save component,
1781 * and the only one defined in Skylake (processor tracing)
1782 * probably will block migration anyway.
1783 */
1784 .features[FEAT_XSAVE] =
1785 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1786 CPUID_XSAVE_XGETBV1,
1787 .features[FEAT_6_EAX] =
1788 CPUID_6_EAX_ARAT,
1789 .xlevel = 0x80000008,
1790 .model_id = "Intel Xeon Processor (Skylake)",
1791 },
1792 {
1793 .name = "Skylake-Server-IBRS",
1794 .level = 0xd,
1795 .vendor = CPUID_VENDOR_INTEL,
1796 .family = 6,
1797 .model = 85,
1798 .stepping = 4,
1799 .features[FEAT_1_EDX] =
1800 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1801 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1802 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1803 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1804 CPUID_DE | CPUID_FP87,
1805 .features[FEAT_1_ECX] =
1806 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1807 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1808 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1809 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1810 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1811 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1812 .features[FEAT_8000_0001_EDX] =
1813 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1814 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1815 .features[FEAT_8000_0001_ECX] =
1816 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1817 .features[FEAT_7_0_EDX] =
1818 CPUID_7_0_EDX_SPEC_CTRL,
1819 .features[FEAT_7_0_EBX] =
1820 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1821 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1822 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1823 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1824 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1825 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1826 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1827 CPUID_7_0_EBX_AVX512VL,
1828 /* Missing: XSAVES (not supported by some Linux versions,
1829 * including v4.1 to v4.12).
1830 * KVM doesn't yet expose any XSAVES state save component,
1831 * and the only one defined in Skylake (processor tracing)
1832 * probably will block migration anyway.
1833 */
1834 .features[FEAT_XSAVE] =
1835 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1836 CPUID_XSAVE_XGETBV1,
1837 .features[FEAT_6_EAX] =
1838 CPUID_6_EAX_ARAT,
1839 .xlevel = 0x80000008,
1840 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
1841 },
1842 {
1843 .name = "Opteron_G1",
1844 .level = 5,
1845 .vendor = CPUID_VENDOR_AMD,
1846 .family = 15,
1847 .model = 6,
1848 .stepping = 1,
1849 .features[FEAT_1_EDX] =
1850 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1851 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1852 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1853 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1854 CPUID_DE | CPUID_FP87,
1855 .features[FEAT_1_ECX] =
1856 CPUID_EXT_SSE3,
1857 .features[FEAT_8000_0001_EDX] =
1858 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1859 .xlevel = 0x80000008,
1860 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1861 },
1862 {
1863 .name = "Opteron_G2",
1864 .level = 5,
1865 .vendor = CPUID_VENDOR_AMD,
1866 .family = 15,
1867 .model = 6,
1868 .stepping = 1,
1869 .features[FEAT_1_EDX] =
1870 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1871 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1872 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1873 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1874 CPUID_DE | CPUID_FP87,
1875 .features[FEAT_1_ECX] =
1876 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1877 /* Missing: CPUID_EXT2_RDTSCP */
1878 .features[FEAT_8000_0001_EDX] =
1879 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1880 .features[FEAT_8000_0001_ECX] =
1881 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1882 .xlevel = 0x80000008,
1883 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1884 },
1885 {
1886 .name = "Opteron_G3",
1887 .level = 5,
1888 .vendor = CPUID_VENDOR_AMD,
1889 .family = 16,
1890 .model = 2,
1891 .stepping = 3,
1892 .features[FEAT_1_EDX] =
1893 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1894 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1895 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1896 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1897 CPUID_DE | CPUID_FP87,
1898 .features[FEAT_1_ECX] =
1899 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1900 CPUID_EXT_SSE3,
1901 /* Missing: CPUID_EXT2_RDTSCP */
1902 .features[FEAT_8000_0001_EDX] =
1903 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1904 .features[FEAT_8000_0001_ECX] =
1905 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1906 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1907 .xlevel = 0x80000008,
1908 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1909 },
1910 {
1911 .name = "Opteron_G4",
1912 .level = 0xd,
1913 .vendor = CPUID_VENDOR_AMD,
1914 .family = 21,
1915 .model = 1,
1916 .stepping = 2,
1917 .features[FEAT_1_EDX] =
1918 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1919 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1920 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1921 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1922 CPUID_DE | CPUID_FP87,
1923 .features[FEAT_1_ECX] =
1924 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1925 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1926 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1927 CPUID_EXT_SSE3,
1928 /* Missing: CPUID_EXT2_RDTSCP */
1929 .features[FEAT_8000_0001_EDX] =
1930 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1931 CPUID_EXT2_SYSCALL,
1932 .features[FEAT_8000_0001_ECX] =
1933 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1934 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1935 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1936 CPUID_EXT3_LAHF_LM,
1937 /* no xsaveopt! */
1938 .xlevel = 0x8000001A,
1939 .model_id = "AMD Opteron 62xx class CPU",
1940 },
1941 {
1942 .name = "Opteron_G5",
1943 .level = 0xd,
1944 .vendor = CPUID_VENDOR_AMD,
1945 .family = 21,
1946 .model = 2,
1947 .stepping = 0,
1948 .features[FEAT_1_EDX] =
1949 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1950 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1951 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1952 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1953 CPUID_DE | CPUID_FP87,
1954 .features[FEAT_1_ECX] =
1955 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1956 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1957 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1958 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1959 /* Missing: CPUID_EXT2_RDTSCP */
1960 .features[FEAT_8000_0001_EDX] =
1961 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1962 CPUID_EXT2_SYSCALL,
1963 .features[FEAT_8000_0001_ECX] =
1964 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1965 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1966 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1967 CPUID_EXT3_LAHF_LM,
1968 /* no xsaveopt! */
1969 .xlevel = 0x8000001A,
1970 .model_id = "AMD Opteron 63xx class CPU",
1971 },
1972 {
1973 .name = "EPYC",
1974 .level = 0xd,
1975 .vendor = CPUID_VENDOR_AMD,
1976 .family = 23,
1977 .model = 1,
1978 .stepping = 2,
1979 .features[FEAT_1_EDX] =
1980 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
1981 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
1982 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
1983 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
1984 CPUID_VME | CPUID_FP87,
1985 .features[FEAT_1_ECX] =
1986 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
1987 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
1988 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1989 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
1990 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1991 .features[FEAT_8000_0001_EDX] =
1992 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
1993 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
1994 CPUID_EXT2_SYSCALL,
1995 .features[FEAT_8000_0001_ECX] =
1996 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
1997 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
1998 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1999 .features[FEAT_7_0_EBX] =
2000 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2001 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2002 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2003 CPUID_7_0_EBX_SHA_NI,
2004 /* Missing: XSAVES (not supported by some Linux versions,
2005 * including v4.1 to v4.12).
2006 * KVM doesn't yet expose any XSAVES state save component.
2007 */
2008 .features[FEAT_XSAVE] =
2009 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2010 CPUID_XSAVE_XGETBV1,
2011 .features[FEAT_6_EAX] =
2012 CPUID_6_EAX_ARAT,
2013 .xlevel = 0x8000000A,
2014 .model_id = "AMD EPYC Processor",
2015 },
2016 {
2017 .name = "EPYC-IBPB",
2018 .level = 0xd,
2019 .vendor = CPUID_VENDOR_AMD,
2020 .family = 23,
2021 .model = 1,
2022 .stepping = 2,
2023 .features[FEAT_1_EDX] =
2024 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2025 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2026 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2027 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2028 CPUID_VME | CPUID_FP87,
2029 .features[FEAT_1_ECX] =
2030 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2031 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2032 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2033 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2034 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2035 .features[FEAT_8000_0001_EDX] =
2036 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2037 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2038 CPUID_EXT2_SYSCALL,
2039 .features[FEAT_8000_0001_ECX] =
2040 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2041 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2042 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2043 .features[FEAT_8000_0008_EBX] =
2044 CPUID_8000_0008_EBX_IBPB,
2045 .features[FEAT_7_0_EBX] =
2046 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2047 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2048 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2049 CPUID_7_0_EBX_SHA_NI,
2050 /* Missing: XSAVES (not supported by some Linux versions,
2051 * including v4.1 to v4.12).
2052 * KVM doesn't yet expose any XSAVES state save component.
2053 */
2054 .features[FEAT_XSAVE] =
2055 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2056 CPUID_XSAVE_XGETBV1,
2057 .features[FEAT_6_EAX] =
2058 CPUID_6_EAX_ARAT,
2059 .xlevel = 0x8000000A,
2060 .model_id = "AMD EPYC Processor (with IBPB)",
2061 },
2062 };
2063
2064 typedef struct PropValue {
2065 const char *prop, *value;
2066 } PropValue;
2067
2068 /* KVM-specific features that are automatically added/removed
2069 * from all CPU models when KVM is enabled.
2070 */
2071 static PropValue kvm_default_props[] = {
2072 { "kvmclock", "on" },
2073 { "kvm-nopiodelay", "on" },
2074 { "kvm-asyncpf", "on" },
2075 { "kvm-steal-time", "on" },
2076 { "kvm-pv-eoi", "on" },
2077 { "kvmclock-stable-bit", "on" },
2078 { "x2apic", "on" },
2079 { "acpi", "off" },
2080 { "monitor", "off" },
2081 { "svm", "off" },
2082 { NULL, NULL },
2083 };
2084
2085 /* TCG-specific defaults that override all CPU models when using TCG
2086 */
2087 static PropValue tcg_default_props[] = {
2088 { "vme", "off" },
2089 { NULL, NULL },
2090 };
2091
2092
2093 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2094 {
2095 PropValue *pv;
2096 for (pv = kvm_default_props; pv->prop; pv++) {
2097 if (!strcmp(pv->prop, prop)) {
2098 pv->value = value;
2099 break;
2100 }
2101 }
2102
2103 /* It is valid to call this function only for properties that
2104 * are already present in the kvm_default_props table.
2105 */
2106 assert(pv->prop);
2107 }
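/*
 * For example, x86_cpu_load_def() below turns the "x2apic" default off
 * when the in-kernel irqchip is not available:
 *
 *     x86_cpu_change_kvm_default("x2apic", "off");
 *
 * Passing a property name that is not listed in kvm_default_props would
 * trip the assert above.
 */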
2108
2109 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2110 bool migratable_only);
2111
2112 static bool lmce_supported(void)
2113 {
2114 uint64_t mce_cap = 0;
2115
2116 #ifdef CONFIG_KVM
2117 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2118 return false;
2119 }
2120 #endif
2121
2122 return !!(mce_cap & MCG_LMCE_P);
2123 }
2124
2125 #define CPUID_MODEL_ID_SZ 48
2126
2127 /**
2128 * cpu_x86_fill_model_id:
2129 * Get CPUID model ID string from host CPU.
2130 *
2131 * @str should have at least CPUID_MODEL_ID_SZ bytes
2132 *
2133 * The function does NOT add a null terminator to the string
2134 * automatically.
2135 */
2136 static int cpu_x86_fill_model_id(char *str)
2137 {
2138 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2139 int i;
2140
2141 for (i = 0; i < 3; i++) {
2142 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2143 memcpy(str + i * 16 + 0, &eax, 4);
2144 memcpy(str + i * 16 + 4, &ebx, 4);
2145 memcpy(str + i * 16 + 8, &ecx, 4);
2146 memcpy(str + i * 16 + 12, &edx, 4);
2147 }
2148 return 0;
2149 }
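/*
 * Leaves 0x80000002..0x80000004 each return 16 bytes of the brand string
 * (EAX, EBX, ECX, EDX in that order, little-endian), giving the 48-byte
 * model ID copied above. Callers such as max_x86_cpu_initfn() below pass
 * a zero-initialized buffer of CPUID_MODEL_ID_SZ + 1 bytes so the result
 * is still NUL-terminated.
 */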
2150
2151 static Property max_x86_cpu_properties[] = {
2152 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2153 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2154 DEFINE_PROP_END_OF_LIST()
2155 };
2156
2157 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2158 {
2159 DeviceClass *dc = DEVICE_CLASS(oc);
2160 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2161
2162 xcc->ordering = 9;
2163
2164 xcc->model_description =
2165 "Enables all features supported by the accelerator in the current host";
2166
2167 dc->props = max_x86_cpu_properties;
2168 }
2169
2170 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2171
2172 static void max_x86_cpu_initfn(Object *obj)
2173 {
2174 X86CPU *cpu = X86_CPU(obj);
2175 CPUX86State *env = &cpu->env;
2176 KVMState *s = kvm_state;
2177
2178 /* We can't fill the features array here because we don't know yet if
2179 * "migratable" is true or false.
2180 */
2181 cpu->max_features = true;
2182
2183 if (accel_uses_host_cpuid()) {
2184 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2185 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2186 int family, model, stepping;
2187 X86CPUDefinition host_cpudef = { };
2188 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2189
2190 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2191 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2192
2193 host_vendor_fms(vendor, &family, &model, &stepping);
2194
2195 cpu_x86_fill_model_id(model_id);
2196
2197 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2198 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2199 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2200 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2201 &error_abort);
2202 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2203 &error_abort);
2204
2205 if (kvm_enabled()) {
2206 env->cpuid_min_level =
2207 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2208 env->cpuid_min_xlevel =
2209 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2210 env->cpuid_min_xlevel2 =
2211 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2212 } else {
2213 env->cpuid_min_level =
2214 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2215 env->cpuid_min_xlevel =
2216 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2217 env->cpuid_min_xlevel2 =
2218 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2219 }
2220
2221 if (lmce_supported()) {
2222 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2223 }
2224 } else {
2225 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2226 "vendor", &error_abort);
2227 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2228 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2229 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2230 object_property_set_str(OBJECT(cpu),
2231 "QEMU TCG CPU version " QEMU_HW_VERSION,
2232 "model-id", &error_abort);
2233 }
2234
2235 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2236 }
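/*
 * Illustrative usage (an assumption, not taken from this file): with KVM
 * or HVF, "-cpu max" mirrors the host's vendor/family/model/stepping and
 * model-id as set above; under TCG it falls back to the generic identity
 * from the else branch. "-cpu max,migratable=off" would additionally allow
 * features not marked as migratable, via the "migratable" property defined
 * in max_x86_cpu_properties.
 */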
2237
2238 static const TypeInfo max_x86_cpu_type_info = {
2239 .name = X86_CPU_TYPE_NAME("max"),
2240 .parent = TYPE_X86_CPU,
2241 .instance_init = max_x86_cpu_initfn,
2242 .class_init = max_x86_cpu_class_init,
2243 };
2244
2245 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2246 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2247 {
2248 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2249
2250 xcc->host_cpuid_required = true;
2251 xcc->ordering = 8;
2252
2253 if (kvm_enabled()) {
2254 xcc->model_description =
2255 "KVM processor with all supported host features ";
2256 } else if (hvf_enabled()) {
2257 xcc->model_description =
2258 "HVF processor with all supported host features ";
2259 }
2260 }
2261
2262 static const TypeInfo host_x86_cpu_type_info = {
2263 .name = X86_CPU_TYPE_NAME("host"),
2264 .parent = X86_CPU_TYPE_NAME("max"),
2265 .class_init = host_x86_cpu_class_init,
2266 };
2267
2268 #endif
2269
2270 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2271 {
2272 FeatureWordInfo *f = &feature_word_info[w];
2273 int i;
2274
2275 for (i = 0; i < 32; ++i) {
2276 if ((1UL << i) & mask) {
2277 const char *reg = get_register_name_32(f->cpuid_reg);
2278 assert(reg);
2279 warn_report("%s doesn't support requested feature: "
2280 "CPUID.%02XH:%s%s%s [bit %d]",
2281 accel_uses_host_cpuid() ? "host" : "TCG",
2282 f->cpuid_eax, reg,
2283 f->feat_names[i] ? "." : "",
2284 f->feat_names[i] ? f->feat_names[i] : "", i);
2285 }
2286 }
2287 }
2288
2289 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2290 const char *name, void *opaque,
2291 Error **errp)
2292 {
2293 X86CPU *cpu = X86_CPU(obj);
2294 CPUX86State *env = &cpu->env;
2295 int64_t value;
2296
2297 value = (env->cpuid_version >> 8) & 0xf;
2298 if (value == 0xf) {
2299 value += (env->cpuid_version >> 20) & 0xff;
2300 }
2301 visit_type_int(v, name, &value, errp);
2302 }
2303
2304 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2305 const char *name, void *opaque,
2306 Error **errp)
2307 {
2308 X86CPU *cpu = X86_CPU(obj);
2309 CPUX86State *env = &cpu->env;
2310 const int64_t min = 0;
2311 const int64_t max = 0xff + 0xf;
2312 Error *local_err = NULL;
2313 int64_t value;
2314
2315 visit_type_int(v, name, &value, &local_err);
2316 if (local_err) {
2317 error_propagate(errp, local_err);
2318 return;
2319 }
2320 if (value < min || value > max) {
2321 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2322 name ? name : "null", value, min, max);
2323 return;
2324 }
2325
2326 env->cpuid_version &= ~0xff00f00;
2327 if (value > 0x0f) {
2328 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2329 } else {
2330 env->cpuid_version |= value << 8;
2331 }
2332 }
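/*
 * Worked example: family 23 (EPYC). 23 > 0x0f, so the base family field
 * (bits 8..11) is set to 0xf and the extended family field (bits 20..27)
 * to 23 - 15 = 8. The getter above reverses this: 0xf + 8 = 23.
 */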
2333
2334 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2335 const char *name, void *opaque,
2336 Error **errp)
2337 {
2338 X86CPU *cpu = X86_CPU(obj);
2339 CPUX86State *env = &cpu->env;
2340 int64_t value;
2341
2342 value = (env->cpuid_version >> 4) & 0xf;
2343 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2344 visit_type_int(v, name, &value, errp);
2345 }
2346
2347 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2348 const char *name, void *opaque,
2349 Error **errp)
2350 {
2351 X86CPU *cpu = X86_CPU(obj);
2352 CPUX86State *env = &cpu->env;
2353 const int64_t min = 0;
2354 const int64_t max = 0xff;
2355 Error *local_err = NULL;
2356 int64_t value;
2357
2358 visit_type_int(v, name, &value, &local_err);
2359 if (local_err) {
2360 error_propagate(errp, local_err);
2361 return;
2362 }
2363 if (value < min || value > max) {
2364 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2365 name ? name : "null", value, min, max);
2366 return;
2367 }
2368
2369 env->cpuid_version &= ~0xf00f0;
2370 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2371 }
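/*
 * Worked example: model 61 (0x3d, Broadwell). The low nibble 0xd goes into
 * bits 4..7 and the high nibble 0x3 into the extended model field
 * (bits 16..19); the getter above reassembles 0x3d = 61.
 */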
2372
2373 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2374 const char *name, void *opaque,
2375 Error **errp)
2376 {
2377 X86CPU *cpu = X86_CPU(obj);
2378 CPUX86State *env = &cpu->env;
2379 int64_t value;
2380
2381 value = env->cpuid_version & 0xf;
2382 visit_type_int(v, name, &value, errp);
2383 }
2384
2385 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2386 const char *name, void *opaque,
2387 Error **errp)
2388 {
2389 X86CPU *cpu = X86_CPU(obj);
2390 CPUX86State *env = &cpu->env;
2391 const int64_t min = 0;
2392 const int64_t max = 0xf;
2393 Error *local_err = NULL;
2394 int64_t value;
2395
2396 visit_type_int(v, name, &value, &local_err);
2397 if (local_err) {
2398 error_propagate(errp, local_err);
2399 return;
2400 }
2401 if (value < min || value > max) {
2402 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2403 name ? name : "null", value, min, max);
2404 return;
2405 }
2406
2407 env->cpuid_version &= ~0xf;
2408 env->cpuid_version |= value & 0xf;
2409 }
2410
2411 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2412 {
2413 X86CPU *cpu = X86_CPU(obj);
2414 CPUX86State *env = &cpu->env;
2415 char *value;
2416
2417 value = g_malloc(CPUID_VENDOR_SZ + 1);
2418 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2419 env->cpuid_vendor3);
2420 return value;
2421 }
2422
2423 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2424 Error **errp)
2425 {
2426 X86CPU *cpu = X86_CPU(obj);
2427 CPUX86State *env = &cpu->env;
2428 int i;
2429
2430 if (strlen(value) != CPUID_VENDOR_SZ) {
2431 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2432 return;
2433 }
2434
2435 env->cpuid_vendor1 = 0;
2436 env->cpuid_vendor2 = 0;
2437 env->cpuid_vendor3 = 0;
2438 for (i = 0; i < 4; i++) {
2439 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
2440 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2441 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
2442 }
2443 }
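/*
 * Worked example: "GenuineIntel" is split into three little-endian words:
 * cpuid_vendor1 = "Genu", cpuid_vendor2 = "ineI", cpuid_vendor3 = "ntel",
 * which cpu_x86_cpuid() below returns in EBX, EDX and ECX for leaf 0.
 */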
2444
2445 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2446 {
2447 X86CPU *cpu = X86_CPU(obj);
2448 CPUX86State *env = &cpu->env;
2449 char *value;
2450 int i;
2451
2452 value = g_malloc(48 + 1);
2453 for (i = 0; i < 48; i++) {
2454 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
2455 }
2456 value[48] = '\0';
2457 return value;
2458 }
2459
2460 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2461 Error **errp)
2462 {
2463 X86CPU *cpu = X86_CPU(obj);
2464 CPUX86State *env = &cpu->env;
2465 int c, len, i;
2466
2467 if (model_id == NULL) {
2468 model_id = "";
2469 }
2470 len = strlen(model_id);
2471 memset(env->cpuid_model, 0, 48);
2472 for (i = 0; i < 48; i++) {
2473 if (i >= len) {
2474 c = '\0';
2475 } else {
2476 c = (uint8_t)model_id[i];
2477 }
2478 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
2479 }
2480 }
2481
2482 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2483 void *opaque, Error **errp)
2484 {
2485 X86CPU *cpu = X86_CPU(obj);
2486 int64_t value;
2487
2488 value = cpu->env.tsc_khz * 1000;
2489 visit_type_int(v, name, &value, errp);
2490 }
2491
2492 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2493 void *opaque, Error **errp)
2494 {
2495 X86CPU *cpu = X86_CPU(obj);
2496 const int64_t min = 0;
2497 const int64_t max = INT64_MAX;
2498 Error *local_err = NULL;
2499 int64_t value;
2500
2501 visit_type_int(v, name, &value, &local_err);
2502 if (local_err) {
2503 error_propagate(errp, local_err);
2504 return;
2505 }
2506 if (value < min || value > max) {
2507 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2508 name ? name : "null", value, min, max);
2509 return;
2510 }
2511
2512 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2513 }
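/*
 * Note: the property value is given in Hz but stored in kHz, so e.g.
 * tsc-frequency=2500000000 results in env->tsc_khz == 2500000.
 */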
2514
2515 /* Generic getter for "feature-words" and "filtered-features" properties */
2516 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2517 const char *name, void *opaque,
2518 Error **errp)
2519 {
2520 uint32_t *array = (uint32_t *)opaque;
2521 FeatureWord w;
2522 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2523 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2524 X86CPUFeatureWordInfoList *list = NULL;
2525
2526 for (w = 0; w < FEATURE_WORDS; w++) {
2527 FeatureWordInfo *wi = &feature_word_info[w];
2528 X86CPUFeatureWordInfo *qwi = &word_infos[w];
2529 qwi->cpuid_input_eax = wi->cpuid_eax;
2530 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2531 qwi->cpuid_input_ecx = wi->cpuid_ecx;
2532 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2533 qwi->features = array[w];
2534
2535 /* List will be in reverse order, but order shouldn't matter */
2536 list_entries[w].next = list;
2537 list_entries[w].value = &word_infos[w];
2538 list = &list_entries[w];
2539 }
2540
2541 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
2542 }
2543
2544 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2545 void *opaque, Error **errp)
2546 {
2547 X86CPU *cpu = X86_CPU(obj);
2548 int64_t value = cpu->hyperv_spinlock_attempts;
2549
2550 visit_type_int(v, name, &value, errp);
2551 }
2552
2553 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2554 void *opaque, Error **errp)
2555 {
2556 const int64_t min = 0xFFF;
2557 const int64_t max = UINT_MAX;
2558 X86CPU *cpu = X86_CPU(obj);
2559 Error *err = NULL;
2560 int64_t value;
2561
2562 visit_type_int(v, name, &value, &err);
2563 if (err) {
2564 error_propagate(errp, err);
2565 return;
2566 }
2567
2568 if (value < min || value > max) {
2569 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2570 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2571 object_get_typename(obj), name ? name : "null",
2572 value, min, max);
2573 return;
2574 }
2575 cpu->hyperv_spinlock_attempts = value;
2576 }
2577
2578 static const PropertyInfo qdev_prop_spinlocks = {
2579 .name = "int",
2580 .get = x86_get_hv_spinlocks,
2581 .set = x86_set_hv_spinlocks,
2582 };
2583
2584 /* Convert all '_' in a feature string option name to '-', to make the
2585 * feature name conform to the QOM property naming rule, which uses '-'
2586 * instead of '_'.
2587 */
2587 static inline void feat2prop(char *s)
2588 {
2589 while ((s = strchr(s, '_'))) {
2590 *s = '-';
2591 }
2592 }
2593
2594 /* Return the feature property name for a feature flag bit */
2595 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2596 {
2597 /* XSAVE components are automatically enabled by other features,
2598 * so return the original feature name instead
2599 */
2600 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2601 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2602
2603 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2604 x86_ext_save_areas[comp].bits) {
2605 w = x86_ext_save_areas[comp].feature;
2606 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2607 }
2608 }
2609
2610 assert(bitnr < 32);
2611 assert(w < FEATURE_WORDS);
2612 return feature_word_info[w].feat_names[bitnr];
2613 }
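/*
 * For example, a bit in FEAT_XSAVE_COMP_LO/HI that corresponds to an entry
 * in x86_ext_save_areas is reported under the name of the feature flag that
 * enables that save component (the AVX state component would be reported as
 * "avx", assuming the mapping table defined earlier in this file).
 */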
2614
2615 /* Compatibility hack to maintain the legacy +-feat semantics,
2616 * where +-feat overrides any feature set by
2617 * feat=on|feat even if the latter is parsed after +-feat
2618 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2619 */
2620 static GList *plus_features, *minus_features;
2621
2622 static gint compare_string(gconstpointer a, gconstpointer b)
2623 {
2624 return g_strcmp0(a, b);
2625 }
2626
2627 /* Parse "+feature,-feature,feature=foo" CPU feature string
2628 */
2629 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2630 Error **errp)
2631 {
2632 char *featurestr; /* Single 'key=value" string being parsed */
2633 static bool cpu_globals_initialized;
2634 bool ambiguous = false;
2635
2636 if (cpu_globals_initialized) {
2637 return;
2638 }
2639 cpu_globals_initialized = true;
2640
2641 if (!features) {
2642 return;
2643 }
2644
2645 for (featurestr = strtok(features, ",");
2646 featurestr;
2647 featurestr = strtok(NULL, ",")) {
2648 const char *name;
2649 const char *val = NULL;
2650 char *eq = NULL;
2651 char num[32];
2652 GlobalProperty *prop;
2653
2654 /* Compatibility syntax: */
2655 if (featurestr[0] == '+') {
2656 plus_features = g_list_append(plus_features,
2657 g_strdup(featurestr + 1));
2658 continue;
2659 } else if (featurestr[0] == '-') {
2660 minus_features = g_list_append(minus_features,
2661 g_strdup(featurestr + 1));
2662 continue;
2663 }
2664
2665 eq = strchr(featurestr, '=');
2666 if (eq) {
2667 *eq++ = 0;
2668 val = eq;
2669 } else {
2670 val = "on";
2671 }
2672
2673 feat2prop(featurestr);
2674 name = featurestr;
2675
2676 if (g_list_find_custom(plus_features, name, compare_string)) {
2677 warn_report("Ambiguous CPU model string. "
2678 "Don't mix both \"+%s\" and \"%s=%s\"",
2679 name, name, val);
2680 ambiguous = true;
2681 }
2682 if (g_list_find_custom(minus_features, name, compare_string)) {
2683 warn_report("Ambiguous CPU model string. "
2684 "Don't mix both \"-%s\" and \"%s=%s\"",
2685 name, name, val);
2686 ambiguous = true;
2687 }
2688
2689 /* Special case: */
2690 if (!strcmp(name, "tsc-freq")) {
2691 int ret;
2692 uint64_t tsc_freq;
2693
2694 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
2695 if (ret < 0 || tsc_freq > INT64_MAX) {
2696 error_setg(errp, "bad numerical value %s", val);
2697 return;
2698 }
2699 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2700 val = num;
2701 name = "tsc-frequency";
2702 }
2703
2704 prop = g_new0(typeof(*prop), 1);
2705 prop->driver = typename;
2706 prop->property = g_strdup(name);
2707 prop->value = g_strdup(val);
2708 prop->errp = &error_fatal;
2709 qdev_prop_register_global(prop);
2710 }
2711
2712 if (ambiguous) {
2713 warn_report("Compatibility of ambiguous CPU model "
2714 "strings won't be kept on future QEMU versions");
2715 }
2716 }
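/*
 * Example (hypothetical command line): "-cpu Broadwell,+smap,-rtm,tsc-freq=2.5G"
 * puts "smap" on plus_features, "rtm" on minus_features, and registers a
 * "tsc-frequency=2500000000" global for the Broadwell type
 * (qemu_strtosz_metric treats the "G" suffix as 10^9). Mixing "+smap" with
 * a later "smap=off" in the same string would trigger the ambiguity warning
 * above.
 */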
2717
2718 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2719 static int x86_cpu_filter_features(X86CPU *cpu);
2720
2721 /* Check for missing features that may prevent the CPU class from
2722 * running using the current machine and accelerator.
2723 */
2724 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2725 strList **missing_feats)
2726 {
2727 X86CPU *xc;
2728 FeatureWord w;
2729 Error *err = NULL;
2730 strList **next = missing_feats;
2731
2732 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
2733 strList *new = g_new0(strList, 1);
2734 new->value = g_strdup("kvm");
2735 *missing_feats = new;
2736 return;
2737 }
2738
2739 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2740
2741 x86_cpu_expand_features(xc, &err);
2742 if (err) {
2743 /* Errors at x86_cpu_expand_features should never happen,
2744 * but in case they do, just report the model as not
2745 * runnable at all using the "type" property.
2746 */
2747 strList *new = g_new0(strList, 1);
2748 new->value = g_strdup("type");
2749 *next = new;
2750 next = &new->next;
2751 }
2752
2753 x86_cpu_filter_features(xc);
2754
2755 for (w = 0; w < FEATURE_WORDS; w++) {
2756 uint32_t filtered = xc->filtered_features[w];
2757 int i;
2758 for (i = 0; i < 32; i++) {
2759 if (filtered & (1UL << i)) {
2760 strList *new = g_new0(strList, 1);
2761 new->value = g_strdup(x86_cpu_feature_name(w, i));
2762 *next = new;
2763 next = &new->next;
2764 }
2765 }
2766 }
2767
2768 object_unref(OBJECT(xc));
2769 }
2770
2771 /* Print all cpuid feature names in featureset
2772 */
2773 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2774 {
2775 int bit;
2776 bool first = true;
2777
2778 for (bit = 0; bit < 32; bit++) {
2779 if (featureset[bit]) {
2780 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2781 first = false;
2782 }
2783 }
2784 }
2785
2786 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2787 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2788 {
2789 ObjectClass *class_a = (ObjectClass *)a;
2790 ObjectClass *class_b = (ObjectClass *)b;
2791 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2792 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2793 const char *name_a, *name_b;
2794
2795 if (cc_a->ordering != cc_b->ordering) {
2796 return cc_a->ordering - cc_b->ordering;
2797 } else {
2798 name_a = object_class_get_name(class_a);
2799 name_b = object_class_get_name(class_b);
2800 return strcmp(name_a, name_b);
2801 }
2802 }
2803
2804 static GSList *get_sorted_cpu_model_list(void)
2805 {
2806 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2807 list = g_slist_sort(list, x86_cpu_list_compare);
2808 return list;
2809 }
2810
2811 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2812 {
2813 ObjectClass *oc = data;
2814 X86CPUClass *cc = X86_CPU_CLASS(oc);
2815 CPUListState *s = user_data;
2816 char *name = x86_cpu_class_get_model_name(cc);
2817 const char *desc = cc->model_description;
2818 if (!desc && cc->cpu_def) {
2819 desc = cc->cpu_def->model_id;
2820 }
2821
2822 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2823 name, desc);
2824 g_free(name);
2825 }
2826
2827 /* list available CPU models and flags */
2828 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2829 {
2830 int i;
2831 CPUListState s = {
2832 .file = f,
2833 .cpu_fprintf = cpu_fprintf,
2834 };
2835 GSList *list;
2836
2837 (*cpu_fprintf)(f, "Available CPUs:\n");
2838 list = get_sorted_cpu_model_list();
2839 g_slist_foreach(list, x86_cpu_list_entry, &s);
2840 g_slist_free(list);
2841
2842 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2843 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2844 FeatureWordInfo *fw = &feature_word_info[i];
2845
2846 (*cpu_fprintf)(f, " ");
2847 listflags(f, cpu_fprintf, fw->feat_names);
2848 (*cpu_fprintf)(f, "\n");
2849 }
2850 }
2851
2852 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2853 {
2854 ObjectClass *oc = data;
2855 X86CPUClass *cc = X86_CPU_CLASS(oc);
2856 CpuDefinitionInfoList **cpu_list = user_data;
2857 CpuDefinitionInfoList *entry;
2858 CpuDefinitionInfo *info;
2859
2860 info = g_malloc0(sizeof(*info));
2861 info->name = x86_cpu_class_get_model_name(cc);
2862 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2863 info->has_unavailable_features = true;
2864 info->q_typename = g_strdup(object_class_get_name(oc));
2865 info->migration_safe = cc->migration_safe;
2866 info->has_migration_safe = true;
2867 info->q_static = cc->static_model;
2868
2869 entry = g_malloc0(sizeof(*entry));
2870 entry->value = info;
2871 entry->next = *cpu_list;
2872 *cpu_list = entry;
2873 }
2874
2875 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2876 {
2877 CpuDefinitionInfoList *cpu_list = NULL;
2878 GSList *list = get_sorted_cpu_model_list();
2879 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2880 g_slist_free(list);
2881 return cpu_list;
2882 }
2883
2884 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2885 bool migratable_only)
2886 {
2887 FeatureWordInfo *wi = &feature_word_info[w];
2888 uint32_t r;
2889
2890 if (kvm_enabled()) {
2891 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2892 wi->cpuid_ecx,
2893 wi->cpuid_reg);
2894 } else if (hvf_enabled()) {
2895 r = hvf_get_supported_cpuid(wi->cpuid_eax,
2896 wi->cpuid_ecx,
2897 wi->cpuid_reg);
2898 } else if (tcg_enabled()) {
2899 r = wi->tcg_features;
2900 } else {
2901 return ~0;
2902 }
2903 if (migratable_only) {
2904 r &= x86_cpu_get_migratable_flags(w);
2905 }
2906 return r;
2907 }
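/*
 * When migratable_only is true, the accelerator-reported bits are further
 * masked by x86_cpu_get_migratable_flags(), which is presumably how the
 * "migratable" property of the "max"/"host" models keeps non-migratable
 * features from being enabled by default.
 */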
2908
2909 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2910 {
2911 FeatureWord w;
2912
2913 for (w = 0; w < FEATURE_WORDS; w++) {
2914 report_unavailable_features(w, cpu->filtered_features[w]);
2915 }
2916 }
2917
2918 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2919 {
2920 PropValue *pv;
2921 for (pv = props; pv->prop; pv++) {
2922 if (!pv->value) {
2923 continue;
2924 }
2925 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2926 &error_abort);
2927 }
2928 }
2929
2930 /* Load data from X86CPUDefinition into an X86CPU object
2931 */
2932 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2933 {
2934 CPUX86State *env = &cpu->env;
2935 const char *vendor;
2936 char host_vendor[CPUID_VENDOR_SZ + 1];
2937 FeatureWord w;
2938
2939 /* NOTE: any property set by this function should be returned by
2940 * x86_cpu_static_props(), so static expansion of
2941 * query-cpu-model-expansion is always complete.
2942 */
2943
2944 /* CPU models only set _minimum_ values for level/xlevel: */
2945 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
2946 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2947
2948 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2949 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2950 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2951 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2952 for (w = 0; w < FEATURE_WORDS; w++) {
2953 env->features[w] = def->features[w];
2954 }
2955
2956 /* Special cases not set in the X86CPUDefinition structs: */
2957 /* TODO: in-kernel irqchip for hvf */
2958 if (kvm_enabled()) {
2959 if (!kvm_irqchip_in_kernel()) {
2960 x86_cpu_change_kvm_default("x2apic", "off");
2961 }
2962
2963 x86_cpu_apply_props(cpu, kvm_default_props);
2964 } else if (tcg_enabled()) {
2965 x86_cpu_apply_props(cpu, tcg_default_props);
2966 }
2967
2968 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2969
2970 /* sysenter isn't supported in compatibility mode on AMD,
2971 * syscall isn't supported in compatibility mode on Intel.
2972 * Normally we advertise the actual CPU vendor, but you can
2973 * override this using the 'vendor' property if you want to use
2974 * KVM's sysenter/syscall emulation in compatibility mode and
2975 * when doing cross-vendor migration.
2976 */
2977 vendor = def->vendor;
2978 if (accel_uses_host_cpuid()) {
2979 uint32_t ebx = 0, ecx = 0, edx = 0;
2980 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2981 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2982 vendor = host_vendor;
2983 }
2984
2985 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2986
2987 }
2988
2989 /* Return a QDict containing keys for all properties that can be included
2990 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2991 * must be included in the dictionary.
2992 */
2993 static QDict *x86_cpu_static_props(void)
2994 {
2995 FeatureWord w;
2996 int i;
2997 static const char *props[] = {
2998 "min-level",
2999 "min-xlevel",
3000 "family",
3001 "model",
3002 "stepping",
3003 "model-id",
3004 "vendor",
3005 "lmce",
3006 NULL,
3007 };
3008 static QDict *d;
3009
3010 if (d) {
3011 return d;
3012 }
3013
3014 d = qdict_new();
3015 for (i = 0; props[i]; i++) {
3016 qdict_put_null(d, props[i]);
3017 }
3018
3019 for (w = 0; w < FEATURE_WORDS; w++) {
3020 FeatureWordInfo *fi = &feature_word_info[w];
3021 int bit;
3022 for (bit = 0; bit < 32; bit++) {
3023 if (!fi->feat_names[bit]) {
3024 continue;
3025 }
3026 qdict_put_null(d, fi->feat_names[bit]);
3027 }
3028 }
3029
3030 return d;
3031 }
3032
3033 /* Add an entry to @props dict, with the value for property. */
3034 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3035 {
3036 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3037 &error_abort);
3038
3039 qdict_put_obj(props, prop, value);
3040 }
3041
3042 /* Convert CPU model data from X86CPU object to a property dictionary
3043 * that can recreate exactly the same CPU model.
3044 */
3045 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3046 {
3047 QDict *sprops = x86_cpu_static_props();
3048 const QDictEntry *e;
3049
3050 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3051 const char *prop = qdict_entry_key(e);
3052 x86_cpu_expand_prop(cpu, props, prop);
3053 }
3054 }
3055
3056 /* Convert CPU model data from X86CPU object to a property dictionary
3057 * that can recreate exactly the same CPU model, including every
3058 * writeable QOM property.
3059 */
3060 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3061 {
3062 ObjectPropertyIterator iter;
3063 ObjectProperty *prop;
3064
3065 object_property_iter_init(&iter, OBJECT(cpu));
3066 while ((prop = object_property_iter_next(&iter))) {
3067 /* skip read-only or write-only properties */
3068 if (!prop->get || !prop->set) {
3069 continue;
3070 }
3071
3072 /* "hotplugged" is the only property that is configurable
3073 * on the command-line but will be set differently on CPUs
3074 * created using "-cpu ... -smp ..." and by CPUs created
3075 * on the fly by x86_cpu_from_model() for querying. Skip it.
3076 */
3077 if (!strcmp(prop->name, "hotplugged")) {
3078 continue;
3079 }
3080 x86_cpu_expand_prop(cpu, props, prop->name);
3081 }
3082 }
3083
3084 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3085 {
3086 const QDictEntry *prop;
3087 Error *err = NULL;
3088
3089 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3090 object_property_set_qobject(obj, qdict_entry_value(prop),
3091 qdict_entry_key(prop), &err);
3092 if (err) {
3093 break;
3094 }
3095 }
3096
3097 error_propagate(errp, err);
3098 }
3099
3100 /* Create X86CPU object according to model+props specification */
3101 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3102 {
3103 X86CPU *xc = NULL;
3104 X86CPUClass *xcc;
3105 Error *err = NULL;
3106
3107 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3108 if (xcc == NULL) {
3109 error_setg(&err, "CPU model '%s' not found", model);
3110 goto out;
3111 }
3112
3113 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3114 if (props) {
3115 object_apply_props(OBJECT(xc), props, &err);
3116 if (err) {
3117 goto out;
3118 }
3119 }
3120
3121 x86_cpu_expand_features(xc, &err);
3122 if (err) {
3123 goto out;
3124 }
3125
3126 out:
3127 if (err) {
3128 error_propagate(errp, err);
3129 object_unref(OBJECT(xc));
3130 xc = NULL;
3131 }
3132 return xc;
3133 }
3134
3135 CpuModelExpansionInfo *
3136 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3137 CpuModelInfo *model,
3138 Error **errp)
3139 {
3140 X86CPU *xc = NULL;
3141 Error *err = NULL;
3142 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3143 QDict *props = NULL;
3144 const char *base_name;
3145
3146 xc = x86_cpu_from_model(model->name,
3147 model->has_props ?
3148 qobject_to(QDict, model->props) :
3149 NULL, &err);
3150 if (err) {
3151 goto out;
3152 }
3153
3154 props = qdict_new();
3155
3156 switch (type) {
3157 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3158 /* Static expansion will be based on "base" only */
3159 base_name = "base";
3160 x86_cpu_to_dict(xc, props);
3161 break;
3162 case CPU_MODEL_EXPANSION_TYPE_FULL:
3163 /* As we don't return every single property, full expansion needs
3164 * to keep the original model name+props, and add extra
3165 * properties on top of that.
3166 */
3167 base_name = model->name;
3168 x86_cpu_to_dict_full(xc, props);
3169 break;
3170 default:
3171 error_setg(&err, "Unsupportted expansion type");
3172 goto out;
3173 }
3174
3175 if (!props) {
3176 props = qdict_new();
3177 }
3178 x86_cpu_to_dict(xc, props);
3179
3180 ret->model = g_new0(CpuModelInfo, 1);
3181 ret->model->name = g_strdup(base_name);
3182 ret->model->props = QOBJECT(props);
3183 ret->model->has_props = true;
3184
3185 out:
3186 object_unref(OBJECT(xc));
3187 if (err) {
3188 error_propagate(errp, err);
3189 qapi_free_CpuModelExpansionInfo(ret);
3190 ret = NULL;
3191 }
3192 return ret;
3193 }
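/*
 * Illustrative QMP usage (a sketch, not taken from this file):
 *
 *   { "execute": "query-cpu-model-expansion",
 *     "arguments": { "type": "static", "model": { "name": "EPYC" } } }
 *
 * Static expansion reports the result against the "base" model plus the
 * expanded property dictionary, as selected above.
 */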
3194
3195 static gchar *x86_gdb_arch_name(CPUState *cs)
3196 {
3197 #ifdef TARGET_X86_64
3198 return g_strdup("i386:x86-64");
3199 #else
3200 return g_strdup("i386");
3201 #endif
3202 }
3203
3204 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3205 {
3206 X86CPUDefinition *cpudef = data;
3207 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3208
3209 xcc->cpu_def = cpudef;
3210 xcc->migration_safe = true;
3211 }
3212
3213 static void x86_register_cpudef_type(X86CPUDefinition *def)
3214 {
3215 char *typename = x86_cpu_type_name(def->name);
3216 TypeInfo ti = {
3217 .name = typename,
3218 .parent = TYPE_X86_CPU,
3219 .class_init = x86_cpu_cpudef_class_init,
3220 .class_data = def,
3221 };
3222
3223 /* AMD aliases are handled at runtime based on CPUID vendor, so
3224 * they shouldn't be set on the CPU model table.
3225 */
3226 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3227 /* catch mistakes instead of silently truncating model_id when too long */
3228 assert(def->model_id && strlen(def->model_id) <= 48);
3229
3230
3231 type_register(&ti);
3232 g_free(typename);
3233 }
3234
3235 #if !defined(CONFIG_USER_ONLY)
3236
3237 void cpu_clear_apic_feature(CPUX86State *env)
3238 {
3239 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3240 }
3241
3242 #endif /* !CONFIG_USER_ONLY */
3243
3244 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3245 uint32_t *eax, uint32_t *ebx,
3246 uint32_t *ecx, uint32_t *edx)
3247 {
3248 X86CPU *cpu = x86_env_get_cpu(env);
3249 CPUState *cs = CPU(cpu);
3250 uint32_t pkg_offset;
3251 uint32_t limit;
3252 uint32_t signature[3];
3253
3254 /* Calculate & apply limits for different index ranges */
3255 if (index >= 0xC0000000) {
3256 limit = env->cpuid_xlevel2;
3257 } else if (index >= 0x80000000) {
3258 limit = env->cpuid_xlevel;
3259 } else if (index >= 0x40000000) {
3260 limit = 0x40000001;
3261 } else {
3262 limit = env->cpuid_level;
3263 }
3264
3265 if (index > limit) {
3266 /* Intel documentation states that invalid EAX input will
3267 * return the same information as EAX=cpuid_level
3268 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3269 */
3270 index = env->cpuid_level;
3271 }
3272
3273 switch(index) {
3274 case 0:
3275 *eax = env->cpuid_level;
3276 *ebx = env->cpuid_vendor1;
3277 *edx = env->cpuid_vendor2;
3278 *ecx = env->cpuid_vendor3;
3279 break;
3280 case 1:
3281 *eax = env->cpuid_version;
3282 *ebx = (cpu->apic_id << 24) |
3283 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3284 *ecx = env->features[FEAT_1_ECX];
3285 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3286 *ecx |= CPUID_EXT_OSXSAVE;
3287 }
3288 *edx = env->features[FEAT_1_EDX];
3289 if (cs->nr_cores * cs->nr_threads > 1) {
3290 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3291 *edx |= CPUID_HT;
3292 }
3293 break;
3294 case 2:
3295 /* cache info: needed for Pentium Pro compatibility */
3296 if (cpu->cache_info_passthrough) {
3297 host_cpuid(index, 0, eax, ebx, ecx, edx);
3298 break;
3299 }
3300 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3301 *ebx = 0;
3302 if (!cpu->enable_l3_cache) {
3303 *ecx = 0;
3304 } else {
3305 *ecx = L3_N_DESCRIPTOR;
3306 }
3307 *edx = (L1D_DESCRIPTOR << 16) | \
3308 (L1I_DESCRIPTOR << 8) | \
3309 (L2_DESCRIPTOR);
3310 break;
3311 case 4:
3312 /* cache info: needed for Core compatibility */
3313 if (cpu->cache_info_passthrough) {
3314 host_cpuid(index, count, eax, ebx, ecx, edx);
3315 *eax &= ~0xFC000000;
3316 } else {
3317 *eax = 0;
3318 switch (count) {
3319 case 0: /* L1 dcache info */
3320 *eax |= CPUID_4_TYPE_DCACHE | \
3321 CPUID_4_LEVEL(1) | \
3322 CPUID_4_SELF_INIT_LEVEL;
3323 *ebx = (L1D_LINE_SIZE - 1) | \
3324 ((L1D_PARTITIONS - 1) << 12) | \
3325 ((L1D_ASSOCIATIVITY - 1) << 22);
3326 *ecx = L1D_SETS - 1;
3327 *edx = CPUID_4_NO_INVD_SHARING;
3328 break;
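/*
 * Worked example for the encoding above, using the L1D_* constants defined
 * earlier in this file (64-byte lines, 8 ways, 64 sets, 1 partition):
 * EBX = (64 - 1) | ((1 - 1) << 12) | ((8 - 1) << 22) = 0x01c0003f,
 * ECX = 63, and the decoded size is 64 * 1 * 8 * 64 = 32 KiB.
 */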
3329 case 1: /* L1 icache info */
3330 *eax |= CPUID_4_TYPE_ICACHE | \
3331 CPUID_4_LEVEL(1) | \
3332 CPUID_4_SELF_INIT_LEVEL;
3333 *ebx = (L1I_LINE_SIZE - 1) | \
3334 ((L1I_PARTITIONS - 1) << 12) | \
3335 ((L1I_ASSOCIATIVITY - 1) << 22);
3336 *ecx = L1I_SETS - 1;
3337 *edx = CPUID_4_NO_INVD_SHARING;
3338 break;
3339 case 2: /* L2 cache info */
3340 *eax |= CPUID_4_TYPE_UNIFIED | \
3341 CPUID_4_LEVEL(2) | \
3342 CPUID_4_SELF_INIT_LEVEL;
3343 if (cs->nr_threads > 1) {
3344 *eax |= (cs->nr_threads - 1) << 14;
3345 }
3346 *ebx = (L2_LINE_SIZE - 1) | \
3347 ((L2_PARTITIONS - 1) << 12) | \
3348 ((L2_ASSOCIATIVITY - 1) << 22);
3349 *ecx = L2_SETS - 1;
3350 *edx = CPUID_4_NO_INVD_SHARING;
3351 break;
3352 case 3: /* L3 cache info */
3353 if (!cpu->enable_l3_cache) {
3354 *eax = 0;
3355 *ebx = 0;
3356 *ecx = 0;
3357 *edx = 0;
3358 break;
3359 }
3360 *eax |= CPUID_4_TYPE_UNIFIED | \
3361 CPUID_4_LEVEL(3) | \
3362 CPUID_4_SELF_INIT_LEVEL;
3363 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3364 *eax |= ((1 << pkg_offset) - 1) << 14;
3365 *ebx = (L3_N_LINE_SIZE - 1) | \
3366 ((L3_N_PARTITIONS - 1) << 12) | \
3367 ((L3_N_ASSOCIATIVITY - 1) << 22);
3368 *ecx = L3_N_SETS - 1;
3369 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
3370 break;
3371 default: /* end of info */
3372 *eax = 0;
3373 *ebx = 0;
3374 *ecx = 0;
3375 *edx = 0;
3376 break;
3377 }
3378 }
3379
3380 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3381 if ((*eax & 31) && cs->nr_cores > 1) {
3382 *eax |= (cs->nr_cores - 1) << 26;
3383 }
3384 break;
3385 case 5:
3386 /* mwait info: needed for Core compatibility */
3387 *eax = 0; /* Smallest monitor-line size in bytes */
3388 *ebx = 0; /* Largest monitor-line size in bytes */
3389 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3390 *edx = 0;
3391 break;
3392 case 6:
3393 /* Thermal and Power Leaf */
3394 *eax = env->features[FEAT_6_EAX];
3395 *ebx = 0;
3396 *ecx = 0;
3397 *edx = 0;
3398 break;
3399 case 7:
3400 /* Structured Extended Feature Flags Enumeration Leaf */
3401 if (count == 0) {
3402 *eax = 0; /* Maximum ECX value for sub-leaves */
3403 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3404 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3405 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3406 *ecx |= CPUID_7_0_ECX_OSPKE;
3407 }
3408 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3409 } else {
3410 *eax = 0;
3411 *ebx = 0;
3412 *ecx = 0;
3413 *edx = 0;
3414 }
3415 break;
3416 case 9:
3417 /* Direct Cache Access Information Leaf */
3418 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3419 *ebx = 0;
3420 *ecx = 0;
3421 *edx = 0;
3422 break;
3423 case 0xA:
3424 /* Architectural Performance Monitoring Leaf */
3425 if (kvm_enabled() && cpu->enable_pmu) {
3426 KVMState *s = cs->kvm_state;
3427
3428 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3429 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3430 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3431 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3432 } else if (hvf_enabled() && cpu->enable_pmu) {
3433 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3434 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3435 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3436 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3437 } else {
3438 *eax = 0;
3439 *ebx = 0;
3440 *ecx = 0;
3441 *edx = 0;
3442 }
3443 break;
3444 case 0xB:
3445 /* Extended Topology Enumeration Leaf */
3446 if (!cpu->enable_cpuid_0xb) {
3447 *eax = *ebx = *ecx = *edx = 0;
3448 break;
3449 }
3450
3451 *ecx = count & 0xff;
3452 *edx = cpu->apic_id;
3453
3454 switch (count) {
3455 case 0:
3456 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3457 *ebx = cs->nr_threads;
3458 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
3459 break;
3460 case 1:
3461 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3462 *ebx = cs->nr_cores * cs->nr_threads;
3463 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3464 break;
3465 default:
3466 *eax = 0;
3467 *ebx = 0;
3468 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3469 }
3470
3471 assert(!(*eax & ~0x1f));
3472 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
3473 break;
3474 case 0xD: {
3475 /* Processor Extended State */
3476 *eax = 0;
3477 *ebx = 0;
3478 *ecx = 0;
3479 *edx = 0;
3480 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3481 break;
3482 }
3483
3484 if (count == 0) {
3485 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3486 *eax = env->features[FEAT_XSAVE_COMP_LO];
3487 *edx = env->features[FEAT_XSAVE_COMP_HI];
3488 *ebx = *ecx;
3489 } else if (count == 1) {
3490 *eax = env->features[FEAT_XSAVE];
3491 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3492 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3493 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3494 *eax = esa->size;
3495 *ebx = esa->offset;
3496 }
3497 }
3498 break;
3499 }
3500 case 0x14: {
3501 /* Intel Processor Trace Enumeration */
3502 *eax = 0;
3503 *ebx = 0;
3504 *ecx = 0;
3505 *edx = 0;
3506 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
3507 !kvm_enabled()) {
3508 break;
3509 }
3510
3511 if (count == 0) {
3512 *eax = INTEL_PT_MAX_SUBLEAF;
3513 *ebx = INTEL_PT_MINIMAL_EBX;
3514 *ecx = INTEL_PT_MINIMAL_ECX;
3515 } else if (count == 1) {
3516 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
3517 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
3518 }
3519 break;
3520 }
3521 case 0x40000000:
3522 /*
3523 * The CPUID code in kvm_arch_init_vcpu() ignores values
3524 * set here, but we restrict this leaf to TCG nonetheless.
3525 */
3526 if (tcg_enabled() && cpu->expose_tcg) {
3527 memcpy(signature, "TCGTCGTCGTCG", 12);
3528 *eax = 0x40000001;
3529 *ebx = signature[0];
3530 *ecx = signature[1];
3531 *edx = signature[2];
3532 } else {
3533 *eax = 0;
3534 *ebx = 0;
3535 *ecx = 0;
3536 *edx = 0;
3537 }
3538 break;
3539 case 0x40000001:
3540 *eax = 0;
3541 *ebx = 0;
3542 *ecx = 0;
3543 *edx = 0;
3544 break;
3545 case 0x80000000:
3546 *eax = env->cpuid_xlevel;
3547 *ebx = env->cpuid_vendor1;
3548 *edx = env->cpuid_vendor2;
3549 *ecx = env->cpuid_vendor3;
3550 break;
3551 case 0x80000001:
3552 *eax = env->cpuid_version;
3553 *ebx = 0;
3554 *ecx = env->features[FEAT_8000_0001_ECX];
3555 *edx = env->features[FEAT_8000_0001_EDX];
3556
3557 /* The Linux kernel checks for the CMPLegacy bit and
3558 * discards multiple thread information if it is set.
3559 * So don't set it here for Intel to make Linux guests happy.
3560 */
3561 if (cs->nr_cores * cs->nr_threads > 1) {
3562 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3563 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3564 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3565 *ecx |= 1 << 1; /* CmpLegacy bit */
3566 }
3567 }
3568 break;
3569 case 0x80000002:
3570 case 0x80000003:
3571 case 0x80000004:
3572 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3573 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3574 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3575 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3576 break;
3577 case 0x80000005:
3578 /* cache info (L1 cache) */
3579 if (cpu->cache_info_passthrough) {
3580 host_cpuid(index, 0, eax, ebx, ecx, edx);
3581 break;
3582 }
3583 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3584 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3585 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3586 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3587 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
3588 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
3589 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
3590 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
3591 break;
3592 case 0x80000006:
3593 /* cache info (L2 cache) */
3594 if (cpu->cache_info_passthrough) {
3595 host_cpuid(index, 0, eax, ebx, ecx, edx);
3596 break;
3597 }
3598 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3599 (L2_DTLB_2M_ENTRIES << 16) | \
3600 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3601 (L2_ITLB_2M_ENTRIES);
3602 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3603 (L2_DTLB_4K_ENTRIES << 16) | \
3604 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3605 (L2_ITLB_4K_ENTRIES);
3606 *ecx = (L2_SIZE_KB_AMD << 16) | \
3607 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
3608 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
3609 if (!cpu->enable_l3_cache) {
3610 *edx = ((L3_SIZE_KB / 512) << 18) | \
3611 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
3612 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
3613 } else {
3614 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
3615 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
3616 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
3617 }
3618 break;
3619 case 0x80000007:
3620 *eax = 0;
3621 *ebx = 0;
3622 *ecx = 0;
3623 *edx = env->features[FEAT_8000_0007_EDX];
3624 break;
3625 case 0x80000008:
3626 /* virtual & phys address size in low 2 bytes. */
3627 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3628 /* 64 bit processor */
3629 *eax = cpu->phys_bits; /* configurable physical bits */
3630 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3631 *eax |= 0x00003900; /* 57 bits virtual */
3632 } else {
3633 *eax |= 0x00003000; /* 48 bits virtual */
3634 }
3635 } else {
3636 *eax = cpu->phys_bits;
3637 }
3638 *ebx = env->features[FEAT_8000_0008_EBX];
3639 *ecx = 0;
3640 *edx = 0;
3641 if (cs->nr_cores * cs->nr_threads > 1) {
3642 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
3643 }
3644 break;
3645 case 0x8000000A:
3646 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3647 *eax = 0x00000001; /* SVM Revision */
3648 *ebx = 0x00000010; /* nr of ASIDs */
3649 *ecx = 0;
3650 *edx = env->features[FEAT_SVM]; /* optional features */
3651 } else {
3652 *eax = 0;
3653 *ebx = 0;
3654 *ecx = 0;
3655 *edx = 0;
3656 }
3657 break;
3658 case 0xC0000000:
3659 *eax = env->cpuid_xlevel2;
3660 *ebx = 0;
3661 *ecx = 0;
3662 *edx = 0;
3663 break;
3664 case 0xC0000001:
3665 /* Support for VIA CPU's CPUID instruction */
3666 *eax = env->cpuid_version;
3667 *ebx = 0;
3668 *ecx = 0;
3669 *edx = env->features[FEAT_C000_0001_EDX];
3670 break;
3671 case 0xC0000002:
3672 case 0xC0000003:
3673 case 0xC0000004:
3674 /* Reserved for future use; currently filled with zero */
3675 *eax = 0;
3676 *ebx = 0;
3677 *ecx = 0;
3678 *edx = 0;
3679 break;
3680 case 0x8000001F:
3681 *eax = sev_enabled() ? 0x2 : 0;
3682 *ebx = sev_get_cbit_position();
3683 *ebx |= sev_get_reduced_phys_bits() << 6;
3684 *ecx = 0;
3685 *edx = 0;
3686 break;
3687 default:
3688 /* reserved values: zero */
3689 *eax = 0;
3690 *ebx = 0;
3691 *ecx = 0;
3692 *edx = 0;
3693 break;
3694 }
3695 }
3696
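/*
 * Editorial sketch, not part of the original file: how a guest would
 * decode the leaf-4 EBX/ECX encoding produced by the cache-info cases
 * above.  Every field is stored minus one, and the cache size is the
 * product of the four fields.  The helper name is hypothetical.
 */
static inline uint32_t cpuid4_cache_size_sketch(uint32_t ebx, uint32_t ecx)
{
    uint32_t line_size  = (ebx & 0xfff) + 1;          /* EBX bits 11..0  */
    uint32_t partitions = ((ebx >> 12) & 0x3ff) + 1;  /* EBX bits 21..12 */
    uint32_t ways       = ((ebx >> 22) & 0x3ff) + 1;  /* EBX bits 31..22 */
    uint32_t sets       = ecx + 1;                    /* whole ECX       */
    return line_size * partitions * ways * sets;      /* e.g. 32 KiB L1D */
}
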
3697 /* CPUClass::reset() */
3698 static void x86_cpu_reset(CPUState *s)
3699 {
3700 X86CPU *cpu = X86_CPU(s);
3701 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
3702 CPUX86State *env = &cpu->env;
3703 target_ulong cr4;
3704 uint64_t xcr0;
3705 int i;
3706
3707 xcc->parent_reset(s);
3708
3709 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
3710
3711 env->old_exception = -1;
3712
3713 /* init to reset state */
3714
3715 env->hflags2 |= HF2_GIF_MASK;
3716
3717 cpu_x86_update_cr0(env, 0x60000010);
3718 env->a20_mask = ~0x0;
3719 env->smbase = 0x30000;
3720 env->msr_smi_count = 0;
3721
3722 env->idt.limit = 0xffff;
3723 env->gdt.limit = 0xffff;
3724 env->ldt.limit = 0xffff;
3725 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
3726 env->tr.limit = 0xffff;
3727 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
3728
3729 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
3730 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
3731 DESC_R_MASK | DESC_A_MASK);
3732 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
3733 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3734 DESC_A_MASK);
3735 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
3736 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3737 DESC_A_MASK);
3738 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
3739 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3740 DESC_A_MASK);
3741 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
3742 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3743 DESC_A_MASK);
3744 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
3745 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3746 DESC_A_MASK);
3747
3748 env->eip = 0xfff0;
3749 env->regs[R_EDX] = env->cpuid_version;
3750
3751 env->eflags = 0x2;
3752
3753 /* FPU init */
3754 for (i = 0; i < 8; i++) {
3755 env->fptags[i] = 1;
3756 }
3757 cpu_set_fpuc(env, 0x37f);
3758
3759 env->mxcsr = 0x1f80;
3760 /* All units are in INIT state. */
3761 env->xstate_bv = 0;
3762
3763 env->pat = 0x0007040600070406ULL;
3764 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
3765
3766 memset(env->dr, 0, sizeof(env->dr));
3767 env->dr[6] = DR6_FIXED_1;
3768 env->dr[7] = DR7_FIXED_1;
3769 cpu_breakpoint_remove_all(s, BP_CPU);
3770 cpu_watchpoint_remove_all(s, BP_CPU);
3771
3772 cr4 = 0;
3773 xcr0 = XSTATE_FP_MASK;
3774
3775 #ifdef CONFIG_USER_ONLY
3776 /* Enable all the features for user-mode. */
3777 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
3778 xcr0 |= XSTATE_SSE_MASK;
3779 }
3780 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3781 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3782 if (env->features[esa->feature] & esa->bits) {
3783 xcr0 |= 1ull << i;
3784 }
3785 }
3786
3787 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
3788 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
3789 }
3790 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
3791 cr4 |= CR4_FSGSBASE_MASK;
3792 }
3793 #endif
3794
3795 env->xcr0 = xcr0;
3796 cpu_x86_update_cr4(env, cr4);
3797
3798 /*
3799 * SDM 11.11.5 requires:
3800 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3801 * - IA32_MTRR_PHYSMASKn.V = 0
3802 * All other bits are undefined. For simplification, zero it all.
3803 */
3804 env->mtrr_deftype = 0;
3805 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
3806 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
3807
3808 env->interrupt_injected = -1;
3809 env->exception_injected = -1;
3810 env->nmi_injected = false;
3811 #if !defined(CONFIG_USER_ONLY)
3812 /* We hard-wire the BSP to the first CPU. */
3813 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
3814
3815 s->halted = !cpu_is_bsp(cpu);
3816
3817 if (kvm_enabled()) {
3818 kvm_arch_reset_vcpu(cpu);
3819 }
3820 else if (hvf_enabled()) {
3821 hvf_reset_vcpu(s);
3822 }
3823 #endif
3824 }
3825
3826 #ifndef CONFIG_USER_ONLY
3827 bool cpu_is_bsp(X86CPU *cpu)
3828 {
3829 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3830 }
3831
3832 /* TODO: remove me, when reset over QOM tree is implemented */
3833 static void x86_cpu_machine_reset_cb(void *opaque)
3834 {
3835 X86CPU *cpu = opaque;
3836 cpu_reset(CPU(cpu));
3837 }
3838 #endif
3839
3840 static void mce_init(X86CPU *cpu)
3841 {
3842 CPUX86State *cenv = &cpu->env;
3843 unsigned int bank;
3844
3845 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3846 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3847 (CPUID_MCE | CPUID_MCA)) {
3848 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3849 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3850 cenv->mcg_ctl = ~(uint64_t)0;
3851 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3852 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3853 }
3854 }
3855 }
3856
3857 #ifndef CONFIG_USER_ONLY
3858 APICCommonClass *apic_get_class(void)
3859 {
3860 const char *apic_type = "apic";
3861
3862 /* TODO: in-kernel irqchip for hvf */
3863 if (kvm_apic_in_kernel()) {
3864 apic_type = "kvm-apic";
3865 } else if (xen_enabled()) {
3866 apic_type = "xen-apic";
3867 }
3868
3869 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3870 }
3871
3872 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
3873 {
3874 APICCommonState *apic;
3875 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
3876
3877 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
3878
3879 object_property_add_child(OBJECT(cpu), "lapic",
3880 OBJECT(cpu->apic_state), &error_abort);
3881 object_unref(OBJECT(cpu->apic_state));
3882
3883 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
3884 /* TODO: convert to link<> */
3885 apic = APIC_COMMON(cpu->apic_state);
3886 apic->cpu = cpu;
3887 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
3888 }
3889
3890 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3891 {
3892 APICCommonState *apic;
3893 static bool apic_mmio_map_once;
3894
3895 if (cpu->apic_state == NULL) {
3896 return;
3897 }
3898 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
3899 errp);
3900
3901 /* Map APIC MMIO area */
3902 apic = APIC_COMMON(cpu->apic_state);
3903 if (!apic_mmio_map_once) {
3904 memory_region_add_subregion_overlap(get_system_memory(),
3905 apic->apicbase &
3906 MSR_IA32_APICBASE_BASE,
3907 &apic->io_memory,
3908 0x1000);
3909 apic_mmio_map_once = true;
3910 }
3911 }
3912
3913 static void x86_cpu_machine_done(Notifier *n, void *unused)
3914 {
3915 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3916 MemoryRegion *smram =
3917 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3918
3919 if (smram) {
3920 cpu->smram = g_new(MemoryRegion, 1);
3921 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3922 smram, 0, 1ull << 32);
3923 memory_region_set_enabled(cpu->smram, true);
3924 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3925 }
3926 }
3927 #else
3928 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3929 {
3930 }
3931 #endif
3932
3933 /* Note: Only safe for use on x86(-64) hosts */
3934 static uint32_t x86_host_phys_bits(void)
3935 {
3936 uint32_t eax;
3937 uint32_t host_phys_bits;
3938
3939 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3940 if (eax >= 0x80000008) {
3941 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3942 /* Note: According to AMD doc 25481 rev 2.34 they have a field
3943 * at 23:16 that can specify the maximum physical address bits for
3944 * the guest and override this value; but I've not seen
3945 * anything with that set.
3946 */
3947 host_phys_bits = eax & 0xff;
3948 } else {
3949 /* It's an odd 64-bit machine that doesn't have the leaf for
3950 * physical address bits; fall back to 36 bits, the value used by
3951 * most older Intel CPUs.
3952 */
3953 host_phys_bits = 36;
3954 }
3955
3956 return host_phys_bits;
3957 }
3958
3959 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3960 {
3961 if (*min < value) {
3962 *min = value;
3963 }
3964 }
3965
3966 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3967 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3968 {
3969 CPUX86State *env = &cpu->env;
3970 FeatureWordInfo *fi = &feature_word_info[w];
3971 uint32_t eax = fi->cpuid_eax;
3972 uint32_t region = eax & 0xF0000000;
3973
3974 if (!env->features[w]) {
3975 return;
3976 }
3977
3978 switch (region) {
3979 case 0x00000000:
3980 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3981 break;
3982 case 0x80000000:
3983 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3984 break;
3985 case 0xC0000000:
3986 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3987 break;
3988 }
3989 }
3990
3991 /* Calculate XSAVE components based on the configured CPU feature flags */
3992 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3993 {
3994 CPUX86State *env = &cpu->env;
3995 int i;
3996 uint64_t mask;
3997
3998 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3999 return;
4000 }
4001
4002 mask = 0;
4003 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4004 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4005 if (env->features[esa->feature] & esa->bits) {
4006 mask |= (1ULL << i);
4007 }
4008 }
4009
4010 env->features[FEAT_XSAVE_COMP_LO] = mask;
4011 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4012 }
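/*
 * Editorial note: the mask computed above is split across the 32-bit
 * feature words FEAT_XSAVE_COMP_LO/HI.  Reassembling it, as the leaf-0xD
 * code does via x86_cpu_xsave_components(), amounts to:
 *
 *     uint64_t comp = ((uint64_t)env->features[FEAT_XSAVE_COMP_HI] << 32) |
 *                     env->features[FEAT_XSAVE_COMP_LO];
 */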
4013
4014 /***** Steps involved in loading and filtering CPUID data
4015 *
4016 * When initializing and realizing a CPU object, the steps
4017 * involved in setting up CPUID data are:
4018 *
4019 * 1) Loading CPU model definition (X86CPUDefinition). This is
4020 * implemented by x86_cpu_load_def() and should be completely
4021 * transparent, as it is done automatically by instance_init.
4022 * No code should need to look at X86CPUDefinition structs
4023 * outside instance_init.
4024 *
4025 * 2) CPU expansion. This is done by realize before CPUID
4026 * filtering, and will make sure host/accelerator data is
4027 * loaded for CPU models that depend on host capabilities
4028 * (e.g. "host"). Done by x86_cpu_expand_features().
4029 *
4030 * 3) CPUID filtering. This initializes extra data related to
4031 * CPUID, and checks if the host supports all capabilities
4032 * required by the CPU. Runnability of a CPU model is
4033 * determined at this step. Done by x86_cpu_filter_features().
4034 *
4035 * Some operations don't require all steps to be performed.
4036 * More precisely:
4037 *
4038 * - CPU instance creation (instance_init) will run only CPU
4039 * model loading. CPU expansion can't run at instance_init-time
4040 * because host/accelerator data may not be available yet.
4041 * - CPU realization will perform both CPU model expansion and CPUID
4042 * filtering, and return an error in case one of them fails.
4043 * - query-cpu-definitions needs to run all 3 steps. It needs
4044 * to run CPUID filtering, as the 'unavailable-features'
4045 * field is set based on the filtering results.
4046 * - The query-cpu-model-expansion QMP command only needs to run
4047 * CPU model loading and CPU expansion. It should not filter
4048 * any CPUID data based on host capabilities.
4049 */
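/*
 * Editorial sketch of the ordering described above, simplified from
 * x86_cpu_realizefn() further down (error handling omitted):
 *
 *     x86_cpu_expand_features(cpu, &err);          step 2: expansion
 *     if (x86_cpu_filter_features(cpu)) {          step 3: filtering
 *         report the filtered features; fail if enforce_cpuid is set
 *     }
 */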
4050
4051 /* Expand CPU configuration data, based on configured features
4052 * and host/accelerator capabilities when appropriate.
4053 */
4054 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4055 {
4056 CPUX86State *env = &cpu->env;
4057 FeatureWord w;
4058 GList *l;
4059 Error *local_err = NULL;
4060
4061 /* TODO: Now that cpu->max_features doesn't overwrite features
4062 * set using QOM properties, we can convert
4063 * plus_features & minus_features to global properties
4064 * inside x86_cpu_parse_featurestr() too.
4065 */
4066 if (cpu->max_features) {
4067 for (w = 0; w < FEATURE_WORDS; w++) {
4068 /* Override only features that weren't set explicitly
4069 * by the user.
4070 */
4071 env->features[w] |=
4072 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4073 ~env->user_features[w] &
4074 ~feature_word_info[w].no_autoenable_flags;
4075 }
4076 }
4077
4078 for (l = plus_features; l; l = l->next) {
4079 const char *prop = l->data;
4080 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4081 if (local_err) {
4082 goto out;
4083 }
4084 }
4085
4086 for (l = minus_features; l; l = l->next) {
4087 const char *prop = l->data;
4088 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4089 if (local_err) {
4090 goto out;
4091 }
4092 }
4093
4094 if (!kvm_enabled() || !cpu->expose_kvm) {
4095 env->features[FEAT_KVM] = 0;
4096 }
4097
4098 x86_cpu_enable_xsave_components(cpu);
4099
4100 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
4101 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4102 if (cpu->full_cpuid_auto_level) {
4103 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4104 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4105 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4106 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4107 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4108 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4109 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4110 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4111 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4112 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4113 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4114 /* SVM requires CPUID[0x8000000A] */
4115 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4116 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4117 }
4118
4119 /* SEV requires CPUID[0x8000001F] */
4120 if (sev_enabled()) {
4121 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4122 }
4123 }
4124
4125 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4126 if (env->cpuid_level == UINT32_MAX) {
4127 env->cpuid_level = env->cpuid_min_level;
4128 }
4129 if (env->cpuid_xlevel == UINT32_MAX) {
4130 env->cpuid_xlevel = env->cpuid_min_xlevel;
4131 }
4132 if (env->cpuid_xlevel2 == UINT32_MAX) {
4133 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4134 }
4135
4136 out:
4137 if (local_err != NULL) {
4138 error_propagate(errp, local_err);
4139 }
4140 }
4141
4142 /*
4143 * Finishes initialization of CPUID data, filters CPU feature
4144 * words based on host availability of each feature.
4145 *
4146 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4147 */
4148 static int x86_cpu_filter_features(X86CPU *cpu)
4149 {
4150 CPUX86State *env = &cpu->env;
4151 FeatureWord w;
4152 int rv = 0;
4153
4154 for (w = 0; w < FEATURE_WORDS; w++) {
4155 uint32_t host_feat =
4156 x86_cpu_get_supported_feature_word(w, false);
4157 uint32_t requested_features = env->features[w];
4158 env->features[w] &= host_feat;
4159 cpu->filtered_features[w] = requested_features & ~env->features[w];
4160 if (cpu->filtered_features[w]) {
4161 rv = 1;
4162 }
4163 }
4164
4165 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4166 kvm_enabled()) {
4167 KVMState *s = CPU(cpu)->kvm_state;
4168 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4169 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4170 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4171 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4172 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4173
4174 if (!eax_0 ||
4175 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4176 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4177 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4178 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4179 INTEL_PT_ADDR_RANGES_NUM) ||
4180 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4181 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4182 (ecx_0 & INTEL_PT_IP_LIP)) {
4183 /*
4184 * Processor Trace capabilities aren't configurable, so if the
4185 * host can't emulate the capabilities we report on
4186 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4187 */
4188 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4189 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
4190 rv = 1;
4191 }
4192 }
4193
4194 return rv;
4195 }
4196
4197 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4198 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4199 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4200 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4201 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4202 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4203 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4204 {
4205 CPUState *cs = CPU(dev);
4206 X86CPU *cpu = X86_CPU(dev);
4207 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4208 CPUX86State *env = &cpu->env;
4209 Error *local_err = NULL;
4210 static bool ht_warned;
4211
4212 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4213 char *name = x86_cpu_class_get_model_name(xcc);
4214 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4215 g_free(name);
4216 goto out;
4217 }
4218
4219 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4220 error_setg(errp, "apic-id property was not initialized properly");
4221 return;
4222 }
4223
4224 x86_cpu_expand_features(cpu, &local_err);
4225 if (local_err) {
4226 goto out;
4227 }
4228
4229 if (x86_cpu_filter_features(cpu) &&
4230 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4231 x86_cpu_report_filtered_features(cpu);
4232 if (cpu->enforce_cpuid) {
4233 error_setg(&local_err,
4234 accel_uses_host_cpuid() ?
4235 "Host doesn't support requested features" :
4236 "TCG doesn't support requested features");
4237 goto out;
4238 }
4239 }
4240
4241 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4242 * CPUID[1].EDX.
4243 */
4244 if (IS_AMD_CPU(env)) {
4245 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4246 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4247 & CPUID_EXT2_AMD_ALIASES);
4248 }
4249
4250 /* For 64-bit systems, think about the number of physical bits to present.
4251 * Ideally this should be the same as the host; anything other than matching
4252 * the host can cause incorrect guest behaviour.
4253 * QEMU used to pick the magic value of 40 bits that corresponds to
4254 * consumer AMD devices but nothing else.
4255 */
4256 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4257 if (accel_uses_host_cpuid()) {
4258 uint32_t host_phys_bits = x86_host_phys_bits();
4259 static bool warned;
4260
4261 if (cpu->host_phys_bits) {
4262 /* The user asked for us to use the host physical bits */
4263 cpu->phys_bits = host_phys_bits;
4264 }
4265
4266 /* Print a warning if the user set it to a value that's not the
4267 * host value.
4268 */
4269 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4270 !warned) {
4271 warn_report("Host physical bits (%u)"
4272 " does not match phys-bits property (%u)",
4273 host_phys_bits, cpu->phys_bits);
4274 warned = true;
4275 }
4276
4277 if (cpu->phys_bits &&
4278 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4279 cpu->phys_bits < 32)) {
4280 error_setg(errp, "phys-bits should be between 32 and %u "
4281 " (but is %u)",
4282 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4283 return;
4284 }
4285 } else {
4286 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4287 error_setg(errp, "TCG only supports phys-bits=%u",
4288 TCG_PHYS_ADDR_BITS);
4289 return;
4290 }
4291 }
4292 /* 0 means it was not explicitly set by the user (or by machine
4293 * compat_props or by the host code above). In this case, the default
4294 * is the value used by TCG (40).
4295 */
4296 if (cpu->phys_bits == 0) {
4297 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4298 }
4299 } else {
4300 /* For 32-bit systems, don't use the user-set value, but keep
4301 * phys_bits consistent with what we tell the guest.
4302 */
4303 if (cpu->phys_bits != 0) {
4304 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4305 return;
4306 }
4307
4308 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4309 cpu->phys_bits = 36;
4310 } else {
4311 cpu->phys_bits = 32;
4312 }
4313 }
4314 cpu_exec_realizefn(cs, &local_err);
4315 if (local_err != NULL) {
4316 error_propagate(errp, local_err);
4317 return;
4318 }
4319
4320 #ifndef CONFIG_USER_ONLY
4321 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4322
4323 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4324 x86_cpu_apic_create(cpu, &local_err);
4325 if (local_err != NULL) {
4326 goto out;
4327 }
4328 }
4329 #endif
4330
4331 mce_init(cpu);
4332
4333 #ifndef CONFIG_USER_ONLY
4334 if (tcg_enabled()) {
4335 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4336 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4337
4338 /* Outer container... */
4339 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4340 memory_region_set_enabled(cpu->cpu_as_root, true);
4341
4342 /* ... with two regions inside: normal system memory with low
4343 * priority, and...
4344 */
4345 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4346 get_system_memory(), 0, ~0ull);
4347 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4348 memory_region_set_enabled(cpu->cpu_as_mem, true);
4349
4350 cs->num_ases = 2;
4351 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4352 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4353
4354 /* ... SMRAM with higher priority, linked from /machine/smram. */
4355 cpu->machine_done.notify = x86_cpu_machine_done;
4356 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4357 }
4358 #endif
4359
4360 qemu_init_vcpu(cs);
4361
4362 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4363 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4364 * based on inputs (sockets, cores, threads), it is still better to give
4365 * users a warning.
4366 *
4367 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4368 * cs->nr_threads hasn't been populated yet and the check is incorrect.
4369 */
4370 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4371 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4372 " -smp options properly.");
4373 ht_warned = true;
4374 }
4375
4376 x86_cpu_apic_realize(cpu, &local_err);
4377 if (local_err != NULL) {
4378 goto out;
4379 }
4380 cpu_reset(cs);
4381
4382 xcc->parent_realize(dev, &local_err);
4383
4384 out:
4385 if (local_err != NULL) {
4386 error_propagate(errp, local_err);
4387 return;
4388 }
4389 }
4390
4391 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4392 {
4393 X86CPU *cpu = X86_CPU(dev);
4394 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4395 Error *local_err = NULL;
4396
4397 #ifndef CONFIG_USER_ONLY
4398 cpu_remove_sync(CPU(dev));
4399 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4400 #endif
4401
4402 if (cpu->apic_state) {
4403 object_unparent(OBJECT(cpu->apic_state));
4404 cpu->apic_state = NULL;
4405 }
4406
4407 xcc->parent_unrealize(dev, &local_err);
4408 if (local_err != NULL) {
4409 error_propagate(errp, local_err);
4410 return;
4411 }
4412 }
4413
4414 typedef struct BitProperty {
4415 FeatureWord w;
4416 uint32_t mask;
4417 } BitProperty;
4418
4419 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4420 void *opaque, Error **errp)
4421 {
4422 X86CPU *cpu = X86_CPU(obj);
4423 BitProperty *fp = opaque;
4424 uint32_t f = cpu->env.features[fp->w];
4425 bool value = (f & fp->mask) == fp->mask;
4426 visit_type_bool(v, name, &value, errp);
4427 }
4428
4429 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4430 void *opaque, Error **errp)
4431 {
4432 DeviceState *dev = DEVICE(obj);
4433 X86CPU *cpu = X86_CPU(obj);
4434 BitProperty *fp = opaque;
4435 Error *local_err = NULL;
4436 bool value;
4437
4438 if (dev->realized) {
4439 qdev_prop_set_after_realize(dev, name, errp);
4440 return;
4441 }
4442
4443 visit_type_bool(v, name, &value, &local_err);
4444 if (local_err) {
4445 error_propagate(errp, local_err);
4446 return;
4447 }
4448
4449 if (value) {
4450 cpu->env.features[fp->w] |= fp->mask;
4451 } else {
4452 cpu->env.features[fp->w] &= ~fp->mask;
4453 }
4454 cpu->env.user_features[fp->w] |= fp->mask;
4455 }
4456
4457 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4458 void *opaque)
4459 {
4460 BitProperty *prop = opaque;
4461 g_free(prop);
4462 }
4463
4464 /* Register a boolean property to get/set a single bit in a uint32_t field.
4465 *
4466 * The same property name can be registered multiple times to make it affect
4467 * multiple bits in the same FeatureWord. In that case, the getter will return
4468 * true only if all bits are set.
4469 */
4470 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4471 const char *prop_name,
4472 FeatureWord w,
4473 int bitnr)
4474 {
4475 BitProperty *fp;
4476 ObjectProperty *op;
4477 uint32_t mask = (1UL << bitnr);
4478
4479 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4480 if (op) {
4481 fp = op->opaque;
4482 assert(fp->w == w);
4483 fp->mask |= mask;
4484 } else {
4485 fp = g_new0(BitProperty, 1);
4486 fp->w = w;
4487 fp->mask = mask;
4488 object_property_add(OBJECT(cpu), prop_name, "bool",
4489 x86_cpu_get_bit_prop,
4490 x86_cpu_set_bit_prop,
4491 x86_cpu_release_bit_prop, fp, &error_abort);
4492 }
4493 }
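
/*
 * Editorial illustration with hypothetical values: registering the same
 * name twice accumulates both bits in the BitProperty mask, so the getter
 * reports "true" only when both bits are set in the feature word.
 *
 *     x86_cpu_register_bit_prop(cpu, "feat-x", FEAT_1_EDX, 3);
 *     x86_cpu_register_bit_prop(cpu, "feat-x", FEAT_1_EDX, 5);
 *     fp->mask is now (1UL << 3) | (1UL << 5), and the getter evaluates
 *     (f & fp->mask) == fp->mask.
 */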
4494
4495 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4496 FeatureWord w,
4497 int bitnr)
4498 {
4499 FeatureWordInfo *fi = &feature_word_info[w];
4500 const char *name = fi->feat_names[bitnr];
4501
4502 if (!name) {
4503 return;
4504 }
4505
4506 /* Property names should use "-" instead of "_".
4507 * Old names containing underscores are registered as aliases
4508 * using object_property_add_alias()
4509 */
4510 assert(!strchr(name, '_'));
4511 /* Aliases don't use "|" delimiters anymore; they are registered
4512 * manually using object_property_add_alias() */
4513 assert(!strchr(name, '|'));
4514 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
4515 }
4516
4517 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
4518 {
4519 X86CPU *cpu = X86_CPU(cs);
4520 CPUX86State *env = &cpu->env;
4521 GuestPanicInformation *panic_info = NULL;
4522
4523 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
4524 panic_info = g_malloc0(sizeof(GuestPanicInformation));
4525
4526 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
4527
4528 assert(HV_CRASH_PARAMS >= 5);
4529 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
4530 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
4531 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
4532 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
4533 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
4534 }
4535
4536 return panic_info;
4537 }
4538 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
4539 const char *name, void *opaque,
4540 Error **errp)
4541 {
4542 CPUState *cs = CPU(obj);
4543 GuestPanicInformation *panic_info;
4544
4545 if (!cs->crash_occurred) {
4546 error_setg(errp, "No crash occurred");
4547 return;
4548 }
4549
4550 panic_info = x86_cpu_get_crash_info(cs);
4551 if (panic_info == NULL) {
4552 error_setg(errp, "No crash information");
4553 return;
4554 }
4555
4556 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
4557 errp);
4558 qapi_free_GuestPanicInformation(panic_info);
4559 }
4560
4561 static void x86_cpu_initfn(Object *obj)
4562 {
4563 CPUState *cs = CPU(obj);
4564 X86CPU *cpu = X86_CPU(obj);
4565 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
4566 CPUX86State *env = &cpu->env;
4567 FeatureWord w;
4568
4569 cs->env_ptr = env;
4570
4571 object_property_add(obj, "family", "int",
4572 x86_cpuid_version_get_family,
4573 x86_cpuid_version_set_family, NULL, NULL, NULL);
4574 object_property_add(obj, "model", "int",
4575 x86_cpuid_version_get_model,
4576 x86_cpuid_version_set_model, NULL, NULL, NULL);
4577 object_property_add(obj, "stepping", "int",
4578 x86_cpuid_version_get_stepping,
4579 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
4580 object_property_add_str(obj, "vendor",
4581 x86_cpuid_get_vendor,
4582 x86_cpuid_set_vendor, NULL);
4583 object_property_add_str(obj, "model-id",
4584 x86_cpuid_get_model_id,
4585 x86_cpuid_set_model_id, NULL);
4586 object_property_add(obj, "tsc-frequency", "int",
4587 x86_cpuid_get_tsc_freq,
4588 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4589 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4590 x86_cpu_get_feature_words,
4591 NULL, NULL, (void *)env->features, NULL);
4592 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4593 x86_cpu_get_feature_words,
4594 NULL, NULL, (void *)cpu->filtered_features, NULL);
4595
4596 object_property_add(obj, "crash-information", "GuestPanicInformation",
4597 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4598
4599 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4600
4601 for (w = 0; w < FEATURE_WORDS; w++) {
4602 int bitnr;
4603
4604 for (bitnr = 0; bitnr < 32; bitnr++) {
4605 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4606 }
4607 }
4608
4609 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4610 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4611 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4612 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4613 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4614 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4615 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4616
4617 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4618 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
4619 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
4620 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
4621 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
4622 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
4623 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
4624 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
4625 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
4626 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
4627 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
4628 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
4629 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
4630 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
4631 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
4632 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
4633 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
4634 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
4635 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
4636 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
4637 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
4638
4639 if (xcc->cpu_def) {
4640 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
4641 }
4642 }
4643
4644 static int64_t x86_cpu_get_arch_id(CPUState *cs)
4645 {
4646 X86CPU *cpu = X86_CPU(cs);
4647
4648 return cpu->apic_id;
4649 }
4650
4651 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
4652 {
4653 X86CPU *cpu = X86_CPU(cs);
4654
4655 return cpu->env.cr[0] & CR0_PG_MASK;
4656 }
4657
4658 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
4659 {
4660 X86CPU *cpu = X86_CPU(cs);
4661
4662 cpu->env.eip = value;
4663 }
4664
4665 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
4666 {
4667 X86CPU *cpu = X86_CPU(cs);
4668
4669 cpu->env.eip = tb->pc - tb->cs_base;
4670 }
4671
4672 static bool x86_cpu_has_work(CPUState *cs)
4673 {
4674 X86CPU *cpu = X86_CPU(cs);
4675 CPUX86State *env = &cpu->env;
4676
4677 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
4678 CPU_INTERRUPT_POLL)) &&
4679 (env->eflags & IF_MASK)) ||
4680 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
4681 CPU_INTERRUPT_INIT |
4682 CPU_INTERRUPT_SIPI |
4683 CPU_INTERRUPT_MCE)) ||
4684 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
4685 !(env->hflags & HF_SMM_MASK));
4686 }
4687
4688 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
4689 {
4690 X86CPU *cpu = X86_CPU(cs);
4691 CPUX86State *env = &cpu->env;
4692
4693 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
4694 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
4695 : bfd_mach_i386_i8086);
4696 info->print_insn = print_insn_i386;
4697
4698 info->cap_arch = CS_ARCH_X86;
4699 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
4700 : env->hflags & HF_CS32_MASK ? CS_MODE_32
4701 : CS_MODE_16);
4702 info->cap_insn_unit = 1;
4703 info->cap_insn_split = 8;
4704 }
4705
4706 void x86_update_hflags(CPUX86State *env)
4707 {
4708 uint32_t hflags;
4709 #define HFLAG_COPY_MASK \
4710 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
4711 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
4712 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
4713 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
4714
4715 hflags = env->hflags & HFLAG_COPY_MASK;
4716 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
4717 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
4718 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
4719 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
4720 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
4721
4722 if (env->cr[4] & CR4_OSFXSR_MASK) {
4723 hflags |= HF_OSFXSR_MASK;
4724 }
4725
4726 if (env->efer & MSR_EFER_LMA) {
4727 hflags |= HF_LMA_MASK;
4728 }
4729
4730 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
4731 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
4732 } else {
4733 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
4734 (DESC_B_SHIFT - HF_CS32_SHIFT);
4735 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
4736 (DESC_B_SHIFT - HF_SS32_SHIFT);
4737 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
4738 !(hflags & HF_CS32_MASK)) {
4739 hflags |= HF_ADDSEG_MASK;
4740 } else {
4741 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
4742 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
4743 }
4744 }
4745 env->hflags = hflags;
4746 }
4747
4748 static Property x86_cpu_properties[] = {
4749 #ifdef CONFIG_USER_ONLY
4750 /* apic_id = 0 by default for *-user, see commit 9886e834 */
4751 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
4752 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
4753 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
4754 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
4755 #else
4756 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
4757 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
4758 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
4759 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
4760 #endif
4761 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
4762 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
4763 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
4764 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
4765 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
4766 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
4767 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
4768 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
4769 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
4770 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
4771 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
4772 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
4773 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
4774 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
4775 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
4776 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
4777 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
4778 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
4779 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
4780 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
4781 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
4782 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
4783 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
4784 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
4785 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
4786 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
4787 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
4788 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
4789 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
4790 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
4791 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
4792 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
4793 false),
4794 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
4795 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
4796
4797 /*
4798 * From "Requirements for Implementing the Microsoft
4799 * Hypervisor Interface":
4800 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
4801 *
4802 * "Starting with Windows Server 2012 and Windows 8, if
4803 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
4804 * the hypervisor imposes no specific limit to the number of VPs.
4805 * In this case, Windows Server 2012 guest VMs may use more than
4806 * 64 VPs, up to the maximum supported number of processors applicable
4807 * to the specific Windows version being used."
4808 */
4809 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
4810 DEFINE_PROP_END_OF_LIST()
4811 };
4812
4813 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
4814 {
4815 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4816 CPUClass *cc = CPU_CLASS(oc);
4817 DeviceClass *dc = DEVICE_CLASS(oc);
4818
4819 device_class_set_parent_realize(dc, x86_cpu_realizefn,
4820 &xcc->parent_realize);
4821 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
4822 &xcc->parent_unrealize);
4823 dc->props = x86_cpu_properties;
4824
4825 xcc->parent_reset = cc->reset;
4826 cc->reset = x86_cpu_reset;
4827 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
4828
4829 cc->class_by_name = x86_cpu_class_by_name;
4830 cc->parse_features = x86_cpu_parse_featurestr;
4831 cc->has_work = x86_cpu_has_work;
4832 #ifdef CONFIG_TCG
4833 cc->do_interrupt = x86_cpu_do_interrupt;
4834 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
4835 #endif
4836 cc->dump_state = x86_cpu_dump_state;
4837 cc->get_crash_info = x86_cpu_get_crash_info;
4838 cc->set_pc = x86_cpu_set_pc;
4839 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
4840 cc->gdb_read_register = x86_cpu_gdb_read_register;
4841 cc->gdb_write_register = x86_cpu_gdb_write_register;
4842 cc->get_arch_id = x86_cpu_get_arch_id;
4843 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
4844 #ifdef CONFIG_USER_ONLY
4845 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
4846 #else
4847 cc->asidx_from_attrs = x86_asidx_from_attrs;
4848 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
4849 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
4850 cc->write_elf64_note = x86_cpu_write_elf64_note;
4851 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
4852 cc->write_elf32_note = x86_cpu_write_elf32_note;
4853 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
4854 cc->vmsd = &vmstate_x86_cpu;
4855 #endif
4856 cc->gdb_arch_name = x86_gdb_arch_name;
4857 #ifdef TARGET_X86_64
4858 cc->gdb_core_xml_file = "i386-64bit.xml";
4859 cc->gdb_num_core_regs = 57;
4860 #else
4861 cc->gdb_core_xml_file = "i386-32bit.xml";
4862 cc->gdb_num_core_regs = 41;
4863 #endif
4864 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
4865 cc->debug_excp_handler = breakpoint_handler;
4866 #endif
4867 cc->cpu_exec_enter = x86_cpu_exec_enter;
4868 cc->cpu_exec_exit = x86_cpu_exec_exit;
4869 #ifdef CONFIG_TCG
4870 cc->tcg_initialize = tcg_x86_init;
4871 #endif
4872 cc->disas_set_info = x86_disas_set_info;
4873
4874 dc->user_creatable = true;
4875 }
4876
4877 static const TypeInfo x86_cpu_type_info = {
4878 .name = TYPE_X86_CPU,
4879 .parent = TYPE_CPU,
4880 .instance_size = sizeof(X86CPU),
4881 .instance_init = x86_cpu_initfn,
4882 .abstract = true,
4883 .class_size = sizeof(X86CPUClass),
4884 .class_init = x86_cpu_common_class_init,
4885 };
4886
4887
4888 /* "base" CPU model, used by query-cpu-model-expansion */
4889 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4890 {
4891 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4892
4893 xcc->static_model = true;
4894 xcc->migration_safe = true;
4895 xcc->model_description = "base CPU model type with no features enabled";
4896 xcc->ordering = 8;
4897 }
4898
4899 static const TypeInfo x86_base_cpu_type_info = {
4900 .name = X86_CPU_TYPE_NAME("base"),
4901 .parent = TYPE_X86_CPU,
4902 .class_init = x86_cpu_base_class_init,
4903 };
4904
4905 static void x86_cpu_register_types(void)
4906 {
4907 int i;
4908
4909 type_register_static(&x86_cpu_type_info);
4910 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4911 x86_register_cpudef_type(&builtin_x86_defs[i]);
4912 }
4913 type_register_static(&max_x86_cpu_type_info);
4914 type_register_static(&x86_base_cpu_type_info);
4915 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
4916 type_register_static(&host_x86_cpu_type_info);
4917 #endif
4918 }
4919
4920 type_init(x86_cpu_register_types)