target/i386/cpu.c (mirror_qemu.git)
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22
23 #include "cpu.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
28 #include "kvm_i386.h"
29 #include "sev_i386.h"
30
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "qemu/config-file.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-visit-misc.h"
36 #include "qapi/qapi-visit-run-state.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qerror.h"
39 #include "qapi/visitor.h"
40 #include "qom/qom-qobject.h"
41 #include "sysemu/arch_init.h"
42
43 #if defined(CONFIG_KVM)
44 #include <linux/kvm_para.h>
45 #endif
46
47 #include "sysemu/sysemu.h"
48 #include "hw/qdev-properties.h"
49 #include "hw/i386/topology.h"
50 #ifndef CONFIG_USER_ONLY
51 #include "exec/address-spaces.h"
52 #include "hw/hw.h"
53 #include "hw/xen/xen.h"
54 #include "hw/i386/apic_internal.h"
55 #endif
56
57 #include "disas/capstone.h"
58
59
60 /* Cache topology CPUID constants: */
61
62 /* CPUID Leaf 2 Descriptors */
63
64 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
65 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
66 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
67 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
68
69
70 /* CPUID Leaf 4 constants: */
71
72 /* EAX: */
73 #define CPUID_4_TYPE_DCACHE 1
74 #define CPUID_4_TYPE_ICACHE 2
75 #define CPUID_4_TYPE_UNIFIED 3
76
77 #define CPUID_4_LEVEL(l) ((l) << 5)
78
79 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
80 #define CPUID_4_FULLY_ASSOC (1 << 9)
81
82 /* EDX: */
83 #define CPUID_4_NO_INVD_SHARING (1 << 0)
84 #define CPUID_4_INCLUSIVE (1 << 1)
85 #define CPUID_4_COMPLEX_IDX (1 << 2)
86
87 #define ASSOC_FULL 0xFF
88
89 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
90 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
91 a == 2 ? 0x2 : \
92 a == 4 ? 0x4 : \
93 a == 8 ? 0x6 : \
94 a == 16 ? 0x8 : \
95 a == 32 ? 0xA : \
96 a == 48 ? 0xB : \
97 a == 64 ? 0xC : \
98 a == 96 ? 0xD : \
99 a == 128 ? 0xE : \
100 a == ASSOC_FULL ? 0xF : \
101 0 /* invalid value */)
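/*
 * Worked examples of the encoding above (derived directly from the macro):
 * AMD_ENC_ASSOC(8) == 0x6, AMD_ENC_ASSOC(16) == 0x8, and
 * AMD_ENC_ASSOC(ASSOC_FULL) == 0xF.  Any associativity not listed in the
 * table (e.g. 24) falls through to the "invalid value" case and encodes as 0.
 */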
102
103
104 /* Definitions of the hardcoded cache entries we expose: */
105
106 /* L1 data cache: */
107 #define L1D_LINE_SIZE 64
108 #define L1D_ASSOCIATIVITY 8
109 #define L1D_SETS 64
110 #define L1D_PARTITIONS 1
111 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
112 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
113 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
114 #define L1D_LINES_PER_TAG 1
115 #define L1D_SIZE_KB_AMD 64
116 #define L1D_ASSOCIATIVITY_AMD 2
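/*
 * For reference, the leaf-4 geometry above works out as
 * 64 B/line * 8 ways * 64 sets * 1 partition = 32768 B = 32 KiB,
 * while the AMD legacy leaf 0x80000005 values advertise a 64 KiB, 2-way
 * cache instead -- the inconsistency the FIXME above refers to.
 */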
117
118 /* L1 instruction cache: */
119 #define L1I_LINE_SIZE 64
120 #define L1I_ASSOCIATIVITY 8
121 #define L1I_SETS 64
122 #define L1I_PARTITIONS 1
123 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
124 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
125 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
126 #define L1I_LINES_PER_TAG 1
127 #define L1I_SIZE_KB_AMD 64
128 #define L1I_ASSOCIATIVITY_AMD 2
129
130 /* Level 2 unified cache: */
131 #define L2_LINE_SIZE 64
132 #define L2_ASSOCIATIVITY 16
133 #define L2_SETS 4096
134 #define L2_PARTITIONS 1
135 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
136 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
137 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
138 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
139 #define L2_LINES_PER_TAG 1
140 #define L2_SIZE_KB_AMD 512
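/*
 * Leaf-4 geometry: 64 B/line * 16 ways * 4096 sets * 1 partition =
 * 4194304 B = 4 MiB, whereas the leaf-2 descriptor claims 2 MB/8-way and
 * the AMD leaf 0x80000006 value claims 512 KiB -- the inconsistencies the
 * FIXMEs above refer to.
 */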
141
142 /* Level 3 unified cache: */
143 #define L3_SIZE_KB 0 /* disabled */
144 #define L3_ASSOCIATIVITY 0 /* disabled */
145 #define L3_LINES_PER_TAG 0 /* disabled */
146 #define L3_LINE_SIZE 0 /* disabled */
147 #define L3_N_LINE_SIZE 64
148 #define L3_N_ASSOCIATIVITY 16
149 #define L3_N_SETS 16384
150 #define L3_N_PARTITIONS 1
151 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
152 #define L3_N_LINES_PER_TAG 1
153 #define L3_N_SIZE_KB_AMD 16384
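/*
 * The L3_N_* geometry is consistent across leaves:
 * 64 B/line * 16 ways * 16384 sets * 1 partition = 16777216 B = 16 MiB,
 * matching both the 16 MiB leaf-2 descriptor and L3_N_SIZE_KB_AMD.
 */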
154
155 /* TLB definitions: */
156
157 #define L1_DTLB_2M_ASSOC 1
158 #define L1_DTLB_2M_ENTRIES 255
159 #define L1_DTLB_4K_ASSOC 1
160 #define L1_DTLB_4K_ENTRIES 255
161
162 #define L1_ITLB_2M_ASSOC 1
163 #define L1_ITLB_2M_ENTRIES 255
164 #define L1_ITLB_4K_ASSOC 1
165 #define L1_ITLB_4K_ENTRIES 255
166
167 #define L2_DTLB_2M_ASSOC 0 /* disabled */
168 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
169 #define L2_DTLB_4K_ASSOC 4
170 #define L2_DTLB_4K_ENTRIES 512
171
172 #define L2_ITLB_2M_ASSOC 0 /* disabled */
173 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
174 #define L2_ITLB_4K_ASSOC 4
175 #define L2_ITLB_4K_ENTRIES 512
176
177 /* CPUID Leaf 0x14 constants: */
178 #define INTEL_PT_MAX_SUBLEAF 0x1
179 /*
180 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
181 * MSR can be accessed;
182 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
183 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
184 * of Intel PT MSRs across warm reset;
185 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
186 */
187 #define INTEL_PT_MINIMAL_EBX 0xf
188 /*
189 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
190 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
191 * accessed;
192 * bit[01]: ToPA tables can hold any number of output entries, up to the
193 * maximum allowed by the MaskOrTableOffset field of
194 * IA32_RTIT_OUTPUT_MASK_PTRS;
195 * bit[02]: Support Single-Range Output scheme;
196 */
197 #define INTEL_PT_MINIMAL_ECX 0x7
198 /* generated packets which contain IP payloads have LIP values */
199 #define INTEL_PT_IP_LIP (1 << 31)
200 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
201 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
202 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
203 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
204 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
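/*
 * The "minimal" masks above simply set the low bits described in the
 * preceding comments: INTEL_PT_MINIMAL_EBX == 0xf covers bits[3:0] and
 * INTEL_PT_MINIMAL_ECX == 0x7 covers bits[2:0].  INTEL_PT_ADDR_RANGES_NUM
 * advertises two configurable address ranges.
 */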
205
206 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
207 uint32_t vendor2, uint32_t vendor3)
208 {
209 int i;
210 for (i = 0; i < 4; i++) {
211 dst[i] = vendor1 >> (8 * i);
212 dst[i + 4] = vendor2 >> (8 * i);
213 dst[i + 8] = vendor3 >> (8 * i);
214 }
215 dst[CPUID_VENDOR_SZ] = '\0';
216 }
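/*
 * Each 32-bit word contributes four bytes, least-significant byte first,
 * giving the usual 12-character vendor string.  For instance, with the
 * GenuineIntel register values (EBX=0x756e6547, EDX=0x49656e69,
 * ECX=0x6c65746e, passed here as vendor1/2/3 by host_vendor_fms() below)
 * the result is "Genu" + "ineI" + "ntel".
 */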
217
218 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
219 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
220 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
221 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
222 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
223 CPUID_PSE36 | CPUID_FXSR)
224 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
225 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
226 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
227 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
228 CPUID_PAE | CPUID_SEP | CPUID_APIC)
229
230 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
231 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
232 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
233 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
234 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
235 /* partly implemented:
236 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
237 /* missing:
238 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
239 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
240 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
241 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
242 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
243 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
244 /* missing:
245 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
246 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
247 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
248 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
249 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
250
251 #ifdef TARGET_X86_64
252 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
253 #else
254 #define TCG_EXT2_X86_64_FEATURES 0
255 #endif
256
257 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
258 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
259 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
260 TCG_EXT2_X86_64_FEATURES)
261 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
262 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
263 #define TCG_EXT4_FEATURES 0
264 #define TCG_SVM_FEATURES 0
265 #define TCG_KVM_FEATURES 0
266 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
267 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
268 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
269 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
270 CPUID_7_0_EBX_ERMS)
271 /* missing:
272 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
273 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
274 CPUID_7_0_EBX_RDSEED */
275 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
276 CPUID_7_0_ECX_LA57)
277 #define TCG_7_0_EDX_FEATURES 0
278 #define TCG_APM_FEATURES 0
279 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
280 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
281 /* missing:
282 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
283
284 typedef struct FeatureWordInfo {
285 /* feature flag names are taken from "Intel Processor Identification and
286 * the CPUID Instruction" and AMD's "CPUID Specification".
287 * In cases of disagreement between feature naming conventions,
288 * aliases may be added.
289 */
290 const char *feat_names[32];
291 uint32_t cpuid_eax; /* Input EAX for CPUID */
292 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
293 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
294 int cpuid_reg; /* output register (R_* constant) */
295 uint32_t tcg_features; /* Feature flags supported by TCG */
296 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
297 uint32_t migratable_flags; /* Feature flags known to be migratable */
298 } FeatureWordInfo;
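/*
 * Example of how an entry is interpreted: the FEAT_7_0_EBX entry below has
 * cpuid_eax = 7, cpuid_needs_ecx = true, cpuid_ecx = 0 and
 * cpuid_reg = R_EBX, i.e. that feature word is read from
 * CPUID.(EAX=7,ECX=0):EBX.
 */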
299
300 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
301 [FEAT_1_EDX] = {
302 .feat_names = {
303 "fpu", "vme", "de", "pse",
304 "tsc", "msr", "pae", "mce",
305 "cx8", "apic", NULL, "sep",
306 "mtrr", "pge", "mca", "cmov",
307 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
308 NULL, "ds" /* Intel dts */, "acpi", "mmx",
309 "fxsr", "sse", "sse2", "ss",
310 "ht" /* Intel htt */, "tm", "ia64", "pbe",
311 },
312 .cpuid_eax = 1, .cpuid_reg = R_EDX,
313 .tcg_features = TCG_FEATURES,
314 },
315 [FEAT_1_ECX] = {
316 .feat_names = {
317 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
318 "ds-cpl", "vmx", "smx", "est",
319 "tm2", "ssse3", "cid", NULL,
320 "fma", "cx16", "xtpr", "pdcm",
321 NULL, "pcid", "dca", "sse4.1",
322 "sse4.2", "x2apic", "movbe", "popcnt",
323 "tsc-deadline", "aes", "xsave", "osxsave",
324 "avx", "f16c", "rdrand", "hypervisor",
325 },
326 .cpuid_eax = 1, .cpuid_reg = R_ECX,
327 .tcg_features = TCG_EXT_FEATURES,
328 },
329 /* Feature names that are already defined in feature_name[] but
330 * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
331 * names in feat_names below. They are copied automatically
332 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
333 */
334 [FEAT_8000_0001_EDX] = {
335 .feat_names = {
336 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
337 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
338 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
339 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
340 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
341 "nx", NULL, "mmxext", NULL /* mmx */,
342 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
343 NULL, "lm", "3dnowext", "3dnow",
344 },
345 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
346 .tcg_features = TCG_EXT2_FEATURES,
347 },
348 [FEAT_8000_0001_ECX] = {
349 .feat_names = {
350 "lahf-lm", "cmp-legacy", "svm", "extapic",
351 "cr8legacy", "abm", "sse4a", "misalignsse",
352 "3dnowprefetch", "osvw", "ibs", "xop",
353 "skinit", "wdt", NULL, "lwp",
354 "fma4", "tce", NULL, "nodeid-msr",
355 NULL, "tbm", "topoext", "perfctr-core",
356 "perfctr-nb", NULL, NULL, NULL,
357 NULL, NULL, NULL, NULL,
358 },
359 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
360 .tcg_features = TCG_EXT3_FEATURES,
361 },
362 [FEAT_C000_0001_EDX] = {
363 .feat_names = {
364 NULL, NULL, "xstore", "xstore-en",
365 NULL, NULL, "xcrypt", "xcrypt-en",
366 "ace2", "ace2-en", "phe", "phe-en",
367 "pmm", "pmm-en", NULL, NULL,
368 NULL, NULL, NULL, NULL,
369 NULL, NULL, NULL, NULL,
370 NULL, NULL, NULL, NULL,
371 NULL, NULL, NULL, NULL,
372 },
373 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
374 .tcg_features = TCG_EXT4_FEATURES,
375 },
376 [FEAT_KVM] = {
377 .feat_names = {
378 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
379 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
380 NULL, "kvm-pv-tlb-flush", NULL, NULL,
381 NULL, NULL, NULL, NULL,
382 NULL, NULL, NULL, NULL,
383 NULL, NULL, NULL, NULL,
384 "kvmclock-stable-bit", NULL, NULL, NULL,
385 NULL, NULL, NULL, NULL,
386 },
387 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
388 .tcg_features = TCG_KVM_FEATURES,
389 },
390 [FEAT_KVM_HINTS] = {
391 .feat_names = {
392 "kvm-hint-dedicated", NULL, NULL, NULL,
393 NULL, NULL, NULL, NULL,
394 NULL, NULL, NULL, NULL,
395 NULL, NULL, NULL, NULL,
396 NULL, NULL, NULL, NULL,
397 NULL, NULL, NULL, NULL,
398 NULL, NULL, NULL, NULL,
399 NULL, NULL, NULL, NULL,
400 },
401 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
402 .tcg_features = TCG_KVM_FEATURES,
403 },
404 [FEAT_HYPERV_EAX] = {
405 .feat_names = {
406 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
407 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
408 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
409 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
410 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
411 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
412 NULL, NULL, NULL, NULL,
413 NULL, NULL, NULL, NULL,
414 NULL, NULL, NULL, NULL,
415 NULL, NULL, NULL, NULL,
416 NULL, NULL, NULL, NULL,
417 },
418 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
419 },
420 [FEAT_HYPERV_EBX] = {
421 .feat_names = {
422 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
423 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
424 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
425 NULL /* hv_create_port */, NULL /* hv_connect_port */,
426 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
427 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
428 NULL, NULL,
429 NULL, NULL, NULL, NULL,
430 NULL, NULL, NULL, NULL,
431 NULL, NULL, NULL, NULL,
432 NULL, NULL, NULL, NULL,
433 },
434 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
435 },
436 [FEAT_HYPERV_EDX] = {
437 .feat_names = {
438 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
439 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
440 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
441 NULL, NULL,
442 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
443 NULL, NULL, NULL, NULL,
444 NULL, NULL, NULL, NULL,
445 NULL, NULL, NULL, NULL,
446 NULL, NULL, NULL, NULL,
447 NULL, NULL, NULL, NULL,
448 },
449 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
450 },
451 [FEAT_SVM] = {
452 .feat_names = {
453 "npt", "lbrv", "svm-lock", "nrip-save",
454 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
455 NULL, NULL, "pause-filter", NULL,
456 "pfthreshold", NULL, NULL, NULL,
457 NULL, NULL, NULL, NULL,
458 NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL,
461 },
462 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
463 .tcg_features = TCG_SVM_FEATURES,
464 },
465 [FEAT_7_0_EBX] = {
466 .feat_names = {
467 "fsgsbase", "tsc-adjust", NULL, "bmi1",
468 "hle", "avx2", NULL, "smep",
469 "bmi2", "erms", "invpcid", "rtm",
470 NULL, NULL, "mpx", NULL,
471 "avx512f", "avx512dq", "rdseed", "adx",
472 "smap", "avx512ifma", "pcommit", "clflushopt",
473 "clwb", "intel-pt", "avx512pf", "avx512er",
474 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
475 },
476 .cpuid_eax = 7,
477 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
478 .cpuid_reg = R_EBX,
479 .tcg_features = TCG_7_0_EBX_FEATURES,
480 },
481 [FEAT_7_0_ECX] = {
482 .feat_names = {
483 NULL, "avx512vbmi", "umip", "pku",
484 "ospke", NULL, "avx512vbmi2", NULL,
485 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
486 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
487 "la57", NULL, NULL, NULL,
488 NULL, NULL, "rdpid", NULL,
489 NULL, NULL, NULL, NULL,
490 NULL, NULL, NULL, NULL,
491 },
492 .cpuid_eax = 7,
493 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
494 .cpuid_reg = R_ECX,
495 .tcg_features = TCG_7_0_ECX_FEATURES,
496 },
497 [FEAT_7_0_EDX] = {
498 .feat_names = {
499 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
500 NULL, NULL, NULL, NULL,
501 NULL, NULL, NULL, NULL,
502 NULL, NULL, NULL, NULL,
503 NULL, NULL, NULL, NULL,
504 NULL, NULL, NULL, NULL,
505 NULL, NULL, "spec-ctrl", NULL,
506 NULL, NULL, NULL, NULL,
507 },
508 .cpuid_eax = 7,
509 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
510 .cpuid_reg = R_EDX,
511 .tcg_features = TCG_7_0_EDX_FEATURES,
512 },
513 [FEAT_8000_0007_EDX] = {
514 .feat_names = {
515 NULL, NULL, NULL, NULL,
516 NULL, NULL, NULL, NULL,
517 "invtsc", NULL, NULL, NULL,
518 NULL, NULL, NULL, NULL,
519 NULL, NULL, NULL, NULL,
520 NULL, NULL, NULL, NULL,
521 NULL, NULL, NULL, NULL,
522 NULL, NULL, NULL, NULL,
523 },
524 .cpuid_eax = 0x80000007,
525 .cpuid_reg = R_EDX,
526 .tcg_features = TCG_APM_FEATURES,
527 .unmigratable_flags = CPUID_APM_INVTSC,
528 },
529 [FEAT_8000_0008_EBX] = {
530 .feat_names = {
531 NULL, NULL, NULL, NULL,
532 NULL, NULL, NULL, NULL,
533 NULL, NULL, NULL, NULL,
534 "ibpb", NULL, NULL, NULL,
535 NULL, NULL, NULL, NULL,
536 NULL, NULL, NULL, NULL,
537 NULL, NULL, NULL, NULL,
538 NULL, NULL, NULL, NULL,
539 },
540 .cpuid_eax = 0x80000008,
541 .cpuid_reg = R_EBX,
542 .tcg_features = 0,
543 .unmigratable_flags = 0,
544 },
545 [FEAT_XSAVE] = {
546 .feat_names = {
547 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
548 NULL, NULL, NULL, NULL,
549 NULL, NULL, NULL, NULL,
550 NULL, NULL, NULL, NULL,
551 NULL, NULL, NULL, NULL,
552 NULL, NULL, NULL, NULL,
553 NULL, NULL, NULL, NULL,
554 NULL, NULL, NULL, NULL,
555 },
556 .cpuid_eax = 0xd,
557 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
558 .cpuid_reg = R_EAX,
559 .tcg_features = TCG_XSAVE_FEATURES,
560 },
561 [FEAT_6_EAX] = {
562 .feat_names = {
563 NULL, NULL, "arat", NULL,
564 NULL, NULL, NULL, NULL,
565 NULL, NULL, NULL, NULL,
566 NULL, NULL, NULL, NULL,
567 NULL, NULL, NULL, NULL,
568 NULL, NULL, NULL, NULL,
569 NULL, NULL, NULL, NULL,
570 NULL, NULL, NULL, NULL,
571 },
572 .cpuid_eax = 6, .cpuid_reg = R_EAX,
573 .tcg_features = TCG_6_EAX_FEATURES,
574 },
575 [FEAT_XSAVE_COMP_LO] = {
576 .cpuid_eax = 0xD,
577 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
578 .cpuid_reg = R_EAX,
579 .tcg_features = ~0U,
580 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
581 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
582 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
583 XSTATE_PKRU_MASK,
584 },
585 [FEAT_XSAVE_COMP_HI] = {
586 .cpuid_eax = 0xD,
587 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
588 .cpuid_reg = R_EDX,
589 .tcg_features = ~0U,
590 },
591 };
592
593 typedef struct X86RegisterInfo32 {
594 /* Name of register */
595 const char *name;
596 /* QAPI enum value register */
597 X86CPURegister32 qapi_enum;
598 } X86RegisterInfo32;
599
600 #define REGISTER(reg) \
601 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
602 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
603 REGISTER(EAX),
604 REGISTER(ECX),
605 REGISTER(EDX),
606 REGISTER(EBX),
607 REGISTER(ESP),
608 REGISTER(EBP),
609 REGISTER(ESI),
610 REGISTER(EDI),
611 };
612 #undef REGISTER
613
614 typedef struct ExtSaveArea {
615 uint32_t feature, bits;
616 uint32_t offset, size;
617 } ExtSaveArea;
618
619 static const ExtSaveArea x86_ext_save_areas[] = {
620 [XSTATE_FP_BIT] = {
621 /* x87 FP state component is always enabled if XSAVE is supported */
622 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
623 /* x87 state is in the legacy region of the XSAVE area */
624 .offset = 0,
625 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
626 },
627 [XSTATE_SSE_BIT] = {
628 /* SSE state component is always enabled if XSAVE is supported */
629 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
630 /* SSE state is in the legacy region of the XSAVE area */
631 .offset = 0,
632 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
633 },
634 [XSTATE_YMM_BIT] =
635 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
636 .offset = offsetof(X86XSaveArea, avx_state),
637 .size = sizeof(XSaveAVX) },
638 [XSTATE_BNDREGS_BIT] =
639 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
640 .offset = offsetof(X86XSaveArea, bndreg_state),
641 .size = sizeof(XSaveBNDREG) },
642 [XSTATE_BNDCSR_BIT] =
643 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
644 .offset = offsetof(X86XSaveArea, bndcsr_state),
645 .size = sizeof(XSaveBNDCSR) },
646 [XSTATE_OPMASK_BIT] =
647 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
648 .offset = offsetof(X86XSaveArea, opmask_state),
649 .size = sizeof(XSaveOpmask) },
650 [XSTATE_ZMM_Hi256_BIT] =
651 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
652 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
653 .size = sizeof(XSaveZMM_Hi256) },
654 [XSTATE_Hi16_ZMM_BIT] =
655 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
656 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
657 .size = sizeof(XSaveHi16_ZMM) },
658 [XSTATE_PKRU_BIT] =
659 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
660 .offset = offsetof(X86XSaveArea, pkru_state),
661 .size = sizeof(XSavePKRU) },
662 };
663
664 static uint32_t xsave_area_size(uint64_t mask)
665 {
666 int i;
667 uint64_t ret = 0;
668
669 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
670 const ExtSaveArea *esa = &x86_ext_save_areas[i];
671 if ((mask >> i) & 1) {
672 ret = MAX(ret, esa->offset + esa->size);
673 }
674 }
675 return ret;
676 }
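/*
 * I.e. the result is the end offset of the highest-addressed component
 * enabled in the mask.  With only XSTATE_FP_MASK | XSTATE_SSE_MASK set this
 * is sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), since both of
 * those components live in the legacy region at offset 0.
 */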
677
678 static inline bool accel_uses_host_cpuid(void)
679 {
680 return kvm_enabled() || hvf_enabled();
681 }
682
683 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
684 {
685 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
686 cpu->env.features[FEAT_XSAVE_COMP_LO];
687 }
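/*
 * FEAT_XSAVE_COMP_LO/HI are the two 32-bit halves read from
 * CPUID.(EAX=0xD,ECX=0):EAX and :EDX (see feature_word_info above), so this
 * returns the supported XSAVE components as a single 64-bit bitmap.
 */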
688
689 const char *get_register_name_32(unsigned int reg)
690 {
691 if (reg >= CPU_NB_REGS32) {
692 return NULL;
693 }
694 return x86_reg_info_32[reg].name;
695 }
696
697 /*
698 * Returns the set of feature flags that are supported and migratable by
699 * QEMU, for a given FeatureWord.
700 */
701 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
702 {
703 FeatureWordInfo *wi = &feature_word_info[w];
704 uint32_t r = 0;
705 int i;
706
707 for (i = 0; i < 32; i++) {
708 uint32_t f = 1U << i;
709
710 /* If the feature name is known, it is implicitly considered migratable,
711 * unless it is explicitly set in unmigratable_flags */
712 if ((wi->migratable_flags & f) ||
713 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
714 r |= f;
715 }
716 }
717 return r;
718 }
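/*
 * For example, "invtsc" in FEAT_8000_0007_EDX has a name but is listed in
 * that word's unmigratable_flags (CPUID_APM_INVTSC), so it is left out of
 * the result, while the nameless bits of FEAT_XSAVE_COMP_LO are included
 * only because they appear in migratable_flags.
 */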
719
720 void host_cpuid(uint32_t function, uint32_t count,
721 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
722 {
723 uint32_t vec[4];
724
725 #ifdef __x86_64__
726 asm volatile("cpuid"
727 : "=a"(vec[0]), "=b"(vec[1]),
728 "=c"(vec[2]), "=d"(vec[3])
729 : "0"(function), "c"(count) : "cc");
730 #elif defined(__i386__)
731 asm volatile("pusha \n\t"
732 "cpuid \n\t"
733 "mov %%eax, 0(%2) \n\t"
734 "mov %%ebx, 4(%2) \n\t"
735 "mov %%ecx, 8(%2) \n\t"
736 "mov %%edx, 12(%2) \n\t"
737 "popa"
738 : : "a"(function), "c"(count), "S"(vec)
739 : "memory", "cc");
740 #else
741 abort();
742 #endif
743
744 if (eax)
745 *eax = vec[0];
746 if (ebx)
747 *ebx = vec[1];
748 if (ecx)
749 *ecx = vec[2];
750 if (edx)
751 *edx = vec[3];
752 }
753
754 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
755 {
756 uint32_t eax, ebx, ecx, edx;
757
758 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
759 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
760
761 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
762 if (family) {
763 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
764 }
765 if (model) {
766 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
767 }
768 if (stepping) {
769 *stepping = eax & 0x0F;
770 }
771 }
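/*
 * Example of the decoding above, using a made-up leaf 1 EAX of 0x000306c3:
 * family   = ((0x000306c3 >> 8) & 0xf) + ((0x000306c3 >> 20) & 0xff) = 6
 * model    = ((0x000306c3 >> 4) & 0xf) | ((0x000306c3 & 0xf0000) >> 12)
 *          = 0x0c | 0x30 = 60
 * stepping = 0x000306c3 & 0xf = 3
 */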
772
773 /* CPU class name definitions: */
774
775 /* Return type name for a given CPU model name
776 * Caller is responsible for freeing the returned string.
777 */
778 static char *x86_cpu_type_name(const char *model_name)
779 {
780 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
781 }
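/*
 * E.g. x86_cpu_type_name("Haswell") is expected to yield something like
 * "Haswell-x86_64-cpu" on a 64-bit target (assuming the usual
 * X86_CPU_TYPE_NAME suffix of "-" TYPE_X86_CPU).
 */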
782
783 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
784 {
785 ObjectClass *oc;
786 char *typename = x86_cpu_type_name(cpu_model);
787 oc = object_class_by_name(typename);
788 g_free(typename);
789 return oc;
790 }
791
792 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
793 {
794 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
795 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
796 return g_strndup(class_name,
797 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
798 }
799
800 struct X86CPUDefinition {
801 const char *name;
802 uint32_t level;
803 uint32_t xlevel;
804 /* vendor is a zero-terminated, 12-character ASCII string */
805 char vendor[CPUID_VENDOR_SZ + 1];
806 int family;
807 int model;
808 int stepping;
809 FeatureWordArray features;
810 const char *model_id;
811 };
812
813 static X86CPUDefinition builtin_x86_defs[] = {
814 {
815 .name = "qemu64",
816 .level = 0xd,
817 .vendor = CPUID_VENDOR_AMD,
818 .family = 6,
819 .model = 6,
820 .stepping = 3,
821 .features[FEAT_1_EDX] =
822 PPRO_FEATURES |
823 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
824 CPUID_PSE36,
825 .features[FEAT_1_ECX] =
826 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
827 .features[FEAT_8000_0001_EDX] =
828 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
829 .features[FEAT_8000_0001_ECX] =
830 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
831 .xlevel = 0x8000000A,
832 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
833 },
834 {
835 .name = "phenom",
836 .level = 5,
837 .vendor = CPUID_VENDOR_AMD,
838 .family = 16,
839 .model = 2,
840 .stepping = 3,
841 /* Missing: CPUID_HT */
842 .features[FEAT_1_EDX] =
843 PPRO_FEATURES |
844 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
845 CPUID_PSE36 | CPUID_VME,
846 .features[FEAT_1_ECX] =
847 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
848 CPUID_EXT_POPCNT,
849 .features[FEAT_8000_0001_EDX] =
850 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
851 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
852 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
853 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
854 CPUID_EXT3_CR8LEG,
855 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
856 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
857 .features[FEAT_8000_0001_ECX] =
858 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
859 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
860 /* Missing: CPUID_SVM_LBRV */
861 .features[FEAT_SVM] =
862 CPUID_SVM_NPT,
863 .xlevel = 0x8000001A,
864 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
865 },
866 {
867 .name = "core2duo",
868 .level = 10,
869 .vendor = CPUID_VENDOR_INTEL,
870 .family = 6,
871 .model = 15,
872 .stepping = 11,
873 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
874 .features[FEAT_1_EDX] =
875 PPRO_FEATURES |
876 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
877 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
878 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
879 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
880 .features[FEAT_1_ECX] =
881 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
882 CPUID_EXT_CX16,
883 .features[FEAT_8000_0001_EDX] =
884 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
885 .features[FEAT_8000_0001_ECX] =
886 CPUID_EXT3_LAHF_LM,
887 .xlevel = 0x80000008,
888 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
889 },
890 {
891 .name = "kvm64",
892 .level = 0xd,
893 .vendor = CPUID_VENDOR_INTEL,
894 .family = 15,
895 .model = 6,
896 .stepping = 1,
897 /* Missing: CPUID_HT */
898 .features[FEAT_1_EDX] =
899 PPRO_FEATURES | CPUID_VME |
900 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
901 CPUID_PSE36,
902 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
903 .features[FEAT_1_ECX] =
904 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
905 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
906 .features[FEAT_8000_0001_EDX] =
907 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
908 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
909 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
910 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
911 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
912 .features[FEAT_8000_0001_ECX] =
913 0,
914 .xlevel = 0x80000008,
915 .model_id = "Common KVM processor"
916 },
917 {
918 .name = "qemu32",
919 .level = 4,
920 .vendor = CPUID_VENDOR_INTEL,
921 .family = 6,
922 .model = 6,
923 .stepping = 3,
924 .features[FEAT_1_EDX] =
925 PPRO_FEATURES,
926 .features[FEAT_1_ECX] =
927 CPUID_EXT_SSE3,
928 .xlevel = 0x80000004,
929 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
930 },
931 {
932 .name = "kvm32",
933 .level = 5,
934 .vendor = CPUID_VENDOR_INTEL,
935 .family = 15,
936 .model = 6,
937 .stepping = 1,
938 .features[FEAT_1_EDX] =
939 PPRO_FEATURES | CPUID_VME |
940 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
941 .features[FEAT_1_ECX] =
942 CPUID_EXT_SSE3,
943 .features[FEAT_8000_0001_ECX] =
944 0,
945 .xlevel = 0x80000008,
946 .model_id = "Common 32-bit KVM processor"
947 },
948 {
949 .name = "coreduo",
950 .level = 10,
951 .vendor = CPUID_VENDOR_INTEL,
952 .family = 6,
953 .model = 14,
954 .stepping = 8,
955 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
956 .features[FEAT_1_EDX] =
957 PPRO_FEATURES | CPUID_VME |
958 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
959 CPUID_SS,
960 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
961 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
962 .features[FEAT_1_ECX] =
963 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
964 .features[FEAT_8000_0001_EDX] =
965 CPUID_EXT2_NX,
966 .xlevel = 0x80000008,
967 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
968 },
969 {
970 .name = "486",
971 .level = 1,
972 .vendor = CPUID_VENDOR_INTEL,
973 .family = 4,
974 .model = 8,
975 .stepping = 0,
976 .features[FEAT_1_EDX] =
977 I486_FEATURES,
978 .xlevel = 0,
979 .model_id = "",
980 },
981 {
982 .name = "pentium",
983 .level = 1,
984 .vendor = CPUID_VENDOR_INTEL,
985 .family = 5,
986 .model = 4,
987 .stepping = 3,
988 .features[FEAT_1_EDX] =
989 PENTIUM_FEATURES,
990 .xlevel = 0,
991 .model_id = "",
992 },
993 {
994 .name = "pentium2",
995 .level = 2,
996 .vendor = CPUID_VENDOR_INTEL,
997 .family = 6,
998 .model = 5,
999 .stepping = 2,
1000 .features[FEAT_1_EDX] =
1001 PENTIUM2_FEATURES,
1002 .xlevel = 0,
1003 .model_id = "",
1004 },
1005 {
1006 .name = "pentium3",
1007 .level = 3,
1008 .vendor = CPUID_VENDOR_INTEL,
1009 .family = 6,
1010 .model = 7,
1011 .stepping = 3,
1012 .features[FEAT_1_EDX] =
1013 PENTIUM3_FEATURES,
1014 .xlevel = 0,
1015 .model_id = "",
1016 },
1017 {
1018 .name = "athlon",
1019 .level = 2,
1020 .vendor = CPUID_VENDOR_AMD,
1021 .family = 6,
1022 .model = 2,
1023 .stepping = 3,
1024 .features[FEAT_1_EDX] =
1025 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1026 CPUID_MCA,
1027 .features[FEAT_8000_0001_EDX] =
1028 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1029 .xlevel = 0x80000008,
1030 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1031 },
1032 {
1033 .name = "n270",
1034 .level = 10,
1035 .vendor = CPUID_VENDOR_INTEL,
1036 .family = 6,
1037 .model = 28,
1038 .stepping = 2,
1039 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1040 .features[FEAT_1_EDX] =
1041 PPRO_FEATURES |
1042 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1043 CPUID_ACPI | CPUID_SS,
1044 /* Some CPUs lack CPUID_SEP */
1045 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1046 * CPUID_EXT_XTPR */
1047 .features[FEAT_1_ECX] =
1048 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1049 CPUID_EXT_MOVBE,
1050 .features[FEAT_8000_0001_EDX] =
1051 CPUID_EXT2_NX,
1052 .features[FEAT_8000_0001_ECX] =
1053 CPUID_EXT3_LAHF_LM,
1054 .xlevel = 0x80000008,
1055 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1056 },
1057 {
1058 .name = "Conroe",
1059 .level = 10,
1060 .vendor = CPUID_VENDOR_INTEL,
1061 .family = 6,
1062 .model = 15,
1063 .stepping = 3,
1064 .features[FEAT_1_EDX] =
1065 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1066 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1067 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1068 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1069 CPUID_DE | CPUID_FP87,
1070 .features[FEAT_1_ECX] =
1071 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1072 .features[FEAT_8000_0001_EDX] =
1073 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1074 .features[FEAT_8000_0001_ECX] =
1075 CPUID_EXT3_LAHF_LM,
1076 .xlevel = 0x80000008,
1077 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1078 },
1079 {
1080 .name = "Penryn",
1081 .level = 10,
1082 .vendor = CPUID_VENDOR_INTEL,
1083 .family = 6,
1084 .model = 23,
1085 .stepping = 3,
1086 .features[FEAT_1_EDX] =
1087 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1088 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1089 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1090 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1091 CPUID_DE | CPUID_FP87,
1092 .features[FEAT_1_ECX] =
1093 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1094 CPUID_EXT_SSE3,
1095 .features[FEAT_8000_0001_EDX] =
1096 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1097 .features[FEAT_8000_0001_ECX] =
1098 CPUID_EXT3_LAHF_LM,
1099 .xlevel = 0x80000008,
1100 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1101 },
1102 {
1103 .name = "Nehalem",
1104 .level = 11,
1105 .vendor = CPUID_VENDOR_INTEL,
1106 .family = 6,
1107 .model = 26,
1108 .stepping = 3,
1109 .features[FEAT_1_EDX] =
1110 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1111 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1112 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1113 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1114 CPUID_DE | CPUID_FP87,
1115 .features[FEAT_1_ECX] =
1116 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1117 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1118 .features[FEAT_8000_0001_EDX] =
1119 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1120 .features[FEAT_8000_0001_ECX] =
1121 CPUID_EXT3_LAHF_LM,
1122 .xlevel = 0x80000008,
1123 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1124 },
1125 {
1126 .name = "Nehalem-IBRS",
1127 .level = 11,
1128 .vendor = CPUID_VENDOR_INTEL,
1129 .family = 6,
1130 .model = 26,
1131 .stepping = 3,
1132 .features[FEAT_1_EDX] =
1133 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1134 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1135 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1136 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1137 CPUID_DE | CPUID_FP87,
1138 .features[FEAT_1_ECX] =
1139 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1140 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1141 .features[FEAT_7_0_EDX] =
1142 CPUID_7_0_EDX_SPEC_CTRL,
1143 .features[FEAT_8000_0001_EDX] =
1144 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1145 .features[FEAT_8000_0001_ECX] =
1146 CPUID_EXT3_LAHF_LM,
1147 .xlevel = 0x80000008,
1148 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1149 },
1150 {
1151 .name = "Westmere",
1152 .level = 11,
1153 .vendor = CPUID_VENDOR_INTEL,
1154 .family = 6,
1155 .model = 44,
1156 .stepping = 1,
1157 .features[FEAT_1_EDX] =
1158 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1159 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1160 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1161 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1162 CPUID_DE | CPUID_FP87,
1163 .features[FEAT_1_ECX] =
1164 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1165 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1166 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1167 .features[FEAT_8000_0001_EDX] =
1168 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1169 .features[FEAT_8000_0001_ECX] =
1170 CPUID_EXT3_LAHF_LM,
1171 .features[FEAT_6_EAX] =
1172 CPUID_6_EAX_ARAT,
1173 .xlevel = 0x80000008,
1174 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1175 },
1176 {
1177 .name = "Westmere-IBRS",
1178 .level = 11,
1179 .vendor = CPUID_VENDOR_INTEL,
1180 .family = 6,
1181 .model = 44,
1182 .stepping = 1,
1183 .features[FEAT_1_EDX] =
1184 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1185 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1186 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1187 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1188 CPUID_DE | CPUID_FP87,
1189 .features[FEAT_1_ECX] =
1190 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1191 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1192 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1193 .features[FEAT_8000_0001_EDX] =
1194 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1195 .features[FEAT_8000_0001_ECX] =
1196 CPUID_EXT3_LAHF_LM,
1197 .features[FEAT_7_0_EDX] =
1198 CPUID_7_0_EDX_SPEC_CTRL,
1199 .features[FEAT_6_EAX] =
1200 CPUID_6_EAX_ARAT,
1201 .xlevel = 0x80000008,
1202 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1203 },
1204 {
1205 .name = "SandyBridge",
1206 .level = 0xd,
1207 .vendor = CPUID_VENDOR_INTEL,
1208 .family = 6,
1209 .model = 42,
1210 .stepping = 1,
1211 .features[FEAT_1_EDX] =
1212 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1213 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1214 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1215 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1216 CPUID_DE | CPUID_FP87,
1217 .features[FEAT_1_ECX] =
1218 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1219 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1220 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1221 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1222 CPUID_EXT_SSE3,
1223 .features[FEAT_8000_0001_EDX] =
1224 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1225 CPUID_EXT2_SYSCALL,
1226 .features[FEAT_8000_0001_ECX] =
1227 CPUID_EXT3_LAHF_LM,
1228 .features[FEAT_XSAVE] =
1229 CPUID_XSAVE_XSAVEOPT,
1230 .features[FEAT_6_EAX] =
1231 CPUID_6_EAX_ARAT,
1232 .xlevel = 0x80000008,
1233 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1234 },
1235 {
1236 .name = "SandyBridge-IBRS",
1237 .level = 0xd,
1238 .vendor = CPUID_VENDOR_INTEL,
1239 .family = 6,
1240 .model = 42,
1241 .stepping = 1,
1242 .features[FEAT_1_EDX] =
1243 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1244 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1245 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1246 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1247 CPUID_DE | CPUID_FP87,
1248 .features[FEAT_1_ECX] =
1249 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1250 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1251 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1252 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1253 CPUID_EXT_SSE3,
1254 .features[FEAT_8000_0001_EDX] =
1255 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1256 CPUID_EXT2_SYSCALL,
1257 .features[FEAT_8000_0001_ECX] =
1258 CPUID_EXT3_LAHF_LM,
1259 .features[FEAT_7_0_EDX] =
1260 CPUID_7_0_EDX_SPEC_CTRL,
1261 .features[FEAT_XSAVE] =
1262 CPUID_XSAVE_XSAVEOPT,
1263 .features[FEAT_6_EAX] =
1264 CPUID_6_EAX_ARAT,
1265 .xlevel = 0x80000008,
1266 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1267 },
1268 {
1269 .name = "IvyBridge",
1270 .level = 0xd,
1271 .vendor = CPUID_VENDOR_INTEL,
1272 .family = 6,
1273 .model = 58,
1274 .stepping = 9,
1275 .features[FEAT_1_EDX] =
1276 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1277 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1278 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1279 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1280 CPUID_DE | CPUID_FP87,
1281 .features[FEAT_1_ECX] =
1282 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1283 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1284 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1285 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1286 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1287 .features[FEAT_7_0_EBX] =
1288 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1289 CPUID_7_0_EBX_ERMS,
1290 .features[FEAT_8000_0001_EDX] =
1291 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1292 CPUID_EXT2_SYSCALL,
1293 .features[FEAT_8000_0001_ECX] =
1294 CPUID_EXT3_LAHF_LM,
1295 .features[FEAT_XSAVE] =
1296 CPUID_XSAVE_XSAVEOPT,
1297 .features[FEAT_6_EAX] =
1298 CPUID_6_EAX_ARAT,
1299 .xlevel = 0x80000008,
1300 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1301 },
1302 {
1303 .name = "IvyBridge-IBRS",
1304 .level = 0xd,
1305 .vendor = CPUID_VENDOR_INTEL,
1306 .family = 6,
1307 .model = 58,
1308 .stepping = 9,
1309 .features[FEAT_1_EDX] =
1310 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1311 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1312 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1313 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1314 CPUID_DE | CPUID_FP87,
1315 .features[FEAT_1_ECX] =
1316 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1317 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1318 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1319 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1320 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1321 .features[FEAT_7_0_EBX] =
1322 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1323 CPUID_7_0_EBX_ERMS,
1324 .features[FEAT_8000_0001_EDX] =
1325 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1326 CPUID_EXT2_SYSCALL,
1327 .features[FEAT_8000_0001_ECX] =
1328 CPUID_EXT3_LAHF_LM,
1329 .features[FEAT_7_0_EDX] =
1330 CPUID_7_0_EDX_SPEC_CTRL,
1331 .features[FEAT_XSAVE] =
1332 CPUID_XSAVE_XSAVEOPT,
1333 .features[FEAT_6_EAX] =
1334 CPUID_6_EAX_ARAT,
1335 .xlevel = 0x80000008,
1336 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1337 },
1338 {
1339 .name = "Haswell-noTSX",
1340 .level = 0xd,
1341 .vendor = CPUID_VENDOR_INTEL,
1342 .family = 6,
1343 .model = 60,
1344 .stepping = 1,
1345 .features[FEAT_1_EDX] =
1346 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1347 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1348 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1349 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1350 CPUID_DE | CPUID_FP87,
1351 .features[FEAT_1_ECX] =
1352 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1353 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1354 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1355 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1356 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1357 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1358 .features[FEAT_8000_0001_EDX] =
1359 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1360 CPUID_EXT2_SYSCALL,
1361 .features[FEAT_8000_0001_ECX] =
1362 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1363 .features[FEAT_7_0_EBX] =
1364 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1365 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1366 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1367 .features[FEAT_XSAVE] =
1368 CPUID_XSAVE_XSAVEOPT,
1369 .features[FEAT_6_EAX] =
1370 CPUID_6_EAX_ARAT,
1371 .xlevel = 0x80000008,
1372 .model_id = "Intel Core Processor (Haswell, no TSX)",
1373 },
1374 {
1375 .name = "Haswell-noTSX-IBRS",
1376 .level = 0xd,
1377 .vendor = CPUID_VENDOR_INTEL,
1378 .family = 6,
1379 .model = 60,
1380 .stepping = 1,
1381 .features[FEAT_1_EDX] =
1382 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1383 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1384 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1385 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1386 CPUID_DE | CPUID_FP87,
1387 .features[FEAT_1_ECX] =
1388 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1389 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1390 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1391 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1392 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1393 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1394 .features[FEAT_8000_0001_EDX] =
1395 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1396 CPUID_EXT2_SYSCALL,
1397 .features[FEAT_8000_0001_ECX] =
1398 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1399 .features[FEAT_7_0_EDX] =
1400 CPUID_7_0_EDX_SPEC_CTRL,
1401 .features[FEAT_7_0_EBX] =
1402 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1403 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1404 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1405 .features[FEAT_XSAVE] =
1406 CPUID_XSAVE_XSAVEOPT,
1407 .features[FEAT_6_EAX] =
1408 CPUID_6_EAX_ARAT,
1409 .xlevel = 0x80000008,
1410 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1411 },
1412 {
1413 .name = "Haswell",
1414 .level = 0xd,
1415 .vendor = CPUID_VENDOR_INTEL,
1416 .family = 6,
1417 .model = 60,
1418 .stepping = 4,
1419 .features[FEAT_1_EDX] =
1420 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1421 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1422 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1423 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1424 CPUID_DE | CPUID_FP87,
1425 .features[FEAT_1_ECX] =
1426 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1427 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1428 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1429 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1430 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1431 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1432 .features[FEAT_8000_0001_EDX] =
1433 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1434 CPUID_EXT2_SYSCALL,
1435 .features[FEAT_8000_0001_ECX] =
1436 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1437 .features[FEAT_7_0_EBX] =
1438 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1439 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1440 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1441 CPUID_7_0_EBX_RTM,
1442 .features[FEAT_XSAVE] =
1443 CPUID_XSAVE_XSAVEOPT,
1444 .features[FEAT_6_EAX] =
1445 CPUID_6_EAX_ARAT,
1446 .xlevel = 0x80000008,
1447 .model_id = "Intel Core Processor (Haswell)",
1448 },
1449 {
1450 .name = "Haswell-IBRS",
1451 .level = 0xd,
1452 .vendor = CPUID_VENDOR_INTEL,
1453 .family = 6,
1454 .model = 60,
1455 .stepping = 4,
1456 .features[FEAT_1_EDX] =
1457 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1458 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1459 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1460 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1461 CPUID_DE | CPUID_FP87,
1462 .features[FEAT_1_ECX] =
1463 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1464 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1465 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1466 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1467 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1468 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1469 .features[FEAT_8000_0001_EDX] =
1470 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1471 CPUID_EXT2_SYSCALL,
1472 .features[FEAT_8000_0001_ECX] =
1473 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1474 .features[FEAT_7_0_EDX] =
1475 CPUID_7_0_EDX_SPEC_CTRL,
1476 .features[FEAT_7_0_EBX] =
1477 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1478 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1479 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1480 CPUID_7_0_EBX_RTM,
1481 .features[FEAT_XSAVE] =
1482 CPUID_XSAVE_XSAVEOPT,
1483 .features[FEAT_6_EAX] =
1484 CPUID_6_EAX_ARAT,
1485 .xlevel = 0x80000008,
1486 .model_id = "Intel Core Processor (Haswell, IBRS)",
1487 },
1488 {
1489 .name = "Broadwell-noTSX",
1490 .level = 0xd,
1491 .vendor = CPUID_VENDOR_INTEL,
1492 .family = 6,
1493 .model = 61,
1494 .stepping = 2,
1495 .features[FEAT_1_EDX] =
1496 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1497 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1498 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1499 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1500 CPUID_DE | CPUID_FP87,
1501 .features[FEAT_1_ECX] =
1502 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1503 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1504 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1505 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1506 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1507 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1508 .features[FEAT_8000_0001_EDX] =
1509 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1510 CPUID_EXT2_SYSCALL,
1511 .features[FEAT_8000_0001_ECX] =
1512 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1513 .features[FEAT_7_0_EBX] =
1514 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1515 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1516 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1517 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1518 CPUID_7_0_EBX_SMAP,
1519 .features[FEAT_XSAVE] =
1520 CPUID_XSAVE_XSAVEOPT,
1521 .features[FEAT_6_EAX] =
1522 CPUID_6_EAX_ARAT,
1523 .xlevel = 0x80000008,
1524 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1525 },
1526 {
1527 .name = "Broadwell-noTSX-IBRS",
1528 .level = 0xd,
1529 .vendor = CPUID_VENDOR_INTEL,
1530 .family = 6,
1531 .model = 61,
1532 .stepping = 2,
1533 .features[FEAT_1_EDX] =
1534 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1535 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1536 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1537 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1538 CPUID_DE | CPUID_FP87,
1539 .features[FEAT_1_ECX] =
1540 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1541 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1542 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1543 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1544 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1545 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1546 .features[FEAT_8000_0001_EDX] =
1547 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1548 CPUID_EXT2_SYSCALL,
1549 .features[FEAT_8000_0001_ECX] =
1550 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1551 .features[FEAT_7_0_EDX] =
1552 CPUID_7_0_EDX_SPEC_CTRL,
1553 .features[FEAT_7_0_EBX] =
1554 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1555 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1556 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1557 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1558 CPUID_7_0_EBX_SMAP,
1559 .features[FEAT_XSAVE] =
1560 CPUID_XSAVE_XSAVEOPT,
1561 .features[FEAT_6_EAX] =
1562 CPUID_6_EAX_ARAT,
1563 .xlevel = 0x80000008,
1564 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
1565 },
1566 {
1567 .name = "Broadwell",
1568 .level = 0xd,
1569 .vendor = CPUID_VENDOR_INTEL,
1570 .family = 6,
1571 .model = 61,
1572 .stepping = 2,
1573 .features[FEAT_1_EDX] =
1574 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1575 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1576 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1577 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1578 CPUID_DE | CPUID_FP87,
1579 .features[FEAT_1_ECX] =
1580 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1581 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1582 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1583 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1584 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1585 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1586 .features[FEAT_8000_0001_EDX] =
1587 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1588 CPUID_EXT2_SYSCALL,
1589 .features[FEAT_8000_0001_ECX] =
1590 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1591 .features[FEAT_7_0_EBX] =
1592 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1593 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1594 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1595 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1596 CPUID_7_0_EBX_SMAP,
1597 .features[FEAT_XSAVE] =
1598 CPUID_XSAVE_XSAVEOPT,
1599 .features[FEAT_6_EAX] =
1600 CPUID_6_EAX_ARAT,
1601 .xlevel = 0x80000008,
1602 .model_id = "Intel Core Processor (Broadwell)",
1603 },
1604 {
1605 .name = "Broadwell-IBRS",
1606 .level = 0xd,
1607 .vendor = CPUID_VENDOR_INTEL,
1608 .family = 6,
1609 .model = 61,
1610 .stepping = 2,
1611 .features[FEAT_1_EDX] =
1612 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1613 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1614 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1615 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1616 CPUID_DE | CPUID_FP87,
1617 .features[FEAT_1_ECX] =
1618 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1619 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1620 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1621 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1622 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1623 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1624 .features[FEAT_8000_0001_EDX] =
1625 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1626 CPUID_EXT2_SYSCALL,
1627 .features[FEAT_8000_0001_ECX] =
1628 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1629 .features[FEAT_7_0_EDX] =
1630 CPUID_7_0_EDX_SPEC_CTRL,
1631 .features[FEAT_7_0_EBX] =
1632 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1633 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1634 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1635 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1636 CPUID_7_0_EBX_SMAP,
1637 .features[FEAT_XSAVE] =
1638 CPUID_XSAVE_XSAVEOPT,
1639 .features[FEAT_6_EAX] =
1640 CPUID_6_EAX_ARAT,
1641 .xlevel = 0x80000008,
1642 .model_id = "Intel Core Processor (Broadwell, IBRS)",
1643 },
1644 {
1645 .name = "Skylake-Client",
1646 .level = 0xd,
1647 .vendor = CPUID_VENDOR_INTEL,
1648 .family = 6,
1649 .model = 94,
1650 .stepping = 3,
1651 .features[FEAT_1_EDX] =
1652 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1653 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1654 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1655 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1656 CPUID_DE | CPUID_FP87,
1657 .features[FEAT_1_ECX] =
1658 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1659 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1660 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1661 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1662 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1663 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1664 .features[FEAT_8000_0001_EDX] =
1665 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1666 CPUID_EXT2_SYSCALL,
1667 .features[FEAT_8000_0001_ECX] =
1668 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1669 .features[FEAT_7_0_EBX] =
1670 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1671 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1672 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1673 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1674 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1675 /* Missing: XSAVES (not supported by some Linux versions,
1676 * including v4.1 to v4.12).
1677 * KVM doesn't yet expose any XSAVES state save component,
1678 * and the only one defined in Skylake (processor tracing)
1679 * probably will block migration anyway.
1680 */
1681 .features[FEAT_XSAVE] =
1682 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1683 CPUID_XSAVE_XGETBV1,
1684 .features[FEAT_6_EAX] =
1685 CPUID_6_EAX_ARAT,
1686 .xlevel = 0x80000008,
1687 .model_id = "Intel Core Processor (Skylake)",
1688 },
1689 {
1690 .name = "Skylake-Client-IBRS",
1691 .level = 0xd,
1692 .vendor = CPUID_VENDOR_INTEL,
1693 .family = 6,
1694 .model = 94,
1695 .stepping = 3,
1696 .features[FEAT_1_EDX] =
1697 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1698 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1699 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1700 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1701 CPUID_DE | CPUID_FP87,
1702 .features[FEAT_1_ECX] =
1703 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1704 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1705 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1706 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1707 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1708 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1709 .features[FEAT_8000_0001_EDX] =
1710 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1711 CPUID_EXT2_SYSCALL,
1712 .features[FEAT_8000_0001_ECX] =
1713 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1714 .features[FEAT_7_0_EDX] =
1715 CPUID_7_0_EDX_SPEC_CTRL,
1716 .features[FEAT_7_0_EBX] =
1717 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1718 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1719 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1720 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1721 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1722 /* Missing: XSAVES (not supported by some Linux versions,
1723 * including v4.1 to v4.12).
1724 * KVM doesn't yet expose any XSAVES state save component,
1725 * and the only one defined in Skylake (processor tracing)
1726 * probably will block migration anyway.
1727 */
1728 .features[FEAT_XSAVE] =
1729 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1730 CPUID_XSAVE_XGETBV1,
1731 .features[FEAT_6_EAX] =
1732 CPUID_6_EAX_ARAT,
1733 .xlevel = 0x80000008,
1734 .model_id = "Intel Core Processor (Skylake, IBRS)",
1735 },
1736 {
1737 .name = "Skylake-Server",
1738 .level = 0xd,
1739 .vendor = CPUID_VENDOR_INTEL,
1740 .family = 6,
1741 .model = 85,
1742 .stepping = 4,
1743 .features[FEAT_1_EDX] =
1744 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1745 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1746 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1747 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1748 CPUID_DE | CPUID_FP87,
1749 .features[FEAT_1_ECX] =
1750 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1751 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1752 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1753 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1754 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1755 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1756 .features[FEAT_8000_0001_EDX] =
1757 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1758 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1759 .features[FEAT_8000_0001_ECX] =
1760 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1761 .features[FEAT_7_0_EBX] =
1762 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1763 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1764 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1765 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1766 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1767 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1768 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1769 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
1770 /* Missing: XSAVES (not supported by some Linux versions,
1771 * including v4.1 to v4.12).
1772 * KVM doesn't yet expose any XSAVES state save component,
1773 * and the only one defined in Skylake (processor tracing)
1774 * probably will block migration anyway.
1775 */
1776 .features[FEAT_XSAVE] =
1777 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1778 CPUID_XSAVE_XGETBV1,
1779 .features[FEAT_6_EAX] =
1780 CPUID_6_EAX_ARAT,
1781 .xlevel = 0x80000008,
1782 .model_id = "Intel Xeon Processor (Skylake)",
1783 },
1784 {
1785 .name = "Skylake-Server-IBRS",
1786 .level = 0xd,
1787 .vendor = CPUID_VENDOR_INTEL,
1788 .family = 6,
1789 .model = 85,
1790 .stepping = 4,
1791 .features[FEAT_1_EDX] =
1792 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1793 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1794 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1795 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1796 CPUID_DE | CPUID_FP87,
1797 .features[FEAT_1_ECX] =
1798 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1799 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1800 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1801 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1802 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1803 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1804 .features[FEAT_8000_0001_EDX] =
1805 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1806 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1807 .features[FEAT_8000_0001_ECX] =
1808 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1809 .features[FEAT_7_0_EDX] =
1810 CPUID_7_0_EDX_SPEC_CTRL,
1811 .features[FEAT_7_0_EBX] =
1812 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1813 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1814 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1815 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1816 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1817 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1818 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1819 CPUID_7_0_EBX_AVX512VL,
1820 /* Missing: XSAVES (not supported by some Linux versions,
1821 * including v4.1 to v4.12).
1822 * KVM doesn't yet expose any XSAVES state save component,
1823 * and the only one defined in Skylake (processor tracing)
1824 * probably will block migration anyway.
1825 */
1826 .features[FEAT_XSAVE] =
1827 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1828 CPUID_XSAVE_XGETBV1,
1829 .features[FEAT_6_EAX] =
1830 CPUID_6_EAX_ARAT,
1831 .xlevel = 0x80000008,
1832 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
1833 },
1834 {
1835 .name = "Opteron_G1",
1836 .level = 5,
1837 .vendor = CPUID_VENDOR_AMD,
1838 .family = 15,
1839 .model = 6,
1840 .stepping = 1,
1841 .features[FEAT_1_EDX] =
1842 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1843 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1844 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1845 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1846 CPUID_DE | CPUID_FP87,
1847 .features[FEAT_1_ECX] =
1848 CPUID_EXT_SSE3,
1849 .features[FEAT_8000_0001_EDX] =
1850 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1851 .xlevel = 0x80000008,
1852 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1853 },
1854 {
1855 .name = "Opteron_G2",
1856 .level = 5,
1857 .vendor = CPUID_VENDOR_AMD,
1858 .family = 15,
1859 .model = 6,
1860 .stepping = 1,
1861 .features[FEAT_1_EDX] =
1862 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1863 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1864 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1865 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1866 CPUID_DE | CPUID_FP87,
1867 .features[FEAT_1_ECX] =
1868 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1869 /* Missing: CPUID_EXT2_RDTSCP */
1870 .features[FEAT_8000_0001_EDX] =
1871 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1872 .features[FEAT_8000_0001_ECX] =
1873 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1874 .xlevel = 0x80000008,
1875 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1876 },
1877 {
1878 .name = "Opteron_G3",
1879 .level = 5,
1880 .vendor = CPUID_VENDOR_AMD,
1881 .family = 16,
1882 .model = 2,
1883 .stepping = 3,
1884 .features[FEAT_1_EDX] =
1885 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1886 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1887 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1888 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1889 CPUID_DE | CPUID_FP87,
1890 .features[FEAT_1_ECX] =
1891 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1892 CPUID_EXT_SSE3,
1893 /* Missing: CPUID_EXT2_RDTSCP */
1894 .features[FEAT_8000_0001_EDX] =
1895 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1896 .features[FEAT_8000_0001_ECX] =
1897 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1898 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1899 .xlevel = 0x80000008,
1900 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1901 },
1902 {
1903 .name = "Opteron_G4",
1904 .level = 0xd,
1905 .vendor = CPUID_VENDOR_AMD,
1906 .family = 21,
1907 .model = 1,
1908 .stepping = 2,
1909 .features[FEAT_1_EDX] =
1910 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1911 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1912 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1913 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1914 CPUID_DE | CPUID_FP87,
1915 .features[FEAT_1_ECX] =
1916 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1917 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1918 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1919 CPUID_EXT_SSE3,
1920 /* Missing: CPUID_EXT2_RDTSCP */
1921 .features[FEAT_8000_0001_EDX] =
1922 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1923 CPUID_EXT2_SYSCALL,
1924 .features[FEAT_8000_0001_ECX] =
1925 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1926 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1927 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1928 CPUID_EXT3_LAHF_LM,
1929 /* no xsaveopt! */
1930 .xlevel = 0x8000001A,
1931 .model_id = "AMD Opteron 62xx class CPU",
1932 },
1933 {
1934 .name = "Opteron_G5",
1935 .level = 0xd,
1936 .vendor = CPUID_VENDOR_AMD,
1937 .family = 21,
1938 .model = 2,
1939 .stepping = 0,
1940 .features[FEAT_1_EDX] =
1941 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1942 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1943 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1944 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1945 CPUID_DE | CPUID_FP87,
1946 .features[FEAT_1_ECX] =
1947 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1948 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1949 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1950 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1951 /* Missing: CPUID_EXT2_RDTSCP */
1952 .features[FEAT_8000_0001_EDX] =
1953 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1954 CPUID_EXT2_SYSCALL,
1955 .features[FEAT_8000_0001_ECX] =
1956 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1957 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1958 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1959 CPUID_EXT3_LAHF_LM,
1960 /* no xsaveopt! */
1961 .xlevel = 0x8000001A,
1962 .model_id = "AMD Opteron 63xx class CPU",
1963 },
1964 {
1965 .name = "EPYC",
1966 .level = 0xd,
1967 .vendor = CPUID_VENDOR_AMD,
1968 .family = 23,
1969 .model = 1,
1970 .stepping = 2,
1971 .features[FEAT_1_EDX] =
1972 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
1973 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
1974 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
1975 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
1976 CPUID_VME | CPUID_FP87,
1977 .features[FEAT_1_ECX] =
1978 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
1979 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
1980 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1981 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
1982 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1983 .features[FEAT_8000_0001_EDX] =
1984 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
1985 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
1986 CPUID_EXT2_SYSCALL,
1987 .features[FEAT_8000_0001_ECX] =
1988 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
1989 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
1990 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1991 .features[FEAT_7_0_EBX] =
1992 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
1993 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
1994 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
1995 CPUID_7_0_EBX_SHA_NI,
1996 /* Missing: XSAVES (not supported by some Linux versions,
1997 * including v4.1 to v4.12).
1998 * KVM doesn't yet expose any XSAVES state save component.
1999 */
2000 .features[FEAT_XSAVE] =
2001 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2002 CPUID_XSAVE_XGETBV1,
2003 .features[FEAT_6_EAX] =
2004 CPUID_6_EAX_ARAT,
2005 .xlevel = 0x8000000A,
2006 .model_id = "AMD EPYC Processor",
2007 },
2008 {
2009 .name = "EPYC-IBPB",
2010 .level = 0xd,
2011 .vendor = CPUID_VENDOR_AMD,
2012 .family = 23,
2013 .model = 1,
2014 .stepping = 2,
2015 .features[FEAT_1_EDX] =
2016 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2017 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2018 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2019 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2020 CPUID_VME | CPUID_FP87,
2021 .features[FEAT_1_ECX] =
2022 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2023 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2024 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2025 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2026 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2027 .features[FEAT_8000_0001_EDX] =
2028 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2029 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2030 CPUID_EXT2_SYSCALL,
2031 .features[FEAT_8000_0001_ECX] =
2032 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2033 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2034 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2035 .features[FEAT_8000_0008_EBX] =
2036 CPUID_8000_0008_EBX_IBPB,
2037 .features[FEAT_7_0_EBX] =
2038 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2039 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2040 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2041 CPUID_7_0_EBX_SHA_NI,
2042 /* Missing: XSAVES (not supported by some Linux versions,
2043 * including v4.1 to v4.12).
2044 * KVM doesn't yet expose any XSAVES state save component.
2045 */
2046 .features[FEAT_XSAVE] =
2047 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2048 CPUID_XSAVE_XGETBV1,
2049 .features[FEAT_6_EAX] =
2050 CPUID_6_EAX_ARAT,
2051 .xlevel = 0x8000000A,
2052 .model_id = "AMD EPYC Processor (with IBPB)",
2053 },
2054 };
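/*
 * Illustrative note (editor sketch, not part of the original source): each
 * definition above is turned into a QOM CPU type (see
 * x86_register_cpudef_type() further below), so any .name listed here can
 * typically be selected on the command line and combined with the
 * feature-string syntax parsed by x86_cpu_parse_featurestr(), e.g.:
 *
 *     -cpu EPYC-IBPB
 *     -cpu Skylake-Client,-hle,-rtm
 *
 * The exact flag spellings accepted depend on the feature-name tables, so
 * treat these command lines as examples only.
 */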
2055
2056 typedef struct PropValue {
2057 const char *prop, *value;
2058 } PropValue;
2059
2060 /* KVM-specific features that are automatically added/removed
2061 * from all CPU models when KVM is enabled.
2062 */
2063 static PropValue kvm_default_props[] = {
2064 { "kvmclock", "on" },
2065 { "kvm-nopiodelay", "on" },
2066 { "kvm-asyncpf", "on" },
2067 { "kvm-steal-time", "on" },
2068 { "kvm-pv-eoi", "on" },
2069 { "kvmclock-stable-bit", "on" },
2070 { "x2apic", "on" },
2071 { "acpi", "off" },
2072 { "monitor", "off" },
2073 { "svm", "off" },
2074 { NULL, NULL },
2075 };
2076
2077 /* TCG-specific defaults that override all CPU models when using TCG
2078 */
2079 static PropValue tcg_default_props[] = {
2080 { "vme", "off" },
2081 { NULL, NULL },
2082 };
2083
2084
2085 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2086 {
2087 PropValue *pv;
2088 for (pv = kvm_default_props; pv->prop; pv++) {
2089 if (!strcmp(pv->prop, prop)) {
2090 pv->value = value;
2091 break;
2092 }
2093 }
2094
2095 /* It is valid to call this function only for properties that
2096 * are already present in the kvm_default_props table.
2097 */
2098 assert(pv->prop);
2099 }
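/*
 * Usage sketch (editorial illustration, not part of the original file): a
 * caller can flip one of the defaults listed in kvm_default_props[] before
 * the CPU model defaults are applied, the way x86_cpu_load_def() below
 * disables x2apic when there is no in-kernel irqchip:
 *
 *     if (!kvm_irqchip_in_kernel()) {
 *         x86_cpu_change_kvm_default("x2apic", "off");
 *     }
 *
 * Passing a property that is not already in kvm_default_props[] trips the
 * assert() above, so only the listed names are valid here.
 */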
2100
2101 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2102 bool migratable_only);
2103
2104 static bool lmce_supported(void)
2105 {
2106 uint64_t mce_cap = 0;
2107
2108 #ifdef CONFIG_KVM
2109 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2110 return false;
2111 }
2112 #endif
2113
2114 return !!(mce_cap & MCG_LMCE_P);
2115 }
2116
2117 #define CPUID_MODEL_ID_SZ 48
2118
2119 /**
2120 * cpu_x86_fill_model_id:
2121 * Get CPUID model ID string from host CPU.
2122 *
2123 * @str should have at least CPUID_MODEL_ID_SZ bytes
2124 *
2125 * The function does NOT add a null terminator to the string
2126 * automatically.
2127 */
2128 static int cpu_x86_fill_model_id(char *str)
2129 {
2130 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2131 int i;
2132
2133 for (i = 0; i < 3; i++) {
2134 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2135 memcpy(str + i * 16 + 0, &eax, 4);
2136 memcpy(str + i * 16 + 4, &ebx, 4);
2137 memcpy(str + i * 16 + 8, &ecx, 4);
2138 memcpy(str + i * 16 + 12, &edx, 4);
2139 }
2140 return 0;
2141 }
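/*
 * Layout sketch (editorial illustration): iteration i copies the four
 * registers of CPUID leaf 0x80000002 + i into str[16*i .. 16*i + 15] in
 * EAX, EBX, ECX, EDX order, producing the 48-byte brand string. A caller
 * that wants a C string must reserve and clear one extra byte, as
 * max_x86_cpu_initfn() below does:
 *
 *     char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
 *     cpu_x86_fill_model_id(model_id);
 */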
2142
2143 static Property max_x86_cpu_properties[] = {
2144 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2145 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2146 DEFINE_PROP_END_OF_LIST()
2147 };
2148
2149 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2150 {
2151 DeviceClass *dc = DEVICE_CLASS(oc);
2152 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2153
2154 xcc->ordering = 9;
2155
2156 xcc->model_description =
2157 "Enables all features supported by the accelerator in the current host";
2158
2159 dc->props = max_x86_cpu_properties;
2160 }
2161
2162 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2163
2164 static void max_x86_cpu_initfn(Object *obj)
2165 {
2166 X86CPU *cpu = X86_CPU(obj);
2167 CPUX86State *env = &cpu->env;
2168 KVMState *s = kvm_state;
2169
2170 /* We can't fill the features array here because we don't know yet if
2171 * "migratable" is true or false.
2172 */
2173 cpu->max_features = true;
2174
2175 if (accel_uses_host_cpuid()) {
2176 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2177 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2178 int family, model, stepping;
2179 X86CPUDefinition host_cpudef = { };
2180 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2181
2182 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2183 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2184
2185 host_vendor_fms(vendor, &family, &model, &stepping);
2186
2187 cpu_x86_fill_model_id(model_id);
2188
2189 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2190 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2191 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2192 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2193 &error_abort);
2194 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2195 &error_abort);
2196
2197 if (kvm_enabled()) {
2198 env->cpuid_min_level =
2199 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2200 env->cpuid_min_xlevel =
2201 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2202 env->cpuid_min_xlevel2 =
2203 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2204 } else {
2205 env->cpuid_min_level =
2206 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2207 env->cpuid_min_xlevel =
2208 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2209 env->cpuid_min_xlevel2 =
2210 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2211 }
2212
2213 if (lmce_supported()) {
2214 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2215 }
2216 } else {
2217 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2218 "vendor", &error_abort);
2219 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2220 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2221 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2222 object_property_set_str(OBJECT(cpu),
2223 "QEMU TCG CPU version " QEMU_HW_VERSION,
2224 "model-id", &error_abort);
2225 }
2226
2227 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2228 }
2229
2230 static const TypeInfo max_x86_cpu_type_info = {
2231 .name = X86_CPU_TYPE_NAME("max"),
2232 .parent = TYPE_X86_CPU,
2233 .instance_init = max_x86_cpu_initfn,
2234 .class_init = max_x86_cpu_class_init,
2235 };
2236
2237 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2238 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2239 {
2240 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2241
2242 xcc->host_cpuid_required = true;
2243 xcc->ordering = 8;
2244
2245 if (kvm_enabled()) {
2246 xcc->model_description =
2247 "KVM processor with all supported host features ";
2248 } else if (hvf_enabled()) {
2249 xcc->model_description =
2250 "HVF processor with all supported host features ";
2251 }
2252 }
2253
2254 static const TypeInfo host_x86_cpu_type_info = {
2255 .name = X86_CPU_TYPE_NAME("host"),
2256 .parent = X86_CPU_TYPE_NAME("max"),
2257 .class_init = host_x86_cpu_class_init,
2258 };
2259
2260 #endif
2261
2262 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2263 {
2264 FeatureWordInfo *f = &feature_word_info[w];
2265 int i;
2266
2267 for (i = 0; i < 32; ++i) {
2268 if ((1UL << i) & mask) {
2269 const char *reg = get_register_name_32(f->cpuid_reg);
2270 assert(reg);
2271 warn_report("%s doesn't support requested feature: "
2272 "CPUID.%02XH:%s%s%s [bit %d]",
2273 accel_uses_host_cpuid() ? "host" : "TCG",
2274 f->cpuid_eax, reg,
2275 f->feat_names[i] ? "." : "",
2276 f->feat_names[i] ? f->feat_names[i] : "", i);
2277 }
2278 }
2279 }
2280
2281 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2282 const char *name, void *opaque,
2283 Error **errp)
2284 {
2285 X86CPU *cpu = X86_CPU(obj);
2286 CPUX86State *env = &cpu->env;
2287 int64_t value;
2288
2289 value = (env->cpuid_version >> 8) & 0xf;
2290 if (value == 0xf) {
2291 value += (env->cpuid_version >> 20) & 0xff;
2292 }
2293 visit_type_int(v, name, &value, errp);
2294 }
2295
2296 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2297 const char *name, void *opaque,
2298 Error **errp)
2299 {
2300 X86CPU *cpu = X86_CPU(obj);
2301 CPUX86State *env = &cpu->env;
2302 const int64_t min = 0;
2303 const int64_t max = 0xff + 0xf;
2304 Error *local_err = NULL;
2305 int64_t value;
2306
2307 visit_type_int(v, name, &value, &local_err);
2308 if (local_err) {
2309 error_propagate(errp, local_err);
2310 return;
2311 }
2312 if (value < min || value > max) {
2313 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2314 name ? name : "null", value, min, max);
2315 return;
2316 }
2317
2318 env->cpuid_version &= ~0xff00f00;
2319 if (value > 0x0f) {
2320 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2321 } else {
2322 env->cpuid_version |= value << 8;
2323 }
2324 }
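/*
 * Worked example (editorial illustration): a family value that does not
 * fit the 4-bit base field, such as the .family = 23 used by the EPYC
 * definition above, is stored as base family 0xf plus extended family
 * 23 - 15 = 8 in bits 20..27:
 *
 *     env->cpuid_version |= 0xf00 | ((23 - 0x0f) << 20);    (0x00800f00)
 *
 * The getter above then returns 0xf + 8 = 23 again.
 */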
2325
2326 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2327 const char *name, void *opaque,
2328 Error **errp)
2329 {
2330 X86CPU *cpu = X86_CPU(obj);
2331 CPUX86State *env = &cpu->env;
2332 int64_t value;
2333
2334 value = (env->cpuid_version >> 4) & 0xf;
2335 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2336 visit_type_int(v, name, &value, errp);
2337 }
2338
2339 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2340 const char *name, void *opaque,
2341 Error **errp)
2342 {
2343 X86CPU *cpu = X86_CPU(obj);
2344 CPUX86State *env = &cpu->env;
2345 const int64_t min = 0;
2346 const int64_t max = 0xff;
2347 Error *local_err = NULL;
2348 int64_t value;
2349
2350 visit_type_int(v, name, &value, &local_err);
2351 if (local_err) {
2352 error_propagate(errp, local_err);
2353 return;
2354 }
2355 if (value < min || value > max) {
2356 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2357 name ? name : "null", value, min, max);
2358 return;
2359 }
2360
2361 env->cpuid_version &= ~0xf00f0;
2362 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2363 }
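/*
 * Worked example (editorial illustration): .model = 61 (0x3d), as used by
 * the Broadwell definitions above, is split into model 0xd in bits 4..7
 * and extended model 0x3 in bits 16..19:
 *
 *     env->cpuid_version |= ((0x3d & 0xf) << 4) | ((0x3d >> 4) << 16);
 *
 * which is 0x000300d0; the getter above reassembles 0x3d from it.
 */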
2364
2365 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2366 const char *name, void *opaque,
2367 Error **errp)
2368 {
2369 X86CPU *cpu = X86_CPU(obj);
2370 CPUX86State *env = &cpu->env;
2371 int64_t value;
2372
2373 value = env->cpuid_version & 0xf;
2374 visit_type_int(v, name, &value, errp);
2375 }
2376
2377 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2378 const char *name, void *opaque,
2379 Error **errp)
2380 {
2381 X86CPU *cpu = X86_CPU(obj);
2382 CPUX86State *env = &cpu->env;
2383 const int64_t min = 0;
2384 const int64_t max = 0xf;
2385 Error *local_err = NULL;
2386 int64_t value;
2387
2388 visit_type_int(v, name, &value, &local_err);
2389 if (local_err) {
2390 error_propagate(errp, local_err);
2391 return;
2392 }
2393 if (value < min || value > max) {
2394 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2395 name ? name : "null", value, min, max);
2396 return;
2397 }
2398
2399 env->cpuid_version &= ~0xf;
2400 env->cpuid_version |= value & 0xf;
2401 }
2402
2403 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2404 {
2405 X86CPU *cpu = X86_CPU(obj);
2406 CPUX86State *env = &cpu->env;
2407 char *value;
2408
2409 value = g_malloc(CPUID_VENDOR_SZ + 1);
2410 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2411 env->cpuid_vendor3);
2412 return value;
2413 }
2414
2415 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2416 Error **errp)
2417 {
2418 X86CPU *cpu = X86_CPU(obj);
2419 CPUX86State *env = &cpu->env;
2420 int i;
2421
2422 if (strlen(value) != CPUID_VENDOR_SZ) {
2423 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2424 return;
2425 }
2426
2427 env->cpuid_vendor1 = 0;
2428 env->cpuid_vendor2 = 0;
2429 env->cpuid_vendor3 = 0;
2430 for (i = 0; i < 4; i++) {
2431 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
2432 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2433 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
2434 }
2435 }
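/*
 * Packing sketch (editorial illustration): the twelve ASCII characters are
 * stored little-endian, four per register word, in the EBX/EDX/ECX order
 * returned by CPUID leaf 0. For the well-known string "GenuineIntel":
 *
 *     cpuid_vendor1 ("Genu") = 0x756e6547
 *     cpuid_vendor2 ("ineI") = 0x49656e69
 *     cpuid_vendor3 ("ntel") = 0x6c65746e
 */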
2436
2437 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2438 {
2439 X86CPU *cpu = X86_CPU(obj);
2440 CPUX86State *env = &cpu->env;
2441 char *value;
2442 int i;
2443
2444 value = g_malloc(48 + 1);
2445 for (i = 0; i < 48; i++) {
2446 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
2447 }
2448 value[48] = '\0';
2449 return value;
2450 }
2451
2452 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2453 Error **errp)
2454 {
2455 X86CPU *cpu = X86_CPU(obj);
2456 CPUX86State *env = &cpu->env;
2457 int c, len, i;
2458
2459 if (model_id == NULL) {
2460 model_id = "";
2461 }
2462 len = strlen(model_id);
2463 memset(env->cpuid_model, 0, 48);
2464 for (i = 0; i < 48; i++) {
2465 if (i >= len) {
2466 c = '\0';
2467 } else {
2468 c = (uint8_t)model_id[i];
2469 }
2470 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
2471 }
2472 }
2473
2474 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2475 void *opaque, Error **errp)
2476 {
2477 X86CPU *cpu = X86_CPU(obj);
2478 int64_t value;
2479
2480 value = cpu->env.tsc_khz * 1000;
2481 visit_type_int(v, name, &value, errp);
2482 }
2483
2484 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2485 void *opaque, Error **errp)
2486 {
2487 X86CPU *cpu = X86_CPU(obj);
2488 const int64_t min = 0;
2489 const int64_t max = INT64_MAX;
2490 Error *local_err = NULL;
2491 int64_t value;
2492
2493 visit_type_int(v, name, &value, &local_err);
2494 if (local_err) {
2495 error_propagate(errp, local_err);
2496 return;
2497 }
2498 if (value < min || value > max) {
2499 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2500 name ? name : "null", value, min, max);
2501 return;
2502 }
2503
2504 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2505 }
2506
2507 /* Generic getter for "feature-words" and "filtered-features" properties */
2508 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2509 const char *name, void *opaque,
2510 Error **errp)
2511 {
2512 uint32_t *array = (uint32_t *)opaque;
2513 FeatureWord w;
2514 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2515 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2516 X86CPUFeatureWordInfoList *list = NULL;
2517
2518 for (w = 0; w < FEATURE_WORDS; w++) {
2519 FeatureWordInfo *wi = &feature_word_info[w];
2520 X86CPUFeatureWordInfo *qwi = &word_infos[w];
2521 qwi->cpuid_input_eax = wi->cpuid_eax;
2522 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2523 qwi->cpuid_input_ecx = wi->cpuid_ecx;
2524 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2525 qwi->features = array[w];
2526
2527 /* List will be in reverse order, but order shouldn't matter */
2528 list_entries[w].next = list;
2529 list_entries[w].value = &word_infos[w];
2530 list = &list_entries[w];
2531 }
2532
2533 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
2534 }
2535
2536 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2537 void *opaque, Error **errp)
2538 {
2539 X86CPU *cpu = X86_CPU(obj);
2540 int64_t value = cpu->hyperv_spinlock_attempts;
2541
2542 visit_type_int(v, name, &value, errp);
2543 }
2544
2545 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2546 void *opaque, Error **errp)
2547 {
2548 const int64_t min = 0xFFF;
2549 const int64_t max = UINT_MAX;
2550 X86CPU *cpu = X86_CPU(obj);
2551 Error *err = NULL;
2552 int64_t value;
2553
2554 visit_type_int(v, name, &value, &err);
2555 if (err) {
2556 error_propagate(errp, err);
2557 return;
2558 }
2559
2560 if (value < min || value > max) {
2561 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2562 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2563 object_get_typename(obj), name ? name : "null",
2564 value, min, max);
2565 return;
2566 }
2567 cpu->hyperv_spinlock_attempts = value;
2568 }
2569
2570 static const PropertyInfo qdev_prop_spinlocks = {
2571 .name = "int",
2572 .get = x86_get_hv_spinlocks,
2573 .set = x86_set_hv_spinlocks,
2574 };
2575
2576 /* Convert all '_' in a feature string option name to '-', so the feature
2577 * name conforms to the QOM property naming rule, which uses '-' not '_'.
2578 */
2579 static inline void feat2prop(char *s)
2580 {
2581 while ((s = strchr(s, '_'))) {
2582 *s = '-';
2583 }
2584 }
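/*
 * Example (editorial illustration): a user-typed flag such as "lahf_lm" or
 * "tsc_deadline" is rewritten in place to "lahf-lm" / "tsc-deadline" before
 * being looked up as a QOM property; the exact property spellings come from
 * the feature-name tables, so these are examples only.
 */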
2585
2586 /* Return the feature property name for a feature flag bit */
2587 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2588 {
2589 /* XSAVE components are automatically enabled by other features,
2590 * so return the original feature name instead
2591 */
2592 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2593 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2594
2595 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2596 x86_ext_save_areas[comp].bits) {
2597 w = x86_ext_save_areas[comp].feature;
2598 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2599 }
2600 }
2601
2602 assert(bitnr < 32);
2603 assert(w < FEATURE_WORDS);
2604 return feature_word_info[w].feat_names[bitnr];
2605 }
2606
2607 /* Compatibility hack to maintain legacy +-feat semantic,
2608 * where +-feat overwrites any feature set by
2609 * feat=on|feat even if the latter is parsed after +-feat
2610 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2611 */
2612 static GList *plus_features, *minus_features;
2613
2614 static gint compare_string(gconstpointer a, gconstpointer b)
2615 {
2616 return g_strcmp0(a, b);
2617 }
2618
2619 /* Parse "+feature,-feature,feature=foo" CPU feature string
2620 */
2621 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2622 Error **errp)
2623 {
2624 char *featurestr; /* Single 'key=value" string being parsed */
2625 static bool cpu_globals_initialized;
2626 bool ambiguous = false;
2627
2628 if (cpu_globals_initialized) {
2629 return;
2630 }
2631 cpu_globals_initialized = true;
2632
2633 if (!features) {
2634 return;
2635 }
2636
2637 for (featurestr = strtok(features, ",");
2638 featurestr;
2639 featurestr = strtok(NULL, ",")) {
2640 const char *name;
2641 const char *val = NULL;
2642 char *eq = NULL;
2643 char num[32];
2644 GlobalProperty *prop;
2645
2646 /* Compatibility syntax: */
2647 if (featurestr[0] == '+') {
2648 plus_features = g_list_append(plus_features,
2649 g_strdup(featurestr + 1));
2650 continue;
2651 } else if (featurestr[0] == '-') {
2652 minus_features = g_list_append(minus_features,
2653 g_strdup(featurestr + 1));
2654 continue;
2655 }
2656
2657 eq = strchr(featurestr, '=');
2658 if (eq) {
2659 *eq++ = 0;
2660 val = eq;
2661 } else {
2662 val = "on";
2663 }
2664
2665 feat2prop(featurestr);
2666 name = featurestr;
2667
2668 if (g_list_find_custom(plus_features, name, compare_string)) {
2669 warn_report("Ambiguous CPU model string. "
2670 "Don't mix both \"+%s\" and \"%s=%s\"",
2671 name, name, val);
2672 ambiguous = true;
2673 }
2674 if (g_list_find_custom(minus_features, name, compare_string)) {
2675 warn_report("Ambiguous CPU model string. "
2676 "Don't mix both \"-%s\" and \"%s=%s\"",
2677 name, name, val);
2678 ambiguous = true;
2679 }
2680
2681 /* Special case: */
2682 if (!strcmp(name, "tsc-freq")) {
2683 int ret;
2684 uint64_t tsc_freq;
2685
2686 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
2687 if (ret < 0 || tsc_freq > INT64_MAX) {
2688 error_setg(errp, "bad numerical value %s", val);
2689 return;
2690 }
2691 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2692 val = num;
2693 name = "tsc-frequency";
2694 }
2695
2696 prop = g_new0(typeof(*prop), 1);
2697 prop->driver = typename;
2698 prop->property = g_strdup(name);
2699 prop->value = g_strdup(val);
2700 prop->errp = &error_fatal;
2701 qdev_prop_register_global(prop);
2702 }
2703
2704 if (ambiguous) {
2705 warn_report("Compatibility of ambiguous CPU model "
2706 "strings won't be kept on future QEMU versions");
2707 }
2708 }
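/*
 * Parsing sketch (editorial illustration; the command line and property
 * names are assumed typical):
 *
 *     -cpu Skylake-Client,+avx2,hle=off,tsc_freq=2.5G
 *
 * "+avx2" goes onto plus_features, "hle=off" is registered as a global
 * property, and "tsc_freq=2.5G" is first rewritten to "tsc-freq", then its
 * value is run through qemu_strtosz_metric() (2.5G -> 2500000000) and
 * finally registered under the "tsc-frequency" property, as handled above.
 */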
2709
2710 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2711 static int x86_cpu_filter_features(X86CPU *cpu);
2712
2713 /* Check for missing features that may prevent the CPU class from
2714 * running using the current machine and accelerator.
2715 */
2716 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2717 strList **missing_feats)
2718 {
2719 X86CPU *xc;
2720 FeatureWord w;
2721 Error *err = NULL;
2722 strList **next = missing_feats;
2723
2724 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
2725 strList *new = g_new0(strList, 1);
2726 new->value = g_strdup("kvm");
2727 *missing_feats = new;
2728 return;
2729 }
2730
2731 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2732
2733 x86_cpu_expand_features(xc, &err);
2734 if (err) {
2735 /* Errors at x86_cpu_expand_features should never happen,
2736 * but in case they do, just report the model as not
2737 * runnable at all using the "type" property.
2738 */
2739 strList *new = g_new0(strList, 1);
2740 new->value = g_strdup("type");
2741 *next = new;
2742 next = &new->next;
2743 }
2744
2745 x86_cpu_filter_features(xc);
2746
2747 for (w = 0; w < FEATURE_WORDS; w++) {
2748 uint32_t filtered = xc->filtered_features[w];
2749 int i;
2750 for (i = 0; i < 32; i++) {
2751 if (filtered & (1UL << i)) {
2752 strList *new = g_new0(strList, 1);
2753 new->value = g_strdup(x86_cpu_feature_name(w, i));
2754 *next = new;
2755 next = &new->next;
2756 }
2757 }
2758 }
2759
2760 object_unref(OBJECT(xc));
2761 }
2762
2763 /* Print all cpuid feature names in featureset
2764 */
2765 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2766 {
2767 int bit;
2768 bool first = true;
2769
2770 for (bit = 0; bit < 32; bit++) {
2771 if (featureset[bit]) {
2772 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2773 first = false;
2774 }
2775 }
2776 }
2777
2778 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2779 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2780 {
2781 ObjectClass *class_a = (ObjectClass *)a;
2782 ObjectClass *class_b = (ObjectClass *)b;
2783 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2784 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2785 const char *name_a, *name_b;
2786
2787 if (cc_a->ordering != cc_b->ordering) {
2788 return cc_a->ordering - cc_b->ordering;
2789 } else {
2790 name_a = object_class_get_name(class_a);
2791 name_b = object_class_get_name(class_b);
2792 return strcmp(name_a, name_b);
2793 }
2794 }
2795
2796 static GSList *get_sorted_cpu_model_list(void)
2797 {
2798 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2799 list = g_slist_sort(list, x86_cpu_list_compare);
2800 return list;
2801 }
2802
2803 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2804 {
2805 ObjectClass *oc = data;
2806 X86CPUClass *cc = X86_CPU_CLASS(oc);
2807 CPUListState *s = user_data;
2808 char *name = x86_cpu_class_get_model_name(cc);
2809 const char *desc = cc->model_description;
2810 if (!desc && cc->cpu_def) {
2811 desc = cc->cpu_def->model_id;
2812 }
2813
2814 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2815 name, desc);
2816 g_free(name);
2817 }
2818
2819 /* list available CPU models and flags */
2820 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2821 {
2822 int i;
2823 CPUListState s = {
2824 .file = f,
2825 .cpu_fprintf = cpu_fprintf,
2826 };
2827 GSList *list;
2828
2829 (*cpu_fprintf)(f, "Available CPUs:\n");
2830 list = get_sorted_cpu_model_list();
2831 g_slist_foreach(list, x86_cpu_list_entry, &s);
2832 g_slist_free(list);
2833
2834 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2835 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2836 FeatureWordInfo *fw = &feature_word_info[i];
2837
2838 (*cpu_fprintf)(f, " ");
2839 listflags(f, cpu_fprintf, fw->feat_names);
2840 (*cpu_fprintf)(f, "\n");
2841 }
2842 }
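/*
 * Output sketch (editorial illustration, assuming the usual wiring of this
 * function to "-cpu help"): each entry is printed with the
 * "x86 %16s %-48s" format above, roughly:
 *
 *     x86   Skylake-Client  Intel Core Processor (Skylake)
 *     x86             EPYC  AMD EPYC Processor
 *
 * followed by the per-word lists of recognized CPUID flag names.
 */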
2843
2844 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2845 {
2846 ObjectClass *oc = data;
2847 X86CPUClass *cc = X86_CPU_CLASS(oc);
2848 CpuDefinitionInfoList **cpu_list = user_data;
2849 CpuDefinitionInfoList *entry;
2850 CpuDefinitionInfo *info;
2851
2852 info = g_malloc0(sizeof(*info));
2853 info->name = x86_cpu_class_get_model_name(cc);
2854 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2855 info->has_unavailable_features = true;
2856 info->q_typename = g_strdup(object_class_get_name(oc));
2857 info->migration_safe = cc->migration_safe;
2858 info->has_migration_safe = true;
2859 info->q_static = cc->static_model;
2860
2861 entry = g_malloc0(sizeof(*entry));
2862 entry->value = info;
2863 entry->next = *cpu_list;
2864 *cpu_list = entry;
2865 }
2866
2867 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2868 {
2869 CpuDefinitionInfoList *cpu_list = NULL;
2870 GSList *list = get_sorted_cpu_model_list();
2871 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2872 g_slist_free(list);
2873 return cpu_list;
2874 }
2875
2876 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2877 bool migratable_only)
2878 {
2879 FeatureWordInfo *wi = &feature_word_info[w];
2880 uint32_t r;
2881
2882 if (kvm_enabled()) {
2883 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2884 wi->cpuid_ecx,
2885 wi->cpuid_reg);
2886 } else if (hvf_enabled()) {
2887 r = hvf_get_supported_cpuid(wi->cpuid_eax,
2888 wi->cpuid_ecx,
2889 wi->cpuid_reg);
2890 } else if (tcg_enabled()) {
2891 r = wi->tcg_features;
2892 } else {
2893 return ~0;
2894 }
2895 if (migratable_only) {
2896 r &= x86_cpu_get_migratable_flags(w);
2897 }
2898 return r;
2899 }
2900
2901 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2902 {
2903 FeatureWord w;
2904
2905 for (w = 0; w < FEATURE_WORDS; w++) {
2906 report_unavailable_features(w, cpu->filtered_features[w]);
2907 }
2908 }
2909
2910 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2911 {
2912 PropValue *pv;
2913 for (pv = props; pv->prop; pv++) {
2914 if (!pv->value) {
2915 continue;
2916 }
2917 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2918 &error_abort);
2919 }
2920 }
2921
2922 /* Load data from X86CPUDefinition into an X86CPU object
2923 */
2924 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2925 {
2926 CPUX86State *env = &cpu->env;
2927 const char *vendor;
2928 char host_vendor[CPUID_VENDOR_SZ + 1];
2929 FeatureWord w;
2930
2931 /* NOTE: any property set by this function should be returned by
2932 * x86_cpu_static_props(), so static expansion of
2933 * query-cpu-model-expansion is always complete.
2934 */
2935
2936 /* CPU models only set _minimum_ values for level/xlevel: */
2937 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
2938 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2939
2940 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2941 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2942 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2943 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2944 for (w = 0; w < FEATURE_WORDS; w++) {
2945 env->features[w] = def->features[w];
2946 }
2947
2948 /* Special cases not set in the X86CPUDefinition structs: */
2949 /* TODO: in-kernel irqchip for hvf */
2950 if (kvm_enabled()) {
2951 if (!kvm_irqchip_in_kernel()) {
2952 x86_cpu_change_kvm_default("x2apic", "off");
2953 }
2954
2955 x86_cpu_apply_props(cpu, kvm_default_props);
2956 } else if (tcg_enabled()) {
2957 x86_cpu_apply_props(cpu, tcg_default_props);
2958 }
2959
2960 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2961
2962 /* sysenter isn't supported in compatibility mode on AMD,
2963 * syscall isn't supported in compatibility mode on Intel.
2964 * Normally we advertise the actual CPU vendor, but you can
2965 * override this using the 'vendor' property if you want to use
2966 * KVM's sysenter/syscall emulation in compatibility mode and
2967 * when doing cross-vendor migration.
2968 */
2969 vendor = def->vendor;
2970 if (accel_uses_host_cpuid()) {
2971 uint32_t ebx = 0, ecx = 0, edx = 0;
2972 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2973 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2974 vendor = host_vendor;
2975 }
2976
2977 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2978
2979 }
2980
2981 /* Return a QDict containing keys for all properties that can be included
2982 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2983 * must be included in the dictionary.
2984 */
2985 static QDict *x86_cpu_static_props(void)
2986 {
2987 FeatureWord w;
2988 int i;
2989 static const char *props[] = {
2990 "min-level",
2991 "min-xlevel",
2992 "family",
2993 "model",
2994 "stepping",
2995 "model-id",
2996 "vendor",
2997 "lmce",
2998 NULL,
2999 };
3000 static QDict *d;
3001
3002 if (d) {
3003 return d;
3004 }
3005
3006 d = qdict_new();
3007 for (i = 0; props[i]; i++) {
3008 qdict_put_null(d, props[i]);
3009 }
3010
3011 for (w = 0; w < FEATURE_WORDS; w++) {
3012 FeatureWordInfo *fi = &feature_word_info[w];
3013 int bit;
3014 for (bit = 0; bit < 32; bit++) {
3015 if (!fi->feat_names[bit]) {
3016 continue;
3017 }
3018 qdict_put_null(d, fi->feat_names[bit]);
3019 }
3020 }
3021
3022 return d;
3023 }
3024
3025 /* Add an entry to @props dict, with the value for property. */
3026 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3027 {
3028 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3029 &error_abort);
3030
3031 qdict_put_obj(props, prop, value);
3032 }
3033
3034 /* Convert CPU model data from X86CPU object to a property dictionary
3035 * that can recreate exactly the same CPU model.
3036 */
3037 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3038 {
3039 QDict *sprops = x86_cpu_static_props();
3040 const QDictEntry *e;
3041
3042 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3043 const char *prop = qdict_entry_key(e);
3044 x86_cpu_expand_prop(cpu, props, prop);
3045 }
3046 }
3047
3048 /* Convert CPU model data from X86CPU object to a property dictionary
3049 * that can recreate exactly the same CPU model, including every
3050 * writeable QOM property.
3051 */
3052 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3053 {
3054 ObjectPropertyIterator iter;
3055 ObjectProperty *prop;
3056
3057 object_property_iter_init(&iter, OBJECT(cpu));
3058 while ((prop = object_property_iter_next(&iter))) {
3059 /* skip read-only or write-only properties */
3060 if (!prop->get || !prop->set) {
3061 continue;
3062 }
3063
3064 /* "hotplugged" is the only property that is configurable
3065 * on the command-line but will be set differently on CPUs
3066 * created using "-cpu ... -smp ..." and by CPUs created
3067 * on the fly by x86_cpu_from_model() for querying. Skip it.
3068 */
3069 if (!strcmp(prop->name, "hotplugged")) {
3070 continue;
3071 }
3072 x86_cpu_expand_prop(cpu, props, prop->name);
3073 }
3074 }
3075
3076 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3077 {
3078 const QDictEntry *prop;
3079 Error *err = NULL;
3080
3081 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3082 object_property_set_qobject(obj, qdict_entry_value(prop),
3083 qdict_entry_key(prop), &err);
3084 if (err) {
3085 break;
3086 }
3087 }
3088
3089 error_propagate(errp, err);
3090 }
3091
3092 /* Create X86CPU object according to model+props specification */
3093 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3094 {
3095 X86CPU *xc = NULL;
3096 X86CPUClass *xcc;
3097 Error *err = NULL;
3098
3099 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3100 if (xcc == NULL) {
3101 error_setg(&err, "CPU model '%s' not found", model);
3102 goto out;
3103 }
3104
3105 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3106 if (props) {
3107 object_apply_props(OBJECT(xc), props, &err);
3108 if (err) {
3109 goto out;
3110 }
3111 }
3112
3113 x86_cpu_expand_features(xc, &err);
3114 if (err) {
3115 goto out;
3116 }
3117
3118 out:
3119 if (err) {
3120 error_propagate(errp, err);
3121 object_unref(OBJECT(xc));
3122 xc = NULL;
3123 }
3124 return xc;
3125 }
3126
3127 CpuModelExpansionInfo *
3128 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3129 CpuModelInfo *model,
3130 Error **errp)
3131 {
3132 X86CPU *xc = NULL;
3133 Error *err = NULL;
3134 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3135 QDict *props = NULL;
3136 const char *base_name;
3137
3138 xc = x86_cpu_from_model(model->name,
3139 model->has_props ?
3140 qobject_to(QDict, model->props) :
3141 NULL, &err);
3142 if (err) {
3143 goto out;
3144 }
3145
3146 props = qdict_new();
3147
3148 switch (type) {
3149 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3150 /* Static expansion will be based on "base" only */
3151 base_name = "base";
3152 x86_cpu_to_dict(xc, props);
3153 break;
3154 case CPU_MODEL_EXPANSION_TYPE_FULL:
3155 /* As we don't return every single property, full expansion needs
3156 * to keep the original model name+props, and add extra
3157 * properties on top of that.
3158 */
3159 base_name = model->name;
3160 x86_cpu_to_dict_full(xc, props);
3161 break;
3162 default:
3163 error_setg(&err, "Unsupported expansion type");
3164 goto out;
3165 }
3166
3167 if (!props) {
3168 props = qdict_new();
3169 }
3170 x86_cpu_to_dict(xc, props);
3171
3172 ret->model = g_new0(CpuModelInfo, 1);
3173 ret->model->name = g_strdup(base_name);
3174 ret->model->props = QOBJECT(props);
3175 ret->model->has_props = true;
3176
3177 out:
3178 object_unref(OBJECT(xc));
3179 if (err) {
3180 error_propagate(errp, err);
3181 qapi_free_CpuModelExpansionInfo(ret);
3182 ret = NULL;
3183 }
3184 return ret;
3185 }
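/*
 * Interface sketch (editorial illustration, assuming the standard
 * query-cpu-model-expansion QMP schema that this handler backs):
 *
 *     { "execute": "query-cpu-model-expansion",
 *       "arguments": { "type": "static",
 *                      "model": { "name": "Skylake-Client" } } }
 *
 * A static expansion comes back under the "base" model name plus the
 * property dictionary built by x86_cpu_to_dict() above; a "full" expansion
 * keeps the requested name and adds every writable property.
 */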
3186
3187 static gchar *x86_gdb_arch_name(CPUState *cs)
3188 {
3189 #ifdef TARGET_X86_64
3190 return g_strdup("i386:x86-64");
3191 #else
3192 return g_strdup("i386");
3193 #endif
3194 }
3195
3196 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3197 {
3198 X86CPUDefinition *cpudef = data;
3199 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3200
3201 xcc->cpu_def = cpudef;
3202 xcc->migration_safe = true;
3203 }
3204
3205 static void x86_register_cpudef_type(X86CPUDefinition *def)
3206 {
3207 char *typename = x86_cpu_type_name(def->name);
3208 TypeInfo ti = {
3209 .name = typename,
3210 .parent = TYPE_X86_CPU,
3211 .class_init = x86_cpu_cpudef_class_init,
3212 .class_data = def,
3213 };
3214
3215 /* AMD aliases are handled at runtime based on CPUID vendor, so
3216 * they shouldn't be set in the CPU model table.
3217 */
3218 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3219 /* catch mistakes instead of silently truncating model_id when too long */
3220 assert(def->model_id && strlen(def->model_id) <= 48);
3221
3222
3223 type_register(&ti);
3224 g_free(typename);
3225 }
3226
3227 #if !defined(CONFIG_USER_ONLY)
3228
3229 void cpu_clear_apic_feature(CPUX86State *env)
3230 {
3231 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3232 }
3233
3234 #endif /* !CONFIG_USER_ONLY */
3235
3236 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3237 uint32_t *eax, uint32_t *ebx,
3238 uint32_t *ecx, uint32_t *edx)
3239 {
3240 X86CPU *cpu = x86_env_get_cpu(env);
3241 CPUState *cs = CPU(cpu);
3242 uint32_t pkg_offset;
3243 uint32_t limit;
3244 uint32_t signature[3];
3245
3246 /* Calculate & apply limits for different index ranges */
3247 if (index >= 0xC0000000) {
3248 limit = env->cpuid_xlevel2;
3249 } else if (index >= 0x80000000) {
3250 limit = env->cpuid_xlevel;
3251 } else if (index >= 0x40000000) {
3252 limit = 0x40000001;
3253 } else {
3254 limit = env->cpuid_level;
3255 }
3256
3257 if (index > limit) {
3258 /* Intel documentation states that invalid EAX input will
3259 * return the same information as EAX=cpuid_level
3260 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3261 */
3262 index = env->cpuid_level;
3263 }
3264
3265 switch (index) {
3266 case 0:
3267 *eax = env->cpuid_level;
3268 *ebx = env->cpuid_vendor1;
3269 *edx = env->cpuid_vendor2;
3270 *ecx = env->cpuid_vendor3;
3271 break;
3272 case 1:
3273 *eax = env->cpuid_version;
3274 *ebx = (cpu->apic_id << 24) |
3275 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3276 *ecx = env->features[FEAT_1_ECX];
3277 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3278 *ecx |= CPUID_EXT_OSXSAVE;
3279 }
3280 *edx = env->features[FEAT_1_EDX];
3281 if (cs->nr_cores * cs->nr_threads > 1) {
3282 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3283 *edx |= CPUID_HT;
3284 }
3285 break;
3286 case 2:
3287 /* cache info: needed for Pentium Pro compatibility */
3288 if (cpu->cache_info_passthrough) {
3289 host_cpuid(index, 0, eax, ebx, ecx, edx);
3290 break;
3291 }
3292 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3293 *ebx = 0;
3294 if (!cpu->enable_l3_cache) {
3295 *ecx = 0;
3296 } else {
3297 *ecx = L3_N_DESCRIPTOR;
3298 }
3299 *edx = (L1D_DESCRIPTOR << 16) | \
3300 (L1I_DESCRIPTOR << 8) | \
3301 (L2_DESCRIPTOR);
3302 break;
3303 case 4:
3304 /* cache info: needed for Core compatibility */
3305 if (cpu->cache_info_passthrough) {
3306 host_cpuid(index, count, eax, ebx, ecx, edx);
3307 *eax &= ~0xFC000000;
3308 } else {
3309 *eax = 0;
3310 switch (count) {
3311 case 0: /* L1 dcache info */
3312 *eax |= CPUID_4_TYPE_DCACHE | \
3313 CPUID_4_LEVEL(1) | \
3314 CPUID_4_SELF_INIT_LEVEL;
3315 *ebx = (L1D_LINE_SIZE - 1) | \
3316 ((L1D_PARTITIONS - 1) << 12) | \
3317 ((L1D_ASSOCIATIVITY - 1) << 22);
3318 *ecx = L1D_SETS - 1;
3319 *edx = CPUID_4_NO_INVD_SHARING;
3320 break;
3321 case 1: /* L1 icache info */
3322 *eax |= CPUID_4_TYPE_ICACHE | \
3323 CPUID_4_LEVEL(1) | \
3324 CPUID_4_SELF_INIT_LEVEL;
3325 *ebx = (L1I_LINE_SIZE - 1) | \
3326 ((L1I_PARTITIONS - 1) << 12) | \
3327 ((L1I_ASSOCIATIVITY - 1) << 22);
3328 *ecx = L1I_SETS - 1;
3329 *edx = CPUID_4_NO_INVD_SHARING;
3330 break;
3331 case 2: /* L2 cache info */
3332 *eax |= CPUID_4_TYPE_UNIFIED | \
3333 CPUID_4_LEVEL(2) | \
3334 CPUID_4_SELF_INIT_LEVEL;
3335 if (cs->nr_threads > 1) {
3336 *eax |= (cs->nr_threads - 1) << 14;
3337 }
3338 *ebx = (L2_LINE_SIZE - 1) | \
3339 ((L2_PARTITIONS - 1) << 12) | \
3340 ((L2_ASSOCIATIVITY - 1) << 22);
3341 *ecx = L2_SETS - 1;
3342 *edx = CPUID_4_NO_INVD_SHARING;
3343 break;
3344 case 3: /* L3 cache info */
3345 if (!cpu->enable_l3_cache) {
3346 *eax = 0;
3347 *ebx = 0;
3348 *ecx = 0;
3349 *edx = 0;
3350 break;
3351 }
3352 *eax |= CPUID_4_TYPE_UNIFIED | \
3353 CPUID_4_LEVEL(3) | \
3354 CPUID_4_SELF_INIT_LEVEL;
3355 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3356 *eax |= ((1 << pkg_offset) - 1) << 14;
3357 *ebx = (L3_N_LINE_SIZE - 1) | \
3358 ((L3_N_PARTITIONS - 1) << 12) | \
3359 ((L3_N_ASSOCIATIVITY - 1) << 22);
3360 *ecx = L3_N_SETS - 1;
3361 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
3362 break;
3363 default: /* end of info */
3364 *eax = 0;
3365 *ebx = 0;
3366 *ecx = 0;
3367 *edx = 0;
3368 break;
3369 }
3370 }
3371
3372 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3373 if ((*eax & 31) && cs->nr_cores > 1) {
3374 *eax |= (cs->nr_cores - 1) << 26;
3375 }
3376 break;
3377 case 5:
3378 /* mwait info: needed for Core compatibility */
3379 *eax = 0; /* Smallest monitor-line size in bytes */
3380 *ebx = 0; /* Largest monitor-line size in bytes */
3381 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3382 *edx = 0;
3383 break;
3384 case 6:
3385 /* Thermal and Power Leaf */
3386 *eax = env->features[FEAT_6_EAX];
3387 *ebx = 0;
3388 *ecx = 0;
3389 *edx = 0;
3390 break;
3391 case 7:
3392 /* Structured Extended Feature Flags Enumeration Leaf */
3393 if (count == 0) {
3394 *eax = 0; /* Maximum ECX value for sub-leaves */
3395 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3396 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3397 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3398 *ecx |= CPUID_7_0_ECX_OSPKE;
3399 }
3400 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3401 } else {
3402 *eax = 0;
3403 *ebx = 0;
3404 *ecx = 0;
3405 *edx = 0;
3406 }
3407 break;
3408 case 9:
3409 /* Direct Cache Access Information Leaf */
3410 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3411 *ebx = 0;
3412 *ecx = 0;
3413 *edx = 0;
3414 break;
3415 case 0xA:
3416 /* Architectural Performance Monitoring Leaf */
3417 if (kvm_enabled() && cpu->enable_pmu) {
3418 KVMState *s = cs->kvm_state;
3419
3420 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3421 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3422 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3423 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3424 } else if (hvf_enabled() && cpu->enable_pmu) {
3425 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3426 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3427 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3428 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3429 } else {
3430 *eax = 0;
3431 *ebx = 0;
3432 *ecx = 0;
3433 *edx = 0;
3434 }
3435 break;
3436 case 0xB:
3437 /* Extended Topology Enumeration Leaf */
3438 if (!cpu->enable_cpuid_0xb) {
3439 *eax = *ebx = *ecx = *edx = 0;
3440 break;
3441 }
3442
3443 *ecx = count & 0xff;
3444 *edx = cpu->apic_id;
3445
3446 switch (count) {
3447 case 0:
3448 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3449 *ebx = cs->nr_threads;
3450 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
3451 break;
3452 case 1:
3453 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3454 *ebx = cs->nr_cores * cs->nr_threads;
3455 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3456 break;
3457 default:
3458 *eax = 0;
3459 *ebx = 0;
3460 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3461 }
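/*
 * Illustrative example (assuming the usual ceil(log2()) bit widths from
 * hw/i386/topology.h): with 2 cores of 2 threads each, sub-leaf 0 would
 * report EAX = 1 (SMT shift) and EBX = 2, and sub-leaf 1 would report
 * EAX = 2 (package shift) and EBX = 4.
 */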
3462
3463 assert(!(*eax & ~0x1f));
3464 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
3465 break;
3466 case 0xD: {
3467 /* Processor Extended State */
3468 *eax = 0;
3469 *ebx = 0;
3470 *ecx = 0;
3471 *edx = 0;
3472 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3473 break;
3474 }
3475
3476 if (count == 0) {
3477 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3478 *eax = env->features[FEAT_XSAVE_COMP_LO];
3479 *edx = env->features[FEAT_XSAVE_COMP_HI];
3480 *ebx = *ecx;
3481 } else if (count == 1) {
3482 *eax = env->features[FEAT_XSAVE];
3483 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3484 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3485 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3486 *eax = esa->size;
3487 *ebx = esa->offset;
3488 }
3489 }
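/*
 * Illustrative example: sub-leaves >= 2 describe one XSAVE component
 * each, so if bit 2 of the component mask is set (AVX state on real
 * CPUs), sub-leaf 2 returns that component's save-area size in EAX and
 * its offset within the XSAVE area in EBX, both taken from
 * x86_ext_save_areas[2].
 */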
3490 break;
3491 }
3492 case 0x14: {
3493 /* Intel Processor Trace Enumeration */
3494 *eax = 0;
3495 *ebx = 0;
3496 *ecx = 0;
3497 *edx = 0;
3498 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
3499 !kvm_enabled()) {
3500 break;
3501 }
3502
3503 if (count == 0) {
3504 *eax = INTEL_PT_MAX_SUBLEAF;
3505 *ebx = INTEL_PT_MINIMAL_EBX;
3506 *ecx = INTEL_PT_MINIMAL_ECX;
3507 } else if (count == 1) {
3508 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
3509 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
3510 }
3511 break;
3512 }
3513 case 0x40000000:
3514 /*
3515 * CPUID code in kvm_arch_init_vcpu() ignores what is set
3516 * here, but we restrict this leaf to TCG nonetheless.
3517 */
3518 if (tcg_enabled() && cpu->expose_tcg) {
3519 memcpy(signature, "TCGTCGTCGTCG", 12);
3520 *eax = 0x40000001;
3521 *ebx = signature[0];
3522 *ecx = signature[1];
3523 *edx = signature[2];
3524 } else {
3525 *eax = 0;
3526 *ebx = 0;
3527 *ecx = 0;
3528 *edx = 0;
3529 }
3530 break;
3531 case 0x40000001:
3532 *eax = 0;
3533 *ebx = 0;
3534 *ecx = 0;
3535 *edx = 0;
3536 break;
3537 case 0x80000000:
3538 *eax = env->cpuid_xlevel;
3539 *ebx = env->cpuid_vendor1;
3540 *edx = env->cpuid_vendor2;
3541 *ecx = env->cpuid_vendor3;
3542 break;
3543 case 0x80000001:
3544 *eax = env->cpuid_version;
3545 *ebx = 0;
3546 *ecx = env->features[FEAT_8000_0001_ECX];
3547 *edx = env->features[FEAT_8000_0001_EDX];
3548
3549 /* The Linux kernel checks for the CMPLegacy bit and
3550 * discards multiple thread information if it is set.
3551 * So don't set it here for Intel to make Linux guests happy.
3552 */
3553 if (cs->nr_cores * cs->nr_threads > 1) {
3554 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3555 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3556 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3557 *ecx |= 1 << 1; /* CmpLegacy bit */
3558 }
3559 }
3560 break;
3561 case 0x80000002:
3562 case 0x80000003:
3563 case 0x80000004:
3564 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3565 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3566 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3567 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3568 break;
3569 case 0x80000005:
3570 /* cache info (L1 cache) */
3571 if (cpu->cache_info_passthrough) {
3572 host_cpuid(index, 0, eax, ebx, ecx, edx);
3573 break;
3574 }
3575 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3576 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3577 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3578 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3579 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
3580 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
3581 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
3582 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
3583 break;
3584 case 0x80000006:
3585 /* cache info (L2 cache) */
3586 if (cpu->cache_info_passthrough) {
3587 host_cpuid(index, 0, eax, ebx, ecx, edx);
3588 break;
3589 }
3590 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3591 (L2_DTLB_2M_ENTRIES << 16) | \
3592 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3593 (L2_ITLB_2M_ENTRIES);
3594 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3595 (L2_DTLB_4K_ENTRIES << 16) | \
3596 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3597 (L2_ITLB_4K_ENTRIES);
3598 *ecx = (L2_SIZE_KB_AMD << 16) | \
3599 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
3600 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
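/*
 * Illustrative note: this follows the AMD layout for leaf 0x80000006 --
 * L2 size in KiB in ECX bits 31:16, the 4-bit associativity code in
 * 15:12, lines per tag in 11:8 and the line size in 7:0.  A hypothetical
 * 512 KiB, 16-way, 64-byte-line L2 would thus be encoded as
 * (512 << 16) | (AMD_ENC_ASSOC(16) << 12) | (1 << 8) | 64.
 */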
3601 if (!cpu->enable_l3_cache) {
3602 *edx = ((L3_SIZE_KB / 512) << 18) | \
3603 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
3604 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
3605 } else {
3606 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
3607 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
3608 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
3609 }
3610 break;
3611 case 0x80000007:
3612 *eax = 0;
3613 *ebx = 0;
3614 *ecx = 0;
3615 *edx = env->features[FEAT_8000_0007_EDX];
3616 break;
3617 case 0x80000008:
3618 /* virtual & phys address size in low 2 bytes. */
3619 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3620 /* 64 bit processor */
3621 *eax = cpu->phys_bits; /* configurable physical bits */
3622 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3623 *eax |= 0x00003900; /* 57 bits virtual */
3624 } else {
3625 *eax |= 0x00003000; /* 48 bits virtual */
3626 }
3627 } else {
3628 *eax = cpu->phys_bits;
3629 }
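/*
 * Illustrative decoding: EAX carries the physical address width in bits
 * 7:0 and the linear address width in bits 15:8, so a hypothetical
 * phys_bits of 40 without LA57 would yield EAX = 0x3028 (48-bit
 * virtual, 40-bit physical).
 */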
3630 *ebx = env->features[FEAT_8000_0008_EBX];
3631 *ecx = 0;
3632 *edx = 0;
3633 if (cs->nr_cores * cs->nr_threads > 1) {
3634 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
3635 }
3636 break;
3637 case 0x8000000A:
3638 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3639 *eax = 0x00000001; /* SVM Revision */
3640 *ebx = 0x00000010; /* nr of ASIDs */
3641 *ecx = 0;
3642 *edx = env->features[FEAT_SVM]; /* optional features */
3643 } else {
3644 *eax = 0;
3645 *ebx = 0;
3646 *ecx = 0;
3647 *edx = 0;
3648 }
3649 break;
3650 case 0xC0000000:
3651 *eax = env->cpuid_xlevel2;
3652 *ebx = 0;
3653 *ecx = 0;
3654 *edx = 0;
3655 break;
3656 case 0xC0000001:
3657 /* Support for VIA CPUs' CPUID instruction */
3658 *eax = env->cpuid_version;
3659 *ebx = 0;
3660 *ecx = 0;
3661 *edx = env->features[FEAT_C000_0001_EDX];
3662 break;
3663 case 0xC0000002:
3664 case 0xC0000003:
3665 case 0xC0000004:
3666 /* Reserved for future use; currently filled with zero */
3667 *eax = 0;
3668 *ebx = 0;
3669 *ecx = 0;
3670 *edx = 0;
3671 break;
3672 case 0x8000001F:
3673 *eax = sev_enabled() ? 0x2 : 0;
3674 *ebx = sev_get_cbit_position();
3675 *ebx |= sev_get_reduced_phys_bits() << 6;
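/*
 * Illustrative note: EBX ends up with the SEV C-bit position in bits
 * 5:0 and the number of physical address bits reduced by memory
 * encryption in bits 11:6, matching the << 6 shift above.
 */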
3676 *ecx = 0;
3677 *edx = 0;
3678 break;
3679 default:
3680 /* reserved values: zero */
3681 *eax = 0;
3682 *ebx = 0;
3683 *ecx = 0;
3684 *edx = 0;
3685 break;
3686 }
3687 }
3688
3689 /* CPUClass::reset() */
3690 static void x86_cpu_reset(CPUState *s)
3691 {
3692 X86CPU *cpu = X86_CPU(s);
3693 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
3694 CPUX86State *env = &cpu->env;
3695 target_ulong cr4;
3696 uint64_t xcr0;
3697 int i;
3698
3699 xcc->parent_reset(s);
3700
3701 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
3702
3703 env->old_exception = -1;
3704
3705 /* init to reset state */
3706
3707 env->hflags2 |= HF2_GIF_MASK;
3708
3709 cpu_x86_update_cr0(env, 0x60000010);
3710 env->a20_mask = ~0x0;
3711 env->smbase = 0x30000;
3712 env->msr_smi_count = 0;
3713
3714 env->idt.limit = 0xffff;
3715 env->gdt.limit = 0xffff;
3716 env->ldt.limit = 0xffff;
3717 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
3718 env->tr.limit = 0xffff;
3719 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
3720
3721 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
3722 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
3723 DESC_R_MASK | DESC_A_MASK);
3724 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
3725 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3726 DESC_A_MASK);
3727 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
3728 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3729 DESC_A_MASK);
3730 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
3731 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3732 DESC_A_MASK);
3733 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
3734 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3735 DESC_A_MASK);
3736 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
3737 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3738 DESC_A_MASK);
3739
3740 env->eip = 0xfff0;
3741 env->regs[R_EDX] = env->cpuid_version;
3742
3743 env->eflags = 0x2;
3744
3745 /* FPU init */
3746 for (i = 0; i < 8; i++) {
3747 env->fptags[i] = 1;
3748 }
3749 cpu_set_fpuc(env, 0x37f);
3750
3751 env->mxcsr = 0x1f80;
3752 /* All units are in INIT state. */
3753 env->xstate_bv = 0;
3754
3755 env->pat = 0x0007040600070406ULL;
3756 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
3757
3758 memset(env->dr, 0, sizeof(env->dr));
3759 env->dr[6] = DR6_FIXED_1;
3760 env->dr[7] = DR7_FIXED_1;
3761 cpu_breakpoint_remove_all(s, BP_CPU);
3762 cpu_watchpoint_remove_all(s, BP_CPU);
3763
3764 cr4 = 0;
3765 xcr0 = XSTATE_FP_MASK;
3766
3767 #ifdef CONFIG_USER_ONLY
3768 /* Enable all the features for user-mode. */
3769 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
3770 xcr0 |= XSTATE_SSE_MASK;
3771 }
3772 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3773 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3774 if (env->features[esa->feature] & esa->bits) {
3775 xcr0 |= 1ull << i;
3776 }
3777 }
3778
3779 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
3780 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
3781 }
3782 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
3783 cr4 |= CR4_FSGSBASE_MASK;
3784 }
3785 #endif
3786
3787 env->xcr0 = xcr0;
3788 cpu_x86_update_cr4(env, cr4);
3789
3790 /*
3791 * SDM 11.11.5 requires:
3792 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3793 * - IA32_MTRR_PHYSMASKn.V = 0
3794 * All other bits are undefined. For simplification, zero it all.
3795 */
3796 env->mtrr_deftype = 0;
3797 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
3798 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
3799
3800 env->interrupt_injected = -1;
3801 env->exception_injected = -1;
3802 env->nmi_injected = false;
3803 #if !defined(CONFIG_USER_ONLY)
3804 /* We hard-wire the BSP to the first CPU. */
3805 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
3806
3807 s->halted = !cpu_is_bsp(cpu);
3808
3809 if (kvm_enabled()) {
3810 kvm_arch_reset_vcpu(cpu);
3811 }
3812 else if (hvf_enabled()) {
3813 hvf_reset_vcpu(s);
3814 }
3815 #endif
3816 }
3817
3818 #ifndef CONFIG_USER_ONLY
3819 bool cpu_is_bsp(X86CPU *cpu)
3820 {
3821 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3822 }
3823
3824 /* TODO: remove me when reset over QOM tree is implemented */
3825 static void x86_cpu_machine_reset_cb(void *opaque)
3826 {
3827 X86CPU *cpu = opaque;
3828 cpu_reset(CPU(cpu));
3829 }
3830 #endif
3831
3832 static void mce_init(X86CPU *cpu)
3833 {
3834 CPUX86State *cenv = &cpu->env;
3835 unsigned int bank;
3836
3837 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3838 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3839 (CPUID_MCE | CPUID_MCA)) {
3840 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3841 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3842 cenv->mcg_ctl = ~(uint64_t)0;
3843 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3844 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3845 }
3846 }
3847 }
3848
3849 #ifndef CONFIG_USER_ONLY
3850 APICCommonClass *apic_get_class(void)
3851 {
3852 const char *apic_type = "apic";
3853
3854 /* TODO: in-kernel irqchip for hvf */
3855 if (kvm_apic_in_kernel()) {
3856 apic_type = "kvm-apic";
3857 } else if (xen_enabled()) {
3858 apic_type = "xen-apic";
3859 }
3860
3861 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3862 }
3863
3864 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
3865 {
3866 APICCommonState *apic;
3867 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
3868
3869 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
3870
3871 object_property_add_child(OBJECT(cpu), "lapic",
3872 OBJECT(cpu->apic_state), &error_abort);
3873 object_unref(OBJECT(cpu->apic_state));
3874
3875 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
3876 /* TODO: convert to link<> */
3877 apic = APIC_COMMON(cpu->apic_state);
3878 apic->cpu = cpu;
3879 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
3880 }
3881
3882 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3883 {
3884 APICCommonState *apic;
3885 static bool apic_mmio_map_once;
3886
3887 if (cpu->apic_state == NULL) {
3888 return;
3889 }
3890 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
3891 errp);
3892
3893 /* Map APIC MMIO area */
3894 apic = APIC_COMMON(cpu->apic_state);
3895 if (!apic_mmio_map_once) {
3896 memory_region_add_subregion_overlap(get_system_memory(),
3897 apic->apicbase &
3898 MSR_IA32_APICBASE_BASE,
3899 &apic->io_memory,
3900 0x1000);
3901 apic_mmio_map_once = true;
3902 }
3903 }
3904
3905 static void x86_cpu_machine_done(Notifier *n, void *unused)
3906 {
3907 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3908 MemoryRegion *smram =
3909 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3910
3911 if (smram) {
3912 cpu->smram = g_new(MemoryRegion, 1);
3913 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3914 smram, 0, 1ull << 32);
3915 memory_region_set_enabled(cpu->smram, true);
3916 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3917 }
3918 }
3919 #else
3920 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3921 {
3922 }
3923 #endif
3924
3925 /* Note: Only safe for use on x86(-64) hosts */
3926 static uint32_t x86_host_phys_bits(void)
3927 {
3928 uint32_t eax;
3929 uint32_t host_phys_bits;
3930
3931 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3932 if (eax >= 0x80000008) {
3933 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3934 /* Note: According to AMD doc 25481 rev 2.34, there is a field at
3935 * bits 23:16 that can specify the maximum physical address bits
3936 * for the guest and can override this value; but I've not seen
3937 * anything with that set.
3938 */
3939 host_phys_bits = eax & 0xff;
3940 } else {
3941 /* It's an odd 64-bit machine that doesn't have the leaf for
3942 * physical address bits; fall back to 36 bits, which most
3943 * older Intel CPUs report.
3944 */
3945 host_phys_bits = 36;
3946 }
3947
3948 return host_phys_bits;
3949 }
3950
3951 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3952 {
3953 if (*min < value) {
3954 *min = value;
3955 }
3956 }
3957
3958 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3959 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3960 {
3961 CPUX86State *env = &cpu->env;
3962 FeatureWordInfo *fi = &feature_word_info[w];
3963 uint32_t eax = fi->cpuid_eax;
3964 uint32_t region = eax & 0xF0000000;
3965
3966 if (!env->features[w]) {
3967 return;
3968 }
3969
3970 switch (region) {
3971 case 0x00000000:
3972 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3973 break;
3974 case 0x80000000:
3975 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3976 break;
3977 case 0xC0000000:
3978 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3979 break;
3980 }
3981 }
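/*
 * Illustrative example (assumes FEAT_7_0_ECX has cpuid_eax == 7 in
 * feature_word_info): enabling any FEAT_7_0_ECX bit would raise
 * env->cpuid_min_level to at least 7, so the leaf that advertises the
 * feature becomes visible to the guest.
 */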
3982
3983 /* Calculate XSAVE components based on the configured CPU feature flags */
3984 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3985 {
3986 CPUX86State *env = &cpu->env;
3987 int i;
3988 uint64_t mask;
3989
3990 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3991 return;
3992 }
3993
3994 mask = 0;
3995 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3996 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3997 if (env->features[esa->feature] & esa->bits) {
3998 mask |= (1ULL << i);
3999 }
4000 }
4001
4002 env->features[FEAT_XSAVE_COMP_LO] = mask;
4003 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4004 }
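/*
 * Illustrative example (hypothetical feature set): if only x87 and SSE
 * state are configured, the loop above sets mask = 0x3, so
 * FEAT_XSAVE_COMP_LO becomes 0x3 and FEAT_XSAVE_COMP_HI stays 0.
 */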
4005
4006 /***** Steps involved in loading and filtering CPUID data
4007 *
4008 * When initializing and realizing a CPU object, the steps
4009 * involved in setting up CPUID data are:
4010 *
4011 * 1) Loading CPU model definition (X86CPUDefinition). This is
4012 * implemented by x86_cpu_load_def() and should be completely
4013 * transparent, as it is done automatically by instance_init.
4014 * No code should need to look at X86CPUDefinition structs
4015 * outside instance_init.
4016 *
4017 * 2) CPU expansion. This is done by realize before CPUID
4018 * filtering, and will make sure host/accelerator data is
4019 * loaded for CPU models that depend on host capabilities
4020 * (e.g. "host"). Done by x86_cpu_expand_features().
4021 *
4022 * 3) CPUID filtering. This initializes extra data related to
4023 * CPUID, and checks if the host supports all capabilities
4024 * required by the CPU. Runnability of a CPU model is
4025 * determined at this step. Done by x86_cpu_filter_features().
4026 *
4027 * Some operations don't require all steps to be performed.
4028 * More precisely:
4029 *
4030 * - CPU instance creation (instance_init) will run only CPU
4031 * model loading. CPU expansion can't run at instance_init-time
4032 * because host/accelerator data may not be available yet.
4033 * - CPU realization will perform both CPU model expansion and CPUID
4034 * filtering, and return an error in case one of them fails.
4035 * - query-cpu-definitions needs to run all 3 steps. It needs
4036 * to run CPUID filtering, as the 'unavailable-features'
4037 * field is set based on the filtering results.
4038 * - The query-cpu-model-expansion QMP command only needs to run
4039 * CPU model loading and CPU expansion. It should not filter
4040 * any CPUID data based on host capabilities.
4041 */
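/*
 * Rough sketch of the resulting call order (illustrative, using the
 * functions defined in this file):
 *
 *     x86_cpu_load_def(cpu, xcc->cpu_def, errp);  // step 1, instance_init
 *     x86_cpu_expand_features(cpu, errp);         // step 2, realize
 *     x86_cpu_filter_features(cpu);               // step 3, realize
 *
 * query-cpu-model-expansion stops after step 2 and never filters.
 */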
4042
4043 /* Expand CPU configuration data, based on configured features
4044 * and host/accelerator capabilities when appropriate.
4045 */
4046 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4047 {
4048 CPUX86State *env = &cpu->env;
4049 FeatureWord w;
4050 GList *l;
4051 Error *local_err = NULL;
4052
4053 /* TODO: Now that cpu->max_features doesn't overwrite features
4054 * set using QOM properties, we can convert plus_features &
4055 * minus_features to global properties inside
4056 * x86_cpu_parse_featurestr() too.
4057 */
4058 if (cpu->max_features) {
4059 for (w = 0; w < FEATURE_WORDS; w++) {
4060 /* Override only features that weren't set explicitly
4061 * by the user.
4062 */
4063 env->features[w] |=
4064 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4065 ~env->user_features[w];
4066 }
4067 }
4068
4069 for (l = plus_features; l; l = l->next) {
4070 const char *prop = l->data;
4071 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4072 if (local_err) {
4073 goto out;
4074 }
4075 }
4076
4077 for (l = minus_features; l; l = l->next) {
4078 const char *prop = l->data;
4079 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4080 if (local_err) {
4081 goto out;
4082 }
4083 }
4084
4085 if (!kvm_enabled() || !cpu->expose_kvm) {
4086 env->features[FEAT_KVM] = 0;
4087 }
4088
4089 x86_cpu_enable_xsave_components(cpu);
4090
4091 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
4092 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4093 if (cpu->full_cpuid_auto_level) {
4094 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4095 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4096 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4097 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4098 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4099 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4100 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4101 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4102 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4103 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4104 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4105 /* SVM requires CPUID[0x8000000A] */
4106 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4107 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4108 }
4109
4110 /* SEV requires CPUID[0x8000001F] */
4111 if (sev_enabled()) {
4112 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4113 }
4114 }
4115
4116 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4117 if (env->cpuid_level == UINT32_MAX) {
4118 env->cpuid_level = env->cpuid_min_level;
4119 }
4120 if (env->cpuid_xlevel == UINT32_MAX) {
4121 env->cpuid_xlevel = env->cpuid_min_xlevel;
4122 }
4123 if (env->cpuid_xlevel2 == UINT32_MAX) {
4124 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4125 }
4126
4127 out:
4128 if (local_err != NULL) {
4129 error_propagate(errp, local_err);
4130 }
4131 }
4132
4133 /*
4134 * Finishes initialization of CPUID data, filters CPU feature
4135 * words based on host availability of each feature.
4136 *
4137 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4138 */
4139 static int x86_cpu_filter_features(X86CPU *cpu)
4140 {
4141 CPUX86State *env = &cpu->env;
4142 FeatureWord w;
4143 int rv = 0;
4144
4145 for (w = 0; w < FEATURE_WORDS; w++) {
4146 uint32_t host_feat =
4147 x86_cpu_get_supported_feature_word(w, false);
4148 uint32_t requested_features = env->features[w];
4149 env->features[w] &= host_feat;
4150 cpu->filtered_features[w] = requested_features & ~env->features[w];
4151 if (cpu->filtered_features[w]) {
4152 rv = 1;
4153 }
4154 }
4155
4156 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4157 kvm_enabled()) {
4158 KVMState *s = CPU(cpu)->kvm_state;
4159 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4160 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4161 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4162 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4163 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4164
4165 if (!eax_0 ||
4166 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4167 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4168 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4169 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4170 INTEL_PT_ADDR_RANGES_NUM) ||
4171 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4172 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4173 (ecx_0 & INTEL_PT_IP_LIP)) {
4174 /*
4175 * Processor Trace capabilities aren't configurable, so if the
4176 * host can't emulate the capabilities we report on
4177 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4178 */
4179 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4180 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
4181 rv = 1;
4182 }
4183 }
4184
4185 return rv;
4186 }
4187
4188 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4189 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4190 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4191 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4192 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4193 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4194 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4195 {
4196 CPUState *cs = CPU(dev);
4197 X86CPU *cpu = X86_CPU(dev);
4198 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4199 CPUX86State *env = &cpu->env;
4200 Error *local_err = NULL;
4201 static bool ht_warned;
4202
4203 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4204 char *name = x86_cpu_class_get_model_name(xcc);
4205 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4206 g_free(name);
4207 goto out;
4208 }
4209
4210 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4211 error_setg(errp, "apic-id property was not initialized properly");
4212 return;
4213 }
4214
4215 x86_cpu_expand_features(cpu, &local_err);
4216 if (local_err) {
4217 goto out;
4218 }
4219
4220 if (x86_cpu_filter_features(cpu) &&
4221 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4222 x86_cpu_report_filtered_features(cpu);
4223 if (cpu->enforce_cpuid) {
4224 error_setg(&local_err,
4225 accel_uses_host_cpuid() ?
4226 "Host doesn't support requested features" :
4227 "TCG doesn't support requested features");
4228 goto out;
4229 }
4230 }
4231
4232 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4233 * CPUID[1].EDX.
4234 */
4235 if (IS_AMD_CPU(env)) {
4236 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4237 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4238 & CPUID_EXT2_AMD_ALIASES);
4239 }
4240
4241 /* For 64-bit systems, think about the number of physical bits to
4242 * present. Ideally this should be the same as the host; anything
4243 * other than matching the host can cause incorrect guest behaviour.
4244 * QEMU used to pick the magic value of 40 bits, which corresponds to
4245 * consumer AMD devices but nothing else.
4246 */
4247 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4248 if (accel_uses_host_cpuid()) {
4249 uint32_t host_phys_bits = x86_host_phys_bits();
4250 static bool warned;
4251
4252 if (cpu->host_phys_bits) {
4253 /* The user asked for us to use the host physical bits */
4254 cpu->phys_bits = host_phys_bits;
4255 }
4256
4257 /* Print a warning if the user set it to a value that's not the
4258 * host value.
4259 */
4260 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4261 !warned) {
4262 warn_report("Host physical bits (%u)"
4263 " does not match phys-bits property (%u)",
4264 host_phys_bits, cpu->phys_bits);
4265 warned = true;
4266 }
4267
4268 if (cpu->phys_bits &&
4269 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4270 cpu->phys_bits < 32)) {
4271 error_setg(errp, "phys-bits should be between 32 and %u "
4272 " (but is %u)",
4273 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4274 return;
4275 }
4276 } else {
4277 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4278 error_setg(errp, "TCG only supports phys-bits=%u",
4279 TCG_PHYS_ADDR_BITS);
4280 return;
4281 }
4282 }
4283 /* 0 means it was not explicitly set by the user (or by machine
4284 * compat_props or by the host code above). In this case, the default
4285 * is the value used by TCG (40).
4286 */
4287 if (cpu->phys_bits == 0) {
4288 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4289 }
4290 } else {
4291 /* For 32-bit systems, don't use the user-set value, but keep
4292 * phys_bits consistent with what we tell the guest.
4293 */
4294 if (cpu->phys_bits != 0) {
4295 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4296 return;
4297 }
4298
4299 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4300 cpu->phys_bits = 36;
4301 } else {
4302 cpu->phys_bits = 32;
4303 }
4304 }
4305 cpu_exec_realizefn(cs, &local_err);
4306 if (local_err != NULL) {
4307 error_propagate(errp, local_err);
4308 return;
4309 }
4310
4311 #ifndef CONFIG_USER_ONLY
4312 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4313
4314 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4315 x86_cpu_apic_create(cpu, &local_err);
4316 if (local_err != NULL) {
4317 goto out;
4318 }
4319 }
4320 #endif
4321
4322 mce_init(cpu);
4323
4324 #ifndef CONFIG_USER_ONLY
4325 if (tcg_enabled()) {
4326 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4327 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4328
4329 /* Outer container... */
4330 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4331 memory_region_set_enabled(cpu->cpu_as_root, true);
4332
4333 /* ... with two regions inside: normal system memory with low
4334 * priority, and...
4335 */
4336 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4337 get_system_memory(), 0, ~0ull);
4338 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4339 memory_region_set_enabled(cpu->cpu_as_mem, true);
4340
4341 cs->num_ases = 2;
4342 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4343 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4344
4345 /* ... SMRAM with higher priority, linked from /machine/smram. */
4346 cpu->machine_done.notify = x86_cpu_machine_done;
4347 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4348 }
4349 #endif
4350
4351 qemu_init_vcpu(cs);
4352
4353 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4354 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4355 * based on inputs (sockets, cores, threads), it is still better to give
4356 * users a warning.
4357 *
4358 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4359 * cs->nr_threads hasn't been populated yet and the check is incorrect.
4360 */
4361 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4362 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4363 " -smp options properly.");
4364 ht_warned = true;
4365 }
4366
4367 x86_cpu_apic_realize(cpu, &local_err);
4368 if (local_err != NULL) {
4369 goto out;
4370 }
4371 cpu_reset(cs);
4372
4373 xcc->parent_realize(dev, &local_err);
4374
4375 out:
4376 if (local_err != NULL) {
4377 error_propagate(errp, local_err);
4378 return;
4379 }
4380 }
4381
4382 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4383 {
4384 X86CPU *cpu = X86_CPU(dev);
4385 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4386 Error *local_err = NULL;
4387
4388 #ifndef CONFIG_USER_ONLY
4389 cpu_remove_sync(CPU(dev));
4390 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4391 #endif
4392
4393 if (cpu->apic_state) {
4394 object_unparent(OBJECT(cpu->apic_state));
4395 cpu->apic_state = NULL;
4396 }
4397
4398 xcc->parent_unrealize(dev, &local_err);
4399 if (local_err != NULL) {
4400 error_propagate(errp, local_err);
4401 return;
4402 }
4403 }
4404
4405 typedef struct BitProperty {
4406 FeatureWord w;
4407 uint32_t mask;
4408 } BitProperty;
4409
4410 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4411 void *opaque, Error **errp)
4412 {
4413 X86CPU *cpu = X86_CPU(obj);
4414 BitProperty *fp = opaque;
4415 uint32_t f = cpu->env.features[fp->w];
4416 bool value = (f & fp->mask) == fp->mask;
4417 visit_type_bool(v, name, &value, errp);
4418 }
4419
4420 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4421 void *opaque, Error **errp)
4422 {
4423 DeviceState *dev = DEVICE(obj);
4424 X86CPU *cpu = X86_CPU(obj);
4425 BitProperty *fp = opaque;
4426 Error *local_err = NULL;
4427 bool value;
4428
4429 if (dev->realized) {
4430 qdev_prop_set_after_realize(dev, name, errp);
4431 return;
4432 }
4433
4434 visit_type_bool(v, name, &value, &local_err);
4435 if (local_err) {
4436 error_propagate(errp, local_err);
4437 return;
4438 }
4439
4440 if (value) {
4441 cpu->env.features[fp->w] |= fp->mask;
4442 } else {
4443 cpu->env.features[fp->w] &= ~fp->mask;
4444 }
4445 cpu->env.user_features[fp->w] |= fp->mask;
4446 }
4447
4448 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4449 void *opaque)
4450 {
4451 BitProperty *prop = opaque;
4452 g_free(prop);
4453 }
4454
4455 /* Register a boolean property to get/set a single bit in a uint32_t field.
4456 *
4457 * The same property name can be registered multiple times to make it affect
4458 * multiple bits in the same FeatureWord. In that case, the getter will return
4459 * true only if all bits are set.
4460 */
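/*
 * Illustrative example (hypothetical property name): if "foo" were
 * registered for bits 3 and 5 of the same FeatureWord, fp->mask would
 * become 0x28, the setter would set or clear both bits together, and
 * the getter would return true only when
 * (features[w] & 0x28) == 0x28.
 */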
4461 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4462 const char *prop_name,
4463 FeatureWord w,
4464 int bitnr)
4465 {
4466 BitProperty *fp;
4467 ObjectProperty *op;
4468 uint32_t mask = (1UL << bitnr);
4469
4470 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4471 if (op) {
4472 fp = op->opaque;
4473 assert(fp->w == w);
4474 fp->mask |= mask;
4475 } else {
4476 fp = g_new0(BitProperty, 1);
4477 fp->w = w;
4478 fp->mask = mask;
4479 object_property_add(OBJECT(cpu), prop_name, "bool",
4480 x86_cpu_get_bit_prop,
4481 x86_cpu_set_bit_prop,
4482 x86_cpu_release_bit_prop, fp, &error_abort);
4483 }
4484 }
4485
4486 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4487 FeatureWord w,
4488 int bitnr)
4489 {
4490 FeatureWordInfo *fi = &feature_word_info[w];
4491 const char *name = fi->feat_names[bitnr];
4492
4493 if (!name) {
4494 return;
4495 }
4496
4497 /* Property names should use "-" instead of "_".
4498 * Old names containing underscores are registered as aliases
4499 * using object_property_add_alias()
4500 */
4501 assert(!strchr(name, '_'));
4502 /* Aliases don't use "|" delimiters anymore; they are registered
4503 * manually using object_property_add_alias(). */
4504 assert(!strchr(name, '|'));
4505 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
4506 }
4507
4508 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
4509 {
4510 X86CPU *cpu = X86_CPU(cs);
4511 CPUX86State *env = &cpu->env;
4512 GuestPanicInformation *panic_info = NULL;
4513
4514 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
4515 panic_info = g_malloc0(sizeof(GuestPanicInformation));
4516
4517 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
4518
4519 assert(HV_CRASH_PARAMS >= 5);
4520 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
4521 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
4522 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
4523 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
4524 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
4525 }
4526
4527 return panic_info;
4528 }
4529 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
4530 const char *name, void *opaque,
4531 Error **errp)
4532 {
4533 CPUState *cs = CPU(obj);
4534 GuestPanicInformation *panic_info;
4535
4536 if (!cs->crash_occurred) {
4537 error_setg(errp, "No crash occurred");
4538 return;
4539 }
4540
4541 panic_info = x86_cpu_get_crash_info(cs);
4542 if (panic_info == NULL) {
4543 error_setg(errp, "No crash information");
4544 return;
4545 }
4546
4547 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
4548 errp);
4549 qapi_free_GuestPanicInformation(panic_info);
4550 }
4551
4552 static void x86_cpu_initfn(Object *obj)
4553 {
4554 CPUState *cs = CPU(obj);
4555 X86CPU *cpu = X86_CPU(obj);
4556 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
4557 CPUX86State *env = &cpu->env;
4558 FeatureWord w;
4559
4560 cs->env_ptr = env;
4561
4562 object_property_add(obj, "family", "int",
4563 x86_cpuid_version_get_family,
4564 x86_cpuid_version_set_family, NULL, NULL, NULL);
4565 object_property_add(obj, "model", "int",
4566 x86_cpuid_version_get_model,
4567 x86_cpuid_version_set_model, NULL, NULL, NULL);
4568 object_property_add(obj, "stepping", "int",
4569 x86_cpuid_version_get_stepping,
4570 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
4571 object_property_add_str(obj, "vendor",
4572 x86_cpuid_get_vendor,
4573 x86_cpuid_set_vendor, NULL);
4574 object_property_add_str(obj, "model-id",
4575 x86_cpuid_get_model_id,
4576 x86_cpuid_set_model_id, NULL);
4577 object_property_add(obj, "tsc-frequency", "int",
4578 x86_cpuid_get_tsc_freq,
4579 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4580 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4581 x86_cpu_get_feature_words,
4582 NULL, NULL, (void *)env->features, NULL);
4583 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4584 x86_cpu_get_feature_words,
4585 NULL, NULL, (void *)cpu->filtered_features, NULL);
4586
4587 object_property_add(obj, "crash-information", "GuestPanicInformation",
4588 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4589
4590 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4591
4592 for (w = 0; w < FEATURE_WORDS; w++) {
4593 int bitnr;
4594
4595 for (bitnr = 0; bitnr < 32; bitnr++) {
4596 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4597 }
4598 }
4599
4600 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4601 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4602 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4603 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4604 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4605 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4606 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4607
4608 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4609 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
4610 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
4611 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
4612 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
4613 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
4614 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
4615 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
4616 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
4617 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
4618 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
4619 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
4620 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
4621 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
4622 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
4623 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
4624 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
4625 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
4626 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
4627 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
4628 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
4629
4630 if (xcc->cpu_def) {
4631 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
4632 }
4633 }
4634
4635 static int64_t x86_cpu_get_arch_id(CPUState *cs)
4636 {
4637 X86CPU *cpu = X86_CPU(cs);
4638
4639 return cpu->apic_id;
4640 }
4641
4642 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
4643 {
4644 X86CPU *cpu = X86_CPU(cs);
4645
4646 return cpu->env.cr[0] & CR0_PG_MASK;
4647 }
4648
4649 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
4650 {
4651 X86CPU *cpu = X86_CPU(cs);
4652
4653 cpu->env.eip = value;
4654 }
4655
4656 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
4657 {
4658 X86CPU *cpu = X86_CPU(cs);
4659
4660 cpu->env.eip = tb->pc - tb->cs_base;
4661 }
4662
4663 static bool x86_cpu_has_work(CPUState *cs)
4664 {
4665 X86CPU *cpu = X86_CPU(cs);
4666 CPUX86State *env = &cpu->env;
4667
4668 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
4669 CPU_INTERRUPT_POLL)) &&
4670 (env->eflags & IF_MASK)) ||
4671 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
4672 CPU_INTERRUPT_INIT |
4673 CPU_INTERRUPT_SIPI |
4674 CPU_INTERRUPT_MCE)) ||
4675 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
4676 !(env->hflags & HF_SMM_MASK));
4677 }
4678
4679 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
4680 {
4681 X86CPU *cpu = X86_CPU(cs);
4682 CPUX86State *env = &cpu->env;
4683
4684 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
4685 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
4686 : bfd_mach_i386_i8086);
4687 info->print_insn = print_insn_i386;
4688
4689 info->cap_arch = CS_ARCH_X86;
4690 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
4691 : env->hflags & HF_CS32_MASK ? CS_MODE_32
4692 : CS_MODE_16);
4693 info->cap_insn_unit = 1;
4694 info->cap_insn_split = 8;
4695 }
4696
4697 void x86_update_hflags(CPUX86State *env)
4698 {
4699 uint32_t hflags;
4700 #define HFLAG_COPY_MASK \
4701 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
4702 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
4703 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
4704 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
4705
4706 hflags = env->hflags & HFLAG_COPY_MASK;
4707 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
4708 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
4709 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
4710 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
4711 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
4712
4713 if (env->cr[4] & CR4_OSFXSR_MASK) {
4714 hflags |= HF_OSFXSR_MASK;
4715 }
4716
4717 if (env->efer & MSR_EFER_LMA) {
4718 hflags |= HF_LMA_MASK;
4719 }
4720
4721 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
4722 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
4723 } else {
4724 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
4725 (DESC_B_SHIFT - HF_CS32_SHIFT);
4726 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
4727 (DESC_B_SHIFT - HF_SS32_SHIFT);
4728 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
4729 !(hflags & HF_CS32_MASK)) {
4730 hflags |= HF_ADDSEG_MASK;
4731 } else {
4732 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
4733 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
4734 }
4735 }
4736 env->hflags = hflags;
4737 }
4738
4739 static Property x86_cpu_properties[] = {
4740 #ifdef CONFIG_USER_ONLY
4741 /* apic_id = 0 by default for *-user, see commit 9886e834 */
4742 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
4743 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
4744 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
4745 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
4746 #else
4747 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
4748 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
4749 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
4750 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
4751 #endif
4752 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
4753 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
4754 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
4755 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
4756 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
4757 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
4758 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
4759 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
4760 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
4761 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
4762 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
4763 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
4764 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
4765 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
4766 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
4767 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
4768 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
4769 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
4770 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
4771 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
4772 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
4773 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
4774 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
4775 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
4776 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
4777 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
4778 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
4779 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
4780 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
4781 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
4782 false),
4783 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
4784 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
4785
4786 /*
4787 * From "Requirements for Implementing the Microsoft
4788 * Hypervisor Interface":
4789 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
4790 *
4791 * "Starting with Windows Server 2012 and Windows 8, if
4792 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
4793 * the hypervisor imposes no specific limit to the number of VPs.
4794 * In this case, Windows Server 2012 guest VMs may use more than
4795 * 64 VPs, up to the maximum supported number of processors applicable
4796 * to the specific Windows version being used."
4797 */
4798 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
4799 DEFINE_PROP_END_OF_LIST()
4800 };
4801
4802 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
4803 {
4804 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4805 CPUClass *cc = CPU_CLASS(oc);
4806 DeviceClass *dc = DEVICE_CLASS(oc);
4807
4808 device_class_set_parent_realize(dc, x86_cpu_realizefn,
4809 &xcc->parent_realize);
4810 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
4811 &xcc->parent_unrealize);
4812 dc->props = x86_cpu_properties;
4813
4814 xcc->parent_reset = cc->reset;
4815 cc->reset = x86_cpu_reset;
4816 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
4817
4818 cc->class_by_name = x86_cpu_class_by_name;
4819 cc->parse_features = x86_cpu_parse_featurestr;
4820 cc->has_work = x86_cpu_has_work;
4821 #ifdef CONFIG_TCG
4822 cc->do_interrupt = x86_cpu_do_interrupt;
4823 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
4824 #endif
4825 cc->dump_state = x86_cpu_dump_state;
4826 cc->get_crash_info = x86_cpu_get_crash_info;
4827 cc->set_pc = x86_cpu_set_pc;
4828 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
4829 cc->gdb_read_register = x86_cpu_gdb_read_register;
4830 cc->gdb_write_register = x86_cpu_gdb_write_register;
4831 cc->get_arch_id = x86_cpu_get_arch_id;
4832 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
4833 #ifdef CONFIG_USER_ONLY
4834 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
4835 #else
4836 cc->asidx_from_attrs = x86_asidx_from_attrs;
4837 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
4838 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
4839 cc->write_elf64_note = x86_cpu_write_elf64_note;
4840 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
4841 cc->write_elf32_note = x86_cpu_write_elf32_note;
4842 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
4843 cc->vmsd = &vmstate_x86_cpu;
4844 #endif
4845 cc->gdb_arch_name = x86_gdb_arch_name;
4846 #ifdef TARGET_X86_64
4847 cc->gdb_core_xml_file = "i386-64bit.xml";
4848 cc->gdb_num_core_regs = 57;
4849 #else
4850 cc->gdb_core_xml_file = "i386-32bit.xml";
4851 cc->gdb_num_core_regs = 41;
4852 #endif
4853 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
4854 cc->debug_excp_handler = breakpoint_handler;
4855 #endif
4856 cc->cpu_exec_enter = x86_cpu_exec_enter;
4857 cc->cpu_exec_exit = x86_cpu_exec_exit;
4858 #ifdef CONFIG_TCG
4859 cc->tcg_initialize = tcg_x86_init;
4860 #endif
4861 cc->disas_set_info = x86_disas_set_info;
4862
4863 dc->user_creatable = true;
4864 }
4865
4866 static const TypeInfo x86_cpu_type_info = {
4867 .name = TYPE_X86_CPU,
4868 .parent = TYPE_CPU,
4869 .instance_size = sizeof(X86CPU),
4870 .instance_init = x86_cpu_initfn,
4871 .abstract = true,
4872 .class_size = sizeof(X86CPUClass),
4873 .class_init = x86_cpu_common_class_init,
4874 };
4875
4876
4877 /* "base" CPU model, used by query-cpu-model-expansion */
4878 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4879 {
4880 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4881
4882 xcc->static_model = true;
4883 xcc->migration_safe = true;
4884 xcc->model_description = "base CPU model type with no features enabled";
4885 xcc->ordering = 8;
4886 }
4887
4888 static const TypeInfo x86_base_cpu_type_info = {
4889 .name = X86_CPU_TYPE_NAME("base"),
4890 .parent = TYPE_X86_CPU,
4891 .class_init = x86_cpu_base_class_init,
4892 };
4893
4894 static void x86_cpu_register_types(void)
4895 {
4896 int i;
4897
4898 type_register_static(&x86_cpu_type_info);
4899 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4900 x86_register_cpudef_type(&builtin_x86_defs[i]);
4901 }
4902 type_register_static(&max_x86_cpu_type_info);
4903 type_register_static(&x86_base_cpu_type_info);
4904 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
4905 type_register_static(&host_x86_cpu_type_info);
4906 #endif
4907 }
4908
4909 type_init(x86_cpu_register_types)