git.proxmox.com Git - mirror_qemu.git/blob - target/i386/cpu.c
x86/cpu: Enable CLDEMOTE(Demote Cache Line) cpu feature
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22
23 #include "cpu.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
28 #include "kvm_i386.h"
29 #include "sev_i386.h"
30
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "qemu/config-file.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-visit-misc.h"
36 #include "qapi/qapi-visit-run-state.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qerror.h"
39 #include "qapi/visitor.h"
40 #include "qom/qom-qobject.h"
41 #include "sysemu/arch_init.h"
42
43 #if defined(CONFIG_KVM)
44 #include <linux/kvm_para.h>
45 #endif
46
47 #include "sysemu/sysemu.h"
48 #include "hw/qdev-properties.h"
49 #include "hw/i386/topology.h"
50 #ifndef CONFIG_USER_ONLY
51 #include "exec/address-spaces.h"
52 #include "hw/hw.h"
53 #include "hw/xen/xen.h"
54 #include "hw/i386/apic_internal.h"
55 #endif
56
57 #include "disas/capstone.h"
58
59
60 /* Cache topology CPUID constants: */
61
62 /* CPUID Leaf 2 Descriptors */
63
64 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
65 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
66 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
67 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
68
69
70 /* CPUID Leaf 4 constants: */
71
72 /* EAX: */
73 #define CPUID_4_TYPE_DCACHE 1
74 #define CPUID_4_TYPE_ICACHE 2
75 #define CPUID_4_TYPE_UNIFIED 3
76
77 #define CPUID_4_LEVEL(l) ((l) << 5)
78
79 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
80 #define CPUID_4_FULLY_ASSOC (1 << 9)
81
82 /* EDX: */
83 #define CPUID_4_NO_INVD_SHARING (1 << 0)
84 #define CPUID_4_INCLUSIVE (1 << 1)
85 #define CPUID_4_COMPLEX_IDX (1 << 2)
86
87 #define ASSOC_FULL 0xFF
88
89 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
90 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
91 a == 2 ? 0x2 : \
92 a == 4 ? 0x4 : \
93 a == 8 ? 0x6 : \
94 a == 16 ? 0x8 : \
95 a == 32 ? 0xA : \
96 a == 48 ? 0xB : \
97 a == 64 ? 0xC : \
98 a == 96 ? 0xD : \
99 a == 128 ? 0xE : \
100 a == ASSOC_FULL ? 0xF : \
101 0 /* invalid value */)
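/*
 * Editor's note (illustrative, not part of the original file): a few worked
 * expansions of AMD_ENC_ASSOC(), assuming the macro above:
 *   AMD_ENC_ASSOC(8)          == 0x6   (8-way)
 *   AMD_ENC_ASSOC(16)         == 0x8   (16-way, as used for the L2/L3 values below)
 *   AMD_ENC_ASSOC(ASSOC_FULL) == 0xF   (fully associative)
 * Any associativity not listed in the macro encodes as 0 (invalid).
 */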
102
103
104 /* Definitions of the hardcoded cache entries we expose: */
105
106 /* L1 data cache: */
107 #define L1D_LINE_SIZE 64
108 #define L1D_ASSOCIATIVITY 8
109 #define L1D_SETS 64
110 #define L1D_PARTITIONS 1
111 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
112 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
113 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
114 #define L1D_LINES_PER_TAG 1
115 #define L1D_SIZE_KB_AMD 64
116 #define L1D_ASSOCIATIVITY_AMD 2
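/*
 * Editor's note (worked example, not in the original file): with the values
 * above, the size formula quoted in the comment gives
 *   LINE_SIZE * ASSOCIATIVITY * SETS * PARTITIONS = 64 * 8 * 64 * 1
 *   = 32768 bytes = 32 KiB,
 * matching the 32KB/8-way/64B descriptor advertised via CPUID leaf 2.
 */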
117
118 /* L1 instruction cache: */
119 #define L1I_LINE_SIZE 64
120 #define L1I_ASSOCIATIVITY 8
121 #define L1I_SETS 64
122 #define L1I_PARTITIONS 1
123 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
124 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
125 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
126 #define L1I_LINES_PER_TAG 1
127 #define L1I_SIZE_KB_AMD 64
128 #define L1I_ASSOCIATIVITY_AMD 2
129
130 /* Level 2 unified cache: */
131 #define L2_LINE_SIZE 64
132 #define L2_ASSOCIATIVITY 16
133 #define L2_SETS 4096
134 #define L2_PARTITIONS 1
135 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
136 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
137 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
138 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
139 #define L2_LINES_PER_TAG 1
140 #define L2_SIZE_KB_AMD 512
141
142 /* Level 3 unified cache: */
143 #define L3_SIZE_KB 0 /* disabled */
144 #define L3_ASSOCIATIVITY 0 /* disabled */
145 #define L3_LINES_PER_TAG 0 /* disabled */
146 #define L3_LINE_SIZE 0 /* disabled */
147 #define L3_N_LINE_SIZE 64
148 #define L3_N_ASSOCIATIVITY 16
149 #define L3_N_SETS 16384
150 #define L3_N_PARTITIONS 1
151 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
152 #define L3_N_LINES_PER_TAG 1
153 #define L3_N_SIZE_KB_AMD 16384
154
155 /* TLB definitions: */
156
157 #define L1_DTLB_2M_ASSOC 1
158 #define L1_DTLB_2M_ENTRIES 255
159 #define L1_DTLB_4K_ASSOC 1
160 #define L1_DTLB_4K_ENTRIES 255
161
162 #define L1_ITLB_2M_ASSOC 1
163 #define L1_ITLB_2M_ENTRIES 255
164 #define L1_ITLB_4K_ASSOC 1
165 #define L1_ITLB_4K_ENTRIES 255
166
167 #define L2_DTLB_2M_ASSOC 0 /* disabled */
168 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
169 #define L2_DTLB_4K_ASSOC 4
170 #define L2_DTLB_4K_ENTRIES 512
171
172 #define L2_ITLB_2M_ASSOC 0 /* disabled */
173 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
174 #define L2_ITLB_4K_ASSOC 4
175 #define L2_ITLB_4K_ENTRIES 512
176
177 /* CPUID Leaf 0x14 constants: */
178 #define INTEL_PT_MAX_SUBLEAF 0x1
179 /*
180 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
181 * MSR can be accessed;
182 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
183 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
184 * of Intel PT MSRs across warm reset;
185 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
186 */
187 #define INTEL_PT_MINIMAL_EBX 0xf
188 /*
189 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
190 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
191 * accessed;
192 * bit[01]: ToPA tables can hold any number of output entries, up to the
193 * maximum allowed by the MaskOrTableOffset field of
194 * IA32_RTIT_OUTPUT_MASK_PTRS;
195 * bit[02]: Support Single-Range Output scheme;
196 */
197 #define INTEL_PT_MINIMAL_ECX 0x7
198 /* generated packets which contain IP payloads have LIP values */
199 #define INTEL_PT_IP_LIP (1 << 31)
200 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
201 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
202 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
203 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
204 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
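/*
 * Editor's note (illustrative decoding, not in the original file): 0x0249 is
 * binary 0000 0010 0100 1001, so INTEL_PT_MTC_BITMAP sets bits 16, 19, 22 and
 * 25 after the << 16 shift, matching the "ART(0,3,6,9)" comment above.
 * Similarly, INTEL_PT_PSB_BITMAP (0x003f << 16) sets six consecutive bits for
 * the 2K..64K PSB periods, and INTEL_PT_CYCLE_BITMAP (0x1fff) sets bits 0-12
 * for the supported cycle thresholds.
 */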
205
206 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
207 uint32_t vendor2, uint32_t vendor3)
208 {
209 int i;
210 for (i = 0; i < 4; i++) {
211 dst[i] = vendor1 >> (8 * i);
212 dst[i + 4] = vendor2 >> (8 * i);
213 dst[i + 8] = vendor3 >> (8 * i);
214 }
215 dst[CPUID_VENDOR_SZ] = '\0';
216 }
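/*
 * Editor's note (worked example, not in the original file): the function above
 * unpacks the three CPUID.0 vendor registers byte by byte.  For a GenuineIntel
 * host, EBX=0x756e6547 ("Genu"), EDX=0x49656e69 ("ineI") and ECX=0x6c65746e
 * ("ntel"), so passing them in EBX/EDX/ECX order yields "GenuineIntel".
 */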
217
218 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
219 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
220 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
221 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
222 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
223 CPUID_PSE36 | CPUID_FXSR)
224 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
225 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
226 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
227 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
228 CPUID_PAE | CPUID_SEP | CPUID_APIC)
229
230 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
231 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
232 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
233 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
234 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
235 /* partly implemented:
236 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
237 /* missing:
238 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
239 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
240 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
241 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
242 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
243 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
244 /* missing:
245 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
246 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
247 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
248 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
249 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
250
251 #ifdef TARGET_X86_64
252 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
253 #else
254 #define TCG_EXT2_X86_64_FEATURES 0
255 #endif
256
257 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
258 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
259 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
260 TCG_EXT2_X86_64_FEATURES)
261 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
262 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
263 #define TCG_EXT4_FEATURES 0
264 #define TCG_SVM_FEATURES 0
265 #define TCG_KVM_FEATURES 0
266 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
267 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
268 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
269 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
270 CPUID_7_0_EBX_ERMS)
271 /* missing:
272 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
273 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
274 CPUID_7_0_EBX_RDSEED */
275 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
276 CPUID_7_0_ECX_LA57)
277 #define TCG_7_0_EDX_FEATURES 0
278 #define TCG_APM_FEATURES 0
279 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
280 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
281 /* missing:
282 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
283
284 typedef struct FeatureWordInfo {
285     /* Feature flag names are taken from "Intel Processor Identification and
286 * the CPUID Instruction" and AMD's "CPUID Specification".
287 * In cases of disagreement between feature naming conventions,
288 * aliases may be added.
289 */
290 const char *feat_names[32];
291 uint32_t cpuid_eax; /* Input EAX for CPUID */
292 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
293 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
294 int cpuid_reg; /* output register (R_* constant) */
295 uint32_t tcg_features; /* Feature flags supported by TCG */
296 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
297 uint32_t migratable_flags; /* Feature flags known to be migratable */
298 /* Features that shouldn't be auto-enabled by "-cpu host" */
299 uint32_t no_autoenable_flags;
300 } FeatureWordInfo;
301
302 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
303 [FEAT_1_EDX] = {
304 .feat_names = {
305 "fpu", "vme", "de", "pse",
306 "tsc", "msr", "pae", "mce",
307 "cx8", "apic", NULL, "sep",
308 "mtrr", "pge", "mca", "cmov",
309 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
310 NULL, "ds" /* Intel dts */, "acpi", "mmx",
311 "fxsr", "sse", "sse2", "ss",
312 "ht" /* Intel htt */, "tm", "ia64", "pbe",
313 },
314 .cpuid_eax = 1, .cpuid_reg = R_EDX,
315 .tcg_features = TCG_FEATURES,
316 },
317 [FEAT_1_ECX] = {
318 .feat_names = {
319 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
320 "ds-cpl", "vmx", "smx", "est",
321 "tm2", "ssse3", "cid", NULL,
322 "fma", "cx16", "xtpr", "pdcm",
323 NULL, "pcid", "dca", "sse4.1",
324 "sse4.2", "x2apic", "movbe", "popcnt",
325 "tsc-deadline", "aes", "xsave", "osxsave",
326 "avx", "f16c", "rdrand", "hypervisor",
327 },
328 .cpuid_eax = 1, .cpuid_reg = R_ECX,
329 .tcg_features = TCG_EXT_FEATURES,
330 },
331     /* Feature names that are already defined in feature_name[] but
332      * are set in CPUID[8000_0001].EDX on AMD CPUs don't have their
333      * names listed in feat_names below. They are copied automatically
334      * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
335 */
336 [FEAT_8000_0001_EDX] = {
337 .feat_names = {
338 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
339 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
340 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
341 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
342 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
343 "nx", NULL, "mmxext", NULL /* mmx */,
344 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
345 NULL, "lm", "3dnowext", "3dnow",
346 },
347 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
348 .tcg_features = TCG_EXT2_FEATURES,
349 },
350 [FEAT_8000_0001_ECX] = {
351 .feat_names = {
352 "lahf-lm", "cmp-legacy", "svm", "extapic",
353 "cr8legacy", "abm", "sse4a", "misalignsse",
354 "3dnowprefetch", "osvw", "ibs", "xop",
355 "skinit", "wdt", NULL, "lwp",
356 "fma4", "tce", NULL, "nodeid-msr",
357 NULL, "tbm", "topoext", "perfctr-core",
358 "perfctr-nb", NULL, NULL, NULL,
359 NULL, NULL, NULL, NULL,
360 },
361 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
362 .tcg_features = TCG_EXT3_FEATURES,
363 },
364 [FEAT_C000_0001_EDX] = {
365 .feat_names = {
366 NULL, NULL, "xstore", "xstore-en",
367 NULL, NULL, "xcrypt", "xcrypt-en",
368 "ace2", "ace2-en", "phe", "phe-en",
369 "pmm", "pmm-en", NULL, NULL,
370 NULL, NULL, NULL, NULL,
371 NULL, NULL, NULL, NULL,
372 NULL, NULL, NULL, NULL,
373 NULL, NULL, NULL, NULL,
374 },
375 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
376 .tcg_features = TCG_EXT4_FEATURES,
377 },
378 [FEAT_KVM] = {
379 .feat_names = {
380 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
381 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
382 NULL, "kvm-pv-tlb-flush", NULL, NULL,
383 NULL, NULL, NULL, NULL,
384 NULL, NULL, NULL, NULL,
385 NULL, NULL, NULL, NULL,
386 "kvmclock-stable-bit", NULL, NULL, NULL,
387 NULL, NULL, NULL, NULL,
388 },
389 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
390 .tcg_features = TCG_KVM_FEATURES,
391 },
392 [FEAT_KVM_HINTS] = {
393 .feat_names = {
394 "kvm-hint-dedicated", NULL, NULL, NULL,
395 NULL, NULL, NULL, NULL,
396 NULL, NULL, NULL, NULL,
397 NULL, NULL, NULL, NULL,
398 NULL, NULL, NULL, NULL,
399 NULL, NULL, NULL, NULL,
400 NULL, NULL, NULL, NULL,
401 NULL, NULL, NULL, NULL,
402 },
403 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
404 .tcg_features = TCG_KVM_FEATURES,
405 /*
406          * KVM hints aren't auto-enabled by -cpu host; they need to be
407          * explicitly enabled on the command line.
408 */
409 .no_autoenable_flags = ~0U,
410 },
411 [FEAT_HYPERV_EAX] = {
412 .feat_names = {
413 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
414 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
415 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
416 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
417 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
418 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
419 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
420 NULL, NULL,
421 NULL, NULL, NULL, NULL,
422 NULL, NULL, NULL, NULL,
423 NULL, NULL, NULL, NULL,
424 NULL, NULL, NULL, NULL,
425 },
426 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
427 },
428 [FEAT_HYPERV_EBX] = {
429 .feat_names = {
430 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
431 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
432 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
433 NULL /* hv_create_port */, NULL /* hv_connect_port */,
434 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
435 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
436 NULL, NULL,
437 NULL, NULL, NULL, NULL,
438 NULL, NULL, NULL, NULL,
439 NULL, NULL, NULL, NULL,
440 NULL, NULL, NULL, NULL,
441 },
442 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
443 },
444 [FEAT_HYPERV_EDX] = {
445 .feat_names = {
446 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
447 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
448 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
449 NULL, NULL,
450 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
451 NULL, NULL, NULL, NULL,
452 NULL, NULL, NULL, NULL,
453 NULL, NULL, NULL, NULL,
454 NULL, NULL, NULL, NULL,
455 NULL, NULL, NULL, NULL,
456 },
457 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
458 },
459 [FEAT_SVM] = {
460 .feat_names = {
461 "npt", "lbrv", "svm-lock", "nrip-save",
462 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
463 NULL, NULL, "pause-filter", NULL,
464 "pfthreshold", NULL, NULL, NULL,
465 NULL, NULL, NULL, NULL,
466 NULL, NULL, NULL, NULL,
467 NULL, NULL, NULL, NULL,
468 NULL, NULL, NULL, NULL,
469 },
470 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
471 .tcg_features = TCG_SVM_FEATURES,
472 },
473 [FEAT_7_0_EBX] = {
474 .feat_names = {
475 "fsgsbase", "tsc-adjust", NULL, "bmi1",
476 "hle", "avx2", NULL, "smep",
477 "bmi2", "erms", "invpcid", "rtm",
478 NULL, NULL, "mpx", NULL,
479 "avx512f", "avx512dq", "rdseed", "adx",
480 "smap", "avx512ifma", "pcommit", "clflushopt",
481 "clwb", "intel-pt", "avx512pf", "avx512er",
482 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
483 },
484 .cpuid_eax = 7,
485 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
486 .cpuid_reg = R_EBX,
487 .tcg_features = TCG_7_0_EBX_FEATURES,
488 },
489 [FEAT_7_0_ECX] = {
490 .feat_names = {
491 NULL, "avx512vbmi", "umip", "pku",
492 "ospke", NULL, "avx512vbmi2", NULL,
493 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
494 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
495 "la57", NULL, NULL, NULL,
496 NULL, NULL, "rdpid", NULL,
497 NULL, "cldemote", NULL, NULL,
498 NULL, NULL, NULL, NULL,
499 },
500 .cpuid_eax = 7,
501 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
502 .cpuid_reg = R_ECX,
503 .tcg_features = TCG_7_0_ECX_FEATURES,
504 },
505 [FEAT_7_0_EDX] = {
506 .feat_names = {
507 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
508 NULL, NULL, NULL, NULL,
509 NULL, NULL, NULL, NULL,
510 NULL, NULL, NULL, NULL,
511 NULL, NULL, NULL, NULL,
512 NULL, NULL, NULL, NULL,
513 NULL, NULL, "spec-ctrl", NULL,
514 NULL, NULL, NULL, NULL,
515 },
516 .cpuid_eax = 7,
517 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
518 .cpuid_reg = R_EDX,
519 .tcg_features = TCG_7_0_EDX_FEATURES,
520 },
521 [FEAT_8000_0007_EDX] = {
522 .feat_names = {
523 NULL, NULL, NULL, NULL,
524 NULL, NULL, NULL, NULL,
525 "invtsc", NULL, NULL, NULL,
526 NULL, NULL, NULL, NULL,
527 NULL, NULL, NULL, NULL,
528 NULL, NULL, NULL, NULL,
529 NULL, NULL, NULL, NULL,
530 NULL, NULL, NULL, NULL,
531 },
532 .cpuid_eax = 0x80000007,
533 .cpuid_reg = R_EDX,
534 .tcg_features = TCG_APM_FEATURES,
535 .unmigratable_flags = CPUID_APM_INVTSC,
536 },
537 [FEAT_8000_0008_EBX] = {
538 .feat_names = {
539 NULL, NULL, NULL, NULL,
540 NULL, NULL, NULL, NULL,
541 NULL, NULL, NULL, NULL,
542 "ibpb", NULL, NULL, NULL,
543 NULL, NULL, NULL, NULL,
544 NULL, NULL, NULL, NULL,
545 NULL, NULL, NULL, NULL,
546 NULL, NULL, NULL, NULL,
547 },
548 .cpuid_eax = 0x80000008,
549 .cpuid_reg = R_EBX,
550 .tcg_features = 0,
551 .unmigratable_flags = 0,
552 },
553 [FEAT_XSAVE] = {
554 .feat_names = {
555 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
556 NULL, NULL, NULL, NULL,
557 NULL, NULL, NULL, NULL,
558 NULL, NULL, NULL, NULL,
559 NULL, NULL, NULL, NULL,
560 NULL, NULL, NULL, NULL,
561 NULL, NULL, NULL, NULL,
562 NULL, NULL, NULL, NULL,
563 },
564 .cpuid_eax = 0xd,
565 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
566 .cpuid_reg = R_EAX,
567 .tcg_features = TCG_XSAVE_FEATURES,
568 },
569 [FEAT_6_EAX] = {
570 .feat_names = {
571 NULL, NULL, "arat", NULL,
572 NULL, NULL, NULL, NULL,
573 NULL, NULL, NULL, NULL,
574 NULL, NULL, NULL, NULL,
575 NULL, NULL, NULL, NULL,
576 NULL, NULL, NULL, NULL,
577 NULL, NULL, NULL, NULL,
578 NULL, NULL, NULL, NULL,
579 },
580 .cpuid_eax = 6, .cpuid_reg = R_EAX,
581 .tcg_features = TCG_6_EAX_FEATURES,
582 },
583 [FEAT_XSAVE_COMP_LO] = {
584 .cpuid_eax = 0xD,
585 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
586 .cpuid_reg = R_EAX,
587 .tcg_features = ~0U,
588 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
589 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
590 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
591 XSTATE_PKRU_MASK,
592 },
593 [FEAT_XSAVE_COMP_HI] = {
594 .cpuid_eax = 0xD,
595 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
596 .cpuid_reg = R_EDX,
597 .tcg_features = ~0U,
598 },
599 };
600
601 typedef struct X86RegisterInfo32 {
602 /* Name of register */
603 const char *name;
604 /* QAPI enum value register */
605 X86CPURegister32 qapi_enum;
606 } X86RegisterInfo32;
607
608 #define REGISTER(reg) \
609 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
610 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
611 REGISTER(EAX),
612 REGISTER(ECX),
613 REGISTER(EDX),
614 REGISTER(EBX),
615 REGISTER(ESP),
616 REGISTER(EBP),
617 REGISTER(ESI),
618 REGISTER(EDI),
619 };
620 #undef REGISTER
621
622 typedef struct ExtSaveArea {
623 uint32_t feature, bits;
624 uint32_t offset, size;
625 } ExtSaveArea;
626
627 static const ExtSaveArea x86_ext_save_areas[] = {
628 [XSTATE_FP_BIT] = {
629 /* x87 FP state component is always enabled if XSAVE is supported */
630 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
631 /* x87 state is in the legacy region of the XSAVE area */
632 .offset = 0,
633 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
634 },
635 [XSTATE_SSE_BIT] = {
636 /* SSE state component is always enabled if XSAVE is supported */
637 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
638 /* SSE state is in the legacy region of the XSAVE area */
639 .offset = 0,
640 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
641 },
642 [XSTATE_YMM_BIT] =
643 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
644 .offset = offsetof(X86XSaveArea, avx_state),
645 .size = sizeof(XSaveAVX) },
646 [XSTATE_BNDREGS_BIT] =
647 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
648 .offset = offsetof(X86XSaveArea, bndreg_state),
649 .size = sizeof(XSaveBNDREG) },
650 [XSTATE_BNDCSR_BIT] =
651 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
652 .offset = offsetof(X86XSaveArea, bndcsr_state),
653 .size = sizeof(XSaveBNDCSR) },
654 [XSTATE_OPMASK_BIT] =
655 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
656 .offset = offsetof(X86XSaveArea, opmask_state),
657 .size = sizeof(XSaveOpmask) },
658 [XSTATE_ZMM_Hi256_BIT] =
659 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
660 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
661 .size = sizeof(XSaveZMM_Hi256) },
662 [XSTATE_Hi16_ZMM_BIT] =
663 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
664 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
665 .size = sizeof(XSaveHi16_ZMM) },
666 [XSTATE_PKRU_BIT] =
667 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
668 .offset = offsetof(X86XSaveArea, pkru_state),
669 .size = sizeof(XSavePKRU) },
670 };
671
672 static uint32_t xsave_area_size(uint64_t mask)
673 {
674 int i;
675 uint64_t ret = 0;
676
677 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
678 const ExtSaveArea *esa = &x86_ext_save_areas[i];
679 if ((mask >> i) & 1) {
680 ret = MAX(ret, esa->offset + esa->size);
681 }
682 }
683 return ret;
684 }
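/*
 * Editor's note (worked example, not in the original file): with only the
 * legacy FP and SSE components enabled (mask = XSTATE_FP_MASK |
 * XSTATE_SSE_MASK), both table entries above report offset 0 and size
 * sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader), so xsave_area_size()
 * returns just the legacy region plus the XSAVE header; enabling AVX as well
 * would grow the result to offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX).
 */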
685
686 static inline bool accel_uses_host_cpuid(void)
687 {
688 return kvm_enabled() || hvf_enabled();
689 }
690
691 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
692 {
693 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
694 cpu->env.features[FEAT_XSAVE_COMP_LO];
695 }
696
697 const char *get_register_name_32(unsigned int reg)
698 {
699 if (reg >= CPU_NB_REGS32) {
700 return NULL;
701 }
702 return x86_reg_info_32[reg].name;
703 }
704
705 /*
706 * Returns the set of feature flags that are supported and migratable by
707 * QEMU, for a given FeatureWord.
708 */
709 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
710 {
711 FeatureWordInfo *wi = &feature_word_info[w];
712 uint32_t r = 0;
713 int i;
714
715 for (i = 0; i < 32; i++) {
716 uint32_t f = 1U << i;
717
718 /* If the feature name is known, it is implicitly considered migratable,
719 * unless it is explicitly set in unmigratable_flags */
720 if ((wi->migratable_flags & f) ||
721 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
722 r |= f;
723 }
724 }
725 return r;
726 }
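/*
 * Editor's note (illustrative, not in the original file): as a concrete case
 * from the table above, "invtsc" in FEAT_8000_0007_EDX has a name but is
 * listed in unmigratable_flags (CPUID_APM_INVTSC), so it is left out of the
 * mask returned here, whereas every named FEAT_1_EDX bit is included.
 */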
727
728 void host_cpuid(uint32_t function, uint32_t count,
729 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
730 {
731 uint32_t vec[4];
732
733 #ifdef __x86_64__
734 asm volatile("cpuid"
735 : "=a"(vec[0]), "=b"(vec[1]),
736 "=c"(vec[2]), "=d"(vec[3])
737 : "0"(function), "c"(count) : "cc");
738 #elif defined(__i386__)
739 asm volatile("pusha \n\t"
740 "cpuid \n\t"
741 "mov %%eax, 0(%2) \n\t"
742 "mov %%ebx, 4(%2) \n\t"
743 "mov %%ecx, 8(%2) \n\t"
744 "mov %%edx, 12(%2) \n\t"
745 "popa"
746 : : "a"(function), "c"(count), "S"(vec)
747 : "memory", "cc");
748 #else
749 abort();
750 #endif
751
752 if (eax)
753 *eax = vec[0];
754 if (ebx)
755 *ebx = vec[1];
756 if (ecx)
757 *ecx = vec[2];
758 if (edx)
759 *edx = vec[3];
760 }
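#if 0
/*
 * Editor's sketch (illustrative only, compiled out): querying the host's
 * maximum basic CPUID leaf and vendor string with the helpers above.  The
 * function name is hypothetical and not part of the original file.
 */
static void example_dump_host_vendor(void)
{
    uint32_t max_leaf, ebx, ecx, edx;
    char vendor[CPUID_VENDOR_SZ + 1];

    host_cpuid(0x0, 0, &max_leaf, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
    /* e.g. vendor == "GenuineIntel" on an Intel host */
}
#endif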
761
762 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
763 {
764 uint32_t eax, ebx, ecx, edx;
765
766 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
767 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
768
769 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
770 if (family) {
771 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
772 }
773 if (model) {
774 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
775 }
776 if (stepping) {
777 *stepping = eax & 0x0F;
778 }
779 }
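/*
 * Editor's note (worked example, not in the original file): for a raw
 * CPUID.1:EAX value of 0x000306c3 (a Haswell part), the decoding above gives
 *   family   = ((0x306c3 >> 8) & 0x0F) + ((0x306c3 >> 20) & 0xFF) = 6 + 0 = 6
 *   model    = ((0x306c3 >> 4) & 0x0F) | ((0x306c3 & 0xF0000) >> 12)
 *            = 0x0C | 0x30 = 0x3C = 60
 *   stepping = 0x306c3 & 0x0F = 3
 * i.e. the same family 6 / model 60 used by the "Haswell" definitions below.
 */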
780
781 /* CPU class name definitions: */
782
783 /* Return type name for a given CPU model name
784 * Caller is responsible for freeing the returned string.
785 */
786 static char *x86_cpu_type_name(const char *model_name)
787 {
788 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
789 }
790
791 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
792 {
793 ObjectClass *oc;
794 char *typename = x86_cpu_type_name(cpu_model);
795 oc = object_class_by_name(typename);
796 g_free(typename);
797 return oc;
798 }
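/*
 * Editor's note (illustrative, not in the original file): x86_cpu_type_name()
 * simply plugs the model name into X86_CPU_TYPE_NAME, so a lookup such as
 * x86_cpu_class_by_name("qemu64") resolves the QOM class registered for the
 * "qemu64" entry of builtin_x86_defs below (a type name of the form
 * "qemu64" followed by X86_CPU_TYPE_SUFFIX).
 */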
799
800 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
801 {
802 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
803 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
804 return g_strndup(class_name,
805 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
806 }
807
808 struct X86CPUDefinition {
809 const char *name;
810 uint32_t level;
811 uint32_t xlevel;
812 /* vendor is zero-terminated, 12 character ASCII string */
813 char vendor[CPUID_VENDOR_SZ + 1];
814 int family;
815 int model;
816 int stepping;
817 FeatureWordArray features;
818 const char *model_id;
819 };
820
821 static X86CPUDefinition builtin_x86_defs[] = {
822 {
823 .name = "qemu64",
824 .level = 0xd,
825 .vendor = CPUID_VENDOR_AMD,
826 .family = 6,
827 .model = 6,
828 .stepping = 3,
829 .features[FEAT_1_EDX] =
830 PPRO_FEATURES |
831 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
832 CPUID_PSE36,
833 .features[FEAT_1_ECX] =
834 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
835 .features[FEAT_8000_0001_EDX] =
836 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
837 .features[FEAT_8000_0001_ECX] =
838 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
839 .xlevel = 0x8000000A,
840 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
841 },
842 {
843 .name = "phenom",
844 .level = 5,
845 .vendor = CPUID_VENDOR_AMD,
846 .family = 16,
847 .model = 2,
848 .stepping = 3,
849 /* Missing: CPUID_HT */
850 .features[FEAT_1_EDX] =
851 PPRO_FEATURES |
852 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
853 CPUID_PSE36 | CPUID_VME,
854 .features[FEAT_1_ECX] =
855 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
856 CPUID_EXT_POPCNT,
857 .features[FEAT_8000_0001_EDX] =
858 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
859 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
860 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
861 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
862 CPUID_EXT3_CR8LEG,
863 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
864 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
865 .features[FEAT_8000_0001_ECX] =
866 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
867 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
868 /* Missing: CPUID_SVM_LBRV */
869 .features[FEAT_SVM] =
870 CPUID_SVM_NPT,
871 .xlevel = 0x8000001A,
872 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
873 },
874 {
875 .name = "core2duo",
876 .level = 10,
877 .vendor = CPUID_VENDOR_INTEL,
878 .family = 6,
879 .model = 15,
880 .stepping = 11,
881 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
882 .features[FEAT_1_EDX] =
883 PPRO_FEATURES |
884 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
885 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
886 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
887 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
888 .features[FEAT_1_ECX] =
889 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
890 CPUID_EXT_CX16,
891 .features[FEAT_8000_0001_EDX] =
892 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
893 .features[FEAT_8000_0001_ECX] =
894 CPUID_EXT3_LAHF_LM,
895 .xlevel = 0x80000008,
896 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
897 },
898 {
899 .name = "kvm64",
900 .level = 0xd,
901 .vendor = CPUID_VENDOR_INTEL,
902 .family = 15,
903 .model = 6,
904 .stepping = 1,
905 /* Missing: CPUID_HT */
906 .features[FEAT_1_EDX] =
907 PPRO_FEATURES | CPUID_VME |
908 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
909 CPUID_PSE36,
910 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
911 .features[FEAT_1_ECX] =
912 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
913 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
914 .features[FEAT_8000_0001_EDX] =
915 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
916 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
917 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
918 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
919 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
920 .features[FEAT_8000_0001_ECX] =
921 0,
922 .xlevel = 0x80000008,
923 .model_id = "Common KVM processor"
924 },
925 {
926 .name = "qemu32",
927 .level = 4,
928 .vendor = CPUID_VENDOR_INTEL,
929 .family = 6,
930 .model = 6,
931 .stepping = 3,
932 .features[FEAT_1_EDX] =
933 PPRO_FEATURES,
934 .features[FEAT_1_ECX] =
935 CPUID_EXT_SSE3,
936 .xlevel = 0x80000004,
937 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
938 },
939 {
940 .name = "kvm32",
941 .level = 5,
942 .vendor = CPUID_VENDOR_INTEL,
943 .family = 15,
944 .model = 6,
945 .stepping = 1,
946 .features[FEAT_1_EDX] =
947 PPRO_FEATURES | CPUID_VME |
948 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
949 .features[FEAT_1_ECX] =
950 CPUID_EXT_SSE3,
951 .features[FEAT_8000_0001_ECX] =
952 0,
953 .xlevel = 0x80000008,
954 .model_id = "Common 32-bit KVM processor"
955 },
956 {
957 .name = "coreduo",
958 .level = 10,
959 .vendor = CPUID_VENDOR_INTEL,
960 .family = 6,
961 .model = 14,
962 .stepping = 8,
963 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
964 .features[FEAT_1_EDX] =
965 PPRO_FEATURES | CPUID_VME |
966 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
967 CPUID_SS,
968 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
969 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
970 .features[FEAT_1_ECX] =
971 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
972 .features[FEAT_8000_0001_EDX] =
973 CPUID_EXT2_NX,
974 .xlevel = 0x80000008,
975 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
976 },
977 {
978 .name = "486",
979 .level = 1,
980 .vendor = CPUID_VENDOR_INTEL,
981 .family = 4,
982 .model = 8,
983 .stepping = 0,
984 .features[FEAT_1_EDX] =
985 I486_FEATURES,
986 .xlevel = 0,
987 .model_id = "",
988 },
989 {
990 .name = "pentium",
991 .level = 1,
992 .vendor = CPUID_VENDOR_INTEL,
993 .family = 5,
994 .model = 4,
995 .stepping = 3,
996 .features[FEAT_1_EDX] =
997 PENTIUM_FEATURES,
998 .xlevel = 0,
999 .model_id = "",
1000 },
1001 {
1002 .name = "pentium2",
1003 .level = 2,
1004 .vendor = CPUID_VENDOR_INTEL,
1005 .family = 6,
1006 .model = 5,
1007 .stepping = 2,
1008 .features[FEAT_1_EDX] =
1009 PENTIUM2_FEATURES,
1010 .xlevel = 0,
1011 .model_id = "",
1012 },
1013 {
1014 .name = "pentium3",
1015 .level = 3,
1016 .vendor = CPUID_VENDOR_INTEL,
1017 .family = 6,
1018 .model = 7,
1019 .stepping = 3,
1020 .features[FEAT_1_EDX] =
1021 PENTIUM3_FEATURES,
1022 .xlevel = 0,
1023 .model_id = "",
1024 },
1025 {
1026 .name = "athlon",
1027 .level = 2,
1028 .vendor = CPUID_VENDOR_AMD,
1029 .family = 6,
1030 .model = 2,
1031 .stepping = 3,
1032 .features[FEAT_1_EDX] =
1033 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1034 CPUID_MCA,
1035 .features[FEAT_8000_0001_EDX] =
1036 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1037 .xlevel = 0x80000008,
1038 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1039 },
1040 {
1041 .name = "n270",
1042 .level = 10,
1043 .vendor = CPUID_VENDOR_INTEL,
1044 .family = 6,
1045 .model = 28,
1046 .stepping = 2,
1047 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1048 .features[FEAT_1_EDX] =
1049 PPRO_FEATURES |
1050 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1051 CPUID_ACPI | CPUID_SS,
1052         /* Some CPUs have no CPUID_SEP */
1053 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1054 * CPUID_EXT_XTPR */
1055 .features[FEAT_1_ECX] =
1056 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1057 CPUID_EXT_MOVBE,
1058 .features[FEAT_8000_0001_EDX] =
1059 CPUID_EXT2_NX,
1060 .features[FEAT_8000_0001_ECX] =
1061 CPUID_EXT3_LAHF_LM,
1062 .xlevel = 0x80000008,
1063 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1064 },
1065 {
1066 .name = "Conroe",
1067 .level = 10,
1068 .vendor = CPUID_VENDOR_INTEL,
1069 .family = 6,
1070 .model = 15,
1071 .stepping = 3,
1072 .features[FEAT_1_EDX] =
1073 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1074 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1075 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1076 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1077 CPUID_DE | CPUID_FP87,
1078 .features[FEAT_1_ECX] =
1079 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1080 .features[FEAT_8000_0001_EDX] =
1081 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1082 .features[FEAT_8000_0001_ECX] =
1083 CPUID_EXT3_LAHF_LM,
1084 .xlevel = 0x80000008,
1085 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1086 },
1087 {
1088 .name = "Penryn",
1089 .level = 10,
1090 .vendor = CPUID_VENDOR_INTEL,
1091 .family = 6,
1092 .model = 23,
1093 .stepping = 3,
1094 .features[FEAT_1_EDX] =
1095 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1096 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1097 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1098 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1099 CPUID_DE | CPUID_FP87,
1100 .features[FEAT_1_ECX] =
1101 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1102 CPUID_EXT_SSE3,
1103 .features[FEAT_8000_0001_EDX] =
1104 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1105 .features[FEAT_8000_0001_ECX] =
1106 CPUID_EXT3_LAHF_LM,
1107 .xlevel = 0x80000008,
1108 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1109 },
1110 {
1111 .name = "Nehalem",
1112 .level = 11,
1113 .vendor = CPUID_VENDOR_INTEL,
1114 .family = 6,
1115 .model = 26,
1116 .stepping = 3,
1117 .features[FEAT_1_EDX] =
1118 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1119 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1120 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1121 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1122 CPUID_DE | CPUID_FP87,
1123 .features[FEAT_1_ECX] =
1124 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1125 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1126 .features[FEAT_8000_0001_EDX] =
1127 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1128 .features[FEAT_8000_0001_ECX] =
1129 CPUID_EXT3_LAHF_LM,
1130 .xlevel = 0x80000008,
1131 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1132 },
1133 {
1134 .name = "Nehalem-IBRS",
1135 .level = 11,
1136 .vendor = CPUID_VENDOR_INTEL,
1137 .family = 6,
1138 .model = 26,
1139 .stepping = 3,
1140 .features[FEAT_1_EDX] =
1141 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1142 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1143 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1144 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1145 CPUID_DE | CPUID_FP87,
1146 .features[FEAT_1_ECX] =
1147 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1148 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1149 .features[FEAT_7_0_EDX] =
1150 CPUID_7_0_EDX_SPEC_CTRL,
1151 .features[FEAT_8000_0001_EDX] =
1152 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1153 .features[FEAT_8000_0001_ECX] =
1154 CPUID_EXT3_LAHF_LM,
1155 .xlevel = 0x80000008,
1156 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1157 },
1158 {
1159 .name = "Westmere",
1160 .level = 11,
1161 .vendor = CPUID_VENDOR_INTEL,
1162 .family = 6,
1163 .model = 44,
1164 .stepping = 1,
1165 .features[FEAT_1_EDX] =
1166 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1167 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1168 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1169 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1170 CPUID_DE | CPUID_FP87,
1171 .features[FEAT_1_ECX] =
1172 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1173 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1174 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1175 .features[FEAT_8000_0001_EDX] =
1176 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1177 .features[FEAT_8000_0001_ECX] =
1178 CPUID_EXT3_LAHF_LM,
1179 .features[FEAT_6_EAX] =
1180 CPUID_6_EAX_ARAT,
1181 .xlevel = 0x80000008,
1182 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1183 },
1184 {
1185 .name = "Westmere-IBRS",
1186 .level = 11,
1187 .vendor = CPUID_VENDOR_INTEL,
1188 .family = 6,
1189 .model = 44,
1190 .stepping = 1,
1191 .features[FEAT_1_EDX] =
1192 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1193 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1194 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1195 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1196 CPUID_DE | CPUID_FP87,
1197 .features[FEAT_1_ECX] =
1198 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1199 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1200 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1201 .features[FEAT_8000_0001_EDX] =
1202 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1203 .features[FEAT_8000_0001_ECX] =
1204 CPUID_EXT3_LAHF_LM,
1205 .features[FEAT_7_0_EDX] =
1206 CPUID_7_0_EDX_SPEC_CTRL,
1207 .features[FEAT_6_EAX] =
1208 CPUID_6_EAX_ARAT,
1209 .xlevel = 0x80000008,
1210 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1211 },
1212 {
1213 .name = "SandyBridge",
1214 .level = 0xd,
1215 .vendor = CPUID_VENDOR_INTEL,
1216 .family = 6,
1217 .model = 42,
1218 .stepping = 1,
1219 .features[FEAT_1_EDX] =
1220 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1221 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1222 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1223 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1224 CPUID_DE | CPUID_FP87,
1225 .features[FEAT_1_ECX] =
1226 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1227 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1228 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1229 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1230 CPUID_EXT_SSE3,
1231 .features[FEAT_8000_0001_EDX] =
1232 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1233 CPUID_EXT2_SYSCALL,
1234 .features[FEAT_8000_0001_ECX] =
1235 CPUID_EXT3_LAHF_LM,
1236 .features[FEAT_XSAVE] =
1237 CPUID_XSAVE_XSAVEOPT,
1238 .features[FEAT_6_EAX] =
1239 CPUID_6_EAX_ARAT,
1240 .xlevel = 0x80000008,
1241 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1242 },
1243 {
1244 .name = "SandyBridge-IBRS",
1245 .level = 0xd,
1246 .vendor = CPUID_VENDOR_INTEL,
1247 .family = 6,
1248 .model = 42,
1249 .stepping = 1,
1250 .features[FEAT_1_EDX] =
1251 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1252 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1253 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1254 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1255 CPUID_DE | CPUID_FP87,
1256 .features[FEAT_1_ECX] =
1257 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1258 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1259 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1260 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1261 CPUID_EXT_SSE3,
1262 .features[FEAT_8000_0001_EDX] =
1263 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1264 CPUID_EXT2_SYSCALL,
1265 .features[FEAT_8000_0001_ECX] =
1266 CPUID_EXT3_LAHF_LM,
1267 .features[FEAT_7_0_EDX] =
1268 CPUID_7_0_EDX_SPEC_CTRL,
1269 .features[FEAT_XSAVE] =
1270 CPUID_XSAVE_XSAVEOPT,
1271 .features[FEAT_6_EAX] =
1272 CPUID_6_EAX_ARAT,
1273 .xlevel = 0x80000008,
1274 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1275 },
1276 {
1277 .name = "IvyBridge",
1278 .level = 0xd,
1279 .vendor = CPUID_VENDOR_INTEL,
1280 .family = 6,
1281 .model = 58,
1282 .stepping = 9,
1283 .features[FEAT_1_EDX] =
1284 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1285 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1286 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1287 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1288 CPUID_DE | CPUID_FP87,
1289 .features[FEAT_1_ECX] =
1290 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1291 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1292 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1293 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1294 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1295 .features[FEAT_7_0_EBX] =
1296 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1297 CPUID_7_0_EBX_ERMS,
1298 .features[FEAT_8000_0001_EDX] =
1299 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1300 CPUID_EXT2_SYSCALL,
1301 .features[FEAT_8000_0001_ECX] =
1302 CPUID_EXT3_LAHF_LM,
1303 .features[FEAT_XSAVE] =
1304 CPUID_XSAVE_XSAVEOPT,
1305 .features[FEAT_6_EAX] =
1306 CPUID_6_EAX_ARAT,
1307 .xlevel = 0x80000008,
1308 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1309 },
1310 {
1311 .name = "IvyBridge-IBRS",
1312 .level = 0xd,
1313 .vendor = CPUID_VENDOR_INTEL,
1314 .family = 6,
1315 .model = 58,
1316 .stepping = 9,
1317 .features[FEAT_1_EDX] =
1318 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1319 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1320 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1321 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1322 CPUID_DE | CPUID_FP87,
1323 .features[FEAT_1_ECX] =
1324 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1325 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1326 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1327 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1328 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1329 .features[FEAT_7_0_EBX] =
1330 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1331 CPUID_7_0_EBX_ERMS,
1332 .features[FEAT_8000_0001_EDX] =
1333 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1334 CPUID_EXT2_SYSCALL,
1335 .features[FEAT_8000_0001_ECX] =
1336 CPUID_EXT3_LAHF_LM,
1337 .features[FEAT_7_0_EDX] =
1338 CPUID_7_0_EDX_SPEC_CTRL,
1339 .features[FEAT_XSAVE] =
1340 CPUID_XSAVE_XSAVEOPT,
1341 .features[FEAT_6_EAX] =
1342 CPUID_6_EAX_ARAT,
1343 .xlevel = 0x80000008,
1344 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1345 },
1346 {
1347 .name = "Haswell-noTSX",
1348 .level = 0xd,
1349 .vendor = CPUID_VENDOR_INTEL,
1350 .family = 6,
1351 .model = 60,
1352 .stepping = 1,
1353 .features[FEAT_1_EDX] =
1354 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1355 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1356 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1357 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1358 CPUID_DE | CPUID_FP87,
1359 .features[FEAT_1_ECX] =
1360 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1361 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1362 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1363 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1364 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1365 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1366 .features[FEAT_8000_0001_EDX] =
1367 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1368 CPUID_EXT2_SYSCALL,
1369 .features[FEAT_8000_0001_ECX] =
1370 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1371 .features[FEAT_7_0_EBX] =
1372 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1373 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1374 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1375 .features[FEAT_XSAVE] =
1376 CPUID_XSAVE_XSAVEOPT,
1377 .features[FEAT_6_EAX] =
1378 CPUID_6_EAX_ARAT,
1379 .xlevel = 0x80000008,
1380 .model_id = "Intel Core Processor (Haswell, no TSX)",
1381 },
1382 {
1383 .name = "Haswell-noTSX-IBRS",
1384 .level = 0xd,
1385 .vendor = CPUID_VENDOR_INTEL,
1386 .family = 6,
1387 .model = 60,
1388 .stepping = 1,
1389 .features[FEAT_1_EDX] =
1390 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1391 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1392 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1393 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1394 CPUID_DE | CPUID_FP87,
1395 .features[FEAT_1_ECX] =
1396 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1397 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1398 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1399 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1400 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1401 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1402 .features[FEAT_8000_0001_EDX] =
1403 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1404 CPUID_EXT2_SYSCALL,
1405 .features[FEAT_8000_0001_ECX] =
1406 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1407 .features[FEAT_7_0_EDX] =
1408 CPUID_7_0_EDX_SPEC_CTRL,
1409 .features[FEAT_7_0_EBX] =
1410 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1411 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1412 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1413 .features[FEAT_XSAVE] =
1414 CPUID_XSAVE_XSAVEOPT,
1415 .features[FEAT_6_EAX] =
1416 CPUID_6_EAX_ARAT,
1417 .xlevel = 0x80000008,
1418 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1419 },
1420 {
1421 .name = "Haswell",
1422 .level = 0xd,
1423 .vendor = CPUID_VENDOR_INTEL,
1424 .family = 6,
1425 .model = 60,
1426 .stepping = 4,
1427 .features[FEAT_1_EDX] =
1428 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1429 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1430 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1431 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1432 CPUID_DE | CPUID_FP87,
1433 .features[FEAT_1_ECX] =
1434 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1435 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1436 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1437 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1438 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1439 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1440 .features[FEAT_8000_0001_EDX] =
1441 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1442 CPUID_EXT2_SYSCALL,
1443 .features[FEAT_8000_0001_ECX] =
1444 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1445 .features[FEAT_7_0_EBX] =
1446 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1447 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1448 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1449 CPUID_7_0_EBX_RTM,
1450 .features[FEAT_XSAVE] =
1451 CPUID_XSAVE_XSAVEOPT,
1452 .features[FEAT_6_EAX] =
1453 CPUID_6_EAX_ARAT,
1454 .xlevel = 0x80000008,
1455 .model_id = "Intel Core Processor (Haswell)",
1456 },
1457 {
1458 .name = "Haswell-IBRS",
1459 .level = 0xd,
1460 .vendor = CPUID_VENDOR_INTEL,
1461 .family = 6,
1462 .model = 60,
1463 .stepping = 4,
1464 .features[FEAT_1_EDX] =
1465 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1466 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1467 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1468 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1469 CPUID_DE | CPUID_FP87,
1470 .features[FEAT_1_ECX] =
1471 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1472 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1473 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1474 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1475 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1476 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1477 .features[FEAT_8000_0001_EDX] =
1478 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1479 CPUID_EXT2_SYSCALL,
1480 .features[FEAT_8000_0001_ECX] =
1481 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1482 .features[FEAT_7_0_EDX] =
1483 CPUID_7_0_EDX_SPEC_CTRL,
1484 .features[FEAT_7_0_EBX] =
1485 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1486 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1487 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1488 CPUID_7_0_EBX_RTM,
1489 .features[FEAT_XSAVE] =
1490 CPUID_XSAVE_XSAVEOPT,
1491 .features[FEAT_6_EAX] =
1492 CPUID_6_EAX_ARAT,
1493 .xlevel = 0x80000008,
1494 .model_id = "Intel Core Processor (Haswell, IBRS)",
1495 },
1496 {
1497 .name = "Broadwell-noTSX",
1498 .level = 0xd,
1499 .vendor = CPUID_VENDOR_INTEL,
1500 .family = 6,
1501 .model = 61,
1502 .stepping = 2,
1503 .features[FEAT_1_EDX] =
1504 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1505 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1506 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1507 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1508 CPUID_DE | CPUID_FP87,
1509 .features[FEAT_1_ECX] =
1510 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1511 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1512 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1513 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1514 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1515 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1516 .features[FEAT_8000_0001_EDX] =
1517 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1518 CPUID_EXT2_SYSCALL,
1519 .features[FEAT_8000_0001_ECX] =
1520 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1521 .features[FEAT_7_0_EBX] =
1522 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1523 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1524 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1525 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1526 CPUID_7_0_EBX_SMAP,
1527 .features[FEAT_XSAVE] =
1528 CPUID_XSAVE_XSAVEOPT,
1529 .features[FEAT_6_EAX] =
1530 CPUID_6_EAX_ARAT,
1531 .xlevel = 0x80000008,
1532 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1533 },
1534 {
1535 .name = "Broadwell-noTSX-IBRS",
1536 .level = 0xd,
1537 .vendor = CPUID_VENDOR_INTEL,
1538 .family = 6,
1539 .model = 61,
1540 .stepping = 2,
1541 .features[FEAT_1_EDX] =
1542 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1543 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1544 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1545 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1546 CPUID_DE | CPUID_FP87,
1547 .features[FEAT_1_ECX] =
1548 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1549 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1550 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1551 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1552 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1553 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1554 .features[FEAT_8000_0001_EDX] =
1555 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1556 CPUID_EXT2_SYSCALL,
1557 .features[FEAT_8000_0001_ECX] =
1558 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1559 .features[FEAT_7_0_EDX] =
1560 CPUID_7_0_EDX_SPEC_CTRL,
1561 .features[FEAT_7_0_EBX] =
1562 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1563 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1564 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1565 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1566 CPUID_7_0_EBX_SMAP,
1567 .features[FEAT_XSAVE] =
1568 CPUID_XSAVE_XSAVEOPT,
1569 .features[FEAT_6_EAX] =
1570 CPUID_6_EAX_ARAT,
1571 .xlevel = 0x80000008,
1572 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
1573 },
1574 {
1575 .name = "Broadwell",
1576 .level = 0xd,
1577 .vendor = CPUID_VENDOR_INTEL,
1578 .family = 6,
1579 .model = 61,
1580 .stepping = 2,
1581 .features[FEAT_1_EDX] =
1582 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1583 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1584 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1585 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1586 CPUID_DE | CPUID_FP87,
1587 .features[FEAT_1_ECX] =
1588 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1589 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1590 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1591 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1592 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1593 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1594 .features[FEAT_8000_0001_EDX] =
1595 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1596 CPUID_EXT2_SYSCALL,
1597 .features[FEAT_8000_0001_ECX] =
1598 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1599 .features[FEAT_7_0_EBX] =
1600 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1601 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1602 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1603 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1604 CPUID_7_0_EBX_SMAP,
1605 .features[FEAT_XSAVE] =
1606 CPUID_XSAVE_XSAVEOPT,
1607 .features[FEAT_6_EAX] =
1608 CPUID_6_EAX_ARAT,
1609 .xlevel = 0x80000008,
1610 .model_id = "Intel Core Processor (Broadwell)",
1611 },
1612 {
1613 .name = "Broadwell-IBRS",
1614 .level = 0xd,
1615 .vendor = CPUID_VENDOR_INTEL,
1616 .family = 6,
1617 .model = 61,
1618 .stepping = 2,
1619 .features[FEAT_1_EDX] =
1620 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1621 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1622 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1623 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1624 CPUID_DE | CPUID_FP87,
1625 .features[FEAT_1_ECX] =
1626 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1627 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1628 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1629 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1630 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1631 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1632 .features[FEAT_8000_0001_EDX] =
1633 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1634 CPUID_EXT2_SYSCALL,
1635 .features[FEAT_8000_0001_ECX] =
1636 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1637 .features[FEAT_7_0_EDX] =
1638 CPUID_7_0_EDX_SPEC_CTRL,
1639 .features[FEAT_7_0_EBX] =
1640 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1641 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1642 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1643 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1644 CPUID_7_0_EBX_SMAP,
1645 .features[FEAT_XSAVE] =
1646 CPUID_XSAVE_XSAVEOPT,
1647 .features[FEAT_6_EAX] =
1648 CPUID_6_EAX_ARAT,
1649 .xlevel = 0x80000008,
1650 .model_id = "Intel Core Processor (Broadwell, IBRS)",
1651 },
1652 {
1653 .name = "Skylake-Client",
1654 .level = 0xd,
1655 .vendor = CPUID_VENDOR_INTEL,
1656 .family = 6,
1657 .model = 94,
1658 .stepping = 3,
1659 .features[FEAT_1_EDX] =
1660 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1661 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1662 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1663 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1664 CPUID_DE | CPUID_FP87,
1665 .features[FEAT_1_ECX] =
1666 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1667 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1668 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1669 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1670 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1671 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1672 .features[FEAT_8000_0001_EDX] =
1673 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1674 CPUID_EXT2_SYSCALL,
1675 .features[FEAT_8000_0001_ECX] =
1676 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1677 .features[FEAT_7_0_EBX] =
1678 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1679 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1680 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1681 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1682 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1683 /* Missing: XSAVES (not supported by some Linux versions,
1684 * including v4.1 to v4.12).
1685 * KVM doesn't yet expose any XSAVES state save component,
1686 * and the only one defined in Skylake (processor tracing)
1687 * probably will block migration anyway.
1688 */
1689 .features[FEAT_XSAVE] =
1690 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1691 CPUID_XSAVE_XGETBV1,
1692 .features[FEAT_6_EAX] =
1693 CPUID_6_EAX_ARAT,
1694 .xlevel = 0x80000008,
1695 .model_id = "Intel Core Processor (Skylake)",
1696 },
1697 {
1698 .name = "Skylake-Client-IBRS",
1699 .level = 0xd,
1700 .vendor = CPUID_VENDOR_INTEL,
1701 .family = 6,
1702 .model = 94,
1703 .stepping = 3,
1704 .features[FEAT_1_EDX] =
1705 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1706 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1707 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1708 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1709 CPUID_DE | CPUID_FP87,
1710 .features[FEAT_1_ECX] =
1711 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1712 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1713 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1714 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1715 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1716 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1717 .features[FEAT_8000_0001_EDX] =
1718 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1719 CPUID_EXT2_SYSCALL,
1720 .features[FEAT_8000_0001_ECX] =
1721 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1722 .features[FEAT_7_0_EDX] =
1723 CPUID_7_0_EDX_SPEC_CTRL,
1724 .features[FEAT_7_0_EBX] =
1725 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1726 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1727 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1728 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1729 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1730 /* Missing: XSAVES (not supported by some Linux versions,
1731 * including v4.1 to v4.12).
1732 * KVM doesn't yet expose any XSAVES state save component,
1733 * and the only one defined in Skylake (processor tracing)
1734 * probably will block migration anyway.
1735 */
1736 .features[FEAT_XSAVE] =
1737 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1738 CPUID_XSAVE_XGETBV1,
1739 .features[FEAT_6_EAX] =
1740 CPUID_6_EAX_ARAT,
1741 .xlevel = 0x80000008,
1742 .model_id = "Intel Core Processor (Skylake, IBRS)",
1743 },
1744 {
1745 .name = "Skylake-Server",
1746 .level = 0xd,
1747 .vendor = CPUID_VENDOR_INTEL,
1748 .family = 6,
1749 .model = 85,
1750 .stepping = 4,
1751 .features[FEAT_1_EDX] =
1752 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1753 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1754 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1755 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1756 CPUID_DE | CPUID_FP87,
1757 .features[FEAT_1_ECX] =
1758 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1759 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1760 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1761 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1762 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1763 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1764 .features[FEAT_8000_0001_EDX] =
1765 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1766 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1767 .features[FEAT_8000_0001_ECX] =
1768 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1769 .features[FEAT_7_0_EBX] =
1770 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1771 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1772 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1773 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1774 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1775 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1776 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1777 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
1778 /* Missing: XSAVES (not supported by some Linux versions,
1779 * including v4.1 to v4.12).
1780 * KVM doesn't yet expose any XSAVES state save component,
1781 * and the only one defined in Skylake (processor tracing)
1782 * probably will block migration anyway.
1783 */
1784 .features[FEAT_XSAVE] =
1785 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1786 CPUID_XSAVE_XGETBV1,
1787 .features[FEAT_6_EAX] =
1788 CPUID_6_EAX_ARAT,
1789 .xlevel = 0x80000008,
1790 .model_id = "Intel Xeon Processor (Skylake)",
1791 },
1792 {
1793 .name = "Skylake-Server-IBRS",
1794 .level = 0xd,
1795 .vendor = CPUID_VENDOR_INTEL,
1796 .family = 6,
1797 .model = 85,
1798 .stepping = 4,
1799 .features[FEAT_1_EDX] =
1800 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1801 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1802 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1803 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1804 CPUID_DE | CPUID_FP87,
1805 .features[FEAT_1_ECX] =
1806 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1807 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1808 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1809 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1810 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1811 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1812 .features[FEAT_8000_0001_EDX] =
1813 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1814 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1815 .features[FEAT_8000_0001_ECX] =
1816 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1817 .features[FEAT_7_0_EDX] =
1818 CPUID_7_0_EDX_SPEC_CTRL,
1819 .features[FEAT_7_0_EBX] =
1820 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1821 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1822 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1823 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1824 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1825 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1826 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1827 CPUID_7_0_EBX_AVX512VL,
1828 /* Missing: XSAVES (not supported by some Linux versions,
1829 * including v4.1 to v4.12).
1830 * KVM doesn't yet expose any XSAVES state save component,
1831 * and the only one defined in Skylake (processor tracing)
1832 * probably will block migration anyway.
1833 */
1834 .features[FEAT_XSAVE] =
1835 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1836 CPUID_XSAVE_XGETBV1,
1837 .features[FEAT_6_EAX] =
1838 CPUID_6_EAX_ARAT,
1839 .xlevel = 0x80000008,
1840 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
1841 },
1842 {
1843 .name = "KnightsMill",
1844 .level = 0xd,
1845 .vendor = CPUID_VENDOR_INTEL,
1846 .family = 6,
1847 .model = 133,
1848 .stepping = 0,
1849 .features[FEAT_1_EDX] =
1850 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
1851 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
1852 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
1853 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
1854 CPUID_PSE | CPUID_DE | CPUID_FP87,
1855 .features[FEAT_1_ECX] =
1856 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1857 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1858 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1859 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1860 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1861 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1862 .features[FEAT_8000_0001_EDX] =
1863 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1864 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1865 .features[FEAT_8000_0001_ECX] =
1866 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1867 .features[FEAT_7_0_EBX] =
1868 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
1869 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
1870 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
1871 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
1872 CPUID_7_0_EBX_AVX512ER,
1873 .features[FEAT_7_0_ECX] =
1874 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
1875 .features[FEAT_7_0_EDX] =
1876 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
1877 .features[FEAT_XSAVE] =
1878 CPUID_XSAVE_XSAVEOPT,
1879 .features[FEAT_6_EAX] =
1880 CPUID_6_EAX_ARAT,
1881 .xlevel = 0x80000008,
1882 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
1883 },
1884 {
1885 .name = "Opteron_G1",
1886 .level = 5,
1887 .vendor = CPUID_VENDOR_AMD,
1888 .family = 15,
1889 .model = 6,
1890 .stepping = 1,
1891 .features[FEAT_1_EDX] =
1892 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1893 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1894 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1895 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1896 CPUID_DE | CPUID_FP87,
1897 .features[FEAT_1_ECX] =
1898 CPUID_EXT_SSE3,
1899 .features[FEAT_8000_0001_EDX] =
1900 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1901 .xlevel = 0x80000008,
1902 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1903 },
1904 {
1905 .name = "Opteron_G2",
1906 .level = 5,
1907 .vendor = CPUID_VENDOR_AMD,
1908 .family = 15,
1909 .model = 6,
1910 .stepping = 1,
1911 .features[FEAT_1_EDX] =
1912 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1913 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1914 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1915 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1916 CPUID_DE | CPUID_FP87,
1917 .features[FEAT_1_ECX] =
1918 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1919 /* Missing: CPUID_EXT2_RDTSCP */
1920 .features[FEAT_8000_0001_EDX] =
1921 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1922 .features[FEAT_8000_0001_ECX] =
1923 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1924 .xlevel = 0x80000008,
1925 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1926 },
1927 {
1928 .name = "Opteron_G3",
1929 .level = 5,
1930 .vendor = CPUID_VENDOR_AMD,
1931 .family = 16,
1932 .model = 2,
1933 .stepping = 3,
1934 .features[FEAT_1_EDX] =
1935 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1936 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1937 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1938 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1939 CPUID_DE | CPUID_FP87,
1940 .features[FEAT_1_ECX] =
1941 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1942 CPUID_EXT_SSE3,
1943 /* Missing: CPUID_EXT2_RDTSCP */
1944 .features[FEAT_8000_0001_EDX] =
1945 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1946 .features[FEAT_8000_0001_ECX] =
1947 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1948 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1949 .xlevel = 0x80000008,
1950 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1951 },
1952 {
1953 .name = "Opteron_G4",
1954 .level = 0xd,
1955 .vendor = CPUID_VENDOR_AMD,
1956 .family = 21,
1957 .model = 1,
1958 .stepping = 2,
1959 .features[FEAT_1_EDX] =
1960 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1961 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1962 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1963 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1964 CPUID_DE | CPUID_FP87,
1965 .features[FEAT_1_ECX] =
1966 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1967 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1968 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1969 CPUID_EXT_SSE3,
1970 /* Missing: CPUID_EXT2_RDTSCP */
1971 .features[FEAT_8000_0001_EDX] =
1972 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1973 CPUID_EXT2_SYSCALL,
1974 .features[FEAT_8000_0001_ECX] =
1975 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1976 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1977 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1978 CPUID_EXT3_LAHF_LM,
1979 /* no xsaveopt! */
1980 .xlevel = 0x8000001A,
1981 .model_id = "AMD Opteron 62xx class CPU",
1982 },
1983 {
1984 .name = "Opteron_G5",
1985 .level = 0xd,
1986 .vendor = CPUID_VENDOR_AMD,
1987 .family = 21,
1988 .model = 2,
1989 .stepping = 0,
1990 .features[FEAT_1_EDX] =
1991 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1992 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1993 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1994 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1995 CPUID_DE | CPUID_FP87,
1996 .features[FEAT_1_ECX] =
1997 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1998 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1999 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2000 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2001 /* Missing: CPUID_EXT2_RDTSCP */
2002 .features[FEAT_8000_0001_EDX] =
2003 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2004 CPUID_EXT2_SYSCALL,
2005 .features[FEAT_8000_0001_ECX] =
2006 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2007 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2008 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2009 CPUID_EXT3_LAHF_LM,
2010 /* no xsaveopt! */
2011 .xlevel = 0x8000001A,
2012 .model_id = "AMD Opteron 63xx class CPU",
2013 },
2014 {
2015 .name = "EPYC",
2016 .level = 0xd,
2017 .vendor = CPUID_VENDOR_AMD,
2018 .family = 23,
2019 .model = 1,
2020 .stepping = 2,
2021 .features[FEAT_1_EDX] =
2022 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2023 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2024 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2025 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2026 CPUID_VME | CPUID_FP87,
2027 .features[FEAT_1_ECX] =
2028 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2029 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2030 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2031 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2032 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2033 .features[FEAT_8000_0001_EDX] =
2034 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2035 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2036 CPUID_EXT2_SYSCALL,
2037 .features[FEAT_8000_0001_ECX] =
2038 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2039 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2040 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2041 .features[FEAT_7_0_EBX] =
2042 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2043 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2044 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2045 CPUID_7_0_EBX_SHA_NI,
2046 /* Missing: XSAVES (not supported by some Linux versions,
2047 * including v4.1 to v4.12).
2048 * KVM doesn't yet expose any XSAVES state save component.
2049 */
2050 .features[FEAT_XSAVE] =
2051 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2052 CPUID_XSAVE_XGETBV1,
2053 .features[FEAT_6_EAX] =
2054 CPUID_6_EAX_ARAT,
2055 .xlevel = 0x8000000A,
2056 .model_id = "AMD EPYC Processor",
2057 },
2058 {
2059 .name = "EPYC-IBPB",
2060 .level = 0xd,
2061 .vendor = CPUID_VENDOR_AMD,
2062 .family = 23,
2063 .model = 1,
2064 .stepping = 2,
2065 .features[FEAT_1_EDX] =
2066 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2067 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2068 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2069 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2070 CPUID_VME | CPUID_FP87,
2071 .features[FEAT_1_ECX] =
2072 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2073 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2074 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2075 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2076 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2077 .features[FEAT_8000_0001_EDX] =
2078 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2079 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2080 CPUID_EXT2_SYSCALL,
2081 .features[FEAT_8000_0001_ECX] =
2082 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2083 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2084 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2085 .features[FEAT_8000_0008_EBX] =
2086 CPUID_8000_0008_EBX_IBPB,
2087 .features[FEAT_7_0_EBX] =
2088 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2089 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2090 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2091 CPUID_7_0_EBX_SHA_NI,
2092 /* Missing: XSAVES (not supported by some Linux versions,
2093 * including v4.1 to v4.12).
2094 * KVM doesn't yet expose any XSAVES state save component.
2095 */
2096 .features[FEAT_XSAVE] =
2097 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2098 CPUID_XSAVE_XGETBV1,
2099 .features[FEAT_6_EAX] =
2100 CPUID_6_EAX_ARAT,
2101 .xlevel = 0x8000000A,
2102 .model_id = "AMD EPYC Processor (with IBPB)",
2103 },
2104 };
2105
2106 typedef struct PropValue {
2107 const char *prop, *value;
2108 } PropValue;
2109
2110 /* KVM-specific features that are automatically added/removed
2111 * from all CPU models when KVM is enabled.
2112 */
2113 static PropValue kvm_default_props[] = {
2114 { "kvmclock", "on" },
2115 { "kvm-nopiodelay", "on" },
2116 { "kvm-asyncpf", "on" },
2117 { "kvm-steal-time", "on" },
2118 { "kvm-pv-eoi", "on" },
2119 { "kvmclock-stable-bit", "on" },
2120 { "x2apic", "on" },
2121 { "acpi", "off" },
2122 { "monitor", "off" },
2123 { "svm", "off" },
2124 { NULL, NULL },
2125 };
2126
2127 /* TCG-specific defaults that override all CPU models when using TCG
2128 */
2129 static PropValue tcg_default_props[] = {
2130 { "vme", "off" },
2131 { NULL, NULL },
2132 };
2133
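/*
 * Illustrative sketch: both tables above are plain { property, value } pairs
 * terminated by { NULL, NULL }.  They are consumed by x86_cpu_apply_props()
 * further below, which does roughly:
 *
 *     PropValue *pv;
 *     for (pv = kvm_default_props; pv->prop; pv++) {
 *         object_property_parse(OBJECT(cpu), pv->value, pv->prop,
 *                               &error_abort);
 *     }
 *
 * Because these defaults are applied while the CPU model is being loaded,
 * an explicit user setting such as "-cpu Broadwell,kvm-steal-time=off" is
 * still expected to override them.
 */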
2134
2135 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2136 {
2137 PropValue *pv;
2138 for (pv = kvm_default_props; pv->prop; pv++) {
2139 if (!strcmp(pv->prop, prop)) {
2140 pv->value = value;
2141 break;
2142 }
2143 }
2144
2145 /* It is valid to call this function only for properties that
2146 * are already present in the kvm_default_props table.
2147 */
2148 assert(pv->prop);
2149 }
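/*
 * Usage example: x86_cpu_load_def() below relies on this helper to drop the
 * x2apic default when KVM has no in-kernel irqchip:
 *
 *     if (!kvm_irqchip_in_kernel()) {
 *         x86_cpu_change_kvm_default("x2apic", "off");
 *     }
 *
 * Calling it with a property name that is not already listed in
 * kvm_default_props trips the assert() above.
 */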
2150
2151 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2152 bool migratable_only);
2153
2154 static bool lmce_supported(void)
2155 {
2156 uint64_t mce_cap = 0;
2157
2158 #ifdef CONFIG_KVM
2159 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2160 return false;
2161 }
2162 #endif
2163
2164 return !!(mce_cap & MCG_LMCE_P);
2165 }
2166
2167 #define CPUID_MODEL_ID_SZ 48
2168
2169 /**
2170 * cpu_x86_fill_model_id:
2171 * Get CPUID model ID string from host CPU.
2172 *
2173 * @str should have at least CPUID_MODEL_ID_SZ bytes
2174 *
2175 * The function does NOT add a null terminator to the string
2176 * automatically.
2177 */
2178 static int cpu_x86_fill_model_id(char *str)
2179 {
2180 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2181 int i;
2182
2183 for (i = 0; i < 3; i++) {
2184 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2185 memcpy(str + i * 16 + 0, &eax, 4);
2186 memcpy(str + i * 16 + 4, &ebx, 4);
2187 memcpy(str + i * 16 + 8, &ecx, 4);
2188 memcpy(str + i * 16 + 12, &edx, 4);
2189 }
2190 return 0;
2191 }
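/*
 * Layout sketch: leaves 0x80000002..0x80000004 each return 16 bytes of the
 * brand string in EAX/EBX/ECX/EDX, so the three iterations above fill
 * exactly CPUID_MODEL_ID_SZ (48) bytes.  A typical caller (see
 * max_x86_cpu_initfn() below) therefore reserves one extra byte and
 * zero-initializes the buffer to get the terminator:
 *
 *     char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
 *     cpu_x86_fill_model_id(model_id);
 */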
2192
2193 static Property max_x86_cpu_properties[] = {
2194 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2195 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2196 DEFINE_PROP_END_OF_LIST()
2197 };
2198
2199 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2200 {
2201 DeviceClass *dc = DEVICE_CLASS(oc);
2202 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2203
2204 xcc->ordering = 9;
2205
2206 xcc->model_description =
2207 "Enables all features supported by the accelerator in the current host";
2208
2209 dc->props = max_x86_cpu_properties;
2210 }
2211
2212 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2213
2214 static void max_x86_cpu_initfn(Object *obj)
2215 {
2216 X86CPU *cpu = X86_CPU(obj);
2217 CPUX86State *env = &cpu->env;
2218 KVMState *s = kvm_state;
2219
2220 /* We can't fill the features array here because we don't know yet if
2221 * "migratable" is true or false.
2222 */
2223 cpu->max_features = true;
2224
2225 if (accel_uses_host_cpuid()) {
2226 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2227 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2228 int family, model, stepping;
2229 X86CPUDefinition host_cpudef = { };
2230 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2231
2232 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2233 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2234
2235 host_vendor_fms(vendor, &family, &model, &stepping);
2236
2237 cpu_x86_fill_model_id(model_id);
2238
2239 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2240 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2241 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2242 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2243 &error_abort);
2244 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2245 &error_abort);
2246
2247 if (kvm_enabled()) {
2248 env->cpuid_min_level =
2249 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2250 env->cpuid_min_xlevel =
2251 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2252 env->cpuid_min_xlevel2 =
2253 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2254 } else {
2255 env->cpuid_min_level =
2256 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2257 env->cpuid_min_xlevel =
2258 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2259 env->cpuid_min_xlevel2 =
2260 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2261 }
2262
2263 if (lmce_supported()) {
2264 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2265 }
2266 } else {
2267 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2268 "vendor", &error_abort);
2269 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2270 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2271 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2272 object_property_set_str(OBJECT(cpu),
2273 "QEMU TCG CPU version " QEMU_HW_VERSION,
2274 "model-id", &error_abort);
2275 }
2276
2277 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2278 }
2279
2280 static const TypeInfo max_x86_cpu_type_info = {
2281 .name = X86_CPU_TYPE_NAME("max"),
2282 .parent = TYPE_X86_CPU,
2283 .instance_init = max_x86_cpu_initfn,
2284 .class_init = max_x86_cpu_class_init,
2285 };
2286
2287 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2288 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2289 {
2290 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2291
2292 xcc->host_cpuid_required = true;
2293 xcc->ordering = 8;
2294
2295 if (kvm_enabled()) {
2296 xcc->model_description =
2297 "KVM processor with all supported host features ";
2298 } else if (hvf_enabled()) {
2299 xcc->model_description =
2300 "HVF processor with all supported host features ";
2301 }
2302 }
2303
2304 static const TypeInfo host_x86_cpu_type_info = {
2305 .name = X86_CPU_TYPE_NAME("host"),
2306 .parent = X86_CPU_TYPE_NAME("max"),
2307 .class_init = host_x86_cpu_class_init,
2308 };
2309
2310 #endif
2311
2312 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2313 {
2314 FeatureWordInfo *f = &feature_word_info[w];
2315 int i;
2316
2317 for (i = 0; i < 32; ++i) {
2318 if ((1UL << i) & mask) {
2319 const char *reg = get_register_name_32(f->cpuid_reg);
2320 assert(reg);
2321 warn_report("%s doesn't support requested feature: "
2322 "CPUID.%02XH:%s%s%s [bit %d]",
2323 accel_uses_host_cpuid() ? "host" : "TCG",
2324 f->cpuid_eax, reg,
2325 f->feat_names[i] ? "." : "",
2326 f->feat_names[i] ? f->feat_names[i] : "", i);
2327 }
2328 }
2329 }
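/*
 * Example of the resulting diagnostic (illustrative): requesting avx512f on
 * a host or TCG configuration that lacks it produces a warning along the
 * lines of
 *
 *     warning: host doesn't support requested feature:
 *              CPUID.07H:EBX.avx512f [bit 16]
 *
 * i.e. the CPUID leaf, register, feature name and bit number of each
 * filtered-out feature are reported individually.
 */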
2330
2331 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2332 const char *name, void *opaque,
2333 Error **errp)
2334 {
2335 X86CPU *cpu = X86_CPU(obj);
2336 CPUX86State *env = &cpu->env;
2337 int64_t value;
2338
2339 value = (env->cpuid_version >> 8) & 0xf;
2340 if (value == 0xf) {
2341 value += (env->cpuid_version >> 20) & 0xff;
2342 }
2343 visit_type_int(v, name, &value, errp);
2344 }
2345
2346 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2347 const char *name, void *opaque,
2348 Error **errp)
2349 {
2350 X86CPU *cpu = X86_CPU(obj);
2351 CPUX86State *env = &cpu->env;
2352 const int64_t min = 0;
2353 const int64_t max = 0xff + 0xf;
2354 Error *local_err = NULL;
2355 int64_t value;
2356
2357 visit_type_int(v, name, &value, &local_err);
2358 if (local_err) {
2359 error_propagate(errp, local_err);
2360 return;
2361 }
2362 if (value < min || value > max) {
2363 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2364 name ? name : "null", value, min, max);
2365 return;
2366 }
2367
2368 env->cpuid_version &= ~0xff00f00;
2369 if (value > 0x0f) {
2370 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2371 } else {
2372 env->cpuid_version |= value << 8;
2373 }
2374 }
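/*
 * Worked example: the encoding above follows the CPUID family rules, where
 * values above 0x0f spill into the extended-family field (bits 27..20).
 * Setting family = 21 (0x15, as used by the Opteron_G4/G5 models above)
 * stores
 *
 *     0xf00 | ((21 - 15) << 20) == 0x00600f00
 *
 * and the getter reverses it: base family 0xf plus extended family 6
 * yields 21 again.
 */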
2375
2376 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2377 const char *name, void *opaque,
2378 Error **errp)
2379 {
2380 X86CPU *cpu = X86_CPU(obj);
2381 CPUX86State *env = &cpu->env;
2382 int64_t value;
2383
2384 value = (env->cpuid_version >> 4) & 0xf;
2385 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2386 visit_type_int(v, name, &value, errp);
2387 }
2388
2389 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2390 const char *name, void *opaque,
2391 Error **errp)
2392 {
2393 X86CPU *cpu = X86_CPU(obj);
2394 CPUX86State *env = &cpu->env;
2395 const int64_t min = 0;
2396 const int64_t max = 0xff;
2397 Error *local_err = NULL;
2398 int64_t value;
2399
2400 visit_type_int(v, name, &value, &local_err);
2401 if (local_err) {
2402 error_propagate(errp, local_err);
2403 return;
2404 }
2405 if (value < min || value > max) {
2406 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2407 name ? name : "null", value, min, max);
2408 return;
2409 }
2410
2411 env->cpuid_version &= ~0xf00f0;
2412 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2413 }
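/*
 * Worked example: the model number is split into the 4-bit model field
 * (bits 7..4) and the 4-bit extended-model field (bits 19..16).  Setting
 * model = 61 (0x3d, the Broadwell models above) stores
 *
 *     ((0x3d & 0xf) << 4) | ((0x3d >> 4) << 16) == 0x000300d0
 *
 * and the getter reassembles 0x3d from the two nibbles.
 */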
2414
2415 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2416 const char *name, void *opaque,
2417 Error **errp)
2418 {
2419 X86CPU *cpu = X86_CPU(obj);
2420 CPUX86State *env = &cpu->env;
2421 int64_t value;
2422
2423 value = env->cpuid_version & 0xf;
2424 visit_type_int(v, name, &value, errp);
2425 }
2426
2427 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2428 const char *name, void *opaque,
2429 Error **errp)
2430 {
2431 X86CPU *cpu = X86_CPU(obj);
2432 CPUX86State *env = &cpu->env;
2433 const int64_t min = 0;
2434 const int64_t max = 0xf;
2435 Error *local_err = NULL;
2436 int64_t value;
2437
2438 visit_type_int(v, name, &value, &local_err);
2439 if (local_err) {
2440 error_propagate(errp, local_err);
2441 return;
2442 }
2443 if (value < min || value > max) {
2444 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2445 name ? name : "null", value, min, max);
2446 return;
2447 }
2448
2449 env->cpuid_version &= ~0xf;
2450 env->cpuid_version |= value & 0xf;
2451 }
2452
2453 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2454 {
2455 X86CPU *cpu = X86_CPU(obj);
2456 CPUX86State *env = &cpu->env;
2457 char *value;
2458
2459 value = g_malloc(CPUID_VENDOR_SZ + 1);
2460 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2461 env->cpuid_vendor3);
2462 return value;
2463 }
2464
2465 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2466 Error **errp)
2467 {
2468 X86CPU *cpu = X86_CPU(obj);
2469 CPUX86State *env = &cpu->env;
2470 int i;
2471
2472 if (strlen(value) != CPUID_VENDOR_SZ) {
2473 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2474 return;
2475 }
2476
2477 env->cpuid_vendor1 = 0;
2478 env->cpuid_vendor2 = 0;
2479 env->cpuid_vendor3 = 0;
2480 for (i = 0; i < 4; i++) {
2481 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
2482 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2483 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
2484 }
2485 }
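/*
 * Example: the 12-character vendor string is packed four bytes per
 * register, least-significant byte first, which reproduces the raw
 * CPUID.0 register values.  For "GenuineIntel":
 *
 *     cpuid_vendor1 (EBX) = 0x756e6547   "Genu"
 *     cpuid_vendor2 (EDX) = 0x49656e69   "ineI"
 *     cpuid_vendor3 (ECX) = 0x6c65746e   "ntel"
 */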
2486
2487 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2488 {
2489 X86CPU *cpu = X86_CPU(obj);
2490 CPUX86State *env = &cpu->env;
2491 char *value;
2492 int i;
2493
2494 value = g_malloc(48 + 1);
2495 for (i = 0; i < 48; i++) {
2496 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
2497 }
2498 value[48] = '\0';
2499 return value;
2500 }
2501
2502 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2503 Error **errp)
2504 {
2505 X86CPU *cpu = X86_CPU(obj);
2506 CPUX86State *env = &cpu->env;
2507 int c, len, i;
2508
2509 if (model_id == NULL) {
2510 model_id = "";
2511 }
2512 len = strlen(model_id);
2513 memset(env->cpuid_model, 0, 48);
2514 for (i = 0; i < 48; i++) {
2515 if (i >= len) {
2516 c = '\0';
2517 } else {
2518 c = (uint8_t)model_id[i];
2519 }
2520 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
2521 }
2522 }
2523
2524 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2525 void *opaque, Error **errp)
2526 {
2527 X86CPU *cpu = X86_CPU(obj);
2528 int64_t value;
2529
2530 value = cpu->env.tsc_khz * 1000;
2531 visit_type_int(v, name, &value, errp);
2532 }
2533
2534 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2535 void *opaque, Error **errp)
2536 {
2537 X86CPU *cpu = X86_CPU(obj);
2538 const int64_t min = 0;
2539 const int64_t max = INT64_MAX;
2540 Error *local_err = NULL;
2541 int64_t value;
2542
2543 visit_type_int(v, name, &value, &local_err);
2544 if (local_err) {
2545 error_propagate(errp, local_err);
2546 return;
2547 }
2548 if (value < min || value > max) {
2549 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2550 name ? name : "null", value, min, max);
2551 return;
2552 }
2553
2554 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2555 }
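/*
 * Note the unit conversion: the QOM property is expressed in Hz while the
 * internal tsc_khz field is kept in kHz, hence the division by 1000.  For
 * example, "-cpu Broadwell,tsc-frequency=2000000000" ends up as
 * tsc_khz == 2000000.
 */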
2556
2557 /* Generic getter for "feature-words" and "filtered-features" properties */
2558 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2559 const char *name, void *opaque,
2560 Error **errp)
2561 {
2562 uint32_t *array = (uint32_t *)opaque;
2563 FeatureWord w;
2564 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2565 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2566 X86CPUFeatureWordInfoList *list = NULL;
2567
2568 for (w = 0; w < FEATURE_WORDS; w++) {
2569 FeatureWordInfo *wi = &feature_word_info[w];
2570 X86CPUFeatureWordInfo *qwi = &word_infos[w];
2571 qwi->cpuid_input_eax = wi->cpuid_eax;
2572 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2573 qwi->cpuid_input_ecx = wi->cpuid_ecx;
2574 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2575 qwi->features = array[w];
2576
2577 /* List will be in reverse order, but order shouldn't matter */
2578 list_entries[w].next = list;
2579 list_entries[w].value = &word_infos[w];
2580 list = &list_entries[w];
2581 }
2582
2583 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
2584 }
2585
2586 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2587 void *opaque, Error **errp)
2588 {
2589 X86CPU *cpu = X86_CPU(obj);
2590 int64_t value = cpu->hyperv_spinlock_attempts;
2591
2592 visit_type_int(v, name, &value, errp);
2593 }
2594
2595 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2596 void *opaque, Error **errp)
2597 {
2598 const int64_t min = 0xFFF;
2599 const int64_t max = UINT_MAX;
2600 X86CPU *cpu = X86_CPU(obj);
2601 Error *err = NULL;
2602 int64_t value;
2603
2604 visit_type_int(v, name, &value, &err);
2605 if (err) {
2606 error_propagate(errp, err);
2607 return;
2608 }
2609
2610 if (value < min || value > max) {
2611 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2612 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2613 object_get_typename(obj), name ? name : "null",
2614 value, min, max);
2615 return;
2616 }
2617 cpu->hyperv_spinlock_attempts = value;
2618 }
2619
2620 static const PropertyInfo qdev_prop_spinlocks = {
2621 .name = "int",
2622 .get = x86_get_hv_spinlocks,
2623 .set = x86_set_hv_spinlocks,
2624 };
2625
2626 /* Convert all '_' in a feature string option name to '-', to make feature
2627 * name conform to QOM property naming rule, which uses '-' instead of '_'.
2628 */
2629 static inline void feat2prop(char *s)
2630 {
2631 while ((s = strchr(s, '_'))) {
2632 *s = '-';
2633 }
2634 }
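/*
 * Example: feat2prop() rewrites the string in place, so a legacy spelling
 * such as "tsc_freq" is matched against the "tsc-freq" special case below,
 * and "kvm_steal_time" maps to the "kvm-steal-time" property listed in
 * kvm_default_props above.
 */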
2635
2636 /* Return the feature property name for a feature flag bit */
2637 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2638 {
2639 /* XSAVE components are automatically enabled by other features,
2640 * so return the original feature name instead
2641 */
2642 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2643 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2644
2645 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2646 x86_ext_save_areas[comp].bits) {
2647 w = x86_ext_save_areas[comp].feature;
2648 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2649 }
2650 }
2651
2652 assert(bitnr < 32);
2653 assert(w < FEATURE_WORDS);
2654 return feature_word_info[w].feat_names[bitnr];
2655 }
2656
2657 /* Compatibility hack to maintain legacy +-feat semantics,
2658 * where +-feat overwrites any feature set by
2659 * feat=on|off even if the latter is parsed after +-feat
2660 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2661 */
2662 static GList *plus_features, *minus_features;
2663
2664 static gint compare_string(gconstpointer a, gconstpointer b)
2665 {
2666 return g_strcmp0(a, b);
2667 }
2668
2669 /* Parse "+feature,-feature,feature=foo" CPU feature string
2670 */
2671 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2672 Error **errp)
2673 {
2674 char *featurestr; /* Single "key=value" string being parsed */
2675 static bool cpu_globals_initialized;
2676 bool ambiguous = false;
2677
2678 if (cpu_globals_initialized) {
2679 return;
2680 }
2681 cpu_globals_initialized = true;
2682
2683 if (!features) {
2684 return;
2685 }
2686
2687 for (featurestr = strtok(features, ",");
2688 featurestr;
2689 featurestr = strtok(NULL, ",")) {
2690 const char *name;
2691 const char *val = NULL;
2692 char *eq = NULL;
2693 char num[32];
2694 GlobalProperty *prop;
2695
2696 /* Compatibility syntax: */
2697 if (featurestr[0] == '+') {
2698 plus_features = g_list_append(plus_features,
2699 g_strdup(featurestr + 1));
2700 continue;
2701 } else if (featurestr[0] == '-') {
2702 minus_features = g_list_append(minus_features,
2703 g_strdup(featurestr + 1));
2704 continue;
2705 }
2706
2707 eq = strchr(featurestr, '=');
2708 if (eq) {
2709 *eq++ = 0;
2710 val = eq;
2711 } else {
2712 val = "on";
2713 }
2714
2715 feat2prop(featurestr);
2716 name = featurestr;
2717
2718 if (g_list_find_custom(plus_features, name, compare_string)) {
2719 warn_report("Ambiguous CPU model string. "
2720 "Don't mix both \"+%s\" and \"%s=%s\"",
2721 name, name, val);
2722 ambiguous = true;
2723 }
2724 if (g_list_find_custom(minus_features, name, compare_string)) {
2725 warn_report("Ambiguous CPU model string. "
2726 "Don't mix both \"-%s\" and \"%s=%s\"",
2727 name, name, val);
2728 ambiguous = true;
2729 }
2730
2731 /* Special case: */
2732 if (!strcmp(name, "tsc-freq")) {
2733 int ret;
2734 uint64_t tsc_freq;
2735
2736 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
2737 if (ret < 0 || tsc_freq > INT64_MAX) {
2738 error_setg(errp, "bad numerical value %s", val);
2739 return;
2740 }
2741 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2742 val = num;
2743 name = "tsc-frequency";
2744 }
2745
2746 prop = g_new0(typeof(*prop), 1);
2747 prop->driver = typename;
2748 prop->property = g_strdup(name);
2749 prop->value = g_strdup(val);
2750 prop->errp = &error_fatal;
2751 qdev_prop_register_global(prop);
2752 }
2753
2754 if (ambiguous) {
2755 warn_report("Compatibility of ambiguous CPU model "
2756 "strings won't be kept on future QEMU versions");
2757 }
2758 }
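/*
 * Example feature string (illustrative): under the rules above,
 *
 *     -cpu Broadwell,+rtm,-hle,tsc_freq=2.5G
 *
 * queues "rtm" in plus_features and "hle" in minus_features (both handled
 * later during feature expansion), and converts the metric suffix so that
 * a global "tsc-frequency=2500000000" property is registered for the CPU
 * type.
 */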
2759
2760 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2761 static int x86_cpu_filter_features(X86CPU *cpu);
2762
2763 /* Check for missing features that may prevent the CPU class from
2764 * running using the current machine and accelerator.
2765 */
2766 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2767 strList **missing_feats)
2768 {
2769 X86CPU *xc;
2770 FeatureWord w;
2771 Error *err = NULL;
2772 strList **next = missing_feats;
2773
2774 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
2775 strList *new = g_new0(strList, 1);
2776 new->value = g_strdup("kvm");
2777 *missing_feats = new;
2778 return;
2779 }
2780
2781 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2782
2783 x86_cpu_expand_features(xc, &err);
2784 if (err) {
2785 /* Errors at x86_cpu_expand_features should never happen,
2786 * but in case they do, just report the model as not
2787 * runnable at all using the "type" property.
2788 */
2789 strList *new = g_new0(strList, 1);
2790 new->value = g_strdup("type");
2791 *next = new;
2792 next = &new->next;
2793 }
2794
2795 x86_cpu_filter_features(xc);
2796
2797 for (w = 0; w < FEATURE_WORDS; w++) {
2798 uint32_t filtered = xc->filtered_features[w];
2799 int i;
2800 for (i = 0; i < 32; i++) {
2801 if (filtered & (1UL << i)) {
2802 strList *new = g_new0(strList, 1);
2803 new->value = g_strdup(x86_cpu_feature_name(w, i));
2804 *next = new;
2805 next = &new->next;
2806 }
2807 }
2808 }
2809
2810 object_unref(OBJECT(xc));
2811 }
2812
2813 /* Print all cpuid feature names in featureset
2814 */
2815 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2816 {
2817 int bit;
2818 bool first = true;
2819
2820 for (bit = 0; bit < 32; bit++) {
2821 if (featureset[bit]) {
2822 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2823 first = false;
2824 }
2825 }
2826 }
2827
2828 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2829 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2830 {
2831 ObjectClass *class_a = (ObjectClass *)a;
2832 ObjectClass *class_b = (ObjectClass *)b;
2833 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2834 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2835 const char *name_a, *name_b;
2836
2837 if (cc_a->ordering != cc_b->ordering) {
2838 return cc_a->ordering - cc_b->ordering;
2839 } else {
2840 name_a = object_class_get_name(class_a);
2841 name_b = object_class_get_name(class_b);
2842 return strcmp(name_a, name_b);
2843 }
2844 }
2845
2846 static GSList *get_sorted_cpu_model_list(void)
2847 {
2848 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2849 list = g_slist_sort(list, x86_cpu_list_compare);
2850 return list;
2851 }
2852
2853 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2854 {
2855 ObjectClass *oc = data;
2856 X86CPUClass *cc = X86_CPU_CLASS(oc);
2857 CPUListState *s = user_data;
2858 char *name = x86_cpu_class_get_model_name(cc);
2859 const char *desc = cc->model_description;
2860 if (!desc && cc->cpu_def) {
2861 desc = cc->cpu_def->model_id;
2862 }
2863
2864 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2865 name, desc);
2866 g_free(name);
2867 }
2868
2869 /* list available CPU models and flags */
2870 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2871 {
2872 int i;
2873 CPUListState s = {
2874 .file = f,
2875 .cpu_fprintf = cpu_fprintf,
2876 };
2877 GSList *list;
2878
2879 (*cpu_fprintf)(f, "Available CPUs:\n");
2880 list = get_sorted_cpu_model_list();
2881 g_slist_foreach(list, x86_cpu_list_entry, &s);
2882 g_slist_free(list);
2883
2884 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2885 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2886 FeatureWordInfo *fw = &feature_word_info[i];
2887
2888 (*cpu_fprintf)(f, " ");
2889 listflags(f, cpu_fprintf, fw->feat_names);
2890 (*cpu_fprintf)(f, "\n");
2891 }
2892 }
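/*
 * This is what backs "-cpu help"; the model-table portion of the output
 * looks roughly like
 *
 *     Available CPUs:
 *     x86        Broadwell  Intel Core Processor (Broadwell)
 *     x86             EPYC  AMD EPYC Processor
 *     ...
 *
 * followed by one line of recognized CPUID flag names per feature word.
 */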
2893
2894 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2895 {
2896 ObjectClass *oc = data;
2897 X86CPUClass *cc = X86_CPU_CLASS(oc);
2898 CpuDefinitionInfoList **cpu_list = user_data;
2899 CpuDefinitionInfoList *entry;
2900 CpuDefinitionInfo *info;
2901
2902 info = g_malloc0(sizeof(*info));
2903 info->name = x86_cpu_class_get_model_name(cc);
2904 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2905 info->has_unavailable_features = true;
2906 info->q_typename = g_strdup(object_class_get_name(oc));
2907 info->migration_safe = cc->migration_safe;
2908 info->has_migration_safe = true;
2909 info->q_static = cc->static_model;
2910
2911 entry = g_malloc0(sizeof(*entry));
2912 entry->value = info;
2913 entry->next = *cpu_list;
2914 *cpu_list = entry;
2915 }
2916
2917 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2918 {
2919 CpuDefinitionInfoList *cpu_list = NULL;
2920 GSList *list = get_sorted_cpu_model_list();
2921 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2922 g_slist_free(list);
2923 return cpu_list;
2924 }
2925
2926 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2927 bool migratable_only)
2928 {
2929 FeatureWordInfo *wi = &feature_word_info[w];
2930 uint32_t r;
2931
2932 if (kvm_enabled()) {
2933 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2934 wi->cpuid_ecx,
2935 wi->cpuid_reg);
2936 } else if (hvf_enabled()) {
2937 r = hvf_get_supported_cpuid(wi->cpuid_eax,
2938 wi->cpuid_ecx,
2939 wi->cpuid_reg);
2940 } else if (tcg_enabled()) {
2941 r = wi->tcg_features;
2942 } else {
2943 return ~0;
2944 }
2945 if (migratable_only) {
2946 r &= x86_cpu_get_migratable_flags(w);
2947 }
2948 return r;
2949 }
2950
2951 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2952 {
2953 FeatureWord w;
2954
2955 for (w = 0; w < FEATURE_WORDS; w++) {
2956 report_unavailable_features(w, cpu->filtered_features[w]);
2957 }
2958 }
2959
2960 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2961 {
2962 PropValue *pv;
2963 for (pv = props; pv->prop; pv++) {
2964 if (!pv->value) {
2965 continue;
2966 }
2967 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2968 &error_abort);
2969 }
2970 }
2971
2972 /* Load data from X86CPUDefinition into a X86CPU object
2973 */
2974 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2975 {
2976 CPUX86State *env = &cpu->env;
2977 const char *vendor;
2978 char host_vendor[CPUID_VENDOR_SZ + 1];
2979 FeatureWord w;
2980
2981 /*NOTE: any property set by this function should be returned by
2982 * x86_cpu_static_props(), so static expansion of
2983 * query-cpu-model-expansion is always complete.
2984 */
2985
2986 /* CPU models only set _minimum_ values for level/xlevel: */
2987 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
2988 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2989
2990 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2991 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2992 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2993 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2994 for (w = 0; w < FEATURE_WORDS; w++) {
2995 env->features[w] = def->features[w];
2996 }
2997
2998 /* Special cases not set in the X86CPUDefinition structs: */
2999 /* TODO: in-kernel irqchip for hvf */
3000 if (kvm_enabled()) {
3001 if (!kvm_irqchip_in_kernel()) {
3002 x86_cpu_change_kvm_default("x2apic", "off");
3003 }
3004
3005 x86_cpu_apply_props(cpu, kvm_default_props);
3006 } else if (tcg_enabled()) {
3007 x86_cpu_apply_props(cpu, tcg_default_props);
3008 }
3009
3010 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3011
3012 /* sysenter isn't supported in compatibility mode on AMD,
3013 * syscall isn't supported in compatibility mode on Intel.
3014 * Normally we advertise the actual CPU vendor, but you can
3015 * override this using the 'vendor' property if you want to use
3016 * KVM's sysenter/syscall emulation in compatibility mode and
3017 * when doing cross vendor migration
3018 */
3019 vendor = def->vendor;
3020 if (accel_uses_host_cpuid()) {
3021 uint32_t ebx = 0, ecx = 0, edx = 0;
3022 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
3023 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
3024 vendor = host_vendor;
3025 }
3026
3027 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
3028
3029 }
3030
3031 /* Return a QDict containing keys for all properties that can be included
3032 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3033 * must be included in the dictionary.
3034 */
3035 static QDict *x86_cpu_static_props(void)
3036 {
3037 FeatureWord w;
3038 int i;
3039 static const char *props[] = {
3040 "min-level",
3041 "min-xlevel",
3042 "family",
3043 "model",
3044 "stepping",
3045 "model-id",
3046 "vendor",
3047 "lmce",
3048 NULL,
3049 };
3050 static QDict *d;
3051
3052 if (d) {
3053 return d;
3054 }
3055
3056 d = qdict_new();
3057 for (i = 0; props[i]; i++) {
3058 qdict_put_null(d, props[i]);
3059 }
3060
3061 for (w = 0; w < FEATURE_WORDS; w++) {
3062 FeatureWordInfo *fi = &feature_word_info[w];
3063 int bit;
3064 for (bit = 0; bit < 32; bit++) {
3065 if (!fi->feat_names[bit]) {
3066 continue;
3067 }
3068 qdict_put_null(d, fi->feat_names[bit]);
3069 }
3070 }
3071
3072 return d;
3073 }
3074
3075 /* Add an entry to @props dict, with the value for property. */
3076 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3077 {
3078 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3079 &error_abort);
3080
3081 qdict_put_obj(props, prop, value);
3082 }
3083
3084 /* Convert CPU model data from X86CPU object to a property dictionary
3085 * that can recreate exactly the same CPU model.
3086 */
3087 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3088 {
3089 QDict *sprops = x86_cpu_static_props();
3090 const QDictEntry *e;
3091
3092 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3093 const char *prop = qdict_entry_key(e);
3094 x86_cpu_expand_prop(cpu, props, prop);
3095 }
3096 }
3097
3098 /* Convert CPU model data from X86CPU object to a property dictionary
3099 * that can recreate exactly the same CPU model, including every
3100 * writeable QOM property.
3101 */
3102 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3103 {
3104 ObjectPropertyIterator iter;
3105 ObjectProperty *prop;
3106
3107 object_property_iter_init(&iter, OBJECT(cpu));
3108 while ((prop = object_property_iter_next(&iter))) {
3109 /* skip read-only or write-only properties */
3110 if (!prop->get || !prop->set) {
3111 continue;
3112 }
3113
3114 /* "hotplugged" is the only property that is configurable
3115 * on the command-line but will be set differently on CPUs
3116 * created using "-cpu ... -smp ..." and by CPUs created
3117 * on the fly by x86_cpu_from_model() for querying. Skip it.
3118 */
3119 if (!strcmp(prop->name, "hotplugged")) {
3120 continue;
3121 }
3122 x86_cpu_expand_prop(cpu, props, prop->name);
3123 }
3124 }
3125
3126 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3127 {
3128 const QDictEntry *prop;
3129 Error *err = NULL;
3130
3131 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3132 object_property_set_qobject(obj, qdict_entry_value(prop),
3133 qdict_entry_key(prop), &err);
3134 if (err) {
3135 break;
3136 }
3137 }
3138
3139 error_propagate(errp, err);
3140 }
3141
3142 /* Create X86CPU object according to model+props specification */
3143 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3144 {
3145 X86CPU *xc = NULL;
3146 X86CPUClass *xcc;
3147 Error *err = NULL;
3148
3149 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3150 if (xcc == NULL) {
3151 error_setg(&err, "CPU model '%s' not found", model);
3152 goto out;
3153 }
3154
3155 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3156 if (props) {
3157 object_apply_props(OBJECT(xc), props, &err);
3158 if (err) {
3159 goto out;
3160 }
3161 }
3162
3163 x86_cpu_expand_features(xc, &err);
3164 if (err) {
3165 goto out;
3166 }
3167
3168 out:
3169 if (err) {
3170 error_propagate(errp, err);
3171 object_unref(OBJECT(xc));
3172 xc = NULL;
3173 }
3174 return xc;
3175 }
3176
3177 CpuModelExpansionInfo *
3178 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3179 CpuModelInfo *model,
3180 Error **errp)
3181 {
3182 X86CPU *xc = NULL;
3183 Error *err = NULL;
3184 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3185 QDict *props = NULL;
3186 const char *base_name;
3187
3188 xc = x86_cpu_from_model(model->name,
3189 model->has_props ?
3190 qobject_to(QDict, model->props) :
3191 NULL, &err);
3192 if (err) {
3193 goto out;
3194 }
3195
3196 props = qdict_new();
3197
3198 switch (type) {
3199 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3200 /* Static expansion will be based on "base" only */
3201 base_name = "base";
3202 x86_cpu_to_dict(xc, props);
3203 break;
3204 case CPU_MODEL_EXPANSION_TYPE_FULL:
3205 /* As we don't return every single property, full expansion needs
3206 * to keep the original model name+props, and add extra
3207 * properties on top of that.
3208 */
3209 base_name = model->name;
3210 x86_cpu_to_dict_full(xc, props);
3211 break;
3212 default:
3213 error_setg(&err, "Unsupported expansion type");
3214 goto out;
3215 }
3216
3217 if (!props) {
3218 props = qdict_new();
3219 }
3220 x86_cpu_to_dict(xc, props);
3221
3222 ret->model = g_new0(CpuModelInfo, 1);
3223 ret->model->name = g_strdup(base_name);
3224 ret->model->props = QOBJECT(props);
3225 ret->model->has_props = true;
3226
3227 out:
3228 object_unref(OBJECT(xc));
3229 if (err) {
3230 error_propagate(errp, err);
3231 qapi_free_CpuModelExpansionInfo(ret);
3232 ret = NULL;
3233 }
3234 return ret;
3235 }
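/*
 * QMP usage sketch (illustrative): this implements query-cpu-model-expansion
 * for the i386 target, e.g.
 *
 *     { "execute": "query-cpu-model-expansion",
 *       "arguments": { "type": "static",
 *                      "model": { "name": "Broadwell" } } }
 *
 * Static expansion returns a model named "base" plus the property list
 * needed to recreate Broadwell exactly, while type "full" keeps the
 * original model name and reports every settable QOM property on top of it
 * (minus exceptions such as "hotplugged", see x86_cpu_to_dict_full()).
 */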
3236
3237 static gchar *x86_gdb_arch_name(CPUState *cs)
3238 {
3239 #ifdef TARGET_X86_64
3240 return g_strdup("i386:x86-64");
3241 #else
3242 return g_strdup("i386");
3243 #endif
3244 }
3245
3246 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3247 {
3248 X86CPUDefinition *cpudef = data;
3249 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3250
3251 xcc->cpu_def = cpudef;
3252 xcc->migration_safe = true;
3253 }
3254
3255 static void x86_register_cpudef_type(X86CPUDefinition *def)
3256 {
3257 char *typename = x86_cpu_type_name(def->name);
3258 TypeInfo ti = {
3259 .name = typename,
3260 .parent = TYPE_X86_CPU,
3261 .class_init = x86_cpu_cpudef_class_init,
3262 .class_data = def,
3263 };
3264
3265 /* AMD aliases are handled at runtime based on CPUID vendor, so
3266 * they shouldn't be set on the CPU model table.
3267 */
3268 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3269 /* catch mistakes instead of silently truncating model_id when too long */
3270 assert(def->model_id && strlen(def->model_id) <= 48);
3271
3272
3273 type_register(&ti);
3274 g_free(typename);
3275 }
3276
3277 #if !defined(CONFIG_USER_ONLY)
3278
3279 void cpu_clear_apic_feature(CPUX86State *env)
3280 {
3281 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3282 }
3283
3284 #endif /* !CONFIG_USER_ONLY */
3285
3286 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3287 uint32_t *eax, uint32_t *ebx,
3288 uint32_t *ecx, uint32_t *edx)
3289 {
3290 X86CPU *cpu = x86_env_get_cpu(env);
3291 CPUState *cs = CPU(cpu);
3292 uint32_t pkg_offset;
3293 uint32_t limit;
3294 uint32_t signature[3];
3295
3296 /* Calculate & apply limits for different index ranges */
3297 if (index >= 0xC0000000) {
3298 limit = env->cpuid_xlevel2;
3299 } else if (index >= 0x80000000) {
3300 limit = env->cpuid_xlevel;
3301 } else if (index >= 0x40000000) {
3302 limit = 0x40000001;
3303 } else {
3304 limit = env->cpuid_level;
3305 }
3306
3307 if (index > limit) {
3308 /* Intel documentation states that invalid EAX input will
3309 * return the same information as EAX=cpuid_level
3310 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3311 */
3312 index = env->cpuid_level;
3313 }
3314
3315 switch(index) {
3316 case 0:
3317 *eax = env->cpuid_level;
3318 *ebx = env->cpuid_vendor1;
3319 *edx = env->cpuid_vendor2;
3320 *ecx = env->cpuid_vendor3;
3321 break;
3322 case 1:
3323 *eax = env->cpuid_version;
3324 *ebx = (cpu->apic_id << 24) |
3325 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3326 *ecx = env->features[FEAT_1_ECX];
3327 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3328 *ecx |= CPUID_EXT_OSXSAVE;
3329 }
3330 *edx = env->features[FEAT_1_EDX];
3331 if (cs->nr_cores * cs->nr_threads > 1) {
3332 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3333 *edx |= CPUID_HT;
3334 }
3335 break;
3336 case 2:
3337 /* cache info: needed for Pentium Pro compatibility */
3338 if (cpu->cache_info_passthrough) {
3339 host_cpuid(index, 0, eax, ebx, ecx, edx);
3340 break;
3341 }
3342 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3343 *ebx = 0;
3344 if (!cpu->enable_l3_cache) {
3345 *ecx = 0;
3346 } else {
3347 *ecx = L3_N_DESCRIPTOR;
3348 }
3349 *edx = (L1D_DESCRIPTOR << 16) | \
3350 (L1I_DESCRIPTOR << 8) | \
3351 (L2_DESCRIPTOR);
3352 break;
3353 case 4:
3354 /* cache info: needed for Core compatibility */
3355 if (cpu->cache_info_passthrough) {
3356 host_cpuid(index, count, eax, ebx, ecx, edx);
3357 *eax &= ~0xFC000000;
3358 } else {
3359 *eax = 0;
3360 switch (count) {
3361 case 0: /* L1 dcache info */
3362 *eax |= CPUID_4_TYPE_DCACHE | \
3363 CPUID_4_LEVEL(1) | \
3364 CPUID_4_SELF_INIT_LEVEL;
3365 *ebx = (L1D_LINE_SIZE - 1) | \
3366 ((L1D_PARTITIONS - 1) << 12) | \
3367 ((L1D_ASSOCIATIVITY - 1) << 22);
3368 *ecx = L1D_SETS - 1;
3369 *edx = CPUID_4_NO_INVD_SHARING;
3370 break;
3371 case 1: /* L1 icache info */
3372 *eax |= CPUID_4_TYPE_ICACHE | \
3373 CPUID_4_LEVEL(1) | \
3374 CPUID_4_SELF_INIT_LEVEL;
3375 *ebx = (L1I_LINE_SIZE - 1) | \
3376 ((L1I_PARTITIONS - 1) << 12) | \
3377 ((L1I_ASSOCIATIVITY - 1) << 22);
3378 *ecx = L1I_SETS - 1;
3379 *edx = CPUID_4_NO_INVD_SHARING;
3380 break;
3381 case 2: /* L2 cache info */
3382 *eax |= CPUID_4_TYPE_UNIFIED | \
3383 CPUID_4_LEVEL(2) | \
3384 CPUID_4_SELF_INIT_LEVEL;
3385 if (cs->nr_threads > 1) {
3386 *eax |= (cs->nr_threads - 1) << 14;
3387 }
3388 *ebx = (L2_LINE_SIZE - 1) | \
3389 ((L2_PARTITIONS - 1) << 12) | \
3390 ((L2_ASSOCIATIVITY - 1) << 22);
3391 *ecx = L2_SETS - 1;
3392 *edx = CPUID_4_NO_INVD_SHARING;
3393 break;
3394 case 3: /* L3 cache info */
3395 if (!cpu->enable_l3_cache) {
3396 *eax = 0;
3397 *ebx = 0;
3398 *ecx = 0;
3399 *edx = 0;
3400 break;
3401 }
3402 *eax |= CPUID_4_TYPE_UNIFIED | \
3403 CPUID_4_LEVEL(3) | \
3404 CPUID_4_SELF_INIT_LEVEL;
3405 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3406 *eax |= ((1 << pkg_offset) - 1) << 14;
3407 *ebx = (L3_N_LINE_SIZE - 1) | \
3408 ((L3_N_PARTITIONS - 1) << 12) | \
3409 ((L3_N_ASSOCIATIVITY - 1) << 22);
3410 *ecx = L3_N_SETS - 1;
3411 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
3412 break;
3413 default: /* end of info */
3414 *eax = 0;
3415 *ebx = 0;
3416 *ecx = 0;
3417 *edx = 0;
3418 break;
3419 }
3420 }
3421
3422 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3423 if ((*eax & 31) && cs->nr_cores > 1) {
3424 *eax |= (cs->nr_cores - 1) << 26;
3425 }
3426 break;
3427 case 5:
3428 /* mwait info: needed for Core compatibility */
3429 *eax = 0; /* Smallest monitor-line size in bytes */
3430 *ebx = 0; /* Largest monitor-line size in bytes */
3431 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3432 *edx = 0;
3433 break;
3434 case 6:
3435 /* Thermal and Power Leaf */
3436 *eax = env->features[FEAT_6_EAX];
3437 *ebx = 0;
3438 *ecx = 0;
3439 *edx = 0;
3440 break;
3441 case 7:
3442 /* Structured Extended Feature Flags Enumeration Leaf */
3443 if (count == 0) {
3444 *eax = 0; /* Maximum ECX value for sub-leaves */
3445 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3446 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3447 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3448 *ecx |= CPUID_7_0_ECX_OSPKE;
3449 }
3450 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3451 } else {
3452 *eax = 0;
3453 *ebx = 0;
3454 *ecx = 0;
3455 *edx = 0;
3456 }
3457 break;
3458 case 9:
3459 /* Direct Cache Access Information Leaf */
3460 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3461 *ebx = 0;
3462 *ecx = 0;
3463 *edx = 0;
3464 break;
3465 case 0xA:
3466 /* Architectural Performance Monitoring Leaf */
3467 if (kvm_enabled() && cpu->enable_pmu) {
3468 KVMState *s = cs->kvm_state;
3469
3470 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3471 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3472 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3473 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3474 } else if (hvf_enabled() && cpu->enable_pmu) {
3475 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3476 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3477 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3478 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3479 } else {
3480 *eax = 0;
3481 *ebx = 0;
3482 *ecx = 0;
3483 *edx = 0;
3484 }
3485 break;
3486 case 0xB:
3487 /* Extended Topology Enumeration Leaf */
3488 if (!cpu->enable_cpuid_0xb) {
3489 *eax = *ebx = *ecx = *edx = 0;
3490 break;
3491 }
3492
3493 *ecx = count & 0xff;
3494 *edx = cpu->apic_id;
3495
3496 switch (count) {
3497 case 0:
3498 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3499 *ebx = cs->nr_threads;
3500 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
3501 break;
3502 case 1:
3503 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3504 *ebx = cs->nr_cores * cs->nr_threads;
3505 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3506 break;
3507 default:
3508 *eax = 0;
3509 *ebx = 0;
3510 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3511 }
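/*
 * Illustrative example (assuming -smp cores=4,threads=2): sub-leaf 0
 * returns EAX=1, EBX=2 (shift the APIC ID right by 1 bit to strip the
 * SMT ID) and sub-leaf 1 returns EAX=3, EBX=8 (shift right by 3 bits
 * to reach the package ID).
 */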
3512
3513 assert(!(*eax & ~0x1f));
3514 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
3515 break;
3516 case 0xD: {
3517 /* Processor Extended State */
3518 *eax = 0;
3519 *ebx = 0;
3520 *ecx = 0;
3521 *edx = 0;
3522 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3523 break;
3524 }
3525
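/*
 * Sub-leaf layout (Intel SDM, CPUID leaf 0DH): sub-leaf 0 reports the
 * supported XCR0 bits in EAX (low) and EDX (high) plus the XSAVE area
 * sizes in EBX/ECX, sub-leaf 1 reports XSAVEOPT/XSAVEC/XGETBV1/XSAVES
 * support, and sub-leaves >= 2 report the size and offset of each
 * state component.
 */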
3526 if (count == 0) {
3527 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3528 *eax = env->features[FEAT_XSAVE_COMP_LO];
3529 *edx = env->features[FEAT_XSAVE_COMP_HI];
3530 *ebx = *ecx;
3531 } else if (count == 1) {
3532 *eax = env->features[FEAT_XSAVE];
3533 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3534 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3535 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3536 *eax = esa->size;
3537 *ebx = esa->offset;
3538 }
3539 }
3540 break;
3541 }
3542 case 0x14: {
3543 /* Intel Processor Trace Enumeration */
3544 *eax = 0;
3545 *ebx = 0;
3546 *ecx = 0;
3547 *edx = 0;
3548 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
3549 !kvm_enabled()) {
3550 break;
3551 }
3552
3553 if (count == 0) {
3554 *eax = INTEL_PT_MAX_SUBLEAF;
3555 *ebx = INTEL_PT_MINIMAL_EBX;
3556 *ecx = INTEL_PT_MINIMAL_ECX;
3557 } else if (count == 1) {
3558 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
3559 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
3560 }
3561 break;
3562 }
3563 case 0x40000000:
3564 /*
3565 * CPUID code in kvm_arch_init_vcpu() ignores whatever is
3566 * set here, but we restrict this leaf to TCG nonetheless.
3567 */
3568 if (tcg_enabled() && cpu->expose_tcg) {
3569 memcpy(signature, "TCGTCGTCGTCG", 12);
3570 *eax = 0x40000001;
3571 *ebx = signature[0];
3572 *ecx = signature[1];
3573 *edx = signature[2];
3574 } else {
3575 *eax = 0;
3576 *ebx = 0;
3577 *ecx = 0;
3578 *edx = 0;
3579 }
3580 break;
3581 case 0x40000001:
3582 *eax = 0;
3583 *ebx = 0;
3584 *ecx = 0;
3585 *edx = 0;
3586 break;
3587 case 0x80000000:
3588 *eax = env->cpuid_xlevel;
3589 *ebx = env->cpuid_vendor1;
3590 *edx = env->cpuid_vendor2;
3591 *ecx = env->cpuid_vendor3;
3592 break;
3593 case 0x80000001:
3594 *eax = env->cpuid_version;
3595 *ebx = 0;
3596 *ecx = env->features[FEAT_8000_0001_ECX];
3597 *edx = env->features[FEAT_8000_0001_EDX];
3598
3599 /* The Linux kernel checks for the CMPLegacy bit and
3600 * discards multiple thread information if it is set.
3601 * So don't set it here for Intel to make Linux guests happy.
3602 */
3603 if (cs->nr_cores * cs->nr_threads > 1) {
3604 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3605 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3606 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3607 *ecx |= 1 << 1; /* CmpLegacy bit */
3608 }
3609 }
3610 break;
3611 case 0x80000002:
3612 case 0x80000003:
3613 case 0x80000004:
3614 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3615 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3616 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3617 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3618 break;
3619 case 0x80000005:
3620 /* cache info (L1 cache) */
3621 if (cpu->cache_info_passthrough) {
3622 host_cpuid(index, 0, eax, ebx, ecx, edx);
3623 break;
3624 }
3625 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3626 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3627 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3628 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3629 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
3630 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
3631 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
3632 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
3633 break;
3634 case 0x80000006:
3635 /* cache info (L2 cache) */
3636 if (cpu->cache_info_passthrough) {
3637 host_cpuid(index, 0, eax, ebx, ecx, edx);
3638 break;
3639 }
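/*
 * Register layout below follows AMD's CPUID specification: EAX/EBX
 * describe the L2 TLBs for 2M/4M and 4K pages, ECX the L2 cache (size
 * in KB in bits 31:16, associativity code in 15:12, lines per tag in
 * 11:8, line size in 7:0) and EDX the L3 cache (size in 512 KB units
 * in bits 31:18).
 */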
3640 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3641 (L2_DTLB_2M_ENTRIES << 16) | \
3642 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3643 (L2_ITLB_2M_ENTRIES);
3644 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3645 (L2_DTLB_4K_ENTRIES << 16) | \
3646 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3647 (L2_ITLB_4K_ENTRIES);
3648 *ecx = (L2_SIZE_KB_AMD << 16) | \
3649 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
3650 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
3651 if (!cpu->enable_l3_cache) {
3652 *edx = ((L3_SIZE_KB / 512) << 18) | \
3653 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
3654 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
3655 } else {
3656 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
3657 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
3658 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
3659 }
3660 break;
3661 case 0x80000007:
3662 *eax = 0;
3663 *ebx = 0;
3664 *ecx = 0;
3665 *edx = env->features[FEAT_8000_0007_EDX];
3666 break;
3667 case 0x80000008:
3668 /* virtual & phys address size in low 2 bytes. */
3669 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3670 /* 64 bit processor */
3671 *eax = cpu->phys_bits; /* configurable physical bits */
3672 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3673 *eax |= 0x00003900; /* 57 bits virtual */
3674 } else {
3675 *eax |= 0x00003000; /* 48 bits virtual */
3676 }
3677 } else {
3678 *eax = cpu->phys_bits;
3679 }
3680 *ebx = env->features[FEAT_8000_0008_EBX];
3681 *ecx = 0;
3682 *edx = 0;
3683 if (cs->nr_cores * cs->nr_threads > 1) {
3684 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
3685 }
3686 break;
3687 case 0x8000000A:
3688 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3689 *eax = 0x00000001; /* SVM Revision */
3690 *ebx = 0x00000010; /* nr of ASIDs */
3691 *ecx = 0;
3692 *edx = env->features[FEAT_SVM]; /* optional features */
3693 } else {
3694 *eax = 0;
3695 *ebx = 0;
3696 *ecx = 0;
3697 *edx = 0;
3698 }
3699 break;
3700 case 0xC0000000:
3701 *eax = env->cpuid_xlevel2;
3702 *ebx = 0;
3703 *ecx = 0;
3704 *edx = 0;
3705 break;
3706 case 0xC0000001:
3707 /* Support for VIA CPU's CPUID instruction */
3708 *eax = env->cpuid_version;
3709 *ebx = 0;
3710 *ecx = 0;
3711 *edx = env->features[FEAT_C000_0001_EDX];
3712 break;
3713 case 0xC0000002:
3714 case 0xC0000003:
3715 case 0xC0000004:
3716 /* Reserved for future use; filled with zeroes for now */
3717 *eax = 0;
3718 *ebx = 0;
3719 *ecx = 0;
3720 *edx = 0;
3721 break;
3722 case 0x8000001F:
3723 *eax = sev_enabled() ? 0x2 : 0;
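/* EBX[5:0] holds the C-bit position in page table entries and EBX[11:6]
 * the number of physical address bits lost when SEV is enabled, matching
 * the two sev_get_*() helpers used on the next two lines. */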
3724 *ebx = sev_get_cbit_position();
3725 *ebx |= sev_get_reduced_phys_bits() << 6;
3726 *ecx = 0;
3727 *edx = 0;
3728 break;
3729 default:
3730 /* reserved values: zero */
3731 *eax = 0;
3732 *ebx = 0;
3733 *ecx = 0;
3734 *edx = 0;
3735 break;
3736 }
3737 }
3738
3739 /* CPUClass::reset() */
3740 static void x86_cpu_reset(CPUState *s)
3741 {
3742 X86CPU *cpu = X86_CPU(s);
3743 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
3744 CPUX86State *env = &cpu->env;
3745 target_ulong cr4;
3746 uint64_t xcr0;
3747 int i;
3748
3749 xcc->parent_reset(s);
3750
3751 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
3752
3753 env->old_exception = -1;
3754
3755 /* init to reset state */
3756
3757 env->hflags2 |= HF2_GIF_MASK;
3758
3759 cpu_x86_update_cr0(env, 0x60000010);
3760 env->a20_mask = ~0x0;
3761 env->smbase = 0x30000;
3762 env->msr_smi_count = 0;
3763
3764 env->idt.limit = 0xffff;
3765 env->gdt.limit = 0xffff;
3766 env->ldt.limit = 0xffff;
3767 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
3768 env->tr.limit = 0xffff;
3769 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
3770
3771 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
3772 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
3773 DESC_R_MASK | DESC_A_MASK);
3774 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
3775 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3776 DESC_A_MASK);
3777 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
3778 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3779 DESC_A_MASK);
3780 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
3781 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3782 DESC_A_MASK);
3783 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
3784 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3785 DESC_A_MASK);
3786 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
3787 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3788 DESC_A_MASK);
3789
3790 env->eip = 0xfff0;
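/* Combined with the CS base of 0xffff0000 loaded above, this places the
 * first instruction fetch at the architectural reset vector 0xfffffff0. */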
3791 env->regs[R_EDX] = env->cpuid_version;
3792
3793 env->eflags = 0x2;
3794
3795 /* FPU init */
3796 for (i = 0; i < 8; i++) {
3797 env->fptags[i] = 1;
3798 }
3799 cpu_set_fpuc(env, 0x37f);
3800
3801 env->mxcsr = 0x1f80;
3802 /* All units are in INIT state. */
3803 env->xstate_bv = 0;
3804
3805 env->pat = 0x0007040600070406ULL;
3806 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
3807
3808 memset(env->dr, 0, sizeof(env->dr));
3809 env->dr[6] = DR6_FIXED_1;
3810 env->dr[7] = DR7_FIXED_1;
3811 cpu_breakpoint_remove_all(s, BP_CPU);
3812 cpu_watchpoint_remove_all(s, BP_CPU);
3813
3814 cr4 = 0;
3815 xcr0 = XSTATE_FP_MASK;
3816
3817 #ifdef CONFIG_USER_ONLY
3818 /* Enable all the features for user-mode. */
3819 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
3820 xcr0 |= XSTATE_SSE_MASK;
3821 }
3822 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3823 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3824 if (env->features[esa->feature] & esa->bits) {
3825 xcr0 |= 1ull << i;
3826 }
3827 }
3828
3829 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
3830 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
3831 }
3832 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
3833 cr4 |= CR4_FSGSBASE_MASK;
3834 }
3835 #endif
3836
3837 env->xcr0 = xcr0;
3838 cpu_x86_update_cr4(env, cr4);
3839
3840 /*
3841 * SDM 11.11.5 requires:
3842 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3843 * - IA32_MTRR_PHYSMASKn.V = 0
3844 * All other bits are undefined. For simplification, zero it all.
3845 */
3846 env->mtrr_deftype = 0;
3847 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
3848 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
3849
3850 env->interrupt_injected = -1;
3851 env->exception_injected = -1;
3852 env->nmi_injected = false;
3853 #if !defined(CONFIG_USER_ONLY)
3854 /* We hard-wire the BSP to the first CPU. */
3855 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
3856
3857 s->halted = !cpu_is_bsp(cpu);
3858
3859 if (kvm_enabled()) {
3860 kvm_arch_reset_vcpu(cpu);
3861 }
3862 else if (hvf_enabled()) {
3863 hvf_reset_vcpu(s);
3864 }
3865 #endif
3866 }
3867
3868 #ifndef CONFIG_USER_ONLY
3869 bool cpu_is_bsp(X86CPU *cpu)
3870 {
3871 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3872 }
3873
3874 /* TODO: remove me when reset over QOM tree is implemented */
3875 static void x86_cpu_machine_reset_cb(void *opaque)
3876 {
3877 X86CPU *cpu = opaque;
3878 cpu_reset(CPU(cpu));
3879 }
3880 #endif
3881
3882 static void mce_init(X86CPU *cpu)
3883 {
3884 CPUX86State *cenv = &cpu->env;
3885 unsigned int bank;
3886
3887 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3888 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3889 (CPUID_MCE | CPUID_MCA)) {
3890 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3891 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3892 cenv->mcg_ctl = ~(uint64_t)0;
3893 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3894 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3895 }
3896 }
3897 }
3898
3899 #ifndef CONFIG_USER_ONLY
3900 APICCommonClass *apic_get_class(void)
3901 {
3902 const char *apic_type = "apic";
3903
3904 /* TODO: in-kernel irqchip for hvf */
3905 if (kvm_apic_in_kernel()) {
3906 apic_type = "kvm-apic";
3907 } else if (xen_enabled()) {
3908 apic_type = "xen-apic";
3909 }
3910
3911 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3912 }
3913
3914 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
3915 {
3916 APICCommonState *apic;
3917 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
3918
3919 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
3920
3921 object_property_add_child(OBJECT(cpu), "lapic",
3922 OBJECT(cpu->apic_state), &error_abort);
3923 object_unref(OBJECT(cpu->apic_state));
3924
3925 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
3926 /* TODO: convert to link<> */
3927 apic = APIC_COMMON(cpu->apic_state);
3928 apic->cpu = cpu;
3929 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
3930 }
3931
3932 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3933 {
3934 APICCommonState *apic;
3935 static bool apic_mmio_map_once;
3936
3937 if (cpu->apic_state == NULL) {
3938 return;
3939 }
3940 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
3941 errp);
3942
3943 /* Map APIC MMIO area */
3944 apic = APIC_COMMON(cpu->apic_state);
3945 if (!apic_mmio_map_once) {
3946 memory_region_add_subregion_overlap(get_system_memory(),
3947 apic->apicbase &
3948 MSR_IA32_APICBASE_BASE,
3949 &apic->io_memory,
3950 0x1000);
3951 apic_mmio_map_once = true;
3952 }
3953 }
3954
3955 static void x86_cpu_machine_done(Notifier *n, void *unused)
3956 {
3957 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3958 MemoryRegion *smram =
3959 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3960
3961 if (smram) {
3962 cpu->smram = g_new(MemoryRegion, 1);
3963 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3964 smram, 0, 1ull << 32);
3965 memory_region_set_enabled(cpu->smram, true);
3966 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3967 }
3968 }
3969 #else
3970 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3971 {
3972 }
3973 #endif
3974
3975 /* Note: Only safe for use on x86(-64) hosts */
3976 static uint32_t x86_host_phys_bits(void)
3977 {
3978 uint32_t eax;
3979 uint32_t host_phys_bits;
3980
3981 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3982 if (eax >= 0x80000008) {
3983 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3984 /* Note: According to AMD doc 25481 rev 2.34 there is a field
3985 * at 23:16 that can specify the maximum physical address bits for
3986 * the guest, which can override this value; but I've not seen
3987 * anything with that set.
3988 */
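/* e.g. a host where this leaf returns EAX = 0x00003027 (an illustrative
 * value) has 39 physical and 48 linear address bits. */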
3989 host_phys_bits = eax & 0xff;
3990 } else {
3991 /* It's an odd 64 bit machine that doesn't have the leaf for
3992 * physical address bits; fall back to 36, which matches most
3993 * older Intel parts.
3994 */
3995 host_phys_bits = 36;
3996 }
3997
3998 return host_phys_bits;
3999 }
4000
4001 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4002 {
4003 if (*min < value) {
4004 *min = value;
4005 }
4006 }
4007
4008 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4009 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4010 {
4011 CPUX86State *env = &cpu->env;
4012 FeatureWordInfo *fi = &feature_word_info[w];
4013 uint32_t eax = fi->cpuid_eax;
4014 uint32_t region = eax & 0xF0000000;
4015
4016 if (!env->features[w]) {
4017 return;
4018 }
4019
4020 switch (region) {
4021 case 0x00000000:
4022 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4023 break;
4024 case 0x80000000:
4025 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4026 break;
4027 case 0xC0000000:
4028 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4029 break;
4030 }
4031 }
4032
4033 /* Calculate XSAVE components based on the configured CPU feature flags */
4034 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4035 {
4036 CPUX86State *env = &cpu->env;
4037 int i;
4038 uint64_t mask;
4039
4040 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4041 return;
4042 }
4043
4044 mask = 0;
4045 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4046 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4047 if (env->features[esa->feature] & esa->bits) {
4048 mask |= (1ULL << i);
4049 }
4050 }
4051
4052 env->features[FEAT_XSAVE_COMP_LO] = mask;
4053 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4054 }
4055
4056 /***** Steps involved in loading and filtering CPUID data
4057 *
4058 * When initializing and realizing a CPU object, the steps
4059 * involved in setting up CPUID data are:
4060 *
4061 * 1) Loading CPU model definition (X86CPUDefinition). This is
4062 * implemented by x86_cpu_load_def() and should be completely
4063 * transparent, as it is done automatically by instance_init.
4064 * No code should need to look at X86CPUDefinition structs
4065 * outside instance_init.
4066 *
4067 * 2) CPU expansion. This is done by realize before CPUID
4068 * filtering, and will make sure host/accelerator data is
4069 * loaded for CPU models that depend on host capabilities
4070 * (e.g. "host"). Done by x86_cpu_expand_features().
4071 *
4072 * 3) CPUID filtering. This initializes extra data related to
4073 * CPUID, and checks if the host supports all capabilities
4074 * required by the CPU. Runnability of a CPU model is
4075 * determined at this step. Done by x86_cpu_filter_features().
4076 *
4077 * Some operations don't require all steps to be performed.
4078 * More precisely:
4079 *
4080 * - CPU instance creation (instance_init) will run only CPU
4081 * model loading. CPU expansion can't run at instance_init-time
4082 * because host/accelerator data may not be available yet.
4083 * - CPU realization will perform both CPU model expansion and CPUID
4084 * filtering, and return an error in case one of them fails.
4085 * - query-cpu-definitions needs to run all 3 steps. It needs
4086 * to run CPUID filtering, as the 'unavailable-features'
4087 * field is set based on the filtering results.
4088 * - The query-cpu-model-expansion QMP command only needs to run
4089 * CPU model loading and CPU expansion. It should not filter
4090 * any CPUID data based on host capabilities.
4091 */
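/*
 * A condensed sketch of how x86_cpu_realizefn() below wires steps 2 and 3
 * together (error handling trimmed):
 *
 *     x86_cpu_expand_features(cpu, &local_err);
 *     if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
 *         // host lacks requested features: refuse to realize the CPU
 *     }
 */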
4092
4093 /* Expand CPU configuration data, based on configured features
4094 * and host/accelerator capabilities when appropriate.
4095 */
4096 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4097 {
4098 CPUX86State *env = &cpu->env;
4099 FeatureWord w;
4100 GList *l;
4101 Error *local_err = NULL;
4102
4103 /*TODO: Now cpu->max_features doesn't overwrite features
4104 * set using QOM properties, and we can convert
4105 * plus_features & minus_features to global properties
4106 * inside x86_cpu_parse_featurestr() too.
4107 */
4108 if (cpu->max_features) {
4109 for (w = 0; w < FEATURE_WORDS; w++) {
4110 /* Override only features that weren't set explicitly
4111 * by the user.
4112 */
4113 env->features[w] |=
4114 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4115 ~env->user_features[w] & \
4116 ~feature_word_info[w].no_autoenable_flags;
4117 }
4118 }
4119
4120 for (l = plus_features; l; l = l->next) {
4121 const char *prop = l->data;
4122 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4123 if (local_err) {
4124 goto out;
4125 }
4126 }
4127
4128 for (l = minus_features; l; l = l->next) {
4129 const char *prop = l->data;
4130 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4131 if (local_err) {
4132 goto out;
4133 }
4134 }
4135
4136 if (!kvm_enabled() || !cpu->expose_kvm) {
4137 env->features[FEAT_KVM] = 0;
4138 }
4139
4140 x86_cpu_enable_xsave_components(cpu);
4141
4142 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
4143 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4144 if (cpu->full_cpuid_auto_level) {
4145 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4146 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4147 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4148 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4149 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4150 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4151 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4152 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4153 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4154 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4155 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4156 /* SVM requires CPUID[0x8000000A] */
4157 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4158 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4159 }
4160
4161 /* SEV requires CPUID[0x8000001F] */
4162 if (sev_enabled()) {
4163 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4164 }
4165 }
4166
4167 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4168 if (env->cpuid_level == UINT32_MAX) {
4169 env->cpuid_level = env->cpuid_min_level;
4170 }
4171 if (env->cpuid_xlevel == UINT32_MAX) {
4172 env->cpuid_xlevel = env->cpuid_min_xlevel;
4173 }
4174 if (env->cpuid_xlevel2 == UINT32_MAX) {
4175 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4176 }
4177
4178 out:
4179 if (local_err != NULL) {
4180 error_propagate(errp, local_err);
4181 }
4182 }
4183
4184 /*
4185 * Finishes initialization of CPUID data, filters CPU feature
4186 * words based on host availability of each feature.
4187 *
4188 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4189 */
4190 static int x86_cpu_filter_features(X86CPU *cpu)
4191 {
4192 CPUX86State *env = &cpu->env;
4193 FeatureWord w;
4194 int rv = 0;
4195
4196 for (w = 0; w < FEATURE_WORDS; w++) {
4197 uint32_t host_feat =
4198 x86_cpu_get_supported_feature_word(w, false);
4199 uint32_t requested_features = env->features[w];
4200 env->features[w] &= host_feat;
4201 cpu->filtered_features[w] = requested_features & ~env->features[w];
4202 if (cpu->filtered_features[w]) {
4203 rv = 1;
4204 }
4205 }
4206
4207 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4208 kvm_enabled()) {
4209 KVMState *s = CPU(cpu)->kvm_state;
4210 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4211 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4212 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4213 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4214 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4215
4216 if (!eax_0 ||
4217 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4218 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4219 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4220 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4221 INTEL_PT_ADDR_RANGES_NUM) ||
4222 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4223 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4224 (ecx_0 & INTEL_PT_IP_LIP)) {
4225 /*
4226 * Processor Trace capabilities aren't configurable, so if the
4227 * host can't emulate the capabilities we report on
4228 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4229 */
4230 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4231 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
4232 rv = 1;
4233 }
4234 }
4235
4236 return rv;
4237 }
4238
4239 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4240 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4241 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4242 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4243 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4244 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4245 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4246 {
4247 CPUState *cs = CPU(dev);
4248 X86CPU *cpu = X86_CPU(dev);
4249 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4250 CPUX86State *env = &cpu->env;
4251 Error *local_err = NULL;
4252 static bool ht_warned;
4253
4254 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4255 char *name = x86_cpu_class_get_model_name(xcc);
4256 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4257 g_free(name);
4258 goto out;
4259 }
4260
4261 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4262 error_setg(errp, "apic-id property was not initialized properly");
4263 return;
4264 }
4265
4266 x86_cpu_expand_features(cpu, &local_err);
4267 if (local_err) {
4268 goto out;
4269 }
4270
4271 if (x86_cpu_filter_features(cpu) &&
4272 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4273 x86_cpu_report_filtered_features(cpu);
4274 if (cpu->enforce_cpuid) {
4275 error_setg(&local_err,
4276 accel_uses_host_cpuid() ?
4277 "Host doesn't support requested features" :
4278 "TCG doesn't support requested features");
4279 goto out;
4280 }
4281 }
4282
4283 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4284 * CPUID[1].EDX.
4285 */
4286 if (IS_AMD_CPU(env)) {
4287 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4288 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4289 & CPUID_EXT2_AMD_ALIASES);
4290 }
4291
4292 /* For 64 bit systems, think about the number of physical bits to present.
4293 * Ideally this should be the same as the host; anything other than matching
4294 * the host can cause incorrect guest behaviour.
4295 * QEMU used to pick the magic value of 40 bits, which corresponds to
4296 * consumer AMD devices but nothing else.
4297 */
4298 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4299 if (accel_uses_host_cpuid()) {
4300 uint32_t host_phys_bits = x86_host_phys_bits();
4301 static bool warned;
4302
4303 if (cpu->host_phys_bits) {
4304 /* The user asked for us to use the host physical bits */
4305 cpu->phys_bits = host_phys_bits;
4306 }
4307
4308 /* Print a warning if the user set it to a value that's not the
4309 * host value.
4310 */
4311 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4312 !warned) {
4313 warn_report("Host physical bits (%u)"
4314 " does not match phys-bits property (%u)",
4315 host_phys_bits, cpu->phys_bits);
4316 warned = true;
4317 }
4318
4319 if (cpu->phys_bits &&
4320 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4321 cpu->phys_bits < 32)) {
4322 error_setg(errp, "phys-bits should be between 32 and %u "
4323 " (but is %u)",
4324 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4325 return;
4326 }
4327 } else {
4328 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4329 error_setg(errp, "TCG only supports phys-bits=%u",
4330 TCG_PHYS_ADDR_BITS);
4331 return;
4332 }
4333 }
4334 /* 0 means it was not explicitly set by the user (or by machine
4335 * compat_props or by the host code above). In this case, the default
4336 * is the value used by TCG (40).
4337 */
4338 if (cpu->phys_bits == 0) {
4339 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4340 }
4341 } else {
4342 /* For 32 bit systems don't use the user-set value, but keep
4343 * phys_bits consistent with what we tell the guest.
4344 */
4345 if (cpu->phys_bits != 0) {
4346 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4347 return;
4348 }
4349
4350 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4351 cpu->phys_bits = 36;
4352 } else {
4353 cpu->phys_bits = 32;
4354 }
4355 }
4356 cpu_exec_realizefn(cs, &local_err);
4357 if (local_err != NULL) {
4358 error_propagate(errp, local_err);
4359 return;
4360 }
4361
4362 #ifndef CONFIG_USER_ONLY
4363 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4364
4365 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4366 x86_cpu_apic_create(cpu, &local_err);
4367 if (local_err != NULL) {
4368 goto out;
4369 }
4370 }
4371 #endif
4372
4373 mce_init(cpu);
4374
4375 #ifndef CONFIG_USER_ONLY
4376 if (tcg_enabled()) {
4377 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4378 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4379
4380 /* Outer container... */
4381 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4382 memory_region_set_enabled(cpu->cpu_as_root, true);
4383
4384 /* ... with two regions inside: normal system memory with low
4385 * priority, and...
4386 */
4387 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4388 get_system_memory(), 0, ~0ull);
4389 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4390 memory_region_set_enabled(cpu->cpu_as_mem, true);
4391
4392 cs->num_ases = 2;
4393 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4394 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4395
4396 /* ... SMRAM with higher priority, linked from /machine/smram. */
4397 cpu->machine_done.notify = x86_cpu_machine_done;
4398 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4399 }
4400 #endif
4401
4402 qemu_init_vcpu(cs);
4403
4404 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4405 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4406 * based on inputs (sockets, cores, threads), it is still better to give
4407 * users a warning.
4408 *
4409 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4410 * cs->nr_threads hasn't been populated yet and the check is incorrect.
4411 */
4412 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4413 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4414 " -smp options properly.");
4415 ht_warned = true;
4416 }
4417
4418 x86_cpu_apic_realize(cpu, &local_err);
4419 if (local_err != NULL) {
4420 goto out;
4421 }
4422 cpu_reset(cs);
4423
4424 xcc->parent_realize(dev, &local_err);
4425
4426 out:
4427 if (local_err != NULL) {
4428 error_propagate(errp, local_err);
4429 return;
4430 }
4431 }
4432
4433 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4434 {
4435 X86CPU *cpu = X86_CPU(dev);
4436 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4437 Error *local_err = NULL;
4438
4439 #ifndef CONFIG_USER_ONLY
4440 cpu_remove_sync(CPU(dev));
4441 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4442 #endif
4443
4444 if (cpu->apic_state) {
4445 object_unparent(OBJECT(cpu->apic_state));
4446 cpu->apic_state = NULL;
4447 }
4448
4449 xcc->parent_unrealize(dev, &local_err);
4450 if (local_err != NULL) {
4451 error_propagate(errp, local_err);
4452 return;
4453 }
4454 }
4455
4456 typedef struct BitProperty {
4457 FeatureWord w;
4458 uint32_t mask;
4459 } BitProperty;
4460
4461 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4462 void *opaque, Error **errp)
4463 {
4464 X86CPU *cpu = X86_CPU(obj);
4465 BitProperty *fp = opaque;
4466 uint32_t f = cpu->env.features[fp->w];
4467 bool value = (f & fp->mask) == fp->mask;
4468 visit_type_bool(v, name, &value, errp);
4469 }
4470
4471 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4472 void *opaque, Error **errp)
4473 {
4474 DeviceState *dev = DEVICE(obj);
4475 X86CPU *cpu = X86_CPU(obj);
4476 BitProperty *fp = opaque;
4477 Error *local_err = NULL;
4478 bool value;
4479
4480 if (dev->realized) {
4481 qdev_prop_set_after_realize(dev, name, errp);
4482 return;
4483 }
4484
4485 visit_type_bool(v, name, &value, &local_err);
4486 if (local_err) {
4487 error_propagate(errp, local_err);
4488 return;
4489 }
4490
4491 if (value) {
4492 cpu->env.features[fp->w] |= fp->mask;
4493 } else {
4494 cpu->env.features[fp->w] &= ~fp->mask;
4495 }
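/* Record that the user touched this bit explicitly; x86_cpu_expand_features()
 * skips bits present in user_features when auto-enabling features for
 * cpu->max_features. */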
4496 cpu->env.user_features[fp->w] |= fp->mask;
4497 }
4498
4499 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4500 void *opaque)
4501 {
4502 BitProperty *prop = opaque;
4503 g_free(prop);
4504 }
4505
4506 /* Register a boolean property to get/set a single bit in a uint32_t field.
4507 *
4508 * The same property name can be registered multiple times to make it affect
4509 * multiple bits in the same FeatureWord. In that case, the getter will return
4510 * true only if all bits are set.
4511 */
4512 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4513 const char *prop_name,
4514 FeatureWord w,
4515 int bitnr)
4516 {
4517 BitProperty *fp;
4518 ObjectProperty *op;
4519 uint32_t mask = (1UL << bitnr);
4520
4521 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4522 if (op) {
4523 fp = op->opaque;
4524 assert(fp->w == w);
4525 fp->mask |= mask;
4526 } else {
4527 fp = g_new0(BitProperty, 1);
4528 fp->w = w;
4529 fp->mask = mask;
4530 object_property_add(OBJECT(cpu), prop_name, "bool",
4531 x86_cpu_get_bit_prop,
4532 x86_cpu_set_bit_prop,
4533 x86_cpu_release_bit_prop, fp, &error_abort);
4534 }
4535 }
4536
4537 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4538 FeatureWord w,
4539 int bitnr)
4540 {
4541 FeatureWordInfo *fi = &feature_word_info[w];
4542 const char *name = fi->feat_names[bitnr];
4543
4544 if (!name) {
4545 return;
4546 }
4547
4548 /* Property names should use "-" instead of "_".
4549 * Old names containing underscores are registered as aliases
4550 * using object_property_add_alias()
4551 */
4552 assert(!strchr(name, '_'));
4553 /* aliases don't use "|" delimiters anymore; they are registered
4554 * manually using object_property_add_alias() */
4555 assert(!strchr(name, '|'));
4556 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
4557 }
4558
4559 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
4560 {
4561 X86CPU *cpu = X86_CPU(cs);
4562 CPUX86State *env = &cpu->env;
4563 GuestPanicInformation *panic_info = NULL;
4564
4565 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
4566 panic_info = g_malloc0(sizeof(GuestPanicInformation));
4567
4568 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
4569
4570 assert(HV_CRASH_PARAMS >= 5);
4571 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
4572 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
4573 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
4574 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
4575 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
4576 }
4577
4578 return panic_info;
4579 }
4580 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
4581 const char *name, void *opaque,
4582 Error **errp)
4583 {
4584 CPUState *cs = CPU(obj);
4585 GuestPanicInformation *panic_info;
4586
4587 if (!cs->crash_occurred) {
4588 error_setg(errp, "No crash occured");
4589 return;
4590 }
4591
4592 panic_info = x86_cpu_get_crash_info(cs);
4593 if (panic_info == NULL) {
4594 error_setg(errp, "No crash information");
4595 return;
4596 }
4597
4598 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
4599 errp);
4600 qapi_free_GuestPanicInformation(panic_info);
4601 }
4602
4603 static void x86_cpu_initfn(Object *obj)
4604 {
4605 CPUState *cs = CPU(obj);
4606 X86CPU *cpu = X86_CPU(obj);
4607 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
4608 CPUX86State *env = &cpu->env;
4609 FeatureWord w;
4610
4611 cs->env_ptr = env;
4612
4613 object_property_add(obj, "family", "int",
4614 x86_cpuid_version_get_family,
4615 x86_cpuid_version_set_family, NULL, NULL, NULL);
4616 object_property_add(obj, "model", "int",
4617 x86_cpuid_version_get_model,
4618 x86_cpuid_version_set_model, NULL, NULL, NULL);
4619 object_property_add(obj, "stepping", "int",
4620 x86_cpuid_version_get_stepping,
4621 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
4622 object_property_add_str(obj, "vendor",
4623 x86_cpuid_get_vendor,
4624 x86_cpuid_set_vendor, NULL);
4625 object_property_add_str(obj, "model-id",
4626 x86_cpuid_get_model_id,
4627 x86_cpuid_set_model_id, NULL);
4628 object_property_add(obj, "tsc-frequency", "int",
4629 x86_cpuid_get_tsc_freq,
4630 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4631 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4632 x86_cpu_get_feature_words,
4633 NULL, NULL, (void *)env->features, NULL);
4634 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4635 x86_cpu_get_feature_words,
4636 NULL, NULL, (void *)cpu->filtered_features, NULL);
4637
4638 object_property_add(obj, "crash-information", "GuestPanicInformation",
4639 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4640
4641 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4642
4643 for (w = 0; w < FEATURE_WORDS; w++) {
4644 int bitnr;
4645
4646 for (bitnr = 0; bitnr < 32; bitnr++) {
4647 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4648 }
4649 }
4650
4651 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4652 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4653 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4654 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4655 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4656 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4657 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4658
4659 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4660 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
4661 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
4662 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
4663 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
4664 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
4665 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
4666 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
4667 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
4668 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
4669 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
4670 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
4671 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
4672 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
4673 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
4674 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
4675 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
4676 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
4677 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
4678 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
4679 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
4680
4681 if (xcc->cpu_def) {
4682 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
4683 }
4684 }
4685
4686 static int64_t x86_cpu_get_arch_id(CPUState *cs)
4687 {
4688 X86CPU *cpu = X86_CPU(cs);
4689
4690 return cpu->apic_id;
4691 }
4692
4693 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
4694 {
4695 X86CPU *cpu = X86_CPU(cs);
4696
4697 return cpu->env.cr[0] & CR0_PG_MASK;
4698 }
4699
4700 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
4701 {
4702 X86CPU *cpu = X86_CPU(cs);
4703
4704 cpu->env.eip = value;
4705 }
4706
4707 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
4708 {
4709 X86CPU *cpu = X86_CPU(cs);
4710
4711 cpu->env.eip = tb->pc - tb->cs_base;
4712 }
4713
4714 static bool x86_cpu_has_work(CPUState *cs)
4715 {
4716 X86CPU *cpu = X86_CPU(cs);
4717 CPUX86State *env = &cpu->env;
4718
4719 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
4720 CPU_INTERRUPT_POLL)) &&
4721 (env->eflags & IF_MASK)) ||
4722 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
4723 CPU_INTERRUPT_INIT |
4724 CPU_INTERRUPT_SIPI |
4725 CPU_INTERRUPT_MCE)) ||
4726 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
4727 !(env->hflags & HF_SMM_MASK));
4728 }
4729
4730 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
4731 {
4732 X86CPU *cpu = X86_CPU(cs);
4733 CPUX86State *env = &cpu->env;
4734
4735 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
4736 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
4737 : bfd_mach_i386_i8086);
4738 info->print_insn = print_insn_i386;
4739
4740 info->cap_arch = CS_ARCH_X86;
4741 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
4742 : env->hflags & HF_CS32_MASK ? CS_MODE_32
4743 : CS_MODE_16);
4744 info->cap_insn_unit = 1;
4745 info->cap_insn_split = 8;
4746 }
4747
4748 void x86_update_hflags(CPUX86State *env)
4749 {
4750 uint32_t hflags;
4751 #define HFLAG_COPY_MASK \
4752 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
4753 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
4754 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
4755 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
4756
4757 hflags = env->hflags & HFLAG_COPY_MASK;
4758 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
4759 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
4760 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
4761 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
4762 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
4763
4764 if (env->cr[4] & CR4_OSFXSR_MASK) {
4765 hflags |= HF_OSFXSR_MASK;
4766 }
4767
4768 if (env->efer & MSR_EFER_LMA) {
4769 hflags |= HF_LMA_MASK;
4770 }
4771
4772 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
4773 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
4774 } else {
4775 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
4776 (DESC_B_SHIFT - HF_CS32_SHIFT);
4777 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
4778 (DESC_B_SHIFT - HF_SS32_SHIFT);
4779 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
4780 !(hflags & HF_CS32_MASK)) {
4781 hflags |= HF_ADDSEG_MASK;
4782 } else {
4783 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
4784 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
4785 }
4786 }
4787 env->hflags = hflags;
4788 }
4789
4790 static Property x86_cpu_properties[] = {
4791 #ifdef CONFIG_USER_ONLY
4792 /* apic_id = 0 by default for *-user, see commit 9886e834 */
4793 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
4794 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
4795 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
4796 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
4797 #else
4798 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
4799 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
4800 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
4801 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
4802 #endif
4803 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
4804 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
4805 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
4806 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
4807 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
4808 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
4809 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
4810 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
4811 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
4812 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
4813 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
4814 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
4815 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
4816 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
4817 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
4818 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
4819 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
4820 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
4821 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
4822 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
4823 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
4824 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
4825 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
4826 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
4827 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
4828 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
4829 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
4830 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
4831 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
4832 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
4833 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
4834 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
4835 false),
4836 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
4837 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
4838
4839 /*
4840 * From "Requirements for Implementing the Microsoft
4841 * Hypervisor Interface":
4842 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
4843 *
4844 * "Starting with Windows Server 2012 and Windows 8, if
4845 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
4846 * the hypervisor imposes no specific limit to the number of VPs.
4847 * In this case, Windows Server 2012 guest VMs may use more than
4848 * 64 VPs, up to the maximum supported number of processors applicable
4849 * to the specific Windows version being used."
4850 */
4851 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
4852 DEFINE_PROP_END_OF_LIST()
4853 };
4854
4855 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
4856 {
4857 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4858 CPUClass *cc = CPU_CLASS(oc);
4859 DeviceClass *dc = DEVICE_CLASS(oc);
4860
4861 device_class_set_parent_realize(dc, x86_cpu_realizefn,
4862 &xcc->parent_realize);
4863 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
4864 &xcc->parent_unrealize);
4865 dc->props = x86_cpu_properties;
4866
4867 xcc->parent_reset = cc->reset;
4868 cc->reset = x86_cpu_reset;
4869 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
4870
4871 cc->class_by_name = x86_cpu_class_by_name;
4872 cc->parse_features = x86_cpu_parse_featurestr;
4873 cc->has_work = x86_cpu_has_work;
4874 #ifdef CONFIG_TCG
4875 cc->do_interrupt = x86_cpu_do_interrupt;
4876 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
4877 #endif
4878 cc->dump_state = x86_cpu_dump_state;
4879 cc->get_crash_info = x86_cpu_get_crash_info;
4880 cc->set_pc = x86_cpu_set_pc;
4881 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
4882 cc->gdb_read_register = x86_cpu_gdb_read_register;
4883 cc->gdb_write_register = x86_cpu_gdb_write_register;
4884 cc->get_arch_id = x86_cpu_get_arch_id;
4885 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
4886 #ifdef CONFIG_USER_ONLY
4887 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
4888 #else
4889 cc->asidx_from_attrs = x86_asidx_from_attrs;
4890 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
4891 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
4892 cc->write_elf64_note = x86_cpu_write_elf64_note;
4893 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
4894 cc->write_elf32_note = x86_cpu_write_elf32_note;
4895 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
4896 cc->vmsd = &vmstate_x86_cpu;
4897 #endif
4898 cc->gdb_arch_name = x86_gdb_arch_name;
4899 #ifdef TARGET_X86_64
4900 cc->gdb_core_xml_file = "i386-64bit.xml";
4901 cc->gdb_num_core_regs = 57;
4902 #else
4903 cc->gdb_core_xml_file = "i386-32bit.xml";
4904 cc->gdb_num_core_regs = 41;
4905 #endif
4906 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
4907 cc->debug_excp_handler = breakpoint_handler;
4908 #endif
4909 cc->cpu_exec_enter = x86_cpu_exec_enter;
4910 cc->cpu_exec_exit = x86_cpu_exec_exit;
4911 #ifdef CONFIG_TCG
4912 cc->tcg_initialize = tcg_x86_init;
4913 #endif
4914 cc->disas_set_info = x86_disas_set_info;
4915
4916 dc->user_creatable = true;
4917 }
4918
4919 static const TypeInfo x86_cpu_type_info = {
4920 .name = TYPE_X86_CPU,
4921 .parent = TYPE_CPU,
4922 .instance_size = sizeof(X86CPU),
4923 .instance_init = x86_cpu_initfn,
4924 .abstract = true,
4925 .class_size = sizeof(X86CPUClass),
4926 .class_init = x86_cpu_common_class_init,
4927 };
4928
4929
4930 /* "base" CPU model, used by query-cpu-model-expansion */
4931 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4932 {
4933 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4934
4935 xcc->static_model = true;
4936 xcc->migration_safe = true;
4937 xcc->model_description = "base CPU model type with no features enabled";
4938 xcc->ordering = 8;
4939 }
4940
4941 static const TypeInfo x86_base_cpu_type_info = {
4942 .name = X86_CPU_TYPE_NAME("base"),
4943 .parent = TYPE_X86_CPU,
4944 .class_init = x86_cpu_base_class_init,
4945 };
4946
4947 static void x86_cpu_register_types(void)
4948 {
4949 int i;
4950
4951 type_register_static(&x86_cpu_type_info);
4952 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4953 x86_register_cpudef_type(&builtin_x86_defs[i]);
4954 }
4955 type_register_static(&max_x86_cpu_type_info);
4956 type_register_static(&x86_base_cpu_type_info);
4957 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
4958 type_register_static(&host_x86_cpu_type_info);
4959 #endif
4960 }
4961
4962 type_init(x86_cpu_register_types)