1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22
23 #include "cpu.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
28 #include "kvm_i386.h"
29
30 #include "qemu/error-report.h"
31 #include "qemu/option.h"
32 #include "qemu/config-file.h"
33 #include "qapi/error.h"
34 #include "qapi/qapi-visit-misc.h"
35 #include "qapi/qapi-visit-run-state.h"
36 #include "qapi/qmp/qdict.h"
37 #include "qapi/qmp/qerror.h"
38 #include "qapi/visitor.h"
39 #include "qom/qom-qobject.h"
40 #include "sysemu/arch_init.h"
41
42 #if defined(CONFIG_KVM)
43 #include <linux/kvm_para.h>
44 #endif
45
46 #include "sysemu/sysemu.h"
47 #include "hw/qdev-properties.h"
48 #include "hw/i386/topology.h"
49 #ifndef CONFIG_USER_ONLY
50 #include "exec/address-spaces.h"
51 #include "hw/hw.h"
52 #include "hw/xen/xen.h"
53 #include "hw/i386/apic_internal.h"
54 #endif
55
56 #include "disas/capstone.h"
57
58
59 /* Cache topology CPUID constants: */
60
61 /* CPUID Leaf 2 Descriptors */
62
63 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
64 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
65 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
66 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
67
68
69 /* CPUID Leaf 4 constants: */
70
71 /* EAX: */
72 #define CPUID_4_TYPE_DCACHE 1
73 #define CPUID_4_TYPE_ICACHE 2
74 #define CPUID_4_TYPE_UNIFIED 3
75
76 #define CPUID_4_LEVEL(l) ((l) << 5)
77
78 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
79 #define CPUID_4_FULLY_ASSOC (1 << 9)
80
81 /* EDX: */
82 #define CPUID_4_NO_INVD_SHARING (1 << 0)
83 #define CPUID_4_INCLUSIVE (1 << 1)
84 #define CPUID_4_COMPLEX_IDX (1 << 2)
85
86 #define ASSOC_FULL 0xFF
87
88 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
89 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
90 a == 2 ? 0x2 : \
91 a == 4 ? 0x4 : \
92 a == 8 ? 0x6 : \
93 a == 16 ? 0x8 : \
94 a == 32 ? 0xA : \
95 a == 48 ? 0xB : \
96 a == 64 ? 0xC : \
97 a == 96 ? 0xD : \
98 a == 128 ? 0xE : \
99 a == ASSOC_FULL ? 0xF : \
100 0 /* invalid value */)
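/*
 * AMD_ENC_ASSOC() maps a plain associativity count onto the encoded field
 * used by CPUID leaf 0x80000006; e.g. the 16-way L2 cache defined below
 * encodes as AMD_ENC_ASSOC(16) == 0x8, and counts without an entry in the
 * table encode as 0.
 */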
101
102
103 /* Definitions of the hardcoded cache entries we expose: */
104
105 /* L1 data cache: */
106 #define L1D_LINE_SIZE 64
107 #define L1D_ASSOCIATIVITY 8
108 #define L1D_SETS 64
109 #define L1D_PARTITIONS 1
110 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
111 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
112 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
113 #define L1D_LINES_PER_TAG 1
114 #define L1D_SIZE_KB_AMD 64
115 #define L1D_ASSOCIATIVITY_AMD 2
116
117 /* L1 instruction cache: */
118 #define L1I_LINE_SIZE 64
119 #define L1I_ASSOCIATIVITY 8
120 #define L1I_SETS 64
121 #define L1I_PARTITIONS 1
122 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
123 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
124 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
125 #define L1I_LINES_PER_TAG 1
126 #define L1I_SIZE_KB_AMD 64
127 #define L1I_ASSOCIATIVITY_AMD 2
128
129 /* Level 2 unified cache: */
130 #define L2_LINE_SIZE 64
131 #define L2_ASSOCIATIVITY 16
132 #define L2_SETS 4096
133 #define L2_PARTITIONS 1
134 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
135 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
136 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
137 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
138 #define L2_LINES_PER_TAG 1
139 #define L2_SIZE_KB_AMD 512
140
141 /* Level 3 unified cache: */
142 #define L3_SIZE_KB 0 /* disabled */
143 #define L3_ASSOCIATIVITY 0 /* disabled */
144 #define L3_LINES_PER_TAG 0 /* disabled */
145 #define L3_LINE_SIZE 0 /* disabled */
146 #define L3_N_LINE_SIZE 64
147 #define L3_N_ASSOCIATIVITY 16
148 #define L3_N_SETS 16384
149 #define L3_N_PARTITIONS 1
150 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
151 #define L3_N_LINES_PER_TAG 1
152 #define L3_N_SIZE_KB_AMD 16384
153
154 /* TLB definitions: */
155
156 #define L1_DTLB_2M_ASSOC 1
157 #define L1_DTLB_2M_ENTRIES 255
158 #define L1_DTLB_4K_ASSOC 1
159 #define L1_DTLB_4K_ENTRIES 255
160
161 #define L1_ITLB_2M_ASSOC 1
162 #define L1_ITLB_2M_ENTRIES 255
163 #define L1_ITLB_4K_ASSOC 1
164 #define L1_ITLB_4K_ENTRIES 255
165
166 #define L2_DTLB_2M_ASSOC 0 /* disabled */
167 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
168 #define L2_DTLB_4K_ASSOC 4
169 #define L2_DTLB_4K_ENTRIES 512
170
171 #define L2_ITLB_2M_ASSOC 0 /* disabled */
172 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
173 #define L2_ITLB_4K_ASSOC 4
174 #define L2_ITLB_4K_ENTRIES 512
175
176
177
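/*
 * Build the 12-character CPUID vendor string (e.g. "GenuineIntel") from
 * three 32-bit register values, typically EBX/EDX/ECX of CPUID leaf 0.
 * Each word contributes four ASCII bytes, least-significant byte first,
 * and the destination is NUL-terminated.
 */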
178 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
179 uint32_t vendor2, uint32_t vendor3)
180 {
181 int i;
182 for (i = 0; i < 4; i++) {
183 dst[i] = vendor1 >> (8 * i);
184 dst[i + 4] = vendor2 >> (8 * i);
185 dst[i + 8] = vendor3 >> (8 * i);
186 }
187 dst[CPUID_VENDOR_SZ] = '\0';
188 }
189
190 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
191 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
192 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
193 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
194 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
195 CPUID_PSE36 | CPUID_FXSR)
196 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
197 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
198 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
199 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
200 CPUID_PAE | CPUID_SEP | CPUID_APIC)
201
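/*
 * The TCG_*_FEATURES masks below are the per-feature-word sets of CPUID
 * bits that QEMU's TCG emulation can provide; they feed the .tcg_features
 * fields of feature_word_info[] and filter guest features when no
 * hardware accelerator (KVM/HVF) is in use.
 */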
202 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
203 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
204 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
205 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
206 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
207 /* partly implemented:
208 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
209 /* missing:
210 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
211 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
212 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
213 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
214 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
215 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
216 /* missing:
217 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
218 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
219 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
220 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
221 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
222
223 #ifdef TARGET_X86_64
224 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
225 #else
226 #define TCG_EXT2_X86_64_FEATURES 0
227 #endif
228
229 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
230 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
231 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
232 TCG_EXT2_X86_64_FEATURES)
233 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
234 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
235 #define TCG_EXT4_FEATURES 0
236 #define TCG_SVM_FEATURES 0
237 #define TCG_KVM_FEATURES 0
238 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
239 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
240 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
241 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
242 CPUID_7_0_EBX_ERMS)
243 /* missing:
244 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
245 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
246 CPUID_7_0_EBX_RDSEED */
247 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
248 CPUID_7_0_ECX_LA57)
249 #define TCG_7_0_EDX_FEATURES 0
250 #define TCG_APM_FEATURES 0
251 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
252 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
253 /* missing:
254 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
255
256 typedef struct FeatureWordInfo {
257 /* Feature flag names are taken from "Intel Processor Identification and
258 * the CPUID Instruction" and AMD's "CPUID Specification".
259 * In cases of disagreement between feature naming conventions,
260 * aliases may be added.
261 */
262 const char *feat_names[32];
263 uint32_t cpuid_eax; /* Input EAX for CPUID */
264 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
265 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
266 int cpuid_reg; /* output register (R_* constant) */
267 uint32_t tcg_features; /* Feature flags supported by TCG */
268 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
269 uint32_t migratable_flags; /* Feature flags known to be migratable */
270 } FeatureWordInfo;
271
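/*
 * feature_word_info[] describes every FeatureWord: the CPUID leaf (and
 * sub-leaf, when cpuid_needs_ecx is set) and output register the word is
 * read from, the per-bit property names exposed to the user, the subset
 * TCG implements, and any bits known to be migratable or unmigratable.
 */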
272 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
273 [FEAT_1_EDX] = {
274 .feat_names = {
275 "fpu", "vme", "de", "pse",
276 "tsc", "msr", "pae", "mce",
277 "cx8", "apic", NULL, "sep",
278 "mtrr", "pge", "mca", "cmov",
279 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
280 NULL, "ds" /* Intel dts */, "acpi", "mmx",
281 "fxsr", "sse", "sse2", "ss",
282 "ht" /* Intel htt */, "tm", "ia64", "pbe",
283 },
284 .cpuid_eax = 1, .cpuid_reg = R_EDX,
285 .tcg_features = TCG_FEATURES,
286 },
287 [FEAT_1_ECX] = {
288 .feat_names = {
289 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
290 "ds-cpl", "vmx", "smx", "est",
291 "tm2", "ssse3", "cid", NULL,
292 "fma", "cx16", "xtpr", "pdcm",
293 NULL, "pcid", "dca", "sse4.1",
294 "sse4.2", "x2apic", "movbe", "popcnt",
295 "tsc-deadline", "aes", "xsave", "osxsave",
296 "avx", "f16c", "rdrand", "hypervisor",
297 },
298 .cpuid_eax = 1, .cpuid_reg = R_ECX,
299 .tcg_features = TCG_EXT_FEATURES,
300 },
301 /* Feature names that are already defined for FEAT_1_EDX above, but are
302  * also set in CPUID[8000_0001].EDX on AMD CPUs, are left as NULL in
303  * feat_names below. They are copied automatically to
304  * features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
305 */
306 [FEAT_8000_0001_EDX] = {
307 .feat_names = {
308 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
309 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
310 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
311 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
312 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
313 "nx", NULL, "mmxext", NULL /* mmx */,
314 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
315 NULL, "lm", "3dnowext", "3dnow",
316 },
317 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
318 .tcg_features = TCG_EXT2_FEATURES,
319 },
320 [FEAT_8000_0001_ECX] = {
321 .feat_names = {
322 "lahf-lm", "cmp-legacy", "svm", "extapic",
323 "cr8legacy", "abm", "sse4a", "misalignsse",
324 "3dnowprefetch", "osvw", "ibs", "xop",
325 "skinit", "wdt", NULL, "lwp",
326 "fma4", "tce", NULL, "nodeid-msr",
327 NULL, "tbm", "topoext", "perfctr-core",
328 "perfctr-nb", NULL, NULL, NULL,
329 NULL, NULL, NULL, NULL,
330 },
331 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
332 .tcg_features = TCG_EXT3_FEATURES,
333 },
334 [FEAT_C000_0001_EDX] = {
335 .feat_names = {
336 NULL, NULL, "xstore", "xstore-en",
337 NULL, NULL, "xcrypt", "xcrypt-en",
338 "ace2", "ace2-en", "phe", "phe-en",
339 "pmm", "pmm-en", NULL, NULL,
340 NULL, NULL, NULL, NULL,
341 NULL, NULL, NULL, NULL,
342 NULL, NULL, NULL, NULL,
343 NULL, NULL, NULL, NULL,
344 },
345 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
346 .tcg_features = TCG_EXT4_FEATURES,
347 },
348 [FEAT_KVM] = {
349 .feat_names = {
350 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
351 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
352 NULL, "kvm-pv-tlb-flush", NULL, NULL,
353 NULL, NULL, NULL, NULL,
354 NULL, NULL, NULL, NULL,
355 NULL, NULL, NULL, NULL,
356 "kvmclock-stable-bit", NULL, NULL, NULL,
357 NULL, NULL, NULL, NULL,
358 },
359 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
360 .tcg_features = TCG_KVM_FEATURES,
361 },
362 [FEAT_HYPERV_EAX] = {
363 .feat_names = {
364 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
365 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
366 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
367 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
368 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
369 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
370 NULL, NULL, NULL, NULL,
371 NULL, NULL, NULL, NULL,
372 NULL, NULL, NULL, NULL,
373 NULL, NULL, NULL, NULL,
374 NULL, NULL, NULL, NULL,
375 },
376 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
377 },
378 [FEAT_HYPERV_EBX] = {
379 .feat_names = {
380 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
381 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
382 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
383 NULL /* hv_create_port */, NULL /* hv_connect_port */,
384 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
385 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
386 NULL, NULL,
387 NULL, NULL, NULL, NULL,
388 NULL, NULL, NULL, NULL,
389 NULL, NULL, NULL, NULL,
390 NULL, NULL, NULL, NULL,
391 },
392 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
393 },
394 [FEAT_HYPERV_EDX] = {
395 .feat_names = {
396 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
397 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
398 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
399 NULL, NULL,
400 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
401 NULL, NULL, NULL, NULL,
402 NULL, NULL, NULL, NULL,
403 NULL, NULL, NULL, NULL,
404 NULL, NULL, NULL, NULL,
405 NULL, NULL, NULL, NULL,
406 },
407 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
408 },
409 [FEAT_SVM] = {
410 .feat_names = {
411 "npt", "lbrv", "svm-lock", "nrip-save",
412 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
413 NULL, NULL, "pause-filter", NULL,
414 "pfthreshold", NULL, NULL, NULL,
415 NULL, NULL, NULL, NULL,
416 NULL, NULL, NULL, NULL,
417 NULL, NULL, NULL, NULL,
418 NULL, NULL, NULL, NULL,
419 },
420 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
421 .tcg_features = TCG_SVM_FEATURES,
422 },
423 [FEAT_7_0_EBX] = {
424 .feat_names = {
425 "fsgsbase", "tsc-adjust", NULL, "bmi1",
426 "hle", "avx2", NULL, "smep",
427 "bmi2", "erms", "invpcid", "rtm",
428 NULL, NULL, "mpx", NULL,
429 "avx512f", "avx512dq", "rdseed", "adx",
430 "smap", "avx512ifma", "pcommit", "clflushopt",
431 "clwb", NULL, "avx512pf", "avx512er",
432 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
433 },
434 .cpuid_eax = 7,
435 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
436 .cpuid_reg = R_EBX,
437 .tcg_features = TCG_7_0_EBX_FEATURES,
438 },
439 [FEAT_7_0_ECX] = {
440 .feat_names = {
441 NULL, "avx512vbmi", "umip", "pku",
442 "ospke", NULL, "avx512vbmi2", NULL,
443 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
444 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
445 "la57", NULL, NULL, NULL,
446 NULL, NULL, "rdpid", NULL,
447 NULL, NULL, NULL, NULL,
448 NULL, NULL, NULL, NULL,
449 },
450 .cpuid_eax = 7,
451 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
452 .cpuid_reg = R_ECX,
453 .tcg_features = TCG_7_0_ECX_FEATURES,
454 },
455 [FEAT_7_0_EDX] = {
456 .feat_names = {
457 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
458 NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL,
461 NULL, NULL, NULL, NULL,
462 NULL, NULL, NULL, NULL,
463 NULL, NULL, "spec-ctrl", NULL,
464 NULL, NULL, NULL, NULL,
465 },
466 .cpuid_eax = 7,
467 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
468 .cpuid_reg = R_EDX,
469 .tcg_features = TCG_7_0_EDX_FEATURES,
470 },
471 [FEAT_8000_0007_EDX] = {
472 .feat_names = {
473 NULL, NULL, NULL, NULL,
474 NULL, NULL, NULL, NULL,
475 "invtsc", NULL, NULL, NULL,
476 NULL, NULL, NULL, NULL,
477 NULL, NULL, NULL, NULL,
478 NULL, NULL, NULL, NULL,
479 NULL, NULL, NULL, NULL,
480 NULL, NULL, NULL, NULL,
481 },
482 .cpuid_eax = 0x80000007,
483 .cpuid_reg = R_EDX,
484 .tcg_features = TCG_APM_FEATURES,
485 .unmigratable_flags = CPUID_APM_INVTSC,
486 },
487 [FEAT_8000_0008_EBX] = {
488 .feat_names = {
489 NULL, NULL, NULL, NULL,
490 NULL, NULL, NULL, NULL,
491 NULL, NULL, NULL, NULL,
492 "ibpb", NULL, NULL, NULL,
493 NULL, NULL, NULL, NULL,
494 NULL, NULL, NULL, NULL,
495 NULL, NULL, NULL, NULL,
496 NULL, NULL, NULL, NULL,
497 },
498 .cpuid_eax = 0x80000008,
499 .cpuid_reg = R_EBX,
500 .tcg_features = 0,
501 .unmigratable_flags = 0,
502 },
503 [FEAT_XSAVE] = {
504 .feat_names = {
505 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
506 NULL, NULL, NULL, NULL,
507 NULL, NULL, NULL, NULL,
508 NULL, NULL, NULL, NULL,
509 NULL, NULL, NULL, NULL,
510 NULL, NULL, NULL, NULL,
511 NULL, NULL, NULL, NULL,
512 NULL, NULL, NULL, NULL,
513 },
514 .cpuid_eax = 0xd,
515 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
516 .cpuid_reg = R_EAX,
517 .tcg_features = TCG_XSAVE_FEATURES,
518 },
519 [FEAT_6_EAX] = {
520 .feat_names = {
521 NULL, NULL, "arat", NULL,
522 NULL, NULL, NULL, NULL,
523 NULL, NULL, NULL, NULL,
524 NULL, NULL, NULL, NULL,
525 NULL, NULL, NULL, NULL,
526 NULL, NULL, NULL, NULL,
527 NULL, NULL, NULL, NULL,
528 NULL, NULL, NULL, NULL,
529 },
530 .cpuid_eax = 6, .cpuid_reg = R_EAX,
531 .tcg_features = TCG_6_EAX_FEATURES,
532 },
533 [FEAT_XSAVE_COMP_LO] = {
534 .cpuid_eax = 0xD,
535 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
536 .cpuid_reg = R_EAX,
537 .tcg_features = ~0U,
538 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
539 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
540 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
541 XSTATE_PKRU_MASK,
542 },
543 [FEAT_XSAVE_COMP_HI] = {
544 .cpuid_eax = 0xD,
545 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
546 .cpuid_reg = R_EDX,
547 .tcg_features = ~0U,
548 },
549 };
550
551 typedef struct X86RegisterInfo32 {
552 /* Name of register */
553 const char *name;
554 /* QAPI enum value for the register */
555 X86CPURegister32 qapi_enum;
556 } X86RegisterInfo32;
557
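/*
 * Names and QAPI enum values for the 32-bit register indexes
 * (R_EAX..R_EDI); used e.g. by get_register_name_32() below.
 */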
558 #define REGISTER(reg) \
559 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
560 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
561 REGISTER(EAX),
562 REGISTER(ECX),
563 REGISTER(EDX),
564 REGISTER(EBX),
565 REGISTER(ESP),
566 REGISTER(EBP),
567 REGISTER(ESI),
568 REGISTER(EDI),
569 };
570 #undef REGISTER
571
572 typedef struct ExtSaveArea {
573 uint32_t feature, bits;
574 uint32_t offset, size;
575 } ExtSaveArea;
576
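/*
 * x86_ext_save_areas[] lists, per XSTATE component bit, the CPUID feature
 * that makes the component available plus the component's offset and size
 * within QEMU's X86XSaveArea structure.
 */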
577 static const ExtSaveArea x86_ext_save_areas[] = {
578 [XSTATE_FP_BIT] = {
579 /* x87 FP state component is always enabled if XSAVE is supported */
580 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
581 /* x87 state is in the legacy region of the XSAVE area */
582 .offset = 0,
583 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
584 },
585 [XSTATE_SSE_BIT] = {
586 /* SSE state component is always enabled if XSAVE is supported */
587 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
588 /* SSE state is in the legacy region of the XSAVE area */
589 .offset = 0,
590 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
591 },
592 [XSTATE_YMM_BIT] =
593 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
594 .offset = offsetof(X86XSaveArea, avx_state),
595 .size = sizeof(XSaveAVX) },
596 [XSTATE_BNDREGS_BIT] =
597 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
598 .offset = offsetof(X86XSaveArea, bndreg_state),
599 .size = sizeof(XSaveBNDREG) },
600 [XSTATE_BNDCSR_BIT] =
601 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
602 .offset = offsetof(X86XSaveArea, bndcsr_state),
603 .size = sizeof(XSaveBNDCSR) },
604 [XSTATE_OPMASK_BIT] =
605 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
606 .offset = offsetof(X86XSaveArea, opmask_state),
607 .size = sizeof(XSaveOpmask) },
608 [XSTATE_ZMM_Hi256_BIT] =
609 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
610 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
611 .size = sizeof(XSaveZMM_Hi256) },
612 [XSTATE_Hi16_ZMM_BIT] =
613 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
614 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
615 .size = sizeof(XSaveHi16_ZMM) },
616 [XSTATE_PKRU_BIT] =
617 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
618 .offset = offsetof(X86XSaveArea, pkru_state),
619 .size = sizeof(XSavePKRU) },
620 };
621
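/*
 * Return the number of bytes an XSAVE area needs in order to hold every
 * state component whose bit is set in @mask, i.e. the end offset of the
 * highest-placed enabled component in x86_ext_save_areas[].
 */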
622 static uint32_t xsave_area_size(uint64_t mask)
623 {
624 int i;
625 uint64_t ret = 0;
626
627 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
628 const ExtSaveArea *esa = &x86_ext_save_areas[i];
629 if ((mask >> i) & 1) {
630 ret = MAX(ret, esa->offset + esa->size);
631 }
632 }
633 return ret;
634 }
635
636 static inline bool accel_uses_host_cpuid(void)
637 {
638 return kvm_enabled() || hvf_enabled();
639 }
640
641 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
642 {
643 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
644 cpu->env.features[FEAT_XSAVE_COMP_LO];
645 }
646
647 const char *get_register_name_32(unsigned int reg)
648 {
649 if (reg >= CPU_NB_REGS32) {
650 return NULL;
651 }
652 return x86_reg_info_32[reg].name;
653 }
654
655 /*
656 * Returns the set of feature flags that are supported and migratable by
657 * QEMU, for a given FeatureWord.
658 */
659 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
660 {
661 FeatureWordInfo *wi = &feature_word_info[w];
662 uint32_t r = 0;
663 int i;
664
665 for (i = 0; i < 32; i++) {
666 uint32_t f = 1U << i;
667
668 /* If the feature name is known, it is implicitly considered migratable,
669 * unless it is explicitly set in unmigratable_flags */
670 if ((wi->migratable_flags & f) ||
671 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
672 r |= f;
673 }
674 }
675 return r;
676 }
677
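/*
 * Execute CPUID on the host for the given leaf/sub-leaf and return the
 * requested output registers.  The 32-bit variant preserves all GPRs with
 * pusha/popa and stores the results through memory rather than register
 * outputs, so EBX (which may be reserved for PIC) is never clobbered from
 * the compiler's point of view.
 */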
678 void host_cpuid(uint32_t function, uint32_t count,
679 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
680 {
681 uint32_t vec[4];
682
683 #ifdef __x86_64__
684 asm volatile("cpuid"
685 : "=a"(vec[0]), "=b"(vec[1]),
686 "=c"(vec[2]), "=d"(vec[3])
687 : "0"(function), "c"(count) : "cc");
688 #elif defined(__i386__)
689 asm volatile("pusha \n\t"
690 "cpuid \n\t"
691 "mov %%eax, 0(%2) \n\t"
692 "mov %%ebx, 4(%2) \n\t"
693 "mov %%ecx, 8(%2) \n\t"
694 "mov %%edx, 12(%2) \n\t"
695 "popa"
696 : : "a"(function), "c"(count), "S"(vec)
697 : "memory", "cc");
698 #else
699 abort();
700 #endif
701
702 if (eax)
703 *eax = vec[0];
704 if (ebx)
705 *ebx = vec[1];
706 if (ecx)
707 *ecx = vec[2];
708 if (edx)
709 *edx = vec[3];
710 }
711
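/*
 * Query the host's vendor string and family/model/stepping via CPUID.
 * Per the usual CPUID.01H:EAX layout, family is the base family plus the
 * extended family field, and model combines the extended model (EAX bits
 * 19:16) as the high nibble with the base model (EAX bits 7:4) as the low
 * nibble.
 */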
712 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
713 {
714 uint32_t eax, ebx, ecx, edx;
715
716 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
717 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
718
719 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
720 if (family) {
721 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
722 }
723 if (model) {
724 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
725 }
726 if (stepping) {
727 *stepping = eax & 0x0F;
728 }
729 }
730
731 /* CPU class name definitions: */
732
733 /* Return the QOM type name for a given CPU model name.
734 * Caller is responsible for freeing the returned string.
735 */
736 static char *x86_cpu_type_name(const char *model_name)
737 {
738 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
739 }
740
741 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
742 {
743 ObjectClass *oc;
744 char *typename;
745
746 if (cpu_model == NULL) {
747 return NULL;
748 }
749
750 typename = x86_cpu_type_name(cpu_model);
751 oc = object_class_by_name(typename);
752 g_free(typename);
753 return oc;
754 }
755
756 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
757 {
758 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
759 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
760 return g_strndup(class_name,
761 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
762 }
763
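/*
 * Definition of a built-in CPU model.  'level' and 'xlevel' are the
 * maximum basic (0x0000_xxxx) and extended (0x8000_xxxx) CPUID leaves
 * advertised to the guest; 'features' holds the initial value of every
 * feature word; 'model_id' becomes the CPUID brand string.
 */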
764 struct X86CPUDefinition {
765 const char *name;
766 uint32_t level;
767 uint32_t xlevel;
768 /* vendor is a zero-terminated, 12-character ASCII string */
769 char vendor[CPUID_VENDOR_SZ + 1];
770 int family;
771 int model;
772 int stepping;
773 FeatureWordArray features;
774 const char *model_id;
775 };
776
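/*
 * Built-in CPU models.  The "-IBRS" variants differ from their base models
 * by adding CPUID_7_0_EDX_SPEC_CTRL ("spec-ctrl"), the IBRS/IBPB
 * speculation-control interface, and the "-noTSX" variants drop the HLE
 * and RTM bits from CPUID leaf 7 EBX.
 */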
777 static X86CPUDefinition builtin_x86_defs[] = {
778 {
779 .name = "qemu64",
780 .level = 0xd,
781 .vendor = CPUID_VENDOR_AMD,
782 .family = 6,
783 .model = 6,
784 .stepping = 3,
785 .features[FEAT_1_EDX] =
786 PPRO_FEATURES |
787 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
788 CPUID_PSE36,
789 .features[FEAT_1_ECX] =
790 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
791 .features[FEAT_8000_0001_EDX] =
792 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
793 .features[FEAT_8000_0001_ECX] =
794 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
795 .xlevel = 0x8000000A,
796 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
797 },
798 {
799 .name = "phenom",
800 .level = 5,
801 .vendor = CPUID_VENDOR_AMD,
802 .family = 16,
803 .model = 2,
804 .stepping = 3,
805 /* Missing: CPUID_HT */
806 .features[FEAT_1_EDX] =
807 PPRO_FEATURES |
808 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
809 CPUID_PSE36 | CPUID_VME,
810 .features[FEAT_1_ECX] =
811 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
812 CPUID_EXT_POPCNT,
813 .features[FEAT_8000_0001_EDX] =
814 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
815 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
816 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
817 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
818 CPUID_EXT3_CR8LEG,
819 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
820 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
821 .features[FEAT_8000_0001_ECX] =
822 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
823 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
824 /* Missing: CPUID_SVM_LBRV */
825 .features[FEAT_SVM] =
826 CPUID_SVM_NPT,
827 .xlevel = 0x8000001A,
828 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
829 },
830 {
831 .name = "core2duo",
832 .level = 10,
833 .vendor = CPUID_VENDOR_INTEL,
834 .family = 6,
835 .model = 15,
836 .stepping = 11,
837 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
838 .features[FEAT_1_EDX] =
839 PPRO_FEATURES |
840 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
841 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
842 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
843 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
844 .features[FEAT_1_ECX] =
845 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
846 CPUID_EXT_CX16,
847 .features[FEAT_8000_0001_EDX] =
848 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
849 .features[FEAT_8000_0001_ECX] =
850 CPUID_EXT3_LAHF_LM,
851 .xlevel = 0x80000008,
852 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
853 },
854 {
855 .name = "kvm64",
856 .level = 0xd,
857 .vendor = CPUID_VENDOR_INTEL,
858 .family = 15,
859 .model = 6,
860 .stepping = 1,
861 /* Missing: CPUID_HT */
862 .features[FEAT_1_EDX] =
863 PPRO_FEATURES | CPUID_VME |
864 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
865 CPUID_PSE36,
866 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
867 .features[FEAT_1_ECX] =
868 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
869 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
870 .features[FEAT_8000_0001_EDX] =
871 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
872 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
873 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
874 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
875 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
876 .features[FEAT_8000_0001_ECX] =
877 0,
878 .xlevel = 0x80000008,
879 .model_id = "Common KVM processor"
880 },
881 {
882 .name = "qemu32",
883 .level = 4,
884 .vendor = CPUID_VENDOR_INTEL,
885 .family = 6,
886 .model = 6,
887 .stepping = 3,
888 .features[FEAT_1_EDX] =
889 PPRO_FEATURES,
890 .features[FEAT_1_ECX] =
891 CPUID_EXT_SSE3,
892 .xlevel = 0x80000004,
893 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
894 },
895 {
896 .name = "kvm32",
897 .level = 5,
898 .vendor = CPUID_VENDOR_INTEL,
899 .family = 15,
900 .model = 6,
901 .stepping = 1,
902 .features[FEAT_1_EDX] =
903 PPRO_FEATURES | CPUID_VME |
904 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
905 .features[FEAT_1_ECX] =
906 CPUID_EXT_SSE3,
907 .features[FEAT_8000_0001_ECX] =
908 0,
909 .xlevel = 0x80000008,
910 .model_id = "Common 32-bit KVM processor"
911 },
912 {
913 .name = "coreduo",
914 .level = 10,
915 .vendor = CPUID_VENDOR_INTEL,
916 .family = 6,
917 .model = 14,
918 .stepping = 8,
919 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
920 .features[FEAT_1_EDX] =
921 PPRO_FEATURES | CPUID_VME |
922 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
923 CPUID_SS,
924 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_XTPR,
925 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
926 .features[FEAT_1_ECX] =
927 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
928 .features[FEAT_8000_0001_EDX] =
929 CPUID_EXT2_NX,
930 .xlevel = 0x80000008,
931 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
932 },
933 {
934 .name = "486",
935 .level = 1,
936 .vendor = CPUID_VENDOR_INTEL,
937 .family = 4,
938 .model = 8,
939 .stepping = 0,
940 .features[FEAT_1_EDX] =
941 I486_FEATURES,
942 .xlevel = 0,
943 .model_id = "",
944 },
945 {
946 .name = "pentium",
947 .level = 1,
948 .vendor = CPUID_VENDOR_INTEL,
949 .family = 5,
950 .model = 4,
951 .stepping = 3,
952 .features[FEAT_1_EDX] =
953 PENTIUM_FEATURES,
954 .xlevel = 0,
955 .model_id = "",
956 },
957 {
958 .name = "pentium2",
959 .level = 2,
960 .vendor = CPUID_VENDOR_INTEL,
961 .family = 6,
962 .model = 5,
963 .stepping = 2,
964 .features[FEAT_1_EDX] =
965 PENTIUM2_FEATURES,
966 .xlevel = 0,
967 .model_id = "",
968 },
969 {
970 .name = "pentium3",
971 .level = 3,
972 .vendor = CPUID_VENDOR_INTEL,
973 .family = 6,
974 .model = 7,
975 .stepping = 3,
976 .features[FEAT_1_EDX] =
977 PENTIUM3_FEATURES,
978 .xlevel = 0,
979 .model_id = "",
980 },
981 {
982 .name = "athlon",
983 .level = 2,
984 .vendor = CPUID_VENDOR_AMD,
985 .family = 6,
986 .model = 2,
987 .stepping = 3,
988 .features[FEAT_1_EDX] =
989 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
990 CPUID_MCA,
991 .features[FEAT_8000_0001_EDX] =
992 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
993 .xlevel = 0x80000008,
994 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
995 },
996 {
997 .name = "n270",
998 .level = 10,
999 .vendor = CPUID_VENDOR_INTEL,
1000 .family = 6,
1001 .model = 28,
1002 .stepping = 2,
1003 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1004 .features[FEAT_1_EDX] =
1005 PPRO_FEATURES |
1006 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1007 CPUID_ACPI | CPUID_SS,
1008 /* Some CPUs lack CPUID_SEP */
1009 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1010 * CPUID_EXT_XTPR */
1011 .features[FEAT_1_ECX] =
1012 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1013 CPUID_EXT_MOVBE,
1014 .features[FEAT_8000_0001_EDX] =
1015 CPUID_EXT2_NX,
1016 .features[FEAT_8000_0001_ECX] =
1017 CPUID_EXT3_LAHF_LM,
1018 .xlevel = 0x80000008,
1019 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1020 },
1021 {
1022 .name = "Conroe",
1023 .level = 10,
1024 .vendor = CPUID_VENDOR_INTEL,
1025 .family = 6,
1026 .model = 15,
1027 .stepping = 3,
1028 .features[FEAT_1_EDX] =
1029 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1030 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1031 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1032 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1033 CPUID_DE | CPUID_FP87,
1034 .features[FEAT_1_ECX] =
1035 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1036 .features[FEAT_8000_0001_EDX] =
1037 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1038 .features[FEAT_8000_0001_ECX] =
1039 CPUID_EXT3_LAHF_LM,
1040 .xlevel = 0x80000008,
1041 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1042 },
1043 {
1044 .name = "Penryn",
1045 .level = 10,
1046 .vendor = CPUID_VENDOR_INTEL,
1047 .family = 6,
1048 .model = 23,
1049 .stepping = 3,
1050 .features[FEAT_1_EDX] =
1051 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1052 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1053 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1054 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1055 CPUID_DE | CPUID_FP87,
1056 .features[FEAT_1_ECX] =
1057 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1058 CPUID_EXT_SSE3,
1059 .features[FEAT_8000_0001_EDX] =
1060 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1061 .features[FEAT_8000_0001_ECX] =
1062 CPUID_EXT3_LAHF_LM,
1063 .xlevel = 0x80000008,
1064 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1065 },
1066 {
1067 .name = "Nehalem",
1068 .level = 11,
1069 .vendor = CPUID_VENDOR_INTEL,
1070 .family = 6,
1071 .model = 26,
1072 .stepping = 3,
1073 .features[FEAT_1_EDX] =
1074 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1075 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1076 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1077 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1078 CPUID_DE | CPUID_FP87,
1079 .features[FEAT_1_ECX] =
1080 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1081 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1082 .features[FEAT_8000_0001_EDX] =
1083 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1084 .features[FEAT_8000_0001_ECX] =
1085 CPUID_EXT3_LAHF_LM,
1086 .xlevel = 0x80000008,
1087 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1088 },
1089 {
1090 .name = "Nehalem-IBRS",
1091 .level = 11,
1092 .vendor = CPUID_VENDOR_INTEL,
1093 .family = 6,
1094 .model = 26,
1095 .stepping = 3,
1096 .features[FEAT_1_EDX] =
1097 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1098 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1099 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1100 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1101 CPUID_DE | CPUID_FP87,
1102 .features[FEAT_1_ECX] =
1103 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1104 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1105 .features[FEAT_7_0_EDX] =
1106 CPUID_7_0_EDX_SPEC_CTRL,
1107 .features[FEAT_8000_0001_EDX] =
1108 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1109 .features[FEAT_8000_0001_ECX] =
1110 CPUID_EXT3_LAHF_LM,
1111 .xlevel = 0x80000008,
1112 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1113 },
1114 {
1115 .name = "Westmere",
1116 .level = 11,
1117 .vendor = CPUID_VENDOR_INTEL,
1118 .family = 6,
1119 .model = 44,
1120 .stepping = 1,
1121 .features[FEAT_1_EDX] =
1122 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1123 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1124 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1125 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1126 CPUID_DE | CPUID_FP87,
1127 .features[FEAT_1_ECX] =
1128 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1129 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1130 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1131 .features[FEAT_8000_0001_EDX] =
1132 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1133 .features[FEAT_8000_0001_ECX] =
1134 CPUID_EXT3_LAHF_LM,
1135 .features[FEAT_6_EAX] =
1136 CPUID_6_EAX_ARAT,
1137 .xlevel = 0x80000008,
1138 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1139 },
1140 {
1141 .name = "Westmere-IBRS",
1142 .level = 11,
1143 .vendor = CPUID_VENDOR_INTEL,
1144 .family = 6,
1145 .model = 44,
1146 .stepping = 1,
1147 .features[FEAT_1_EDX] =
1148 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1149 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1150 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1151 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1152 CPUID_DE | CPUID_FP87,
1153 .features[FEAT_1_ECX] =
1154 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1155 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1156 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1157 .features[FEAT_8000_0001_EDX] =
1158 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1159 .features[FEAT_8000_0001_ECX] =
1160 CPUID_EXT3_LAHF_LM,
1161 .features[FEAT_7_0_EDX] =
1162 CPUID_7_0_EDX_SPEC_CTRL,
1163 .features[FEAT_6_EAX] =
1164 CPUID_6_EAX_ARAT,
1165 .xlevel = 0x80000008,
1166 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1167 },
1168 {
1169 .name = "SandyBridge",
1170 .level = 0xd,
1171 .vendor = CPUID_VENDOR_INTEL,
1172 .family = 6,
1173 .model = 42,
1174 .stepping = 1,
1175 .features[FEAT_1_EDX] =
1176 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1177 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1180 CPUID_DE | CPUID_FP87,
1181 .features[FEAT_1_ECX] =
1182 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1183 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1184 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1185 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1186 CPUID_EXT_SSE3,
1187 .features[FEAT_8000_0001_EDX] =
1188 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1189 CPUID_EXT2_SYSCALL,
1190 .features[FEAT_8000_0001_ECX] =
1191 CPUID_EXT3_LAHF_LM,
1192 .features[FEAT_XSAVE] =
1193 CPUID_XSAVE_XSAVEOPT,
1194 .features[FEAT_6_EAX] =
1195 CPUID_6_EAX_ARAT,
1196 .xlevel = 0x80000008,
1197 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1198 },
1199 {
1200 .name = "SandyBridge-IBRS",
1201 .level = 0xd,
1202 .vendor = CPUID_VENDOR_INTEL,
1203 .family = 6,
1204 .model = 42,
1205 .stepping = 1,
1206 .features[FEAT_1_EDX] =
1207 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1208 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1209 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1210 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1211 CPUID_DE | CPUID_FP87,
1212 .features[FEAT_1_ECX] =
1213 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1214 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1215 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1216 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1217 CPUID_EXT_SSE3,
1218 .features[FEAT_8000_0001_EDX] =
1219 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1220 CPUID_EXT2_SYSCALL,
1221 .features[FEAT_8000_0001_ECX] =
1222 CPUID_EXT3_LAHF_LM,
1223 .features[FEAT_7_0_EDX] =
1224 CPUID_7_0_EDX_SPEC_CTRL,
1225 .features[FEAT_XSAVE] =
1226 CPUID_XSAVE_XSAVEOPT,
1227 .features[FEAT_6_EAX] =
1228 CPUID_6_EAX_ARAT,
1229 .xlevel = 0x80000008,
1230 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1231 },
1232 {
1233 .name = "IvyBridge",
1234 .level = 0xd,
1235 .vendor = CPUID_VENDOR_INTEL,
1236 .family = 6,
1237 .model = 58,
1238 .stepping = 9,
1239 .features[FEAT_1_EDX] =
1240 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1241 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1242 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1243 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1244 CPUID_DE | CPUID_FP87,
1245 .features[FEAT_1_ECX] =
1246 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1247 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1248 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1249 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1250 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1251 .features[FEAT_7_0_EBX] =
1252 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1253 CPUID_7_0_EBX_ERMS,
1254 .features[FEAT_8000_0001_EDX] =
1255 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1256 CPUID_EXT2_SYSCALL,
1257 .features[FEAT_8000_0001_ECX] =
1258 CPUID_EXT3_LAHF_LM,
1259 .features[FEAT_XSAVE] =
1260 CPUID_XSAVE_XSAVEOPT,
1261 .features[FEAT_6_EAX] =
1262 CPUID_6_EAX_ARAT,
1263 .xlevel = 0x80000008,
1264 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1265 },
1266 {
1267 .name = "IvyBridge-IBRS",
1268 .level = 0xd,
1269 .vendor = CPUID_VENDOR_INTEL,
1270 .family = 6,
1271 .model = 58,
1272 .stepping = 9,
1273 .features[FEAT_1_EDX] =
1274 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1275 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1276 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1277 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1278 CPUID_DE | CPUID_FP87,
1279 .features[FEAT_1_ECX] =
1280 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1281 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1282 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1283 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1284 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1285 .features[FEAT_7_0_EBX] =
1286 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1287 CPUID_7_0_EBX_ERMS,
1288 .features[FEAT_8000_0001_EDX] =
1289 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1290 CPUID_EXT2_SYSCALL,
1291 .features[FEAT_8000_0001_ECX] =
1292 CPUID_EXT3_LAHF_LM,
1293 .features[FEAT_7_0_EDX] =
1294 CPUID_7_0_EDX_SPEC_CTRL,
1295 .features[FEAT_XSAVE] =
1296 CPUID_XSAVE_XSAVEOPT,
1297 .features[FEAT_6_EAX] =
1298 CPUID_6_EAX_ARAT,
1299 .xlevel = 0x80000008,
1300 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1301 },
1302 {
1303 .name = "Haswell-noTSX",
1304 .level = 0xd,
1305 .vendor = CPUID_VENDOR_INTEL,
1306 .family = 6,
1307 .model = 60,
1308 .stepping = 1,
1309 .features[FEAT_1_EDX] =
1310 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1311 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1312 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1313 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1314 CPUID_DE | CPUID_FP87,
1315 .features[FEAT_1_ECX] =
1316 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1317 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1318 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1319 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1320 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1321 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1322 .features[FEAT_8000_0001_EDX] =
1323 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1324 CPUID_EXT2_SYSCALL,
1325 .features[FEAT_8000_0001_ECX] =
1326 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1327 .features[FEAT_7_0_EBX] =
1328 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1329 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1330 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1331 .features[FEAT_XSAVE] =
1332 CPUID_XSAVE_XSAVEOPT,
1333 .features[FEAT_6_EAX] =
1334 CPUID_6_EAX_ARAT,
1335 .xlevel = 0x80000008,
1336 .model_id = "Intel Core Processor (Haswell, no TSX)",
1337 },
1338 {
1339 .name = "Haswell-noTSX-IBRS",
1340 .level = 0xd,
1341 .vendor = CPUID_VENDOR_INTEL,
1342 .family = 6,
1343 .model = 60,
1344 .stepping = 1,
1345 .features[FEAT_1_EDX] =
1346 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1347 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1348 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1349 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1350 CPUID_DE | CPUID_FP87,
1351 .features[FEAT_1_ECX] =
1352 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1353 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1354 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1355 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1356 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1357 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1358 .features[FEAT_8000_0001_EDX] =
1359 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1360 CPUID_EXT2_SYSCALL,
1361 .features[FEAT_8000_0001_ECX] =
1362 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1363 .features[FEAT_7_0_EDX] =
1364 CPUID_7_0_EDX_SPEC_CTRL,
1365 .features[FEAT_7_0_EBX] =
1366 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1367 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1368 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1369 .features[FEAT_XSAVE] =
1370 CPUID_XSAVE_XSAVEOPT,
1371 .features[FEAT_6_EAX] =
1372 CPUID_6_EAX_ARAT,
1373 .xlevel = 0x80000008,
1374 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1375 },
1376 {
1377 .name = "Haswell",
1378 .level = 0xd,
1379 .vendor = CPUID_VENDOR_INTEL,
1380 .family = 6,
1381 .model = 60,
1382 .stepping = 4,
1383 .features[FEAT_1_EDX] =
1384 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1385 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1386 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1387 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1388 CPUID_DE | CPUID_FP87,
1389 .features[FEAT_1_ECX] =
1390 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1391 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1392 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1393 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1394 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1395 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1396 .features[FEAT_8000_0001_EDX] =
1397 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1398 CPUID_EXT2_SYSCALL,
1399 .features[FEAT_8000_0001_ECX] =
1400 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1401 .features[FEAT_7_0_EBX] =
1402 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1403 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1404 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1405 CPUID_7_0_EBX_RTM,
1406 .features[FEAT_XSAVE] =
1407 CPUID_XSAVE_XSAVEOPT,
1408 .features[FEAT_6_EAX] =
1409 CPUID_6_EAX_ARAT,
1410 .xlevel = 0x80000008,
1411 .model_id = "Intel Core Processor (Haswell)",
1412 },
1413 {
1414 .name = "Haswell-IBRS",
1415 .level = 0xd,
1416 .vendor = CPUID_VENDOR_INTEL,
1417 .family = 6,
1418 .model = 60,
1419 .stepping = 4,
1420 .features[FEAT_1_EDX] =
1421 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1422 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1423 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1424 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1425 CPUID_DE | CPUID_FP87,
1426 .features[FEAT_1_ECX] =
1427 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1428 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1429 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1430 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1431 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1432 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1433 .features[FEAT_8000_0001_EDX] =
1434 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1435 CPUID_EXT2_SYSCALL,
1436 .features[FEAT_8000_0001_ECX] =
1437 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1438 .features[FEAT_7_0_EDX] =
1439 CPUID_7_0_EDX_SPEC_CTRL,
1440 .features[FEAT_7_0_EBX] =
1441 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1442 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1443 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1444 CPUID_7_0_EBX_RTM,
1445 .features[FEAT_XSAVE] =
1446 CPUID_XSAVE_XSAVEOPT,
1447 .features[FEAT_6_EAX] =
1448 CPUID_6_EAX_ARAT,
1449 .xlevel = 0x80000008,
1450 .model_id = "Intel Core Processor (Haswell, IBRS)",
1451 },
1452 {
1453 .name = "Broadwell-noTSX",
1454 .level = 0xd,
1455 .vendor = CPUID_VENDOR_INTEL,
1456 .family = 6,
1457 .model = 61,
1458 .stepping = 2,
1459 .features[FEAT_1_EDX] =
1460 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1461 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1462 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1463 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1464 CPUID_DE | CPUID_FP87,
1465 .features[FEAT_1_ECX] =
1466 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1467 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1468 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1469 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1470 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1471 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1472 .features[FEAT_8000_0001_EDX] =
1473 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1474 CPUID_EXT2_SYSCALL,
1475 .features[FEAT_8000_0001_ECX] =
1476 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1477 .features[FEAT_7_0_EBX] =
1478 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1479 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1480 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1481 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1482 CPUID_7_0_EBX_SMAP,
1483 .features[FEAT_XSAVE] =
1484 CPUID_XSAVE_XSAVEOPT,
1485 .features[FEAT_6_EAX] =
1486 CPUID_6_EAX_ARAT,
1487 .xlevel = 0x80000008,
1488 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1489 },
1490 {
1491 .name = "Broadwell-noTSX-IBRS",
1492 .level = 0xd,
1493 .vendor = CPUID_VENDOR_INTEL,
1494 .family = 6,
1495 .model = 61,
1496 .stepping = 2,
1497 .features[FEAT_1_EDX] =
1498 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1499 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1500 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1501 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1502 CPUID_DE | CPUID_FP87,
1503 .features[FEAT_1_ECX] =
1504 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1505 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1506 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1507 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1508 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1509 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1510 .features[FEAT_8000_0001_EDX] =
1511 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1512 CPUID_EXT2_SYSCALL,
1513 .features[FEAT_8000_0001_ECX] =
1514 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1515 .features[FEAT_7_0_EDX] =
1516 CPUID_7_0_EDX_SPEC_CTRL,
1517 .features[FEAT_7_0_EBX] =
1518 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1519 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1520 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1521 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1522 CPUID_7_0_EBX_SMAP,
1523 .features[FEAT_XSAVE] =
1524 CPUID_XSAVE_XSAVEOPT,
1525 .features[FEAT_6_EAX] =
1526 CPUID_6_EAX_ARAT,
1527 .xlevel = 0x80000008,
1528 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
1529 },
1530 {
1531 .name = "Broadwell",
1532 .level = 0xd,
1533 .vendor = CPUID_VENDOR_INTEL,
1534 .family = 6,
1535 .model = 61,
1536 .stepping = 2,
1537 .features[FEAT_1_EDX] =
1538 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1539 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1540 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1541 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1542 CPUID_DE | CPUID_FP87,
1543 .features[FEAT_1_ECX] =
1544 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1545 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1546 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1547 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1548 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1549 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1550 .features[FEAT_8000_0001_EDX] =
1551 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1552 CPUID_EXT2_SYSCALL,
1553 .features[FEAT_8000_0001_ECX] =
1554 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1555 .features[FEAT_7_0_EBX] =
1556 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1557 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1558 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1559 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1560 CPUID_7_0_EBX_SMAP,
1561 .features[FEAT_XSAVE] =
1562 CPUID_XSAVE_XSAVEOPT,
1563 .features[FEAT_6_EAX] =
1564 CPUID_6_EAX_ARAT,
1565 .xlevel = 0x80000008,
1566 .model_id = "Intel Core Processor (Broadwell)",
1567 },
1568 {
1569 .name = "Broadwell-IBRS",
1570 .level = 0xd,
1571 .vendor = CPUID_VENDOR_INTEL,
1572 .family = 6,
1573 .model = 61,
1574 .stepping = 2,
1575 .features[FEAT_1_EDX] =
1576 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1577 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1578 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1579 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1580 CPUID_DE | CPUID_FP87,
1581 .features[FEAT_1_ECX] =
1582 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1583 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1584 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1585 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1586 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1587 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1588 .features[FEAT_8000_0001_EDX] =
1589 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1590 CPUID_EXT2_SYSCALL,
1591 .features[FEAT_8000_0001_ECX] =
1592 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1593 .features[FEAT_7_0_EDX] =
1594 CPUID_7_0_EDX_SPEC_CTRL,
1595 .features[FEAT_7_0_EBX] =
1596 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1597 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1598 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1599 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1600 CPUID_7_0_EBX_SMAP,
1601 .features[FEAT_XSAVE] =
1602 CPUID_XSAVE_XSAVEOPT,
1603 .features[FEAT_6_EAX] =
1604 CPUID_6_EAX_ARAT,
1605 .xlevel = 0x80000008,
1606 .model_id = "Intel Core Processor (Broadwell, IBRS)",
1607 },
1608 {
1609 .name = "Skylake-Client",
1610 .level = 0xd,
1611 .vendor = CPUID_VENDOR_INTEL,
1612 .family = 6,
1613 .model = 94,
1614 .stepping = 3,
1615 .features[FEAT_1_EDX] =
1616 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1617 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1618 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1619 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1620 CPUID_DE | CPUID_FP87,
1621 .features[FEAT_1_ECX] =
1622 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1623 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1624 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1625 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1626 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1627 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1628 .features[FEAT_8000_0001_EDX] =
1629 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1630 CPUID_EXT2_SYSCALL,
1631 .features[FEAT_8000_0001_ECX] =
1632 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1633 .features[FEAT_7_0_EBX] =
1634 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1635 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1636 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1637 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1638 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1639 /* Missing: XSAVES (not supported by some Linux versions,
1640 * including v4.1 to v4.12).
1641 * KVM doesn't yet expose any XSAVES state save component,
1642 * and the only one defined in Skylake (processor tracing)
1643 * probably will block migration anyway.
1644 */
1645 .features[FEAT_XSAVE] =
1646 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1647 CPUID_XSAVE_XGETBV1,
1648 .features[FEAT_6_EAX] =
1649 CPUID_6_EAX_ARAT,
1650 .xlevel = 0x80000008,
1651 .model_id = "Intel Core Processor (Skylake)",
1652 },
1653 {
1654 .name = "Skylake-Client-IBRS",
1655 .level = 0xd,
1656 .vendor = CPUID_VENDOR_INTEL,
1657 .family = 6,
1658 .model = 94,
1659 .stepping = 3,
1660 .features[FEAT_1_EDX] =
1661 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1662 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1663 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1664 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1665 CPUID_DE | CPUID_FP87,
1666 .features[FEAT_1_ECX] =
1667 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1668 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1669 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1670 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1671 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1672 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1673 .features[FEAT_8000_0001_EDX] =
1674 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1675 CPUID_EXT2_SYSCALL,
1676 .features[FEAT_8000_0001_ECX] =
1677 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1678 .features[FEAT_7_0_EDX] =
1679 CPUID_7_0_EDX_SPEC_CTRL,
1680 .features[FEAT_7_0_EBX] =
1681 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1682 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1683 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1684 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1685 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1686 /* Missing: XSAVES (not supported by some Linux versions,
1687 * including v4.1 to v4.12).
1688 * KVM doesn't yet expose any XSAVES state save component,
1689 * and the only one defined in Skylake (processor tracing)
1690 * probably will block migration anyway.
1691 */
1692 .features[FEAT_XSAVE] =
1693 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1694 CPUID_XSAVE_XGETBV1,
1695 .features[FEAT_6_EAX] =
1696 CPUID_6_EAX_ARAT,
1697 .xlevel = 0x80000008,
1698 .model_id = "Intel Core Processor (Skylake, IBRS)",
1699 },
1700 {
1701 .name = "Skylake-Server",
1702 .level = 0xd,
1703 .vendor = CPUID_VENDOR_INTEL,
1704 .family = 6,
1705 .model = 85,
1706 .stepping = 4,
1707 .features[FEAT_1_EDX] =
1708 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1709 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1710 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1711 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1712 CPUID_DE | CPUID_FP87,
1713 .features[FEAT_1_ECX] =
1714 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1715 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1716 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1717 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1718 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1719 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1720 .features[FEAT_8000_0001_EDX] =
1721 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1722 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1723 .features[FEAT_8000_0001_ECX] =
1724 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1725 .features[FEAT_7_0_EBX] =
1726 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1727 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1728 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1729 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1730 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1731 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1732 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1733 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
1734 /* Missing: XSAVES (not supported by some Linux versions,
1735 * including v4.1 to v4.12).
1736 * KVM doesn't yet expose any XSAVES state save component,
1737 * and the only one defined in Skylake (processor tracing)
1738 * probably will block migration anyway.
1739 */
1740 .features[FEAT_XSAVE] =
1741 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1742 CPUID_XSAVE_XGETBV1,
1743 .features[FEAT_6_EAX] =
1744 CPUID_6_EAX_ARAT,
1745 .xlevel = 0x80000008,
1746 .model_id = "Intel Xeon Processor (Skylake)",
1747 },
1748 {
1749 .name = "Skylake-Server-IBRS",
1750 .level = 0xd,
1751 .vendor = CPUID_VENDOR_INTEL,
1752 .family = 6,
1753 .model = 85,
1754 .stepping = 4,
1755 .features[FEAT_1_EDX] =
1756 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1757 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1758 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1759 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1760 CPUID_DE | CPUID_FP87,
1761 .features[FEAT_1_ECX] =
1762 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1763 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1764 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1765 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1766 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1767 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1768 .features[FEAT_8000_0001_EDX] =
1769 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1770 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1771 .features[FEAT_8000_0001_ECX] =
1772 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1773 .features[FEAT_7_0_EDX] =
1774 CPUID_7_0_EDX_SPEC_CTRL,
1775 .features[FEAT_7_0_EBX] =
1776 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1777 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1778 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1779 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1780 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1781 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1782 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1783 CPUID_7_0_EBX_AVX512VL,
1784 /* Missing: XSAVES (not supported by some Linux versions,
1785 * including v4.1 to v4.12).
1786 * KVM doesn't yet expose any XSAVES state save component,
1787 * and the only one defined in Skylake (processor tracing)
1788 * probably will block migration anyway.
1789 */
1790 .features[FEAT_XSAVE] =
1791 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1792 CPUID_XSAVE_XGETBV1,
1793 .features[FEAT_6_EAX] =
1794 CPUID_6_EAX_ARAT,
1795 .xlevel = 0x80000008,
1796 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
1797 },
1798 {
1799 .name = "Opteron_G1",
1800 .level = 5,
1801 .vendor = CPUID_VENDOR_AMD,
1802 .family = 15,
1803 .model = 6,
1804 .stepping = 1,
1805 .features[FEAT_1_EDX] =
1806 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1807 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1808 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1809 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1810 CPUID_DE | CPUID_FP87,
1811 .features[FEAT_1_ECX] =
1812 CPUID_EXT_SSE3,
1813 .features[FEAT_8000_0001_EDX] =
1814 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1815 .xlevel = 0x80000008,
1816 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1817 },
1818 {
1819 .name = "Opteron_G2",
1820 .level = 5,
1821 .vendor = CPUID_VENDOR_AMD,
1822 .family = 15,
1823 .model = 6,
1824 .stepping = 1,
1825 .features[FEAT_1_EDX] =
1826 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1827 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1828 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1829 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1830 CPUID_DE | CPUID_FP87,
1831 .features[FEAT_1_ECX] =
1832 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1833 /* Missing: CPUID_EXT2_RDTSCP */
1834 .features[FEAT_8000_0001_EDX] =
1835 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1836 .features[FEAT_8000_0001_ECX] =
1837 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1838 .xlevel = 0x80000008,
1839 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1840 },
1841 {
1842 .name = "Opteron_G3",
1843 .level = 5,
1844 .vendor = CPUID_VENDOR_AMD,
1845 .family = 16,
1846 .model = 2,
1847 .stepping = 3,
1848 .features[FEAT_1_EDX] =
1849 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1850 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1851 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1852 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1853 CPUID_DE | CPUID_FP87,
1854 .features[FEAT_1_ECX] =
1855 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1856 CPUID_EXT_SSE3,
1857 /* Missing: CPUID_EXT2_RDTSCP */
1858 .features[FEAT_8000_0001_EDX] =
1859 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1860 .features[FEAT_8000_0001_ECX] =
1861 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1862 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1863 .xlevel = 0x80000008,
1864 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1865 },
1866 {
1867 .name = "Opteron_G4",
1868 .level = 0xd,
1869 .vendor = CPUID_VENDOR_AMD,
1870 .family = 21,
1871 .model = 1,
1872 .stepping = 2,
1873 .features[FEAT_1_EDX] =
1874 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1875 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1876 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1877 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1878 CPUID_DE | CPUID_FP87,
1879 .features[FEAT_1_ECX] =
1880 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1881 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1882 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1883 CPUID_EXT_SSE3,
1884 /* Missing: CPUID_EXT2_RDTSCP */
1885 .features[FEAT_8000_0001_EDX] =
1886 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1887 CPUID_EXT2_SYSCALL,
1888 .features[FEAT_8000_0001_ECX] =
1889 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1890 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1891 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1892 CPUID_EXT3_LAHF_LM,
1893 /* no xsaveopt! */
1894 .xlevel = 0x8000001A,
1895 .model_id = "AMD Opteron 62xx class CPU",
1896 },
1897 {
1898 .name = "Opteron_G5",
1899 .level = 0xd,
1900 .vendor = CPUID_VENDOR_AMD,
1901 .family = 21,
1902 .model = 2,
1903 .stepping = 0,
1904 .features[FEAT_1_EDX] =
1905 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1906 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1907 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1908 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1909 CPUID_DE | CPUID_FP87,
1910 .features[FEAT_1_ECX] =
1911 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1912 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1913 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1914 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1915 /* Missing: CPUID_EXT2_RDTSCP */
1916 .features[FEAT_8000_0001_EDX] =
1917 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1918 CPUID_EXT2_SYSCALL,
1919 .features[FEAT_8000_0001_ECX] =
1920 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1921 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1922 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1923 CPUID_EXT3_LAHF_LM,
1924 /* no xsaveopt! */
1925 .xlevel = 0x8000001A,
1926 .model_id = "AMD Opteron 63xx class CPU",
1927 },
1928 {
1929 .name = "EPYC",
1930 .level = 0xd,
1931 .vendor = CPUID_VENDOR_AMD,
1932 .family = 23,
1933 .model = 1,
1934 .stepping = 2,
1935 .features[FEAT_1_EDX] =
1936 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
1937 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
1938 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
1939 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
1940 CPUID_VME | CPUID_FP87,
1941 .features[FEAT_1_ECX] =
1942 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
1943 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
1944 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1945 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
1946 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1947 .features[FEAT_8000_0001_EDX] =
1948 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
1949 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
1950 CPUID_EXT2_SYSCALL,
1951 .features[FEAT_8000_0001_ECX] =
1952 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
1953 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
1954 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1955 .features[FEAT_7_0_EBX] =
1956 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
1957 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
1958 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
1959 CPUID_7_0_EBX_SHA_NI,
1960 /* Missing: XSAVES (not supported by some Linux versions,
1961 * including v4.1 to v4.12).
1962 * KVM doesn't yet expose any XSAVES state save component.
1963 */
1964 .features[FEAT_XSAVE] =
1965 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1966 CPUID_XSAVE_XGETBV1,
1967 .features[FEAT_6_EAX] =
1968 CPUID_6_EAX_ARAT,
1969 .xlevel = 0x8000000A,
1970 .model_id = "AMD EPYC Processor",
1971 },
1972 {
1973 .name = "EPYC-IBPB",
1974 .level = 0xd,
1975 .vendor = CPUID_VENDOR_AMD,
1976 .family = 23,
1977 .model = 1,
1978 .stepping = 2,
1979 .features[FEAT_1_EDX] =
1980 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
1981 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
1982 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
1983 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
1984 CPUID_VME | CPUID_FP87,
1985 .features[FEAT_1_ECX] =
1986 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
1987 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
1988 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1989 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
1990 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1991 .features[FEAT_8000_0001_EDX] =
1992 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
1993 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
1994 CPUID_EXT2_SYSCALL,
1995 .features[FEAT_8000_0001_ECX] =
1996 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
1997 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
1998 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1999 .features[FEAT_8000_0008_EBX] =
2000 CPUID_8000_0008_EBX_IBPB,
2001 .features[FEAT_7_0_EBX] =
2002 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2003 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2004 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2005 CPUID_7_0_EBX_SHA_NI,
2006 /* Missing: XSAVES (not supported by some Linux versions,
2007 * including v4.1 to v4.12).
2008 * KVM doesn't yet expose any XSAVES state save component.
2009 */
2010 .features[FEAT_XSAVE] =
2011 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2012 CPUID_XSAVE_XGETBV1,
2013 .features[FEAT_6_EAX] =
2014 CPUID_6_EAX_ARAT,
2015 .xlevel = 0x8000000A,
2016 .model_id = "AMD EPYC Processor (with IBPB)",
2017 },
2018 };
2019
2020 typedef struct PropValue {
2021 const char *prop, *value;
2022 } PropValue;
2023
2024 /* KVM-specific features that are automatically added/removed
2025 * from all CPU models when KVM is enabled.
2026 */
2027 static PropValue kvm_default_props[] = {
2028 { "kvmclock", "on" },
2029 { "kvm-nopiodelay", "on" },
2030 { "kvm-asyncpf", "on" },
2031 { "kvm-steal-time", "on" },
2032 { "kvm-pv-eoi", "on" },
2033 { "kvmclock-stable-bit", "on" },
2034 { "x2apic", "on" },
2035 { "acpi", "off" },
2036 { "monitor", "off" },
2037 { "svm", "off" },
2038 { NULL, NULL },
2039 };
2040
2041 /* TCG-specific defaults that override all CPU models when using TCG
2042 */
2043 static PropValue tcg_default_props[] = {
2044 { "vme", "off" },
2045 { NULL, NULL },
2046 };
2047
2048
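/* Change the default value of a property in kvm_default_props.
 * Callers may only name properties that are already present in that
 * table (enforced by the assert below); for example, x86_cpu_load_def()
 * uses this to turn "x2apic" off when no in-kernel irqchip is available.
 */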
2049 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2050 {
2051 PropValue *pv;
2052 for (pv = kvm_default_props; pv->prop; pv++) {
2053 if (!strcmp(pv->prop, prop)) {
2054 pv->value = value;
2055 break;
2056 }
2057 }
2058
2059 /* It is valid to call this function only for properties that
2060 * are already present in the kvm_default_props table.
2061 */
2062 assert(pv->prop);
2063 }
2064
2065 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2066 bool migratable_only);
2067
2068 static bool lmce_supported(void)
2069 {
2070 uint64_t mce_cap = 0;
2071
2072 #ifdef CONFIG_KVM
2073 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2074 return false;
2075 }
2076 #endif
2077
2078 return !!(mce_cap & MCG_LMCE_P);
2079 }
2080
2081 #define CPUID_MODEL_ID_SZ 48
2082
2083 /**
2084 * cpu_x86_fill_model_id:
2085 * Get CPUID model ID string from host CPU.
2086 *
2087 * @str should have at least CPUID_MODEL_ID_SZ bytes
2088 *
2089 * The function does NOT add a null terminator to the string
2090 * automatically.
2091 */
2092 static int cpu_x86_fill_model_id(char *str)
2093 {
2094 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2095 int i;
2096
2097 for (i = 0; i < 3; i++) {
2098 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2099 memcpy(str + i * 16 + 0, &eax, 4);
2100 memcpy(str + i * 16 + 4, &ebx, 4);
2101 memcpy(str + i * 16 + 8, &ecx, 4);
2102 memcpy(str + i * 16 + 12, &edx, 4);
2103 }
2104 return 0;
2105 }
2106
2107 static Property max_x86_cpu_properties[] = {
2108 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2109 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2110 DEFINE_PROP_END_OF_LIST()
2111 };
2112
2113 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2114 {
2115 DeviceClass *dc = DEVICE_CLASS(oc);
2116 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2117
2118 xcc->ordering = 9;
2119
2120 xcc->model_description =
2121 "Enables all features supported by the accelerator in the current host";
2122
2123 dc->props = max_x86_cpu_properties;
2124 }
2125
2126 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2127
2128 static void max_x86_cpu_initfn(Object *obj)
2129 {
2130 X86CPU *cpu = X86_CPU(obj);
2131 CPUX86State *env = &cpu->env;
2132 KVMState *s = kvm_state;
2133
2134 /* We can't fill the features array here because we don't know yet if
2135 * "migratable" is true or false.
2136 */
2137 cpu->max_features = true;
2138
2139 if (accel_uses_host_cpuid()) {
2140 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2141 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2142 int family, model, stepping;
2143 X86CPUDefinition host_cpudef = { };
2144 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2145
2146 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2147 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2148
2149 host_vendor_fms(vendor, &family, &model, &stepping);
2150
2151 cpu_x86_fill_model_id(model_id);
2152
2153 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2154 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2155 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2156 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2157 &error_abort);
2158 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2159 &error_abort);
2160
2161 if (kvm_enabled()) {
2162 env->cpuid_min_level =
2163 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2164 env->cpuid_min_xlevel =
2165 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2166 env->cpuid_min_xlevel2 =
2167 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2168 } else {
2169 env->cpuid_min_level =
2170 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2171 env->cpuid_min_xlevel =
2172 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2173 env->cpuid_min_xlevel2 =
2174 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2175 }
2176
2177 if (lmce_supported()) {
2178 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2179 }
2180 } else {
2181 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2182 "vendor", &error_abort);
2183 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2184 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2185 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2186 object_property_set_str(OBJECT(cpu),
2187 "QEMU TCG CPU version " QEMU_HW_VERSION,
2188 "model-id", &error_abort);
2189 }
2190
2191 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2192 }
2193
2194 static const TypeInfo max_x86_cpu_type_info = {
2195 .name = X86_CPU_TYPE_NAME("max"),
2196 .parent = TYPE_X86_CPU,
2197 .instance_init = max_x86_cpu_initfn,
2198 .class_init = max_x86_cpu_class_init,
2199 };
2200
2201 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2202 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2203 {
2204 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2205
2206 xcc->host_cpuid_required = true;
2207 xcc->ordering = 8;
2208
2209 if (kvm_enabled()) {
2210 xcc->model_description =
2211 "KVM processor with all supported host features ";
2212 } else if (hvf_enabled()) {
2213 xcc->model_description =
2214 "HVF processor with all supported host features ";
2215 }
2216 }
2217
2218 static const TypeInfo host_x86_cpu_type_info = {
2219 .name = X86_CPU_TYPE_NAME("host"),
2220 .parent = X86_CPU_TYPE_NAME("max"),
2221 .class_init = host_x86_cpu_class_init,
2222 };
2223
2224 #endif
2225
2226 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2227 {
2228 FeatureWordInfo *f = &feature_word_info[w];
2229 int i;
2230
2231 for (i = 0; i < 32; ++i) {
2232 if ((1UL << i) & mask) {
2233 const char *reg = get_register_name_32(f->cpuid_reg);
2234 assert(reg);
2235 warn_report("%s doesn't support requested feature: "
2236 "CPUID.%02XH:%s%s%s [bit %d]",
2237 accel_uses_host_cpuid() ? "host" : "TCG",
2238 f->cpuid_eax, reg,
2239 f->feat_names[i] ? "." : "",
2240 f->feat_names[i] ? f->feat_names[i] : "", i);
2241 }
2242 }
2243 }
2244
2245 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2246 const char *name, void *opaque,
2247 Error **errp)
2248 {
2249 X86CPU *cpu = X86_CPU(obj);
2250 CPUX86State *env = &cpu->env;
2251 int64_t value;
2252
2253 value = (env->cpuid_version >> 8) & 0xf;
2254 if (value == 0xf) {
2255 value += (env->cpuid_version >> 20) & 0xff;
2256 }
2257 visit_type_int(v, name, &value, errp);
2258 }
2259
2260 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2261 const char *name, void *opaque,
2262 Error **errp)
2263 {
2264 X86CPU *cpu = X86_CPU(obj);
2265 CPUX86State *env = &cpu->env;
2266 const int64_t min = 0;
2267 const int64_t max = 0xff + 0xf;
2268 Error *local_err = NULL;
2269 int64_t value;
2270
2271 visit_type_int(v, name, &value, &local_err);
2272 if (local_err) {
2273 error_propagate(errp, local_err);
2274 return;
2275 }
2276 if (value < min || value > max) {
2277 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2278 name ? name : "null", value, min, max);
2279 return;
2280 }
2281
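/* CPUID[1].EAX keeps the base family in bits 11..8 and the extended
 * family in bits 27..20; families above 0x0f are encoded as 0x0f plus
 * an extended-family offset, mirroring the getter above.
 */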
2282 env->cpuid_version &= ~0xff00f00;
2283 if (value > 0x0f) {
2284 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2285 } else {
2286 env->cpuid_version |= value << 8;
2287 }
2288 }
2289
2290 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2291 const char *name, void *opaque,
2292 Error **errp)
2293 {
2294 X86CPU *cpu = X86_CPU(obj);
2295 CPUX86State *env = &cpu->env;
2296 int64_t value;
2297
2298 value = (env->cpuid_version >> 4) & 0xf;
2299 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2300 visit_type_int(v, name, &value, errp);
2301 }
2302
2303 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2304 const char *name, void *opaque,
2305 Error **errp)
2306 {
2307 X86CPU *cpu = X86_CPU(obj);
2308 CPUX86State *env = &cpu->env;
2309 const int64_t min = 0;
2310 const int64_t max = 0xff;
2311 Error *local_err = NULL;
2312 int64_t value;
2313
2314 visit_type_int(v, name, &value, &local_err);
2315 if (local_err) {
2316 error_propagate(errp, local_err);
2317 return;
2318 }
2319 if (value < min || value > max) {
2320 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2321 name ? name : "null", value, min, max);
2322 return;
2323 }
2324
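/* CPUID[1].EAX keeps the base model in bits 7..4 and the extended
 * model in bits 19..16; the low and high nibbles of the property value
 * are split across those two fields.
 */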
2325 env->cpuid_version &= ~0xf00f0;
2326 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2327 }
2328
2329 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2330 const char *name, void *opaque,
2331 Error **errp)
2332 {
2333 X86CPU *cpu = X86_CPU(obj);
2334 CPUX86State *env = &cpu->env;
2335 int64_t value;
2336
2337 value = env->cpuid_version & 0xf;
2338 visit_type_int(v, name, &value, errp);
2339 }
2340
2341 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2342 const char *name, void *opaque,
2343 Error **errp)
2344 {
2345 X86CPU *cpu = X86_CPU(obj);
2346 CPUX86State *env = &cpu->env;
2347 const int64_t min = 0;
2348 const int64_t max = 0xf;
2349 Error *local_err = NULL;
2350 int64_t value;
2351
2352 visit_type_int(v, name, &value, &local_err);
2353 if (local_err) {
2354 error_propagate(errp, local_err);
2355 return;
2356 }
2357 if (value < min || value > max) {
2358 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2359 name ? name : "null", value, min, max);
2360 return;
2361 }
2362
2363 env->cpuid_version &= ~0xf;
2364 env->cpuid_version |= value & 0xf;
2365 }
2366
2367 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2368 {
2369 X86CPU *cpu = X86_CPU(obj);
2370 CPUX86State *env = &cpu->env;
2371 char *value;
2372
2373 value = g_malloc(CPUID_VENDOR_SZ + 1);
2374 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2375 env->cpuid_vendor3);
2376 return value;
2377 }
2378
2379 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2380 Error **errp)
2381 {
2382 X86CPU *cpu = X86_CPU(obj);
2383 CPUX86State *env = &cpu->env;
2384 int i;
2385
2386 if (strlen(value) != CPUID_VENDOR_SZ) {
2387 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2388 return;
2389 }
2390
2391 env->cpuid_vendor1 = 0;
2392 env->cpuid_vendor2 = 0;
2393 env->cpuid_vendor3 = 0;
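/* Pack the 12-character vendor string little-endian, four bytes per
 * register, matching the EBX/EDX/ECX layout returned for CPUID leaf 0.
 */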
2394 for (i = 0; i < 4; i++) {
2395 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
2396 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2397 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
2398 }
2399 }
2400
2401 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2402 {
2403 X86CPU *cpu = X86_CPU(obj);
2404 CPUX86State *env = &cpu->env;
2405 char *value;
2406 int i;
2407
2408 value = g_malloc(48 + 1);
2409 for (i = 0; i < 48; i++) {
2410 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
2411 }
2412 value[48] = '\0';
2413 return value;
2414 }
2415
2416 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2417 Error **errp)
2418 {
2419 X86CPU *cpu = X86_CPU(obj);
2420 CPUX86State *env = &cpu->env;
2421 int c, len, i;
2422
2423 if (model_id == NULL) {
2424 model_id = "";
2425 }
2426 len = strlen(model_id);
2427 memset(env->cpuid_model, 0, 48);
2428 for (i = 0; i < 48; i++) {
2429 if (i >= len) {
2430 c = '\0';
2431 } else {
2432 c = (uint8_t)model_id[i];
2433 }
2434 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
2435 }
2436 }
2437
2438 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2439 void *opaque, Error **errp)
2440 {
2441 X86CPU *cpu = X86_CPU(obj);
2442 int64_t value;
2443
2444 value = cpu->env.tsc_khz * 1000;
2445 visit_type_int(v, name, &value, errp);
2446 }
2447
2448 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2449 void *opaque, Error **errp)
2450 {
2451 X86CPU *cpu = X86_CPU(obj);
2452 const int64_t min = 0;
2453 const int64_t max = INT64_MAX;
2454 Error *local_err = NULL;
2455 int64_t value;
2456
2457 visit_type_int(v, name, &value, &local_err);
2458 if (local_err) {
2459 error_propagate(errp, local_err);
2460 return;
2461 }
2462 if (value < min || value > max) {
2463 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2464 name ? name : "null", value, min, max);
2465 return;
2466 }
2467
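/* The property value is given in Hz; env->tsc_khz stores kHz. */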
2468 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2469 }
2470
2471 /* Generic getter for "feature-words" and "filtered-features" properties */
2472 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2473 const char *name, void *opaque,
2474 Error **errp)
2475 {
2476 uint32_t *array = (uint32_t *)opaque;
2477 FeatureWord w;
2478 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2479 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2480 X86CPUFeatureWordInfoList *list = NULL;
2481
2482 for (w = 0; w < FEATURE_WORDS; w++) {
2483 FeatureWordInfo *wi = &feature_word_info[w];
2484 X86CPUFeatureWordInfo *qwi = &word_infos[w];
2485 qwi->cpuid_input_eax = wi->cpuid_eax;
2486 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2487 qwi->cpuid_input_ecx = wi->cpuid_ecx;
2488 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2489 qwi->features = array[w];
2490
2491 /* List will be in reverse order, but order shouldn't matter */
2492 list_entries[w].next = list;
2493 list_entries[w].value = &word_infos[w];
2494 list = &list_entries[w];
2495 }
2496
2497 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
2498 }
2499
2500 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2501 void *opaque, Error **errp)
2502 {
2503 X86CPU *cpu = X86_CPU(obj);
2504 int64_t value = cpu->hyperv_spinlock_attempts;
2505
2506 visit_type_int(v, name, &value, errp);
2507 }
2508
2509 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2510 void *opaque, Error **errp)
2511 {
2512 const int64_t min = 0xFFF;
2513 const int64_t max = UINT_MAX;
2514 X86CPU *cpu = X86_CPU(obj);
2515 Error *err = NULL;
2516 int64_t value;
2517
2518 visit_type_int(v, name, &value, &err);
2519 if (err) {
2520 error_propagate(errp, err);
2521 return;
2522 }
2523
2524 if (value < min || value > max) {
2525 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2526 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2527 object_get_typename(obj), name ? name : "null",
2528 value, min, max);
2529 return;
2530 }
2531 cpu->hyperv_spinlock_attempts = value;
2532 }
2533
2534 static const PropertyInfo qdev_prop_spinlocks = {
2535 .name = "int",
2536 .get = x86_get_hv_spinlocks,
2537 .set = x86_set_hv_spinlocks,
2538 };
2539
2540 /* Convert all '_' in a feature string option name to '-', to make the
2541 * feature name conform to the QOM property naming rule, which uses '-' instead of '_'.
2542 */
2543 static inline void feat2prop(char *s)
2544 {
2545 while ((s = strchr(s, '_'))) {
2546 *s = '-';
2547 }
2548 }
2549
2550 /* Return the feature property name for a feature flag bit */
2551 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2552 {
2553 /* XSAVE components are automatically enabled by other features,
2554 * so return the original feature name instead
2555 */
2556 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2557 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2558
2559 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2560 x86_ext_save_areas[comp].bits) {
2561 w = x86_ext_save_areas[comp].feature;
2562 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2563 }
2564 }
2565
2566 assert(bitnr < 32);
2567 assert(w < FEATURE_WORDS);
2568 return feature_word_info[w].feat_names[bitnr];
2569 }
2570
2571 /* Compatibility hack to maintain the legacy +feat/-feat semantics,
2572 * where +feat/-feat overrides any feature set by
2573 * feat=on|feat, even if the latter is parsed after +feat/-feat
2574 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2575 */
2576 static GList *plus_features, *minus_features;
2577
2578 static gint compare_string(gconstpointer a, gconstpointer b)
2579 {
2580 return g_strcmp0(a, b);
2581 }
2582
2583 /* Parse "+feature,-feature,feature=foo" CPU feature string
2584 */
2585 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2586 Error **errp)
2587 {
2588 char *featurestr; /* Single "key=value" string being parsed */
2589 static bool cpu_globals_initialized;
2590 bool ambiguous = false;
2591
2592 if (cpu_globals_initialized) {
2593 return;
2594 }
2595 cpu_globals_initialized = true;
2596
2597 if (!features) {
2598 return;
2599 }
2600
2601 for (featurestr = strtok(features, ",");
2602 featurestr;
2603 featurestr = strtok(NULL, ",")) {
2604 const char *name;
2605 const char *val = NULL;
2606 char *eq = NULL;
2607 char num[32];
2608 GlobalProperty *prop;
2609
2610 /* Compatibility syntax: */
2611 if (featurestr[0] == '+') {
2612 plus_features = g_list_append(plus_features,
2613 g_strdup(featurestr + 1));
2614 continue;
2615 } else if (featurestr[0] == '-') {
2616 minus_features = g_list_append(minus_features,
2617 g_strdup(featurestr + 1));
2618 continue;
2619 }
2620
2621 eq = strchr(featurestr, '=');
2622 if (eq) {
2623 *eq++ = 0;
2624 val = eq;
2625 } else {
2626 val = "on";
2627 }
2628
2629 feat2prop(featurestr);
2630 name = featurestr;
2631
2632 if (g_list_find_custom(plus_features, name, compare_string)) {
2633 warn_report("Ambiguous CPU model string. "
2634 "Don't mix both \"+%s\" and \"%s=%s\"",
2635 name, name, val);
2636 ambiguous = true;
2637 }
2638 if (g_list_find_custom(minus_features, name, compare_string)) {
2639 warn_report("Ambiguous CPU model string. "
2640 "Don't mix both \"-%s\" and \"%s=%s\"",
2641 name, name, val);
2642 ambiguous = true;
2643 }
2644
2645 /* Special case: */
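/* "tsc-freq" values may carry metric suffixes (parsed below with
 * qemu_strtosz_metric) and are converted to a plain Hz number for the
 * "tsc-frequency" property.
 */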
2646 if (!strcmp(name, "tsc-freq")) {
2647 int ret;
2648 uint64_t tsc_freq;
2649
2650 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
2651 if (ret < 0 || tsc_freq > INT64_MAX) {
2652 error_setg(errp, "bad numerical value %s", val);
2653 return;
2654 }
2655 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2656 val = num;
2657 name = "tsc-frequency";
2658 }
2659
2660 prop = g_new0(typeof(*prop), 1);
2661 prop->driver = typename;
2662 prop->property = g_strdup(name);
2663 prop->value = g_strdup(val);
2664 prop->errp = &error_fatal;
2665 qdev_prop_register_global(prop);
2666 }
2667
2668 if (ambiguous) {
2669 warn_report("Compatibility of ambiguous CPU model "
2670 "strings won't be kept on future QEMU versions");
2671 }
2672 }
2673
2674 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2675 static int x86_cpu_filter_features(X86CPU *cpu);
2676
2677 /* Check for missing features that may prevent the CPU class from
2678 * running using the current machine and accelerator.
2679 */
2680 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2681 strList **missing_feats)
2682 {
2683 X86CPU *xc;
2684 FeatureWord w;
2685 Error *err = NULL;
2686 strList **next = missing_feats;
2687
2688 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
2689 strList *new = g_new0(strList, 1);
2690 new->value = g_strdup("kvm");
2691 *missing_feats = new;
2692 return;
2693 }
2694
2695 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2696
2697 x86_cpu_expand_features(xc, &err);
2698 if (err) {
2699 /* Errors at x86_cpu_expand_features should never happen,
2700 * but in case they do, just report the model as not
2701 * runnable at all using the "type" property.
2702 */
2703 strList *new = g_new0(strList, 1);
2704 new->value = g_strdup("type");
2705 *next = new;
2706 next = &new->next;
2707 }
2708
2709 x86_cpu_filter_features(xc);
2710
2711 for (w = 0; w < FEATURE_WORDS; w++) {
2712 uint32_t filtered = xc->filtered_features[w];
2713 int i;
2714 for (i = 0; i < 32; i++) {
2715 if (filtered & (1UL << i)) {
2716 strList *new = g_new0(strList, 1);
2717 new->value = g_strdup(x86_cpu_feature_name(w, i));
2718 *next = new;
2719 next = &new->next;
2720 }
2721 }
2722 }
2723
2724 object_unref(OBJECT(xc));
2725 }
2726
2727 /* Print all CPUID feature names in @featureset
2728 */
2729 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2730 {
2731 int bit;
2732 bool first = true;
2733
2734 for (bit = 0; bit < 32; bit++) {
2735 if (featureset[bit]) {
2736 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2737 first = false;
2738 }
2739 }
2740 }
2741
2742 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2743 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2744 {
2745 ObjectClass *class_a = (ObjectClass *)a;
2746 ObjectClass *class_b = (ObjectClass *)b;
2747 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2748 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2749 const char *name_a, *name_b;
2750
2751 if (cc_a->ordering != cc_b->ordering) {
2752 return cc_a->ordering - cc_b->ordering;
2753 } else {
2754 name_a = object_class_get_name(class_a);
2755 name_b = object_class_get_name(class_b);
2756 return strcmp(name_a, name_b);
2757 }
2758 }
2759
2760 static GSList *get_sorted_cpu_model_list(void)
2761 {
2762 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2763 list = g_slist_sort(list, x86_cpu_list_compare);
2764 return list;
2765 }
2766
2767 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2768 {
2769 ObjectClass *oc = data;
2770 X86CPUClass *cc = X86_CPU_CLASS(oc);
2771 CPUListState *s = user_data;
2772 char *name = x86_cpu_class_get_model_name(cc);
2773 const char *desc = cc->model_description;
2774 if (!desc && cc->cpu_def) {
2775 desc = cc->cpu_def->model_id;
2776 }
2777
2778 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2779 name, desc);
2780 g_free(name);
2781 }
2782
2783 /* list available CPU models and flags */
2784 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2785 {
2786 int i;
2787 CPUListState s = {
2788 .file = f,
2789 .cpu_fprintf = cpu_fprintf,
2790 };
2791 GSList *list;
2792
2793 (*cpu_fprintf)(f, "Available CPUs:\n");
2794 list = get_sorted_cpu_model_list();
2795 g_slist_foreach(list, x86_cpu_list_entry, &s);
2796 g_slist_free(list);
2797
2798 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2799 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2800 FeatureWordInfo *fw = &feature_word_info[i];
2801
2802 (*cpu_fprintf)(f, " ");
2803 listflags(f, cpu_fprintf, fw->feat_names);
2804 (*cpu_fprintf)(f, "\n");
2805 }
2806 }
2807
2808 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2809 {
2810 ObjectClass *oc = data;
2811 X86CPUClass *cc = X86_CPU_CLASS(oc);
2812 CpuDefinitionInfoList **cpu_list = user_data;
2813 CpuDefinitionInfoList *entry;
2814 CpuDefinitionInfo *info;
2815
2816 info = g_malloc0(sizeof(*info));
2817 info->name = x86_cpu_class_get_model_name(cc);
2818 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2819 info->has_unavailable_features = true;
2820 info->q_typename = g_strdup(object_class_get_name(oc));
2821 info->migration_safe = cc->migration_safe;
2822 info->has_migration_safe = true;
2823 info->q_static = cc->static_model;
2824
2825 entry = g_malloc0(sizeof(*entry));
2826 entry->value = info;
2827 entry->next = *cpu_list;
2828 *cpu_list = entry;
2829 }
2830
2831 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2832 {
2833 CpuDefinitionInfoList *cpu_list = NULL;
2834 GSList *list = get_sorted_cpu_model_list();
2835 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2836 g_slist_free(list);
2837 return cpu_list;
2838 }
2839
2840 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2841 bool migratable_only)
2842 {
2843 FeatureWordInfo *wi = &feature_word_info[w];
2844 uint32_t r;
2845
2846 if (kvm_enabled()) {
2847 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2848 wi->cpuid_ecx,
2849 wi->cpuid_reg);
2850 } else if (hvf_enabled()) {
2851 r = hvf_get_supported_cpuid(wi->cpuid_eax,
2852 wi->cpuid_ecx,
2853 wi->cpuid_reg);
2854 } else if (tcg_enabled()) {
2855 r = wi->tcg_features;
2856 } else {
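/* No KVM, HVF or TCG: nothing to filter against, so report every
 * feature as supported.
 */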
2857 return ~0;
2858 }
2859 if (migratable_only) {
2860 r &= x86_cpu_get_migratable_flags(w);
2861 }
2862 return r;
2863 }
2864
2865 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2866 {
2867 FeatureWord w;
2868
2869 for (w = 0; w < FEATURE_WORDS; w++) {
2870 report_unavailable_features(w, cpu->filtered_features[w]);
2871 }
2872 }
2873
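/* Apply a NULL-terminated table of PropValue entries to @cpu, skipping
 * entries with a NULL value; remaining values are parsed with
 * &error_abort, so a bad table entry is fatal.
 */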
2874 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2875 {
2876 PropValue *pv;
2877 for (pv = props; pv->prop; pv++) {
2878 if (!pv->value) {
2879 continue;
2880 }
2881 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2882 &error_abort);
2883 }
2884 }
2885
2886 /* Load data from an X86CPUDefinition into an X86CPU object
2887 */
2888 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2889 {
2890 CPUX86State *env = &cpu->env;
2891 const char *vendor;
2892 char host_vendor[CPUID_VENDOR_SZ + 1];
2893 FeatureWord w;
2894
2895 /* NOTE: any property set by this function should be returned by
2896 * x86_cpu_static_props(), so static expansion of
2897 * query-cpu-model-expansion is always complete.
2898 */
2899
2900 /* CPU models only set _minimum_ values for level/xlevel: */
2901 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
2902 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2903
2904 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2905 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2906 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2907 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2908 for (w = 0; w < FEATURE_WORDS; w++) {
2909 env->features[w] = def->features[w];
2910 }
2911
2912 /* Special cases not set in the X86CPUDefinition structs: */
2913 /* TODO: in-kernel irqchip for hvf */
2914 if (kvm_enabled()) {
2915 if (!kvm_irqchip_in_kernel()) {
2916 x86_cpu_change_kvm_default("x2apic", "off");
2917 }
2918
2919 x86_cpu_apply_props(cpu, kvm_default_props);
2920 } else if (tcg_enabled()) {
2921 x86_cpu_apply_props(cpu, tcg_default_props);
2922 }
2923
2924 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2925
2926 /* sysenter isn't supported in compatibility mode on AMD,
2927 * syscall isn't supported in compatibility mode on Intel.
2928 * Normally we advertise the actual CPU vendor, but you can
2929 * override this using the 'vendor' property if you want to use
2930 * KVM's sysenter/syscall emulation in compatibility mode and
2931 * when doing cross-vendor migration.
2932 */
2933 vendor = def->vendor;
2934 if (accel_uses_host_cpuid()) {
2935 uint32_t ebx = 0, ecx = 0, edx = 0;
2936 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2937 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2938 vendor = host_vendor;
2939 }
2940
2941 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2942
2943 }
2944
2945 /* Return a QDict containing keys for all properties that can be included
2946 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2947 * must be included in the dictionary.
2948 */
2949 static QDict *x86_cpu_static_props(void)
2950 {
2951 FeatureWord w;
2952 int i;
2953 static const char *props[] = {
2954 "min-level",
2955 "min-xlevel",
2956 "family",
2957 "model",
2958 "stepping",
2959 "model-id",
2960 "vendor",
2961 "lmce",
2962 NULL,
2963 };
2964 static QDict *d;
2965
2966 if (d) {
2967 return d;
2968 }
2969
2970 d = qdict_new();
2971 for (i = 0; props[i]; i++) {
2972 qdict_put_null(d, props[i]);
2973 }
2974
2975 for (w = 0; w < FEATURE_WORDS; w++) {
2976 FeatureWordInfo *fi = &feature_word_info[w];
2977 int bit;
2978 for (bit = 0; bit < 32; bit++) {
2979 if (!fi->feat_names[bit]) {
2980 continue;
2981 }
2982 qdict_put_null(d, fi->feat_names[bit]);
2983 }
2984 }
2985
2986 return d;
2987 }
2988
2989 /* Add an entry to @props dict, with the value for property. */
2990 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
2991 {
2992 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
2993 &error_abort);
2994
2995 qdict_put_obj(props, prop, value);
2996 }
2997
2998 /* Convert CPU model data from X86CPU object to a property dictionary
2999 * that can recreate exactly the same CPU model.
3000 */
3001 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3002 {
3003 QDict *sprops = x86_cpu_static_props();
3004 const QDictEntry *e;
3005
3006 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3007 const char *prop = qdict_entry_key(e);
3008 x86_cpu_expand_prop(cpu, props, prop);
3009 }
3010 }
3011
3012 /* Convert CPU model data from X86CPU object to a property dictionary
3013 * that can recreate exactly the same CPU model, including every
3014 * writeable QOM property.
3015 */
3016 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3017 {
3018 ObjectPropertyIterator iter;
3019 ObjectProperty *prop;
3020
3021 object_property_iter_init(&iter, OBJECT(cpu));
3022 while ((prop = object_property_iter_next(&iter))) {
3023 /* skip read-only or write-only properties */
3024 if (!prop->get || !prop->set) {
3025 continue;
3026 }
3027
3028 /* "hotplugged" is the only property that is configurable
3029 * on the command-line but will be set differently on CPUs
3030 * created using "-cpu ... -smp ..." and by CPUs created
3031 * on the fly by x86_cpu_from_model() for querying. Skip it.
3032 */
3033 if (!strcmp(prop->name, "hotplugged")) {
3034 continue;
3035 }
3036 x86_cpu_expand_prop(cpu, props, prop->name);
3037 }
3038 }
3039
3040 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3041 {
3042 const QDictEntry *prop;
3043 Error *err = NULL;
3044
3045 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3046 object_property_set_qobject(obj, qdict_entry_value(prop),
3047 qdict_entry_key(prop), &err);
3048 if (err) {
3049 break;
3050 }
3051 }
3052
3053 error_propagate(errp, err);
3054 }
3055
3056 /* Create X86CPU object according to model+props specification */
3057 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3058 {
3059 X86CPU *xc = NULL;
3060 X86CPUClass *xcc;
3061 Error *err = NULL;
3062
3063 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3064 if (xcc == NULL) {
3065 error_setg(&err, "CPU model '%s' not found", model);
3066 goto out;
3067 }
3068
3069 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3070 if (props) {
3071 object_apply_props(OBJECT(xc), props, &err);
3072 if (err) {
3073 goto out;
3074 }
3075 }
3076
3077 x86_cpu_expand_features(xc, &err);
3078 if (err) {
3079 goto out;
3080 }
3081
3082 out:
3083 if (err) {
3084 error_propagate(errp, err);
3085 object_unref(OBJECT(xc));
3086 xc = NULL;
3087 }
3088 return xc;
3089 }
3090
3091 CpuModelExpansionInfo *
3092 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3093 CpuModelInfo *model,
3094 Error **errp)
3095 {
3096 X86CPU *xc = NULL;
3097 Error *err = NULL;
3098 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3099 QDict *props = NULL;
3100 const char *base_name;
3101
3102 xc = x86_cpu_from_model(model->name,
3103 model->has_props ?
3104 qobject_to_qdict(model->props) :
3105 NULL, &err);
3106 if (err) {
3107 goto out;
3108 }
3109
3110 props = qdict_new();
3111
3112 switch (type) {
3113 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3114 /* Static expansion will be based on "base" only */
3115 base_name = "base";
3116 x86_cpu_to_dict(xc, props);
3117 break;
3118 case CPU_MODEL_EXPANSION_TYPE_FULL:
3119 /* As we don't return every single property, full expansion needs
3120 * to keep the original model name+props, and add extra
3121 * properties on top of that.
3122 */
3123 base_name = model->name;
3124 x86_cpu_to_dict_full(xc, props);
3125 break;
3126 default:
3127 error_setg(&err, "Unsupported expansion type");
3128 goto out;
3129 }
3130
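/* Either way, make sure every static property ends up in the result
 * dictionary (full expansion above only adds writable QOM properties).
 */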
3131 if (!props) {
3132 props = qdict_new();
3133 }
3134 x86_cpu_to_dict(xc, props);
3135
3136 ret->model = g_new0(CpuModelInfo, 1);
3137 ret->model->name = g_strdup(base_name);
3138 ret->model->props = QOBJECT(props);
3139 ret->model->has_props = true;
3140
3141 out:
3142 object_unref(OBJECT(xc));
3143 if (err) {
3144 error_propagate(errp, err);
3145 qapi_free_CpuModelExpansionInfo(ret);
3146 ret = NULL;
3147 }
3148 return ret;
3149 }
3150
3151 static gchar *x86_gdb_arch_name(CPUState *cs)
3152 {
3153 #ifdef TARGET_X86_64
3154 return g_strdup("i386:x86-64");
3155 #else
3156 return g_strdup("i386");
3157 #endif
3158 }
3159
3160 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3161 {
3162 X86CPUDefinition *cpudef = data;
3163 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3164
3165 xcc->cpu_def = cpudef;
3166 xcc->migration_safe = true;
3167 }
3168
3169 static void x86_register_cpudef_type(X86CPUDefinition *def)
3170 {
3171 char *typename = x86_cpu_type_name(def->name);
3172 TypeInfo ti = {
3173 .name = typename,
3174 .parent = TYPE_X86_CPU,
3175 .class_init = x86_cpu_cpudef_class_init,
3176 .class_data = def,
3177 };
3178
3179 /* AMD aliases are handled at runtime based on CPUID vendor, so
3180 * they shouldn't be set on the CPU model table.
3181 */
3182 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3183 /* catch mistakes instead of silently truncating model_id when too long */
3184 assert(def->model_id && strlen(def->model_id) <= 48);
3185
3186
3187 type_register(&ti);
3188 g_free(typename);
3189 }
3190
3191 #if !defined(CONFIG_USER_ONLY)
3192
3193 void cpu_clear_apic_feature(CPUX86State *env)
3194 {
3195 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3196 }
3197
3198 #endif /* !CONFIG_USER_ONLY */
3199
3200 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3201 uint32_t *eax, uint32_t *ebx,
3202 uint32_t *ecx, uint32_t *edx)
3203 {
3204 X86CPU *cpu = x86_env_get_cpu(env);
3205 CPUState *cs = CPU(cpu);
3206 uint32_t pkg_offset;
3207 uint32_t limit;
3208 uint32_t signature[3];
3209
3210 /* Calculate & apply limits for different index ranges */
3211 if (index >= 0xC0000000) {
3212 limit = env->cpuid_xlevel2;
3213 } else if (index >= 0x80000000) {
3214 limit = env->cpuid_xlevel;
3215 } else if (index >= 0x40000000) {
3216 limit = 0x40000001;
3217 } else {
3218 limit = env->cpuid_level;
3219 }
3220
3221 if (index > limit) {
3222 /* Intel documentation states that invalid EAX input will
3223 * return the same information as EAX=cpuid_level
3224 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3225 */
3226 index = env->cpuid_level;
3227 }
3228
3229 switch (index) {
3230 case 0:
3231 *eax = env->cpuid_level;
3232 *ebx = env->cpuid_vendor1;
3233 *edx = env->cpuid_vendor2;
3234 *ecx = env->cpuid_vendor3;
3235 break;
3236 case 1:
3237 *eax = env->cpuid_version;
3238 *ebx = (cpu->apic_id << 24) |
3239 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
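/* 8 quad words = 64 bytes, matching the cache line sizes reported in
 * leaves 2 and 4 below.
 */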
3240 *ecx = env->features[FEAT_1_ECX];
3241 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3242 *ecx |= CPUID_EXT_OSXSAVE;
3243 }
3244 *edx = env->features[FEAT_1_EDX];
3245 if (cs->nr_cores * cs->nr_threads > 1) {
3246 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3247 *edx |= CPUID_HT;
3248 }
3249 break;
3250 case 2:
3251 /* cache info: needed for Pentium Pro compatibility */
3252 if (cpu->cache_info_passthrough) {
3253 host_cpuid(index, 0, eax, ebx, ecx, edx);
3254 break;
3255 }
3256 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3257 *ebx = 0;
3258 if (!cpu->enable_l3_cache) {
3259 *ecx = 0;
3260 } else {
3261 *ecx = L3_N_DESCRIPTOR;
3262 }
3263 *edx = (L1D_DESCRIPTOR << 16) | \
3264 (L1I_DESCRIPTOR << 8) | \
3265 (L2_DESCRIPTOR);
3266 break;
3267 case 4:
3268 /* cache info: needed for Core compatibility */
3269 if (cpu->cache_info_passthrough) {
3270 host_cpuid(index, count, eax, ebx, ecx, edx);
3271 *eax &= ~0xFC000000;
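/* Clear bits 31..26 (addressable cores); QEMU's own core topology is
 * added back in after the switch below.
 */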
3272 } else {
3273 *eax = 0;
3274 switch (count) {
3275 case 0: /* L1 dcache info */
3276 *eax |= CPUID_4_TYPE_DCACHE | \
3277 CPUID_4_LEVEL(1) | \
3278 CPUID_4_SELF_INIT_LEVEL;
3279 *ebx = (L1D_LINE_SIZE - 1) | \
3280 ((L1D_PARTITIONS - 1) << 12) | \
3281 ((L1D_ASSOCIATIVITY - 1) << 22);
3282 *ecx = L1D_SETS - 1;
3283 *edx = CPUID_4_NO_INVD_SHARING;
3284 break;
3285 case 1: /* L1 icache info */
3286 *eax |= CPUID_4_TYPE_ICACHE | \
3287 CPUID_4_LEVEL(1) | \
3288 CPUID_4_SELF_INIT_LEVEL;
3289 *ebx = (L1I_LINE_SIZE - 1) | \
3290 ((L1I_PARTITIONS - 1) << 12) | \
3291 ((L1I_ASSOCIATIVITY - 1) << 22);
3292 *ecx = L1I_SETS - 1;
3293 *edx = CPUID_4_NO_INVD_SHARING;
3294 break;
3295 case 2: /* L2 cache info */
3296 *eax |= CPUID_4_TYPE_UNIFIED | \
3297 CPUID_4_LEVEL(2) | \
3298 CPUID_4_SELF_INIT_LEVEL;
3299 if (cs->nr_threads > 1) {
3300 *eax |= (cs->nr_threads - 1) << 14;
3301 }
3302 *ebx = (L2_LINE_SIZE - 1) | \
3303 ((L2_PARTITIONS - 1) << 12) | \
3304 ((L2_ASSOCIATIVITY - 1) << 22);
3305 *ecx = L2_SETS - 1;
3306 *edx = CPUID_4_NO_INVD_SHARING;
3307 break;
3308 case 3: /* L3 cache info */
3309 if (!cpu->enable_l3_cache) {
3310 *eax = 0;
3311 *ebx = 0;
3312 *ecx = 0;
3313 *edx = 0;
3314 break;
3315 }
3316 *eax |= CPUID_4_TYPE_UNIFIED | \
3317 CPUID_4_LEVEL(3) | \
3318 CPUID_4_SELF_INIT_LEVEL;
3319 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3320 *eax |= ((1 << pkg_offset) - 1) << 14;
3321 *ebx = (L3_N_LINE_SIZE - 1) | \
3322 ((L3_N_PARTITIONS - 1) << 12) | \
3323 ((L3_N_ASSOCIATIVITY - 1) << 22);
3324 *ecx = L3_N_SETS - 1;
3325 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
3326 break;
3327 default: /* end of info */
3328 *eax = 0;
3329 *ebx = 0;
3330 *ecx = 0;
3331 *edx = 0;
3332 break;
3333 }
3334 }
3335
3336 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3337 if ((*eax & 31) && cs->nr_cores > 1) {
3338 *eax |= (cs->nr_cores - 1) << 26;
3339 }
3340 break;
3341 case 5:
3342 /* mwait info: needed for Core compatibility */
3343 *eax = 0; /* Smallest monitor-line size in bytes */
3344 *ebx = 0; /* Largest monitor-line size in bytes */
3345 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3346 *edx = 0;
3347 break;
3348 case 6:
3349 /* Thermal and Power Leaf */
3350 *eax = env->features[FEAT_6_EAX];
3351 *ebx = 0;
3352 *ecx = 0;
3353 *edx = 0;
3354 break;
3355 case 7:
3356 /* Structured Extended Feature Flags Enumeration Leaf */
3357 if (count == 0) {
3358 *eax = 0; /* Maximum ECX value for sub-leaves */
3359 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3360 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3361 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3362 *ecx |= CPUID_7_0_ECX_OSPKE;
3363 }
3364 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3365 } else {
3366 *eax = 0;
3367 *ebx = 0;
3368 *ecx = 0;
3369 *edx = 0;
3370 }
3371 break;
3372 case 9:
3373 /* Direct Cache Access Information Leaf */
3374 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3375 *ebx = 0;
3376 *ecx = 0;
3377 *edx = 0;
3378 break;
3379 case 0xA:
3380 /* Architectural Performance Monitoring Leaf */
3381 if (kvm_enabled() && cpu->enable_pmu) {
3382 KVMState *s = cs->kvm_state;
3383
3384 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3385 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3386 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3387 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3388 } else if (hvf_enabled() && cpu->enable_pmu) {
3389 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3390 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3391 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3392 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3393 } else {
3394 *eax = 0;
3395 *ebx = 0;
3396 *ecx = 0;
3397 *edx = 0;
3398 }
3399 break;
3400 case 0xB:
3401 /* Extended Topology Enumeration Leaf */
3402 if (!cpu->enable_cpuid_0xb) {
3403 *eax = *ebx = *ecx = *edx = 0;
3404 break;
3405 }
3406
3407 *ecx = count & 0xff;
3408 *edx = cpu->apic_id;
3409
3410 switch (count) {
3411 case 0:
3412 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3413 *ebx = cs->nr_threads;
3414 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
3415 break;
3416 case 1:
3417 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3418 *ebx = cs->nr_cores * cs->nr_threads;
3419 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3420 break;
3421 default:
3422 *eax = 0;
3423 *ebx = 0;
3424 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3425 }
3426
3427 assert(!(*eax & ~0x1f));
3428 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
3429 break;
3430 case 0xD: {
3431 /* Processor Extended State */
3432 *eax = 0;
3433 *ebx = 0;
3434 *ecx = 0;
3435 *edx = 0;
3436 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3437 break;
3438 }
3439
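/* Sub-leaf 0 reports the enabled XSAVE components and the total area
 * size, sub-leaf 1 the XSAVE extension flags, and higher sub-leaves the
 * size and offset of each enabled state component.
 */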
3440 if (count == 0) {
3441 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3442 *eax = env->features[FEAT_XSAVE_COMP_LO];
3443 *edx = env->features[FEAT_XSAVE_COMP_HI];
3444 *ebx = *ecx;
3445 } else if (count == 1) {
3446 *eax = env->features[FEAT_XSAVE];
3447 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3448 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3449 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3450 *eax = esa->size;
3451 *ebx = esa->offset;
3452 }
3453 }
3454 break;
3455 }
3456 case 0x40000000:
3457 /*
3458 * CPUID code in kvm_arch_init_vcpu() ignores values
3459 * set here, but we restrict this leaf to TCG nonetheless.
3460 */
3461 if (tcg_enabled() && cpu->expose_tcg) {
3462 memcpy(signature, "TCGTCGTCGTCG", 12);
3463 *eax = 0x40000001;
3464 *ebx = signature[0];
3465 *ecx = signature[1];
3466 *edx = signature[2];
3467 } else {
3468 *eax = 0;
3469 *ebx = 0;
3470 *ecx = 0;
3471 *edx = 0;
3472 }
3473 break;
3474 case 0x40000001:
3475 *eax = 0;
3476 *ebx = 0;
3477 *ecx = 0;
3478 *edx = 0;
3479 break;
3480 case 0x80000000:
3481 *eax = env->cpuid_xlevel;
3482 *ebx = env->cpuid_vendor1;
3483 *edx = env->cpuid_vendor2;
3484 *ecx = env->cpuid_vendor3;
3485 break;
3486 case 0x80000001:
3487 *eax = env->cpuid_version;
3488 *ebx = 0;
3489 *ecx = env->features[FEAT_8000_0001_ECX];
3490 *edx = env->features[FEAT_8000_0001_EDX];
3491
3492 /* The Linux kernel checks for the CMPLegacy bit and
3493 * discards multiple thread information if it is set.
3494 * So don't set it here for Intel to make Linux guests happy.
3495 */
3496 if (cs->nr_cores * cs->nr_threads > 1) {
3497 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3498 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3499 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3500 *ecx |= 1 << 1; /* CmpLegacy bit */
3501 }
3502 }
3503 break;
3504 case 0x80000002:
3505 case 0x80000003:
3506 case 0x80000004:
3507 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3508 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3509 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3510 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3511 break;
3512 case 0x80000005:
3513 /* cache info (L1 cache) */
3514 if (cpu->cache_info_passthrough) {
3515 host_cpuid(index, 0, eax, ebx, ecx, edx);
3516 break;
3517 }
3518 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3519 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3520 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3521 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3522 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
3523 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
3524 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
3525 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
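/*
 * Example encoding (illustrative values): a 64 KiB, 2-way L1D with
 * 64-byte lines and one line per tag would be reported as
 *   ECX = (64 << 24) | (2 << 16) | (1 << 8) | 64 = 0x40020140.
 */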
3526 break;
3527 case 0x80000006:
3528 /* cache info (L2 cache) */
3529 if (cpu->cache_info_passthrough) {
3530 host_cpuid(index, 0, eax, ebx, ecx, edx);
3531 break;
3532 }
3533 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3534 (L2_DTLB_2M_ENTRIES << 16) | \
3535 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3536 (L2_ITLB_2M_ENTRIES);
3537 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3538 (L2_DTLB_4K_ENTRIES << 16) | \
3539 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3540 (L2_ITLB_4K_ENTRIES);
3541 *ecx = (L2_SIZE_KB_AMD << 16) | \
3542 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
3543 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
3544 if (!cpu->enable_l3_cache) {
3545 *edx = ((L3_SIZE_KB / 512) << 18) | \
3546 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
3547 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
3548 } else {
3549 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
3550 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
3551 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
3552 }
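/*
 * The L3 size in EDX[31:18] is expressed in 512 KiB units; e.g. a
 * 16 MiB L3 (16384 KiB) is encoded as 16384 / 512 = 32.
 */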
3553 break;
3554 case 0x80000007:
3555 *eax = 0;
3556 *ebx = 0;
3557 *ecx = 0;
3558 *edx = env->features[FEAT_8000_0007_EDX];
3559 break;
3560 case 0x80000008:
3561 /* virtual & phys address size in low 2 bytes. */
3562 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3563 /* 64 bit processor */
3564 *eax = cpu->phys_bits; /* configurable physical bits */
3565 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3566 *eax |= 0x00003900; /* 57 bits virtual */
3567 } else {
3568 *eax |= 0x00003000; /* 48 bits virtual */
3569 }
3570 } else {
3571 *eax = cpu->phys_bits;
3572 }
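/*
 * Example encoding: with phys_bits = 40 the guest sees
 * EAX = 0x00003028 (48-bit virtual, 40-bit physical), or
 * EAX = 0x00003928 when LA57 is exposed (57-bit virtual).
 */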
3573 *ebx = env->features[FEAT_8000_0008_EBX];
3574 *ecx = 0;
3575 *edx = 0;
3576 if (cs->nr_cores * cs->nr_threads > 1) {
3577 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
3578 }
3579 break;
3580 case 0x8000000A:
3581 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3582 *eax = 0x00000001; /* SVM Revision */
3583 *ebx = 0x00000010; /* nr of ASIDs */
3584 *ecx = 0;
3585 *edx = env->features[FEAT_SVM]; /* optional features */
3586 } else {
3587 *eax = 0;
3588 *ebx = 0;
3589 *ecx = 0;
3590 *edx = 0;
3591 }
3592 break;
3593 case 0xC0000000:
3594 *eax = env->cpuid_xlevel2;
3595 *ebx = 0;
3596 *ecx = 0;
3597 *edx = 0;
3598 break;
3599 case 0xC0000001:
3600 /* Support for VIA CPUs' CPUID instruction */
3601 *eax = env->cpuid_version;
3602 *ebx = 0;
3603 *ecx = 0;
3604 *edx = env->features[FEAT_C000_0001_EDX];
3605 break;
3606 case 0xC0000002:
3607 case 0xC0000003:
3608 case 0xC0000004:
3609 /* Reserved for future use; currently filled with zeroes */
3610 *eax = 0;
3611 *ebx = 0;
3612 *ecx = 0;
3613 *edx = 0;
3614 break;
3615 default:
3616 /* reserved values: zero */
3617 *eax = 0;
3618 *ebx = 0;
3619 *ecx = 0;
3620 *edx = 0;
3621 break;
3622 }
3623 }
3624
3625 /* CPUClass::reset() */
3626 static void x86_cpu_reset(CPUState *s)
3627 {
3628 X86CPU *cpu = X86_CPU(s);
3629 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
3630 CPUX86State *env = &cpu->env;
3631 target_ulong cr4;
3632 uint64_t xcr0;
3633 int i;
3634
3635 xcc->parent_reset(s);
3636
3637 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
3638
3639 env->old_exception = -1;
3640
3641 /* init to reset state */
3642
3643 env->hflags2 |= HF2_GIF_MASK;
3644
3645 cpu_x86_update_cr0(env, 0x60000010);
3646 env->a20_mask = ~0x0;
3647 env->smbase = 0x30000;
3648 env->msr_smi_count = 0;
3649
3650 env->idt.limit = 0xffff;
3651 env->gdt.limit = 0xffff;
3652 env->ldt.limit = 0xffff;
3653 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
3654 env->tr.limit = 0xffff;
3655 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
3656
3657 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
3658 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
3659 DESC_R_MASK | DESC_A_MASK);
3660 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
3661 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3662 DESC_A_MASK);
3663 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
3664 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3665 DESC_A_MASK);
3666 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
3667 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3668 DESC_A_MASK);
3669 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
3670 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3671 DESC_A_MASK);
3672 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
3673 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3674 DESC_A_MASK);
3675
3676 env->eip = 0xfff0;
3677 env->regs[R_EDX] = env->cpuid_version;
3678
3679 env->eflags = 0x2;
3680
3681 /* FPU init */
3682 for (i = 0; i < 8; i++) {
3683 env->fptags[i] = 1;
3684 }
3685 cpu_set_fpuc(env, 0x37f);
3686
3687 env->mxcsr = 0x1f80;
3688 /* All units are in INIT state. */
3689 env->xstate_bv = 0;
3690
3691 env->pat = 0x0007040600070406ULL;
3692 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
3693
3694 memset(env->dr, 0, sizeof(env->dr));
3695 env->dr[6] = DR6_FIXED_1;
3696 env->dr[7] = DR7_FIXED_1;
3697 cpu_breakpoint_remove_all(s, BP_CPU);
3698 cpu_watchpoint_remove_all(s, BP_CPU);
3699
3700 cr4 = 0;
3701 xcr0 = XSTATE_FP_MASK;
3702
3703 #ifdef CONFIG_USER_ONLY
3704 /* Enable all the features for user-mode. */
3705 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
3706 xcr0 |= XSTATE_SSE_MASK;
3707 }
3708 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3709 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3710 if (env->features[esa->feature] & esa->bits) {
3711 xcr0 |= 1ull << i;
3712 }
3713 }
3714
3715 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
3716 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
3717 }
3718 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
3719 cr4 |= CR4_FSGSBASE_MASK;
3720 }
3721 #endif
3722
3723 env->xcr0 = xcr0;
3724 cpu_x86_update_cr4(env, cr4);
3725
3726 /*
3727 * SDM 11.11.5 requires:
3728 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3729 * - IA32_MTRR_PHYSMASKn.V = 0
3730 * All other bits are undefined. For simplification, zero it all.
3731 */
3732 env->mtrr_deftype = 0;
3733 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
3734 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
3735
3736 env->interrupt_injected = -1;
3737 env->exception_injected = -1;
3738 env->nmi_injected = false;
3739 #if !defined(CONFIG_USER_ONLY)
3740 /* We hard-wire the BSP to the first CPU. */
3741 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
3742
3743 s->halted = !cpu_is_bsp(cpu);
3744
3745 if (kvm_enabled()) {
3746 kvm_arch_reset_vcpu(cpu);
3747 }
3748 else if (hvf_enabled()) {
3749 hvf_reset_vcpu(s);
3750 }
3751 #endif
3752 }
3753
3754 #ifndef CONFIG_USER_ONLY
3755 bool cpu_is_bsp(X86CPU *cpu)
3756 {
3757 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3758 }
3759
3760 /* TODO: remove me, when reset over QOM tree is implemented */
3761 static void x86_cpu_machine_reset_cb(void *opaque)
3762 {
3763 X86CPU *cpu = opaque;
3764 cpu_reset(CPU(cpu));
3765 }
3766 #endif
3767
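/*
 * Initialise the machine-check state: when the CPU reports family >= 6
 * and both MCE and MCA in CPUID[1].EDX, advertise the default MCG
 * capabilities and bank count (plus LMCE when enabled) and mark
 * MCG_CTL and every bank's MCi_CTL as fully enabled.
 */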
3768 static void mce_init(X86CPU *cpu)
3769 {
3770 CPUX86State *cenv = &cpu->env;
3771 unsigned int bank;
3772
3773 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3774 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3775 (CPUID_MCE | CPUID_MCA)) {
3776 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3777 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3778 cenv->mcg_ctl = ~(uint64_t)0;
3779 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3780 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3781 }
3782 }
3783 }
3784
3785 #ifndef CONFIG_USER_ONLY
3786 APICCommonClass *apic_get_class(void)
3787 {
3788 const char *apic_type = "apic";
3789
3790 /* TODO: in-kernel irqchip for hvf */
3791 if (kvm_apic_in_kernel()) {
3792 apic_type = "kvm-apic";
3793 } else if (xen_enabled()) {
3794 apic_type = "xen-apic";
3795 }
3796
3797 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3798 }
3799
3800 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
3801 {
3802 APICCommonState *apic;
3803 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
3804
3805 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
3806
3807 object_property_add_child(OBJECT(cpu), "lapic",
3808 OBJECT(cpu->apic_state), &error_abort);
3809 object_unref(OBJECT(cpu->apic_state));
3810
3811 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
3812 /* TODO: convert to link<> */
3813 apic = APIC_COMMON(cpu->apic_state);
3814 apic->cpu = cpu;
3815 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
3816 }
3817
3818 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3819 {
3820 APICCommonState *apic;
3821 static bool apic_mmio_map_once;
3822
3823 if (cpu->apic_state == NULL) {
3824 return;
3825 }
3826 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
3827 errp);
3828
3829 /* Map APIC MMIO area */
3830 apic = APIC_COMMON(cpu->apic_state);
3831 if (!apic_mmio_map_once) {
3832 memory_region_add_subregion_overlap(get_system_memory(),
3833 apic->apicbase &
3834 MSR_IA32_APICBASE_BASE,
3835 &apic->io_memory,
3836 0x1000);
3837 apic_mmio_map_once = true;
3838 }
3839 }
3840
3841 static void x86_cpu_machine_done(Notifier *n, void *unused)
3842 {
3843 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3844 MemoryRegion *smram =
3845 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3846
3847 if (smram) {
3848 cpu->smram = g_new(MemoryRegion, 1);
3849 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3850 smram, 0, 1ull << 32);
3851 memory_region_set_enabled(cpu->smram, true);
3852 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3853 }
3854 }
3855 #else
3856 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3857 {
3858 }
3859 #endif
3860
3861 /* Note: Only safe for use on x86(-64) hosts */
3862 static uint32_t x86_host_phys_bits(void)
3863 {
3864 uint32_t eax;
3865 uint32_t host_phys_bits;
3866
3867 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3868 if (eax >= 0x80000008) {
3869 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3870 /* Note: According to AMD doc 25481 rev 2.34 they have a field
3871 * at 23:16 that can specify the maximum physical address bits for
3872 * the guest, which can override this value; but I've not seen
3873 * anything with that set.
3874 */
3875 host_phys_bits = eax & 0xff;
3876 } else {
3877 /* It's an odd 64 bit machine that doesn't have the leaf for
3878 * physical address bits; fall back to 36, which is what most
3879 * older Intel CPUs use.
3880 */
3881 host_phys_bits = 36;
3882 }
3883
3884 return host_phys_bits;
3885 }
3886
3887 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3888 {
3889 if (*min < value) {
3890 *min = value;
3891 }
3892 }
3893
3894 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3895 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3896 {
3897 CPUX86State *env = &cpu->env;
3898 FeatureWordInfo *fi = &feature_word_info[w];
3899 uint32_t eax = fi->cpuid_eax;
3900 uint32_t region = eax & 0xF0000000;
3901
3902 if (!env->features[w]) {
3903 return;
3904 }
3905
3906 switch (region) {
3907 case 0x00000000:
3908 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3909 break;
3910 case 0x80000000:
3911 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3912 break;
3913 case 0xC0000000:
3914 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3915 break;
3916 }
3917 }
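/*
 * For example, if any bit of FEAT_8000_0008_EBX is set, its cpuid_eax
 * (0x80000008) falls in the 0x80000000 region, so cpuid_min_xlevel is
 * raised to at least 0x80000008 here.
 */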
3918
3919 /* Calculate XSAVE components based on the configured CPU feature flags */
3920 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3921 {
3922 CPUX86State *env = &cpu->env;
3923 int i;
3924 uint64_t mask;
3925
3926 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3927 return;
3928 }
3929
3930 mask = 0;
3931 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3932 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3933 if (env->features[esa->feature] & esa->bits) {
3934 mask |= (1ULL << i);
3935 }
3936 }
3937
3938 env->features[FEAT_XSAVE_COMP_LO] = mask;
3939 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3940 }
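/*
 * For example, a CPU with x87, SSE and AVX enabled ends up with
 * mask = 0x7 (entries 0, 1 and 2 of x86_ext_save_areas), which
 * CPUID[0xD] leaf 0 then reports in EDX:EAX as the supported
 * XSAVE components.
 */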
3941
3942 /***** Steps involved in loading and filtering CPUID data
3943 *
3944 * When initializing and realizing a CPU object, the steps
3945 * involved in setting up CPUID data are:
3946 *
3947 * 1) Loading CPU model definition (X86CPUDefinition). This is
3948 * implemented by x86_cpu_load_def() and should be completely
3949 * transparent, as it is done automatically by instance_init.
3950 * No code should need to look at X86CPUDefinition structs
3951 * outside instance_init.
3952 *
3953 * 2) CPU expansion. This is done by realize before CPUID
3954 * filtering, and will make sure host/accelerator data is
3955 * loaded for CPU models that depend on host capabilities
3956 * (e.g. "host"). Done by x86_cpu_expand_features().
3957 *
3958 * 3) CPUID filtering. This initializes extra data related to
3959 * CPUID, and checks if the host supports all capabilities
3960 * required by the CPU. Runnability of a CPU model is
3961 * determined at this step. Done by x86_cpu_filter_features().
3962 *
3963 * Some operations don't require all steps to be performed.
3964 * More precisely:
3965 *
3966 * - CPU instance creation (instance_init) will run only CPU
3967 * model loading. CPU expansion can't run at instance_init-time
3968 * because host/accelerator data may not be available yet.
3969 * - CPU realization will perform both CPU model expansion and CPUID
3970 * filtering, and return an error in case one of them fails.
3971 * - query-cpu-definitions needs to run all 3 steps. It needs
3972 * to run CPUID filtering, as the 'unavailable-features'
3973 * field is set based on the filtering results.
3974 * - The query-cpu-model-expansion QMP command only needs to run
3975 * CPU model loading and CPU expansion. It should not filter
3976 * any CPUID data based on host capabilities.
3977 */
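/*
 * Rough call sequence for the steps above (see x86_cpu_initfn() and
 * x86_cpu_realizefn() below):
 *
 *   instance_init -> x86_cpu_load_def()          (step 1)
 *   realize       -> x86_cpu_expand_features()   (step 2)
 *                 -> x86_cpu_filter_features()   (step 3)
 */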
3978
3979 /* Expand CPU configuration data, based on configured features
3980 * and host/accelerator capabilities when appropriate.
3981 */
3982 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
3983 {
3984 CPUX86State *env = &cpu->env;
3985 FeatureWord w;
3986 GList *l;
3987 Error *local_err = NULL;
3988
3989 /* TODO: Now that cpu->max_features doesn't overwrite features
3990 * set using QOM properties, we can convert
3991 * plus_features & minus_features to global properties
3992 * inside x86_cpu_parse_featurestr() too.
3993 */
3994 if (cpu->max_features) {
3995 for (w = 0; w < FEATURE_WORDS; w++) {
3996 /* Override only features that weren't set explicitly
3997 * by the user.
3998 */
3999 env->features[w] |=
4000 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4001 ~env->user_features[w];
4002 }
4003 }
4004
4005 for (l = plus_features; l; l = l->next) {
4006 const char *prop = l->data;
4007 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4008 if (local_err) {
4009 goto out;
4010 }
4011 }
4012
4013 for (l = minus_features; l; l = l->next) {
4014 const char *prop = l->data;
4015 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4016 if (local_err) {
4017 goto out;
4018 }
4019 }
4020
4021 if (!kvm_enabled() || !cpu->expose_kvm) {
4022 env->features[FEAT_KVM] = 0;
4023 }
4024
4025 x86_cpu_enable_xsave_components(cpu);
4026
4027 /* CPUID[EAX=7,ECX=0].EBX always raises the minimum level automatically: */
4028 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4029 if (cpu->full_cpuid_auto_level) {
4030 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4031 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4032 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4033 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4034 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4035 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4036 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4037 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4038 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4039 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4040 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4041 /* SVM requires CPUID[0x8000000A] */
4042 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4043 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4044 }
4045 }
4046
4047 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4048 if (env->cpuid_level == UINT32_MAX) {
4049 env->cpuid_level = env->cpuid_min_level;
4050 }
4051 if (env->cpuid_xlevel == UINT32_MAX) {
4052 env->cpuid_xlevel = env->cpuid_min_xlevel;
4053 }
4054 if (env->cpuid_xlevel2 == UINT32_MAX) {
4055 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4056 }
4057
4058 out:
4059 if (local_err != NULL) {
4060 error_propagate(errp, local_err);
4061 }
4062 }
4063
4064 /*
4065 * Finishes initialization of CPUID data, filters CPU feature
4066 * words based on host availability of each feature.
4067 *
4068 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4069 */
4070 static int x86_cpu_filter_features(X86CPU *cpu)
4071 {
4072 CPUX86State *env = &cpu->env;
4073 FeatureWord w;
4074 int rv = 0;
4075
4076 for (w = 0; w < FEATURE_WORDS; w++) {
4077 uint32_t host_feat =
4078 x86_cpu_get_supported_feature_word(w, false);
4079 uint32_t requested_features = env->features[w];
4080 env->features[w] &= host_feat;
4081 cpu->filtered_features[w] = requested_features & ~env->features[w];
4082 if (cpu->filtered_features[w]) {
4083 rv = 1;
4084 }
4085 }
4086
4087 return rv;
4088 }
4089
4090 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4091 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4092 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4093 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4094 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4095 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4096 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4097 {
4098 CPUState *cs = CPU(dev);
4099 X86CPU *cpu = X86_CPU(dev);
4100 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4101 CPUX86State *env = &cpu->env;
4102 Error *local_err = NULL;
4103 static bool ht_warned;
4104
4105 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4106 char *name = x86_cpu_class_get_model_name(xcc);
4107 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4108 g_free(name);
4109 goto out;
4110 }
4111
4112 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4113 error_setg(errp, "apic-id property was not initialized properly");
4114 return;
4115 }
4116
4117 x86_cpu_expand_features(cpu, &local_err);
4118 if (local_err) {
4119 goto out;
4120 }
4121
4122 if (x86_cpu_filter_features(cpu) &&
4123 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4124 x86_cpu_report_filtered_features(cpu);
4125 if (cpu->enforce_cpuid) {
4126 error_setg(&local_err,
4127 accel_uses_host_cpuid() ?
4128 "Host doesn't support requested features" :
4129 "TCG doesn't support requested features");
4130 goto out;
4131 }
4132 }
4133
4134 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4135 * CPUID[1].EDX.
4136 */
4137 if (IS_AMD_CPU(env)) {
4138 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4139 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4140 & CPUID_EXT2_AMD_ALIASES);
4141 }
4142
4143 /* For 64-bit systems, think about the number of physical bits to present.
4144 * Ideally this should be the same as the host; anything other than matching
4145 * the host can cause incorrect guest behaviour.
4146 * QEMU used to pick the magic value of 40 bits, which corresponds to
4147 * consumer AMD devices but nothing else.
4148 */
4149 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4150 if (accel_uses_host_cpuid()) {
4151 uint32_t host_phys_bits = x86_host_phys_bits();
4152 static bool warned;
4153
4154 if (cpu->host_phys_bits) {
4155 /* The user asked for us to use the host physical bits */
4156 cpu->phys_bits = host_phys_bits;
4157 }
4158
4159 /* Print a warning if the user set it to a value that's not the
4160 * host value.
4161 */
4162 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4163 !warned) {
4164 warn_report("Host physical bits (%u)"
4165 " does not match phys-bits property (%u)",
4166 host_phys_bits, cpu->phys_bits);
4167 warned = true;
4168 }
4169
4170 if (cpu->phys_bits &&
4171 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4172 cpu->phys_bits < 32)) {
4173 error_setg(errp, "phys-bits should be between 32 and %u "
4174 " (but is %u)",
4175 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4176 return;
4177 }
4178 } else {
4179 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4180 error_setg(errp, "TCG only supports phys-bits=%u",
4181 TCG_PHYS_ADDR_BITS);
4182 return;
4183 }
4184 }
4185 /* 0 means it was not explicitly set by the user (or by machine
4186 * compat_props or by the host code above). In this case, the default
4187 * is the value used by TCG (40).
4188 */
4189 if (cpu->phys_bits == 0) {
4190 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4191 }
4192 } else {
4193 /* For 32 bit systems don't use the user-set value, but keep
4194 * phys_bits consistent with what we tell the guest.
4195 */
4196 if (cpu->phys_bits != 0) {
4197 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4198 return;
4199 }
4200
4201 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4202 cpu->phys_bits = 36;
4203 } else {
4204 cpu->phys_bits = 32;
4205 }
4206 }
4207 cpu_exec_realizefn(cs, &local_err);
4208 if (local_err != NULL) {
4209 error_propagate(errp, local_err);
4210 return;
4211 }
4212
4213 #ifndef CONFIG_USER_ONLY
4214 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4215
4216 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4217 x86_cpu_apic_create(cpu, &local_err);
4218 if (local_err != NULL) {
4219 goto out;
4220 }
4221 }
4222 #endif
4223
4224 mce_init(cpu);
4225
4226 #ifndef CONFIG_USER_ONLY
4227 if (tcg_enabled()) {
4228 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4229 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4230
4231 /* Outer container... */
4232 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4233 memory_region_set_enabled(cpu->cpu_as_root, true);
4234
4235 /* ... with two regions inside: normal system memory with low
4236 * priority, and...
4237 */
4238 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4239 get_system_memory(), 0, ~0ull);
4240 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4241 memory_region_set_enabled(cpu->cpu_as_mem, true);
4242
4243 cs->num_ases = 2;
4244 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4245 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4246
4247 /* ... SMRAM with higher priority, linked from /machine/smram. */
4248 cpu->machine_done.notify = x86_cpu_machine_done;
4249 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4250 }
4251 #endif
4252
4253 qemu_init_vcpu(cs);
4254
4255 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4256 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4257 * based on inputs (sockets, cores, threads), it is still better to give
4258 * users a warning.
4259 *
4260 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4261 * cs->nr_threads hasn't been populated yet and the check is incorrect.
4262 */
4263 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4264 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4265 " -smp options properly.");
4266 ht_warned = true;
4267 }
4268
4269 x86_cpu_apic_realize(cpu, &local_err);
4270 if (local_err != NULL) {
4271 goto out;
4272 }
4273 cpu_reset(cs);
4274
4275 xcc->parent_realize(dev, &local_err);
4276
4277 out:
4278 if (local_err != NULL) {
4279 error_propagate(errp, local_err);
4280 return;
4281 }
4282 }
4283
4284 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4285 {
4286 X86CPU *cpu = X86_CPU(dev);
4287 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4288 Error *local_err = NULL;
4289
4290 #ifndef CONFIG_USER_ONLY
4291 cpu_remove_sync(CPU(dev));
4292 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4293 #endif
4294
4295 if (cpu->apic_state) {
4296 object_unparent(OBJECT(cpu->apic_state));
4297 cpu->apic_state = NULL;
4298 }
4299
4300 xcc->parent_unrealize(dev, &local_err);
4301 if (local_err != NULL) {
4302 error_propagate(errp, local_err);
4303 return;
4304 }
4305 }
4306
4307 typedef struct BitProperty {
4308 FeatureWord w;
4309 uint32_t mask;
4310 } BitProperty;
4311
4312 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4313 void *opaque, Error **errp)
4314 {
4315 X86CPU *cpu = X86_CPU(obj);
4316 BitProperty *fp = opaque;
4317 uint32_t f = cpu->env.features[fp->w];
4318 bool value = (f & fp->mask) == fp->mask;
4319 visit_type_bool(v, name, &value, errp);
4320 }
4321
4322 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4323 void *opaque, Error **errp)
4324 {
4325 DeviceState *dev = DEVICE(obj);
4326 X86CPU *cpu = X86_CPU(obj);
4327 BitProperty *fp = opaque;
4328 Error *local_err = NULL;
4329 bool value;
4330
4331 if (dev->realized) {
4332 qdev_prop_set_after_realize(dev, name, errp);
4333 return;
4334 }
4335
4336 visit_type_bool(v, name, &value, &local_err);
4337 if (local_err) {
4338 error_propagate(errp, local_err);
4339 return;
4340 }
4341
4342 if (value) {
4343 cpu->env.features[fp->w] |= fp->mask;
4344 } else {
4345 cpu->env.features[fp->w] &= ~fp->mask;
4346 }
4347 cpu->env.user_features[fp->w] |= fp->mask;
4348 }
4349
4350 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4351 void *opaque)
4352 {
4353 BitProperty *prop = opaque;
4354 g_free(prop);
4355 }
4356
4357 /* Register a boolean property to get/set a single bit in a uint32_t field.
4358 *
4359 * The same property name can be registered multiple times to make it affect
4360 * multiple bits in the same FeatureWord. In that case, the getter will return
4361 * true only if all bits are set.
4362 */
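/*
 * For example, registering a (hypothetical) name "foo" for bits 3 and 5
 * of the same word yields fp->mask = (1 << 3) | (1 << 5); reading the
 * property then returns true only when both bits are set, and writing
 * it sets or clears both at once.
 */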
4363 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4364 const char *prop_name,
4365 FeatureWord w,
4366 int bitnr)
4367 {
4368 BitProperty *fp;
4369 ObjectProperty *op;
4370 uint32_t mask = (1UL << bitnr);
4371
4372 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4373 if (op) {
4374 fp = op->opaque;
4375 assert(fp->w == w);
4376 fp->mask |= mask;
4377 } else {
4378 fp = g_new0(BitProperty, 1);
4379 fp->w = w;
4380 fp->mask = mask;
4381 object_property_add(OBJECT(cpu), prop_name, "bool",
4382 x86_cpu_get_bit_prop,
4383 x86_cpu_set_bit_prop,
4384 x86_cpu_release_bit_prop, fp, &error_abort);
4385 }
4386 }
4387
4388 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4389 FeatureWord w,
4390 int bitnr)
4391 {
4392 FeatureWordInfo *fi = &feature_word_info[w];
4393 const char *name = fi->feat_names[bitnr];
4394
4395 if (!name) {
4396 return;
4397 }
4398
4399 /* Property names should use "-" instead of "_".
4400 * Old names containing underscores are registered as aliases
4401 * using object_property_add_alias()
4402 */
4403 assert(!strchr(name, '_'));
4404 /* aliases don't use "|" delimiters anymore; they are registered
4405 * manually using object_property_add_alias() */
4406 assert(!strchr(name, '|'));
4407 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
4408 }
4409
4410 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
4411 {
4412 X86CPU *cpu = X86_CPU(cs);
4413 CPUX86State *env = &cpu->env;
4414 GuestPanicInformation *panic_info = NULL;
4415
4416 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
4417 panic_info = g_malloc0(sizeof(GuestPanicInformation));
4418
4419 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
4420
4421 assert(HV_CRASH_PARAMS >= 5);
4422 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
4423 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
4424 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
4425 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
4426 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
4427 }
4428
4429 return panic_info;
4430 }
4431 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
4432 const char *name, void *opaque,
4433 Error **errp)
4434 {
4435 CPUState *cs = CPU(obj);
4436 GuestPanicInformation *panic_info;
4437
4438 if (!cs->crash_occurred) {
4439 error_setg(errp, "No crash occured");
4440 return;
4441 }
4442
4443 panic_info = x86_cpu_get_crash_info(cs);
4444 if (panic_info == NULL) {
4445 error_setg(errp, "No crash information");
4446 return;
4447 }
4448
4449 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
4450 errp);
4451 qapi_free_GuestPanicInformation(panic_info);
4452 }
4453
4454 static void x86_cpu_initfn(Object *obj)
4455 {
4456 CPUState *cs = CPU(obj);
4457 X86CPU *cpu = X86_CPU(obj);
4458 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
4459 CPUX86State *env = &cpu->env;
4460 FeatureWord w;
4461
4462 cs->env_ptr = env;
4463
4464 object_property_add(obj, "family", "int",
4465 x86_cpuid_version_get_family,
4466 x86_cpuid_version_set_family, NULL, NULL, NULL);
4467 object_property_add(obj, "model", "int",
4468 x86_cpuid_version_get_model,
4469 x86_cpuid_version_set_model, NULL, NULL, NULL);
4470 object_property_add(obj, "stepping", "int",
4471 x86_cpuid_version_get_stepping,
4472 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
4473 object_property_add_str(obj, "vendor",
4474 x86_cpuid_get_vendor,
4475 x86_cpuid_set_vendor, NULL);
4476 object_property_add_str(obj, "model-id",
4477 x86_cpuid_get_model_id,
4478 x86_cpuid_set_model_id, NULL);
4479 object_property_add(obj, "tsc-frequency", "int",
4480 x86_cpuid_get_tsc_freq,
4481 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4482 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4483 x86_cpu_get_feature_words,
4484 NULL, NULL, (void *)env->features, NULL);
4485 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4486 x86_cpu_get_feature_words,
4487 NULL, NULL, (void *)cpu->filtered_features, NULL);
4488
4489 object_property_add(obj, "crash-information", "GuestPanicInformation",
4490 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4491
4492 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4493
4494 for (w = 0; w < FEATURE_WORDS; w++) {
4495 int bitnr;
4496
4497 for (bitnr = 0; bitnr < 32; bitnr++) {
4498 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4499 }
4500 }
4501
4502 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4503 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4504 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4505 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4506 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4507 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4508 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4509
4510 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4511 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
4512 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
4513 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
4514 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
4515 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
4516 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
4517 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
4518 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
4519 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
4520 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
4521 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
4522 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
4523 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
4524 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
4525 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
4526 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
4527 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
4528 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
4529 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
4530 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
4531
4532 if (xcc->cpu_def) {
4533 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
4534 }
4535 }
4536
4537 static int64_t x86_cpu_get_arch_id(CPUState *cs)
4538 {
4539 X86CPU *cpu = X86_CPU(cs);
4540
4541 return cpu->apic_id;
4542 }
4543
4544 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
4545 {
4546 X86CPU *cpu = X86_CPU(cs);
4547
4548 return cpu->env.cr[0] & CR0_PG_MASK;
4549 }
4550
4551 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
4552 {
4553 X86CPU *cpu = X86_CPU(cs);
4554
4555 cpu->env.eip = value;
4556 }
4557
4558 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
4559 {
4560 X86CPU *cpu = X86_CPU(cs);
4561
4562 cpu->env.eip = tb->pc - tb->cs_base;
4563 }
4564
4565 static bool x86_cpu_has_work(CPUState *cs)
4566 {
4567 X86CPU *cpu = X86_CPU(cs);
4568 CPUX86State *env = &cpu->env;
4569
4570 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
4571 CPU_INTERRUPT_POLL)) &&
4572 (env->eflags & IF_MASK)) ||
4573 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
4574 CPU_INTERRUPT_INIT |
4575 CPU_INTERRUPT_SIPI |
4576 CPU_INTERRUPT_MCE)) ||
4577 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
4578 !(env->hflags & HF_SMM_MASK));
4579 }
4580
4581 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
4582 {
4583 X86CPU *cpu = X86_CPU(cs);
4584 CPUX86State *env = &cpu->env;
4585
4586 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
4587 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
4588 : bfd_mach_i386_i8086);
4589 info->print_insn = print_insn_i386;
4590
4591 info->cap_arch = CS_ARCH_X86;
4592 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
4593 : env->hflags & HF_CS32_MASK ? CS_MODE_32
4594 : CS_MODE_16);
4595 info->cap_insn_unit = 1;
4596 info->cap_insn_split = 8;
4597 }
4598
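/*
 * Derive the cached hflags from the architectural state: CPL from the
 * SS descriptor DPL, PE/MP/EM/TS from CR0, TF/VM/IOPL from EFLAGS,
 * OSFXSR from CR4, LMA from EFER, and the CS32/SS32/CS64/ADDSEG bits
 * from the current code and stack segment descriptors.
 */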
4599 void x86_update_hflags(CPUX86State *env)
4600 {
4601 uint32_t hflags;
4602 #define HFLAG_COPY_MASK \
4603 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
4604 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
4605 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
4606 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
4607
4608 hflags = env->hflags & HFLAG_COPY_MASK;
4609 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
4610 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
4611 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
4612 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
4613 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
4614
4615 if (env->cr[4] & CR4_OSFXSR_MASK) {
4616 hflags |= HF_OSFXSR_MASK;
4617 }
4618
4619 if (env->efer & MSR_EFER_LMA) {
4620 hflags |= HF_LMA_MASK;
4621 }
4622
4623 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
4624 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
4625 } else {
4626 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
4627 (DESC_B_SHIFT - HF_CS32_SHIFT);
4628 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
4629 (DESC_B_SHIFT - HF_SS32_SHIFT);
4630 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
4631 !(hflags & HF_CS32_MASK)) {
4632 hflags |= HF_ADDSEG_MASK;
4633 } else {
4634 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
4635 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
4636 }
4637 }
4638 env->hflags = hflags;
4639 }
4640
4641 static Property x86_cpu_properties[] = {
4642 #ifdef CONFIG_USER_ONLY
4643 /* apic_id = 0 by default for *-user, see commit 9886e834 */
4644 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
4645 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
4646 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
4647 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
4648 #else
4649 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
4650 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
4651 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
4652 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
4653 #endif
4654 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
4655 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
4656 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
4657 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
4658 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
4659 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
4660 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
4661 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
4662 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
4663 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
4664 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
4665 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
4666 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
4667 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
4668 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
4669 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
4670 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
4671 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
4672 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
4673 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
4674 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
4675 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
4676 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
4677 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
4678 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
4679 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
4680 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
4681 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
4682 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
4683 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
4684 false),
4685 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
4686 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
4687
4688 /*
4689 * From "Requirements for Implementing the Microsoft
4690 * Hypervisor Interface":
4691 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
4692 *
4693 * "Starting with Windows Server 2012 and Windows 8, if
4694 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
4695 * the hypervisor imposes no specific limit to the number of VPs.
4696 * In this case, Windows Server 2012 guest VMs may use more than
4697 * 64 VPs, up to the maximum supported number of processors applicable
4698 * to the specific Windows version being used."
4699 */
4700 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
4701 DEFINE_PROP_END_OF_LIST()
4702 };
4703
4704 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
4705 {
4706 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4707 CPUClass *cc = CPU_CLASS(oc);
4708 DeviceClass *dc = DEVICE_CLASS(oc);
4709
4710 device_class_set_parent_realize(dc, x86_cpu_realizefn,
4711 &xcc->parent_realize);
4712 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
4713 &xcc->parent_unrealize);
4714 dc->props = x86_cpu_properties;
4715
4716 xcc->parent_reset = cc->reset;
4717 cc->reset = x86_cpu_reset;
4718 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
4719
4720 cc->class_by_name = x86_cpu_class_by_name;
4721 cc->parse_features = x86_cpu_parse_featurestr;
4722 cc->has_work = x86_cpu_has_work;
4723 #ifdef CONFIG_TCG
4724 cc->do_interrupt = x86_cpu_do_interrupt;
4725 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
4726 #endif
4727 cc->dump_state = x86_cpu_dump_state;
4728 cc->get_crash_info = x86_cpu_get_crash_info;
4729 cc->set_pc = x86_cpu_set_pc;
4730 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
4731 cc->gdb_read_register = x86_cpu_gdb_read_register;
4732 cc->gdb_write_register = x86_cpu_gdb_write_register;
4733 cc->get_arch_id = x86_cpu_get_arch_id;
4734 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
4735 #ifdef CONFIG_USER_ONLY
4736 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
4737 #else
4738 cc->asidx_from_attrs = x86_asidx_from_attrs;
4739 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
4740 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
4741 cc->write_elf64_note = x86_cpu_write_elf64_note;
4742 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
4743 cc->write_elf32_note = x86_cpu_write_elf32_note;
4744 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
4745 cc->vmsd = &vmstate_x86_cpu;
4746 #endif
4747 cc->gdb_arch_name = x86_gdb_arch_name;
4748 #ifdef TARGET_X86_64
4749 cc->gdb_core_xml_file = "i386-64bit.xml";
4750 cc->gdb_num_core_regs = 57;
4751 #else
4752 cc->gdb_core_xml_file = "i386-32bit.xml";
4753 cc->gdb_num_core_regs = 41;
4754 #endif
4755 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
4756 cc->debug_excp_handler = breakpoint_handler;
4757 #endif
4758 cc->cpu_exec_enter = x86_cpu_exec_enter;
4759 cc->cpu_exec_exit = x86_cpu_exec_exit;
4760 #ifdef CONFIG_TCG
4761 cc->tcg_initialize = tcg_x86_init;
4762 #endif
4763 cc->disas_set_info = x86_disas_set_info;
4764
4765 dc->user_creatable = true;
4766 }
4767
4768 static const TypeInfo x86_cpu_type_info = {
4769 .name = TYPE_X86_CPU,
4770 .parent = TYPE_CPU,
4771 .instance_size = sizeof(X86CPU),
4772 .instance_init = x86_cpu_initfn,
4773 .abstract = true,
4774 .class_size = sizeof(X86CPUClass),
4775 .class_init = x86_cpu_common_class_init,
4776 };
4777
4778
4779 /* "base" CPU model, used by query-cpu-model-expansion */
4780 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4781 {
4782 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4783
4784 xcc->static_model = true;
4785 xcc->migration_safe = true;
4786 xcc->model_description = "base CPU model type with no features enabled";
4787 xcc->ordering = 8;
4788 }
4789
4790 static const TypeInfo x86_base_cpu_type_info = {
4791 .name = X86_CPU_TYPE_NAME("base"),
4792 .parent = TYPE_X86_CPU,
4793 .class_init = x86_cpu_base_class_init,
4794 };
4795
4796 static void x86_cpu_register_types(void)
4797 {
4798 int i;
4799
4800 type_register_static(&x86_cpu_type_info);
4801 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4802 x86_register_cpudef_type(&builtin_x86_defs[i]);
4803 }
4804 type_register_static(&max_x86_cpu_type_info);
4805 type_register_static(&x86_base_cpu_type_info);
4806 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
4807 type_register_static(&host_x86_cpu_type_info);
4808 #endif
4809 }
4810
4811 type_init(x86_cpu_register_types)