1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/hvf.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28
29 #include "qemu/error-report.h"
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
33 #include "qapi/qmp/types.h"
34
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "qom/qom-qobject.h"
38 #include "sysemu/arch_init.h"
39
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
43
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/i386/topology.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "exec/address-spaces.h"
49 #include "hw/hw.h"
50 #include "hw/xen/xen.h"
51 #include "hw/i386/apic_internal.h"
52 #endif
53
54 #include "disas/capstone.h"
55
56
57 /* Cache topology CPUID constants: */
58
59 /* CPUID Leaf 2 Descriptors */
60
61 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
62 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
63 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
64 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
65
66
67 /* CPUID Leaf 4 constants: */
68
69 /* EAX: */
70 #define CPUID_4_TYPE_DCACHE 1
71 #define CPUID_4_TYPE_ICACHE 2
72 #define CPUID_4_TYPE_UNIFIED 3
73
74 #define CPUID_4_LEVEL(l) ((l) << 5)
75
76 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
77 #define CPUID_4_FULLY_ASSOC (1 << 9)
78
79 /* EDX: */
80 #define CPUID_4_NO_INVD_SHARING (1 << 0)
81 #define CPUID_4_INCLUSIVE (1 << 1)
82 #define CPUID_4_COMPLEX_IDX (1 << 2)
83
84 #define ASSOC_FULL 0xFF
85
86 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
87 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
88 a == 2 ? 0x2 : \
89 a == 4 ? 0x4 : \
90 a == 8 ? 0x6 : \
91 a == 16 ? 0x8 : \
92 a == 32 ? 0xA : \
93 a == 48 ? 0xB : \
94 a == 64 ? 0xC : \
95 a == 96 ? 0xD : \
96 a == 128 ? 0xE : \
97 a == ASSOC_FULL ? 0xF : \
98 0 /* invalid value */)
99
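/*
 * Illustrative examples of the AMD encoding above:
 *   AMD_ENC_ASSOC(1)          == 0x1   (direct mapped)
 *   AMD_ENC_ASSOC(8)          == 0x6   (8-way)
 *   AMD_ENC_ASSOC(16)         == 0x8   (16-way)
 *   AMD_ENC_ASSOC(ASSOC_FULL) == 0xF   (fully associative)
 * Any value not listed in the macro (e.g. 3) encodes as 0, the
 * invalid/disabled marker.
 */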
100
101 /* Definitions of the hardcoded cache entries we expose: */
102
103 /* L1 data cache: */
104 #define L1D_LINE_SIZE 64
105 #define L1D_ASSOCIATIVITY 8
106 #define L1D_SETS 64
107 #define L1D_PARTITIONS 1
108 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
109 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
110 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
111 #define L1D_LINES_PER_TAG 1
112 #define L1D_SIZE_KB_AMD 64
113 #define L1D_ASSOCIATIVITY_AMD 2
114
115 /* L1 instruction cache: */
116 #define L1I_LINE_SIZE 64
117 #define L1I_ASSOCIATIVITY 8
118 #define L1I_SETS 64
119 #define L1I_PARTITIONS 1
120 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
121 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
122 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
123 #define L1I_LINES_PER_TAG 1
124 #define L1I_SIZE_KB_AMD 64
125 #define L1I_ASSOCIATIVITY_AMD 2
126
127 /* Level 2 unified cache: */
128 #define L2_LINE_SIZE 64
129 #define L2_ASSOCIATIVITY 16
130 #define L2_SETS 4096
131 #define L2_PARTITIONS 1
132 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
133 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
134 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
135 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
136 #define L2_LINES_PER_TAG 1
137 #define L2_SIZE_KB_AMD 512
138
139 /* Level 3 unified cache: */
140 #define L3_SIZE_KB 0 /* disabled */
141 #define L3_ASSOCIATIVITY 0 /* disabled */
142 #define L3_LINES_PER_TAG 0 /* disabled */
143 #define L3_LINE_SIZE 0 /* disabled */
144 #define L3_N_LINE_SIZE 64
145 #define L3_N_ASSOCIATIVITY 16
146 #define L3_N_SETS 16384
147 #define L3_N_PARTITIONS 1
148 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
149 #define L3_N_LINES_PER_TAG 1
150 #define L3_N_SIZE_KB_AMD 16384
151
152 /* TLB definitions: */
153
154 #define L1_DTLB_2M_ASSOC 1
155 #define L1_DTLB_2M_ENTRIES 255
156 #define L1_DTLB_4K_ASSOC 1
157 #define L1_DTLB_4K_ENTRIES 255
158
159 #define L1_ITLB_2M_ASSOC 1
160 #define L1_ITLB_2M_ENTRIES 255
161 #define L1_ITLB_4K_ASSOC 1
162 #define L1_ITLB_4K_ENTRIES 255
163
164 #define L2_DTLB_2M_ASSOC 0 /* disabled */
165 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
166 #define L2_DTLB_4K_ASSOC 4
167 #define L2_DTLB_4K_ENTRIES 512
168
169 #define L2_ITLB_2M_ASSOC 0 /* disabled */
170 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
171 #define L2_ITLB_4K_ASSOC 4
172 #define L2_ITLB_4K_ENTRIES 512
173
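/*
 * Worked example of the size formula used in the cache comments above:
 * for the L2 definitions, LINE_SIZE * ASSOCIATIVITY * SETS * PARTITIONS
 * = 64 * 16 * 4096 * 1 bytes = 4 MiB as reported via CPUID leaf 4,
 * while the leaf 2 descriptor (2 MB) and the AMD leaf 0x80000006 value
 * (512 KiB) remain inconsistent, as the FIXME comments note.
 */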
174
175
176 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
177 uint32_t vendor2, uint32_t vendor3)
178 {
179 int i;
180 for (i = 0; i < 4; i++) {
181 dst[i] = vendor1 >> (8 * i);
182 dst[i + 4] = vendor2 >> (8 * i);
183 dst[i + 8] = vendor3 >> (8 * i);
184 }
185 dst[CPUID_VENDOR_SZ] = '\0';
186 }
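/*
 * Example: for a "GenuineIntel" part, CPUID.0 returns
 * EBX=0x756e6547 ("Genu"), EDX=0x49656e69 ("ineI"), ECX=0x6c65746e
 * ("ntel"); passing them as vendor1/vendor2/vendor3 above rebuilds the
 * 12-character vendor string byte by byte, least-significant byte
 * first, in (EBX, EDX, ECX) order.
 */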
187
188 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
189 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
190 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
191 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
192 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
193 CPUID_PSE36 | CPUID_FXSR)
194 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
195 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
196 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
197 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
198 CPUID_PAE | CPUID_SEP | CPUID_APIC)
199
200 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
201 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
202 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
203 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
204 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
205 /* partly implemented:
206 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
207 /* missing:
208 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
209 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
210 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
211 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
212 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
213 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
214 /* missing:
215 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
216 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
217 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
218 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
219 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
220
221 #ifdef TARGET_X86_64
222 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
223 #else
224 #define TCG_EXT2_X86_64_FEATURES 0
225 #endif
226
227 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
228 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
229 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
230 TCG_EXT2_X86_64_FEATURES)
231 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
232 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
233 #define TCG_EXT4_FEATURES 0
234 #define TCG_SVM_FEATURES 0
235 #define TCG_KVM_FEATURES 0
236 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
237 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
238 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
239 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
240 CPUID_7_0_EBX_ERMS)
241 /* missing:
242 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
243 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
244 CPUID_7_0_EBX_RDSEED */
245 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
246 CPUID_7_0_ECX_LA57)
247 #define TCG_7_0_EDX_FEATURES 0
248 #define TCG_APM_FEATURES 0
249 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
250 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
251 /* missing:
252 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
253
254 typedef struct FeatureWordInfo {
255 /* feature flag names are taken from "Intel Processor Identification and
256 * the CPUID Instruction" and AMD's "CPUID Specification".
257 * In cases of disagreement between feature naming conventions,
258 * aliases may be added.
259 */
260 const char *feat_names[32];
261 uint32_t cpuid_eax; /* Input EAX for CPUID */
262 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
263 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
264 int cpuid_reg; /* output register (R_* constant) */
265 uint32_t tcg_features; /* Feature flags supported by TCG */
266 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
267 uint32_t migratable_flags; /* Feature flags known to be migratable */
268 } FeatureWordInfo;
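/*
 * For illustration: the FEAT_7_0_EBX entry below describes the word
 * returned in EBX by CPUID with EAX=7, ECX=0 (cpuid_eax = 7,
 * cpuid_needs_ecx = true, cpuid_ecx = 0, cpuid_reg = R_EBX), and bit
 * names such as "smep" refer to the corresponding bit positions in
 * that register.
 */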
269
270 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
271 [FEAT_1_EDX] = {
272 .feat_names = {
273 "fpu", "vme", "de", "pse",
274 "tsc", "msr", "pae", "mce",
275 "cx8", "apic", NULL, "sep",
276 "mtrr", "pge", "mca", "cmov",
277 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
278 NULL, "ds" /* Intel dts */, "acpi", "mmx",
279 "fxsr", "sse", "sse2", "ss",
280 "ht" /* Intel htt */, "tm", "ia64", "pbe",
281 },
282 .cpuid_eax = 1, .cpuid_reg = R_EDX,
283 .tcg_features = TCG_FEATURES,
284 },
285 [FEAT_1_ECX] = {
286 .feat_names = {
287 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
288 "ds-cpl", "vmx", "smx", "est",
289 "tm2", "ssse3", "cid", NULL,
290 "fma", "cx16", "xtpr", "pdcm",
291 NULL, "pcid", "dca", "sse4.1",
292 "sse4.2", "x2apic", "movbe", "popcnt",
293 "tsc-deadline", "aes", "xsave", "osxsave",
294 "avx", "f16c", "rdrand", "hypervisor",
295 },
296 .cpuid_eax = 1, .cpuid_reg = R_ECX,
297 .tcg_features = TCG_EXT_FEATURES,
298 },
299 /* Feature names that are already defined for FEAT_1_EDX above but
300 * are also set in CPUID[8000_0001].EDX on AMD CPUs do not have their
301 * names repeated in feat_names below. They are copied automatically
302 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
303 */
304 [FEAT_8000_0001_EDX] = {
305 .feat_names = {
306 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
307 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
308 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
309 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
310 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
311 "nx", NULL, "mmxext", NULL /* mmx */,
312 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
313 NULL, "lm", "3dnowext", "3dnow",
314 },
315 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
316 .tcg_features = TCG_EXT2_FEATURES,
317 },
318 [FEAT_8000_0001_ECX] = {
319 .feat_names = {
320 "lahf-lm", "cmp-legacy", "svm", "extapic",
321 "cr8legacy", "abm", "sse4a", "misalignsse",
322 "3dnowprefetch", "osvw", "ibs", "xop",
323 "skinit", "wdt", NULL, "lwp",
324 "fma4", "tce", NULL, "nodeid-msr",
325 NULL, "tbm", "topoext", "perfctr-core",
326 "perfctr-nb", NULL, NULL, NULL,
327 NULL, NULL, NULL, NULL,
328 },
329 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
330 .tcg_features = TCG_EXT3_FEATURES,
331 },
332 [FEAT_C000_0001_EDX] = {
333 .feat_names = {
334 NULL, NULL, "xstore", "xstore-en",
335 NULL, NULL, "xcrypt", "xcrypt-en",
336 "ace2", "ace2-en", "phe", "phe-en",
337 "pmm", "pmm-en", NULL, NULL,
338 NULL, NULL, NULL, NULL,
339 NULL, NULL, NULL, NULL,
340 NULL, NULL, NULL, NULL,
341 NULL, NULL, NULL, NULL,
342 },
343 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
344 .tcg_features = TCG_EXT4_FEATURES,
345 },
346 [FEAT_KVM] = {
347 .feat_names = {
348 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
349 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
350 NULL, "kvm-pv-tlb-flush", NULL, NULL,
351 NULL, NULL, NULL, NULL,
352 NULL, NULL, NULL, NULL,
353 NULL, NULL, NULL, NULL,
354 "kvmclock-stable-bit", NULL, NULL, NULL,
355 NULL, NULL, NULL, NULL,
356 },
357 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
358 .tcg_features = TCG_KVM_FEATURES,
359 },
360 [FEAT_HYPERV_EAX] = {
361 .feat_names = {
362 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
363 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
364 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
365 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
366 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
367 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
368 NULL, NULL, NULL, NULL,
369 NULL, NULL, NULL, NULL,
370 NULL, NULL, NULL, NULL,
371 NULL, NULL, NULL, NULL,
372 NULL, NULL, NULL, NULL,
373 },
374 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
375 },
376 [FEAT_HYPERV_EBX] = {
377 .feat_names = {
378 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
379 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
380 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
381 NULL /* hv_create_port */, NULL /* hv_connect_port */,
382 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
383 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
384 NULL, NULL,
385 NULL, NULL, NULL, NULL,
386 NULL, NULL, NULL, NULL,
387 NULL, NULL, NULL, NULL,
388 NULL, NULL, NULL, NULL,
389 },
390 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
391 },
392 [FEAT_HYPERV_EDX] = {
393 .feat_names = {
394 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
395 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
396 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
397 NULL, NULL,
398 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
399 NULL, NULL, NULL, NULL,
400 NULL, NULL, NULL, NULL,
401 NULL, NULL, NULL, NULL,
402 NULL, NULL, NULL, NULL,
403 NULL, NULL, NULL, NULL,
404 },
405 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
406 },
407 [FEAT_SVM] = {
408 .feat_names = {
409 "npt", "lbrv", "svm-lock", "nrip-save",
410 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
411 NULL, NULL, "pause-filter", NULL,
412 "pfthreshold", NULL, NULL, NULL,
413 NULL, NULL, NULL, NULL,
414 NULL, NULL, NULL, NULL,
415 NULL, NULL, NULL, NULL,
416 NULL, NULL, NULL, NULL,
417 },
418 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
419 .tcg_features = TCG_SVM_FEATURES,
420 },
421 [FEAT_7_0_EBX] = {
422 .feat_names = {
423 "fsgsbase", "tsc-adjust", NULL, "bmi1",
424 "hle", "avx2", NULL, "smep",
425 "bmi2", "erms", "invpcid", "rtm",
426 NULL, NULL, "mpx", NULL,
427 "avx512f", "avx512dq", "rdseed", "adx",
428 "smap", "avx512ifma", "pcommit", "clflushopt",
429 "clwb", NULL, "avx512pf", "avx512er",
430 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
431 },
432 .cpuid_eax = 7,
433 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
434 .cpuid_reg = R_EBX,
435 .tcg_features = TCG_7_0_EBX_FEATURES,
436 },
437 [FEAT_7_0_ECX] = {
438 .feat_names = {
439 NULL, "avx512vbmi", "umip", "pku",
440 "ospke", NULL, "avx512vbmi2", NULL,
441 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
442 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
443 "la57", NULL, NULL, NULL,
444 NULL, NULL, "rdpid", NULL,
445 NULL, NULL, NULL, NULL,
446 NULL, NULL, NULL, NULL,
447 },
448 .cpuid_eax = 7,
449 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
450 .cpuid_reg = R_ECX,
451 .tcg_features = TCG_7_0_ECX_FEATURES,
452 },
453 [FEAT_7_0_EDX] = {
454 .feat_names = {
455 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
456 NULL, NULL, NULL, NULL,
457 NULL, NULL, NULL, NULL,
458 NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL,
461 NULL, NULL, "spec-ctrl", NULL,
462 NULL, NULL, NULL, NULL,
463 },
464 .cpuid_eax = 7,
465 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
466 .cpuid_reg = R_EDX,
467 .tcg_features = TCG_7_0_EDX_FEATURES,
468 },
469 [FEAT_8000_0007_EDX] = {
470 .feat_names = {
471 NULL, NULL, NULL, NULL,
472 NULL, NULL, NULL, NULL,
473 "invtsc", NULL, NULL, NULL,
474 NULL, NULL, NULL, NULL,
475 NULL, NULL, NULL, NULL,
476 NULL, NULL, NULL, NULL,
477 NULL, NULL, NULL, NULL,
478 NULL, NULL, NULL, NULL,
479 },
480 .cpuid_eax = 0x80000007,
481 .cpuid_reg = R_EDX,
482 .tcg_features = TCG_APM_FEATURES,
483 .unmigratable_flags = CPUID_APM_INVTSC,
484 },
485 [FEAT_8000_0008_EBX] = {
486 .feat_names = {
487 NULL, NULL, NULL, NULL,
488 NULL, NULL, NULL, NULL,
489 NULL, NULL, NULL, NULL,
490 "ibpb", NULL, NULL, NULL,
491 NULL, NULL, NULL, NULL,
492 NULL, NULL, NULL, NULL,
493 NULL, NULL, NULL, NULL,
494 NULL, NULL, NULL, NULL,
495 },
496 .cpuid_eax = 0x80000008,
497 .cpuid_reg = R_EBX,
498 .tcg_features = 0,
499 .unmigratable_flags = 0,
500 },
501 [FEAT_XSAVE] = {
502 .feat_names = {
503 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
504 NULL, NULL, NULL, NULL,
505 NULL, NULL, NULL, NULL,
506 NULL, NULL, NULL, NULL,
507 NULL, NULL, NULL, NULL,
508 NULL, NULL, NULL, NULL,
509 NULL, NULL, NULL, NULL,
510 NULL, NULL, NULL, NULL,
511 },
512 .cpuid_eax = 0xd,
513 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
514 .cpuid_reg = R_EAX,
515 .tcg_features = TCG_XSAVE_FEATURES,
516 },
517 [FEAT_6_EAX] = {
518 .feat_names = {
519 NULL, NULL, "arat", NULL,
520 NULL, NULL, NULL, NULL,
521 NULL, NULL, NULL, NULL,
522 NULL, NULL, NULL, NULL,
523 NULL, NULL, NULL, NULL,
524 NULL, NULL, NULL, NULL,
525 NULL, NULL, NULL, NULL,
526 NULL, NULL, NULL, NULL,
527 },
528 .cpuid_eax = 6, .cpuid_reg = R_EAX,
529 .tcg_features = TCG_6_EAX_FEATURES,
530 },
531 [FEAT_XSAVE_COMP_LO] = {
532 .cpuid_eax = 0xD,
533 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
534 .cpuid_reg = R_EAX,
535 .tcg_features = ~0U,
536 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
537 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
538 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
539 XSTATE_PKRU_MASK,
540 },
541 [FEAT_XSAVE_COMP_HI] = {
542 .cpuid_eax = 0xD,
543 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
544 .cpuid_reg = R_EDX,
545 .tcg_features = ~0U,
546 },
547 };
548
549 typedef struct X86RegisterInfo32 {
550 /* Name of register */
551 const char *name;
552 /* QAPI enum value register */
553 X86CPURegister32 qapi_enum;
554 } X86RegisterInfo32;
555
556 #define REGISTER(reg) \
557 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
558 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
559 REGISTER(EAX),
560 REGISTER(ECX),
561 REGISTER(EDX),
562 REGISTER(EBX),
563 REGISTER(ESP),
564 REGISTER(EBP),
565 REGISTER(ESI),
566 REGISTER(EDI),
567 };
568 #undef REGISTER
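/*
 * REGISTER(EAX), for instance, expands to
 *   [R_EAX] = { .name = "EAX", .qapi_enum = X86_CPU_REGISTER32_EAX },
 * so the table can be indexed by the same R_* constants used by the
 * cpuid_reg fields above.
 */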
569
570 typedef struct ExtSaveArea {
571 uint32_t feature, bits;
572 uint32_t offset, size;
573 } ExtSaveArea;
574
575 static const ExtSaveArea x86_ext_save_areas[] = {
576 [XSTATE_FP_BIT] = {
577 /* x87 FP state component is always enabled if XSAVE is supported */
578 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
579 /* x87 state is in the legacy region of the XSAVE area */
580 .offset = 0,
581 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
582 },
583 [XSTATE_SSE_BIT] = {
584 /* SSE state component is always enabled if XSAVE is supported */
585 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
586 /* SSE state is in the legacy region of the XSAVE area */
587 .offset = 0,
588 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
589 },
590 [XSTATE_YMM_BIT] =
591 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
592 .offset = offsetof(X86XSaveArea, avx_state),
593 .size = sizeof(XSaveAVX) },
594 [XSTATE_BNDREGS_BIT] =
595 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
596 .offset = offsetof(X86XSaveArea, bndreg_state),
597 .size = sizeof(XSaveBNDREG) },
598 [XSTATE_BNDCSR_BIT] =
599 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
600 .offset = offsetof(X86XSaveArea, bndcsr_state),
601 .size = sizeof(XSaveBNDCSR) },
602 [XSTATE_OPMASK_BIT] =
603 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
604 .offset = offsetof(X86XSaveArea, opmask_state),
605 .size = sizeof(XSaveOpmask) },
606 [XSTATE_ZMM_Hi256_BIT] =
607 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
608 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
609 .size = sizeof(XSaveZMM_Hi256) },
610 [XSTATE_Hi16_ZMM_BIT] =
611 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
612 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
613 .size = sizeof(XSaveHi16_ZMM) },
614 [XSTATE_PKRU_BIT] =
615 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
616 .offset = offsetof(X86XSaveArea, pkru_state),
617 .size = sizeof(XSavePKRU) },
618 };
619
620 static uint32_t xsave_area_size(uint64_t mask)
621 {
622 int i;
623 uint64_t ret = 0;
624
625 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
626 const ExtSaveArea *esa = &x86_ext_save_areas[i];
627 if ((mask >> i) & 1) {
628 ret = MAX(ret, esa->offset + esa->size);
629 }
630 }
631 return ret;
632 }
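/*
 * Example: xsave_area_size(XSTATE_FP_MASK | XSTATE_SSE_MASK |
 * XSTATE_YMM_MASK) evaluates to offsetof(X86XSaveArea, avx_state) +
 * sizeof(XSaveAVX), since the AVX component has the largest
 * offset + size of the three enabled bits; with the standard layout
 * this is 576 + 256 = 832 bytes.
 */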
633
634 static inline bool accel_uses_host_cpuid(void)
635 {
636 return kvm_enabled() || hvf_enabled();
637 }
638
639 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
640 {
641 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
642 cpu->env.features[FEAT_XSAVE_COMP_LO];
643 }
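/*
 * Example: a guest with only the x87, SSE and AVX states enabled has
 * FEAT_XSAVE_COMP_LO == 0x7 and FEAT_XSAVE_COMP_HI == 0, so this
 * helper returns 0x7 (bits XSTATE_FP_BIT, XSTATE_SSE_BIT and
 * XSTATE_YMM_BIT of the 64-bit component mask).
 */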
644
645 const char *get_register_name_32(unsigned int reg)
646 {
647 if (reg >= CPU_NB_REGS32) {
648 return NULL;
649 }
650 return x86_reg_info_32[reg].name;
651 }
652
653 /*
654 * Returns the set of feature flags that are supported and migratable by
655 * QEMU, for a given FeatureWord.
656 */
657 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
658 {
659 FeatureWordInfo *wi = &feature_word_info[w];
660 uint32_t r = 0;
661 int i;
662
663 for (i = 0; i < 32; i++) {
664 uint32_t f = 1U << i;
665
666 /* If the feature name is known, it is implicitly considered migratable,
667 * unless it is explicitly set in unmigratable_flags */
668 if ((wi->migratable_flags & f) ||
669 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
670 r |= f;
671 }
672 }
673 return r;
674 }
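/*
 * For example, "invtsc" in FEAT_8000_0007_EDX has a name but is listed
 * in unmigratable_flags (CPUID_APM_INVTSC above), so it is excluded
 * from the value returned here even though it is a known feature.
 */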
675
676 void host_cpuid(uint32_t function, uint32_t count,
677 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
678 {
679 uint32_t vec[4];
680
681 #ifdef __x86_64__
682 asm volatile("cpuid"
683 : "=a"(vec[0]), "=b"(vec[1]),
684 "=c"(vec[2]), "=d"(vec[3])
685 : "0"(function), "c"(count) : "cc");
686 #elif defined(__i386__)
687 asm volatile("pusha \n\t"
688 "cpuid \n\t"
689 "mov %%eax, 0(%2) \n\t"
690 "mov %%ebx, 4(%2) \n\t"
691 "mov %%ecx, 8(%2) \n\t"
692 "mov %%edx, 12(%2) \n\t"
693 "popa"
694 : : "a"(function), "c"(count), "S"(vec)
695 : "memory", "cc");
696 #else
697 abort();
698 #endif
699
700 if (eax)
701 *eax = vec[0];
702 if (ebx)
703 *ebx = vec[1];
704 if (ecx)
705 *ecx = vec[2];
706 if (edx)
707 *edx = vec[3];
708 }
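/*
 * Typical use: host_cpuid(0, 0, &eax, &ebx, &ecx, &edx) returns the
 * host's maximum basic leaf in EAX and the vendor string words in
 * EBX/EDX/ECX, as done by host_vendor_fms() below; NULL may be passed
 * for any output the caller does not need.
 */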
709
710 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
711 {
712 uint32_t eax, ebx, ecx, edx;
713
714 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
715 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
716
717 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
718 if (family) {
719 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
720 }
721 if (model) {
722 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
723 }
724 if (stepping) {
725 *stepping = eax & 0x0F;
726 }
727 }
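/*
 * Worked example: with CPUID.1 EAX = 0x000306c3 the code above yields
 * family = 0x6 + 0x00 = 6, model = 0xc | 0x30 = 0x3c (60) and
 * stepping = 3, i.e. the extended family/model fields are combined
 * with the base fields in the usual x86 way.
 */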
728
729 /* CPU class name definitions: */
730
731 /* Return type name for a given CPU model name
732 * Caller is responsible for freeing the returned string.
733 */
734 static char *x86_cpu_type_name(const char *model_name)
735 {
736 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
737 }
738
739 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
740 {
741 ObjectClass *oc;
742 char *typename;
743
744 if (cpu_model == NULL) {
745 return NULL;
746 }
747
748 typename = x86_cpu_type_name(cpu_model);
749 oc = object_class_by_name(typename);
750 g_free(typename);
751 return oc;
752 }
753
754 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
755 {
756 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
757 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
758 return g_strndup(class_name,
759 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
760 }
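/*
 * These two helpers are inverses of each other: a model name such as
 * "Nehalem" maps to the QOM type name obtained by appending
 * X86_CPU_TYPE_SUFFIX (e.g. "Nehalem-x86_64-cpu" on a 64-bit target),
 * and stripping that suffix recovers the model name accepted by "-cpu".
 */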
761
762 struct X86CPUDefinition {
763 const char *name;
764 uint32_t level;
765 uint32_t xlevel;
766 /* vendor is zero-terminated, 12 character ASCII string */
767 char vendor[CPUID_VENDOR_SZ + 1];
768 int family;
769 int model;
770 int stepping;
771 FeatureWordArray features;
772 const char *model_id;
773 };
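/*
 * Each entry of builtin_x86_defs below is registered as a CPU model
 * selectable with "-cpu <name>" (for example "-cpu qemu64" or
 * "-cpu Haswell"), with the listed feature words providing the default
 * CPUID bits for that model.
 */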
774
775 static X86CPUDefinition builtin_x86_defs[] = {
776 {
777 .name = "qemu64",
778 .level = 0xd,
779 .vendor = CPUID_VENDOR_AMD,
780 .family = 6,
781 .model = 6,
782 .stepping = 3,
783 .features[FEAT_1_EDX] =
784 PPRO_FEATURES |
785 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
786 CPUID_PSE36,
787 .features[FEAT_1_ECX] =
788 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
789 .features[FEAT_8000_0001_EDX] =
790 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
791 .features[FEAT_8000_0001_ECX] =
792 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
793 .xlevel = 0x8000000A,
794 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
795 },
796 {
797 .name = "phenom",
798 .level = 5,
799 .vendor = CPUID_VENDOR_AMD,
800 .family = 16,
801 .model = 2,
802 .stepping = 3,
803 /* Missing: CPUID_HT */
804 .features[FEAT_1_EDX] =
805 PPRO_FEATURES |
806 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
807 CPUID_PSE36 | CPUID_VME,
808 .features[FEAT_1_ECX] =
809 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
810 CPUID_EXT_POPCNT,
811 .features[FEAT_8000_0001_EDX] =
812 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
813 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
814 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
815 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
816 CPUID_EXT3_CR8LEG,
817 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
818 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
819 .features[FEAT_8000_0001_ECX] =
820 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
821 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
822 /* Missing: CPUID_SVM_LBRV */
823 .features[FEAT_SVM] =
824 CPUID_SVM_NPT,
825 .xlevel = 0x8000001A,
826 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
827 },
828 {
829 .name = "core2duo",
830 .level = 10,
831 .vendor = CPUID_VENDOR_INTEL,
832 .family = 6,
833 .model = 15,
834 .stepping = 11,
835 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
836 .features[FEAT_1_EDX] =
837 PPRO_FEATURES |
838 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
839 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
840 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
841 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
842 .features[FEAT_1_ECX] =
843 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
844 CPUID_EXT_CX16,
845 .features[FEAT_8000_0001_EDX] =
846 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
847 .features[FEAT_8000_0001_ECX] =
848 CPUID_EXT3_LAHF_LM,
849 .xlevel = 0x80000008,
850 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
851 },
852 {
853 .name = "kvm64",
854 .level = 0xd,
855 .vendor = CPUID_VENDOR_INTEL,
856 .family = 15,
857 .model = 6,
858 .stepping = 1,
859 /* Missing: CPUID_HT */
860 .features[FEAT_1_EDX] =
861 PPRO_FEATURES | CPUID_VME |
862 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
863 CPUID_PSE36,
864 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
865 .features[FEAT_1_ECX] =
866 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
867 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
868 .features[FEAT_8000_0001_EDX] =
869 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
870 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
871 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
872 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
873 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
874 .features[FEAT_8000_0001_ECX] =
875 0,
876 .xlevel = 0x80000008,
877 .model_id = "Common KVM processor"
878 },
879 {
880 .name = "qemu32",
881 .level = 4,
882 .vendor = CPUID_VENDOR_INTEL,
883 .family = 6,
884 .model = 6,
885 .stepping = 3,
886 .features[FEAT_1_EDX] =
887 PPRO_FEATURES,
888 .features[FEAT_1_ECX] =
889 CPUID_EXT_SSE3,
890 .xlevel = 0x80000004,
891 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
892 },
893 {
894 .name = "kvm32",
895 .level = 5,
896 .vendor = CPUID_VENDOR_INTEL,
897 .family = 15,
898 .model = 6,
899 .stepping = 1,
900 .features[FEAT_1_EDX] =
901 PPRO_FEATURES | CPUID_VME |
902 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
903 .features[FEAT_1_ECX] =
904 CPUID_EXT_SSE3,
905 .features[FEAT_8000_0001_ECX] =
906 0,
907 .xlevel = 0x80000008,
908 .model_id = "Common 32-bit KVM processor"
909 },
910 {
911 .name = "coreduo",
912 .level = 10,
913 .vendor = CPUID_VENDOR_INTEL,
914 .family = 6,
915 .model = 14,
916 .stepping = 8,
917 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
918 .features[FEAT_1_EDX] =
919 PPRO_FEATURES | CPUID_VME |
920 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
921 CPUID_SS,
922 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
923 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
924 .features[FEAT_1_ECX] =
925 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
926 .features[FEAT_8000_0001_EDX] =
927 CPUID_EXT2_NX,
928 .xlevel = 0x80000008,
929 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
930 },
931 {
932 .name = "486",
933 .level = 1,
934 .vendor = CPUID_VENDOR_INTEL,
935 .family = 4,
936 .model = 8,
937 .stepping = 0,
938 .features[FEAT_1_EDX] =
939 I486_FEATURES,
940 .xlevel = 0,
941 .model_id = "",
942 },
943 {
944 .name = "pentium",
945 .level = 1,
946 .vendor = CPUID_VENDOR_INTEL,
947 .family = 5,
948 .model = 4,
949 .stepping = 3,
950 .features[FEAT_1_EDX] =
951 PENTIUM_FEATURES,
952 .xlevel = 0,
953 .model_id = "",
954 },
955 {
956 .name = "pentium2",
957 .level = 2,
958 .vendor = CPUID_VENDOR_INTEL,
959 .family = 6,
960 .model = 5,
961 .stepping = 2,
962 .features[FEAT_1_EDX] =
963 PENTIUM2_FEATURES,
964 .xlevel = 0,
965 .model_id = "",
966 },
967 {
968 .name = "pentium3",
969 .level = 3,
970 .vendor = CPUID_VENDOR_INTEL,
971 .family = 6,
972 .model = 7,
973 .stepping = 3,
974 .features[FEAT_1_EDX] =
975 PENTIUM3_FEATURES,
976 .xlevel = 0,
977 .model_id = "",
978 },
979 {
980 .name = "athlon",
981 .level = 2,
982 .vendor = CPUID_VENDOR_AMD,
983 .family = 6,
984 .model = 2,
985 .stepping = 3,
986 .features[FEAT_1_EDX] =
987 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
988 CPUID_MCA,
989 .features[FEAT_8000_0001_EDX] =
990 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
991 .xlevel = 0x80000008,
992 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
993 },
994 {
995 .name = "n270",
996 .level = 10,
997 .vendor = CPUID_VENDOR_INTEL,
998 .family = 6,
999 .model = 28,
1000 .stepping = 2,
1001 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1002 .features[FEAT_1_EDX] =
1003 PPRO_FEATURES |
1004 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1005 CPUID_ACPI | CPUID_SS,
1006 /* Some CPUs have no CPUID_SEP */
1007 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1008 * CPUID_EXT_XTPR */
1009 .features[FEAT_1_ECX] =
1010 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1011 CPUID_EXT_MOVBE,
1012 .features[FEAT_8000_0001_EDX] =
1013 CPUID_EXT2_NX,
1014 .features[FEAT_8000_0001_ECX] =
1015 CPUID_EXT3_LAHF_LM,
1016 .xlevel = 0x80000008,
1017 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1018 },
1019 {
1020 .name = "Conroe",
1021 .level = 10,
1022 .vendor = CPUID_VENDOR_INTEL,
1023 .family = 6,
1024 .model = 15,
1025 .stepping = 3,
1026 .features[FEAT_1_EDX] =
1027 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1028 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1029 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1030 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1031 CPUID_DE | CPUID_FP87,
1032 .features[FEAT_1_ECX] =
1033 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1034 .features[FEAT_8000_0001_EDX] =
1035 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1036 .features[FEAT_8000_0001_ECX] =
1037 CPUID_EXT3_LAHF_LM,
1038 .xlevel = 0x80000008,
1039 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1040 },
1041 {
1042 .name = "Penryn",
1043 .level = 10,
1044 .vendor = CPUID_VENDOR_INTEL,
1045 .family = 6,
1046 .model = 23,
1047 .stepping = 3,
1048 .features[FEAT_1_EDX] =
1049 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1050 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1051 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1052 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1053 CPUID_DE | CPUID_FP87,
1054 .features[FEAT_1_ECX] =
1055 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1056 CPUID_EXT_SSE3,
1057 .features[FEAT_8000_0001_EDX] =
1058 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1059 .features[FEAT_8000_0001_ECX] =
1060 CPUID_EXT3_LAHF_LM,
1061 .xlevel = 0x80000008,
1062 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1063 },
1064 {
1065 .name = "Nehalem",
1066 .level = 11,
1067 .vendor = CPUID_VENDOR_INTEL,
1068 .family = 6,
1069 .model = 26,
1070 .stepping = 3,
1071 .features[FEAT_1_EDX] =
1072 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1073 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1074 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1075 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1076 CPUID_DE | CPUID_FP87,
1077 .features[FEAT_1_ECX] =
1078 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1079 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1080 .features[FEAT_8000_0001_EDX] =
1081 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1082 .features[FEAT_8000_0001_ECX] =
1083 CPUID_EXT3_LAHF_LM,
1084 .xlevel = 0x80000008,
1085 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1086 },
1087 {
1088 .name = "Nehalem-IBRS",
1089 .level = 11,
1090 .vendor = CPUID_VENDOR_INTEL,
1091 .family = 6,
1092 .model = 26,
1093 .stepping = 3,
1094 .features[FEAT_1_EDX] =
1095 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1096 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1097 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1098 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1099 CPUID_DE | CPUID_FP87,
1100 .features[FEAT_1_ECX] =
1101 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1102 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1103 .features[FEAT_7_0_EDX] =
1104 CPUID_7_0_EDX_SPEC_CTRL,
1105 .features[FEAT_8000_0001_EDX] =
1106 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1107 .features[FEAT_8000_0001_ECX] =
1108 CPUID_EXT3_LAHF_LM,
1109 .xlevel = 0x80000008,
1110 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1111 },
1112 {
1113 .name = "Westmere",
1114 .level = 11,
1115 .vendor = CPUID_VENDOR_INTEL,
1116 .family = 6,
1117 .model = 44,
1118 .stepping = 1,
1119 .features[FEAT_1_EDX] =
1120 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1121 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1122 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1123 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1124 CPUID_DE | CPUID_FP87,
1125 .features[FEAT_1_ECX] =
1126 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1127 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1128 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1129 .features[FEAT_8000_0001_EDX] =
1130 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1131 .features[FEAT_8000_0001_ECX] =
1132 CPUID_EXT3_LAHF_LM,
1133 .features[FEAT_6_EAX] =
1134 CPUID_6_EAX_ARAT,
1135 .xlevel = 0x80000008,
1136 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1137 },
1138 {
1139 .name = "Westmere-IBRS",
1140 .level = 11,
1141 .vendor = CPUID_VENDOR_INTEL,
1142 .family = 6,
1143 .model = 44,
1144 .stepping = 1,
1145 .features[FEAT_1_EDX] =
1146 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1147 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1148 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1149 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1150 CPUID_DE | CPUID_FP87,
1151 .features[FEAT_1_ECX] =
1152 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1153 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1154 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1155 .features[FEAT_8000_0001_EDX] =
1156 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1157 .features[FEAT_8000_0001_ECX] =
1158 CPUID_EXT3_LAHF_LM,
1159 .features[FEAT_7_0_EDX] =
1160 CPUID_7_0_EDX_SPEC_CTRL,
1161 .features[FEAT_6_EAX] =
1162 CPUID_6_EAX_ARAT,
1163 .xlevel = 0x80000008,
1164 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1165 },
1166 {
1167 .name = "SandyBridge",
1168 .level = 0xd,
1169 .vendor = CPUID_VENDOR_INTEL,
1170 .family = 6,
1171 .model = 42,
1172 .stepping = 1,
1173 .features[FEAT_1_EDX] =
1174 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1175 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1176 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1177 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1178 CPUID_DE | CPUID_FP87,
1179 .features[FEAT_1_ECX] =
1180 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1181 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1182 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1183 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1184 CPUID_EXT_SSE3,
1185 .features[FEAT_8000_0001_EDX] =
1186 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1187 CPUID_EXT2_SYSCALL,
1188 .features[FEAT_8000_0001_ECX] =
1189 CPUID_EXT3_LAHF_LM,
1190 .features[FEAT_XSAVE] =
1191 CPUID_XSAVE_XSAVEOPT,
1192 .features[FEAT_6_EAX] =
1193 CPUID_6_EAX_ARAT,
1194 .xlevel = 0x80000008,
1195 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1196 },
1197 {
1198 .name = "SandyBridge-IBRS",
1199 .level = 0xd,
1200 .vendor = CPUID_VENDOR_INTEL,
1201 .family = 6,
1202 .model = 42,
1203 .stepping = 1,
1204 .features[FEAT_1_EDX] =
1205 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1206 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1207 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1208 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1209 CPUID_DE | CPUID_FP87,
1210 .features[FEAT_1_ECX] =
1211 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1212 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1213 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1214 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1215 CPUID_EXT_SSE3,
1216 .features[FEAT_8000_0001_EDX] =
1217 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1218 CPUID_EXT2_SYSCALL,
1219 .features[FEAT_8000_0001_ECX] =
1220 CPUID_EXT3_LAHF_LM,
1221 .features[FEAT_7_0_EDX] =
1222 CPUID_7_0_EDX_SPEC_CTRL,
1223 .features[FEAT_XSAVE] =
1224 CPUID_XSAVE_XSAVEOPT,
1225 .features[FEAT_6_EAX] =
1226 CPUID_6_EAX_ARAT,
1227 .xlevel = 0x80000008,
1228 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1229 },
1230 {
1231 .name = "IvyBridge",
1232 .level = 0xd,
1233 .vendor = CPUID_VENDOR_INTEL,
1234 .family = 6,
1235 .model = 58,
1236 .stepping = 9,
1237 .features[FEAT_1_EDX] =
1238 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1239 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1240 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1241 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1242 CPUID_DE | CPUID_FP87,
1243 .features[FEAT_1_ECX] =
1244 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1245 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1246 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1247 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1248 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1249 .features[FEAT_7_0_EBX] =
1250 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1251 CPUID_7_0_EBX_ERMS,
1252 .features[FEAT_8000_0001_EDX] =
1253 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1254 CPUID_EXT2_SYSCALL,
1255 .features[FEAT_8000_0001_ECX] =
1256 CPUID_EXT3_LAHF_LM,
1257 .features[FEAT_XSAVE] =
1258 CPUID_XSAVE_XSAVEOPT,
1259 .features[FEAT_6_EAX] =
1260 CPUID_6_EAX_ARAT,
1261 .xlevel = 0x80000008,
1262 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1263 },
1264 {
1265 .name = "IvyBridge-IBRS",
1266 .level = 0xd,
1267 .vendor = CPUID_VENDOR_INTEL,
1268 .family = 6,
1269 .model = 58,
1270 .stepping = 9,
1271 .features[FEAT_1_EDX] =
1272 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1273 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1274 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1275 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1276 CPUID_DE | CPUID_FP87,
1277 .features[FEAT_1_ECX] =
1278 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1279 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1280 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1281 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1282 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1283 .features[FEAT_7_0_EBX] =
1284 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1285 CPUID_7_0_EBX_ERMS,
1286 .features[FEAT_8000_0001_EDX] =
1287 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1288 CPUID_EXT2_SYSCALL,
1289 .features[FEAT_8000_0001_ECX] =
1290 CPUID_EXT3_LAHF_LM,
1291 .features[FEAT_7_0_EDX] =
1292 CPUID_7_0_EDX_SPEC_CTRL,
1293 .features[FEAT_XSAVE] =
1294 CPUID_XSAVE_XSAVEOPT,
1295 .features[FEAT_6_EAX] =
1296 CPUID_6_EAX_ARAT,
1297 .xlevel = 0x80000008,
1298 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1299 },
1300 {
1301 .name = "Haswell-noTSX",
1302 .level = 0xd,
1303 .vendor = CPUID_VENDOR_INTEL,
1304 .family = 6,
1305 .model = 60,
1306 .stepping = 1,
1307 .features[FEAT_1_EDX] =
1308 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1309 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1310 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1311 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1312 CPUID_DE | CPUID_FP87,
1313 .features[FEAT_1_ECX] =
1314 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1315 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1316 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1317 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1318 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1319 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1320 .features[FEAT_8000_0001_EDX] =
1321 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1322 CPUID_EXT2_SYSCALL,
1323 .features[FEAT_8000_0001_ECX] =
1324 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1325 .features[FEAT_7_0_EBX] =
1326 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1327 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1328 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1329 .features[FEAT_XSAVE] =
1330 CPUID_XSAVE_XSAVEOPT,
1331 .features[FEAT_6_EAX] =
1332 CPUID_6_EAX_ARAT,
1333 .xlevel = 0x80000008,
1334 .model_id = "Intel Core Processor (Haswell, no TSX)",
1335 },
1336 {
1337 .name = "Haswell-noTSX-IBRS",
1338 .level = 0xd,
1339 .vendor = CPUID_VENDOR_INTEL,
1340 .family = 6,
1341 .model = 60,
1342 .stepping = 1,
1343 .features[FEAT_1_EDX] =
1344 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1345 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1346 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1347 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1348 CPUID_DE | CPUID_FP87,
1349 .features[FEAT_1_ECX] =
1350 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1351 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1352 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1353 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1354 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1355 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1356 .features[FEAT_8000_0001_EDX] =
1357 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1358 CPUID_EXT2_SYSCALL,
1359 .features[FEAT_8000_0001_ECX] =
1360 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1361 .features[FEAT_7_0_EDX] =
1362 CPUID_7_0_EDX_SPEC_CTRL,
1363 .features[FEAT_7_0_EBX] =
1364 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1365 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1366 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1367 .features[FEAT_XSAVE] =
1368 CPUID_XSAVE_XSAVEOPT,
1369 .features[FEAT_6_EAX] =
1370 CPUID_6_EAX_ARAT,
1371 .xlevel = 0x80000008,
1372 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1373 },
1374 {
1375 .name = "Haswell",
1376 .level = 0xd,
1377 .vendor = CPUID_VENDOR_INTEL,
1378 .family = 6,
1379 .model = 60,
1380 .stepping = 4,
1381 .features[FEAT_1_EDX] =
1382 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1383 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1384 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1385 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1386 CPUID_DE | CPUID_FP87,
1387 .features[FEAT_1_ECX] =
1388 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1389 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1390 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1391 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1392 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1393 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1394 .features[FEAT_8000_0001_EDX] =
1395 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1396 CPUID_EXT2_SYSCALL,
1397 .features[FEAT_8000_0001_ECX] =
1398 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1399 .features[FEAT_7_0_EBX] =
1400 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1401 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1402 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1403 CPUID_7_0_EBX_RTM,
1404 .features[FEAT_XSAVE] =
1405 CPUID_XSAVE_XSAVEOPT,
1406 .features[FEAT_6_EAX] =
1407 CPUID_6_EAX_ARAT,
1408 .xlevel = 0x80000008,
1409 .model_id = "Intel Core Processor (Haswell)",
1410 },
1411 {
1412 .name = "Haswell-IBRS",
1413 .level = 0xd,
1414 .vendor = CPUID_VENDOR_INTEL,
1415 .family = 6,
1416 .model = 60,
1417 .stepping = 4,
1418 .features[FEAT_1_EDX] =
1419 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1420 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1421 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1422 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1423 CPUID_DE | CPUID_FP87,
1424 .features[FEAT_1_ECX] =
1425 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1426 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1427 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1428 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1429 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1430 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1431 .features[FEAT_8000_0001_EDX] =
1432 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1433 CPUID_EXT2_SYSCALL,
1434 .features[FEAT_8000_0001_ECX] =
1435 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1436 .features[FEAT_7_0_EDX] =
1437 CPUID_7_0_EDX_SPEC_CTRL,
1438 .features[FEAT_7_0_EBX] =
1439 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1440 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1441 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1442 CPUID_7_0_EBX_RTM,
1443 .features[FEAT_XSAVE] =
1444 CPUID_XSAVE_XSAVEOPT,
1445 .features[FEAT_6_EAX] =
1446 CPUID_6_EAX_ARAT,
1447 .xlevel = 0x80000008,
1448 .model_id = "Intel Core Processor (Haswell, IBRS)",
1449 },
1450 {
1451 .name = "Broadwell-noTSX",
1452 .level = 0xd,
1453 .vendor = CPUID_VENDOR_INTEL,
1454 .family = 6,
1455 .model = 61,
1456 .stepping = 2,
1457 .features[FEAT_1_EDX] =
1458 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1459 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1460 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1461 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1462 CPUID_DE | CPUID_FP87,
1463 .features[FEAT_1_ECX] =
1464 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1465 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1466 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1467 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1468 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1469 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1470 .features[FEAT_8000_0001_EDX] =
1471 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1472 CPUID_EXT2_SYSCALL,
1473 .features[FEAT_8000_0001_ECX] =
1474 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1475 .features[FEAT_7_0_EBX] =
1476 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1477 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1478 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1479 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1480 CPUID_7_0_EBX_SMAP,
1481 .features[FEAT_XSAVE] =
1482 CPUID_XSAVE_XSAVEOPT,
1483 .features[FEAT_6_EAX] =
1484 CPUID_6_EAX_ARAT,
1485 .xlevel = 0x80000008,
1486 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1487 },
1488 {
1489 .name = "Broadwell-noTSX-IBRS",
1490 .level = 0xd,
1491 .vendor = CPUID_VENDOR_INTEL,
1492 .family = 6,
1493 .model = 61,
1494 .stepping = 2,
1495 .features[FEAT_1_EDX] =
1496 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1497 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1498 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1499 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1500 CPUID_DE | CPUID_FP87,
1501 .features[FEAT_1_ECX] =
1502 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1503 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1504 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1505 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1506 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1507 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1508 .features[FEAT_8000_0001_EDX] =
1509 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1510 CPUID_EXT2_SYSCALL,
1511 .features[FEAT_8000_0001_ECX] =
1512 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1513 .features[FEAT_7_0_EDX] =
1514 CPUID_7_0_EDX_SPEC_CTRL,
1515 .features[FEAT_7_0_EBX] =
1516 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1517 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1518 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1519 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1520 CPUID_7_0_EBX_SMAP,
1521 .features[FEAT_XSAVE] =
1522 CPUID_XSAVE_XSAVEOPT,
1523 .features[FEAT_6_EAX] =
1524 CPUID_6_EAX_ARAT,
1525 .xlevel = 0x80000008,
1526 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
1527 },
1528 {
1529 .name = "Broadwell",
1530 .level = 0xd,
1531 .vendor = CPUID_VENDOR_INTEL,
1532 .family = 6,
1533 .model = 61,
1534 .stepping = 2,
1535 .features[FEAT_1_EDX] =
1536 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1537 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1538 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1539 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1540 CPUID_DE | CPUID_FP87,
1541 .features[FEAT_1_ECX] =
1542 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1543 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1544 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1545 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1546 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1547 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1548 .features[FEAT_8000_0001_EDX] =
1549 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1550 CPUID_EXT2_SYSCALL,
1551 .features[FEAT_8000_0001_ECX] =
1552 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1553 .features[FEAT_7_0_EBX] =
1554 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1555 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1556 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1557 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1558 CPUID_7_0_EBX_SMAP,
1559 .features[FEAT_XSAVE] =
1560 CPUID_XSAVE_XSAVEOPT,
1561 .features[FEAT_6_EAX] =
1562 CPUID_6_EAX_ARAT,
1563 .xlevel = 0x80000008,
1564 .model_id = "Intel Core Processor (Broadwell)",
1565 },
1566 {
1567 .name = "Broadwell-IBRS",
1568 .level = 0xd,
1569 .vendor = CPUID_VENDOR_INTEL,
1570 .family = 6,
1571 .model = 61,
1572 .stepping = 2,
1573 .features[FEAT_1_EDX] =
1574 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1575 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1576 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1577 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1578 CPUID_DE | CPUID_FP87,
1579 .features[FEAT_1_ECX] =
1580 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1581 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1582 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1583 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1584 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1585 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1586 .features[FEAT_8000_0001_EDX] =
1587 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1588 CPUID_EXT2_SYSCALL,
1589 .features[FEAT_8000_0001_ECX] =
1590 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1591 .features[FEAT_7_0_EDX] =
1592 CPUID_7_0_EDX_SPEC_CTRL,
1593 .features[FEAT_7_0_EBX] =
1594 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1595 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1596 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1597 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1598 CPUID_7_0_EBX_SMAP,
1599 .features[FEAT_XSAVE] =
1600 CPUID_XSAVE_XSAVEOPT,
1601 .features[FEAT_6_EAX] =
1602 CPUID_6_EAX_ARAT,
1603 .xlevel = 0x80000008,
1604 .model_id = "Intel Core Processor (Broadwell, IBRS)",
1605 },
1606 {
1607 .name = "Skylake-Client",
1608 .level = 0xd,
1609 .vendor = CPUID_VENDOR_INTEL,
1610 .family = 6,
1611 .model = 94,
1612 .stepping = 3,
1613 .features[FEAT_1_EDX] =
1614 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1615 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1616 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1617 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1618 CPUID_DE | CPUID_FP87,
1619 .features[FEAT_1_ECX] =
1620 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1621 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1622 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1623 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1624 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1625 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1626 .features[FEAT_8000_0001_EDX] =
1627 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1628 CPUID_EXT2_SYSCALL,
1629 .features[FEAT_8000_0001_ECX] =
1630 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1631 .features[FEAT_7_0_EBX] =
1632 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1633 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1634 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1635 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1636 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1637 /* Missing: XSAVES (not supported by some Linux versions,
1638 * including v4.1 to v4.12).
1639 * KVM doesn't yet expose any XSAVES state save component,
1640 * and the only one defined in Skylake (processor tracing)
1641 * probably will block migration anyway.
1642 */
1643 .features[FEAT_XSAVE] =
1644 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1645 CPUID_XSAVE_XGETBV1,
1646 .features[FEAT_6_EAX] =
1647 CPUID_6_EAX_ARAT,
1648 .xlevel = 0x80000008,
1649 .model_id = "Intel Core Processor (Skylake)",
1650 },
1651 {
1652 .name = "Skylake-Client-IBRS",
1653 .level = 0xd,
1654 .vendor = CPUID_VENDOR_INTEL,
1655 .family = 6,
1656 .model = 94,
1657 .stepping = 3,
1658 .features[FEAT_1_EDX] =
1659 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1660 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1661 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1662 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1663 CPUID_DE | CPUID_FP87,
1664 .features[FEAT_1_ECX] =
1665 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1666 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1667 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1668 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1669 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1670 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1671 .features[FEAT_8000_0001_EDX] =
1672 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1673 CPUID_EXT2_SYSCALL,
1674 .features[FEAT_8000_0001_ECX] =
1675 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1676 .features[FEAT_7_0_EDX] =
1677 CPUID_7_0_EDX_SPEC_CTRL,
1678 .features[FEAT_7_0_EBX] =
1679 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1680 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1681 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1682 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1683 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1684 /* Missing: XSAVES (not supported by some Linux versions,
1685 * including v4.1 to v4.12).
1686 * KVM doesn't yet expose any XSAVES state save component,
1687 * and the only one defined in Skylake (processor tracing)
1688 * probably will block migration anyway.
1689 */
1690 .features[FEAT_XSAVE] =
1691 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1692 CPUID_XSAVE_XGETBV1,
1693 .features[FEAT_6_EAX] =
1694 CPUID_6_EAX_ARAT,
1695 .xlevel = 0x80000008,
1696 .model_id = "Intel Core Processor (Skylake, IBRS)",
1697 },
1698 {
1699 .name = "Skylake-Server",
1700 .level = 0xd,
1701 .vendor = CPUID_VENDOR_INTEL,
1702 .family = 6,
1703 .model = 85,
1704 .stepping = 4,
1705 .features[FEAT_1_EDX] =
1706 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1707 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1708 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1709 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1710 CPUID_DE | CPUID_FP87,
1711 .features[FEAT_1_ECX] =
1712 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1713 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1714 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1715 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1716 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1717 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1718 .features[FEAT_8000_0001_EDX] =
1719 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1720 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1721 .features[FEAT_8000_0001_ECX] =
1722 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1723 .features[FEAT_7_0_EBX] =
1724 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1725 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1726 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1727 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1728 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1729 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1730 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1731 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
1732 /* Missing: XSAVES (not supported by some Linux versions,
1733 * including v4.1 to v4.12).
1734 * KVM doesn't yet expose any XSAVES state save component,
1735 * and the only one defined in Skylake (processor tracing)
1736 * probably will block migration anyway.
1737 */
1738 .features[FEAT_XSAVE] =
1739 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1740 CPUID_XSAVE_XGETBV1,
1741 .features[FEAT_6_EAX] =
1742 CPUID_6_EAX_ARAT,
1743 .xlevel = 0x80000008,
1744 .model_id = "Intel Xeon Processor (Skylake)",
1745 },
1746 {
1747 .name = "Skylake-Server-IBRS",
1748 .level = 0xd,
1749 .vendor = CPUID_VENDOR_INTEL,
1750 .family = 6,
1751 .model = 85,
1752 .stepping = 4,
1753 .features[FEAT_1_EDX] =
1754 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1755 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1756 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1757 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1758 CPUID_DE | CPUID_FP87,
1759 .features[FEAT_1_ECX] =
1760 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1761 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1762 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1763 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1764 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1765 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1766 .features[FEAT_8000_0001_EDX] =
1767 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1768 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1769 .features[FEAT_8000_0001_ECX] =
1770 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1771 .features[FEAT_7_0_EDX] =
1772 CPUID_7_0_EDX_SPEC_CTRL,
1773 .features[FEAT_7_0_EBX] =
1774 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1775 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1776 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1777 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1778 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1779 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1780 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1781 CPUID_7_0_EBX_AVX512VL,
1782 /* Missing: XSAVES (not supported by some Linux versions,
1783 * including v4.1 to v4.12).
1784 * KVM doesn't yet expose any XSAVES state save component,
1785 * and the only one defined in Skylake (processor tracing)
1786 * probably will block migration anyway.
1787 */
1788 .features[FEAT_XSAVE] =
1789 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1790 CPUID_XSAVE_XGETBV1,
1791 .features[FEAT_6_EAX] =
1792 CPUID_6_EAX_ARAT,
1793 .xlevel = 0x80000008,
1794 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
1795 },
1796 {
1797 .name = "Opteron_G1",
1798 .level = 5,
1799 .vendor = CPUID_VENDOR_AMD,
1800 .family = 15,
1801 .model = 6,
1802 .stepping = 1,
1803 .features[FEAT_1_EDX] =
1804 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1805 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1806 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1807 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1808 CPUID_DE | CPUID_FP87,
1809 .features[FEAT_1_ECX] =
1810 CPUID_EXT_SSE3,
1811 .features[FEAT_8000_0001_EDX] =
1812 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1813 .xlevel = 0x80000008,
1814 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1815 },
1816 {
1817 .name = "Opteron_G2",
1818 .level = 5,
1819 .vendor = CPUID_VENDOR_AMD,
1820 .family = 15,
1821 .model = 6,
1822 .stepping = 1,
1823 .features[FEAT_1_EDX] =
1824 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1825 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1826 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1827 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1828 CPUID_DE | CPUID_FP87,
1829 .features[FEAT_1_ECX] =
1830 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1831 /* Missing: CPUID_EXT2_RDTSCP */
1832 .features[FEAT_8000_0001_EDX] =
1833 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1834 .features[FEAT_8000_0001_ECX] =
1835 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1836 .xlevel = 0x80000008,
1837 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1838 },
1839 {
1840 .name = "Opteron_G3",
1841 .level = 5,
1842 .vendor = CPUID_VENDOR_AMD,
1843 .family = 16,
1844 .model = 2,
1845 .stepping = 3,
1846 .features[FEAT_1_EDX] =
1847 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1848 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1849 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1850 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1851 CPUID_DE | CPUID_FP87,
1852 .features[FEAT_1_ECX] =
1853 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1854 CPUID_EXT_SSE3,
1855 /* Missing: CPUID_EXT2_RDTSCP */
1856 .features[FEAT_8000_0001_EDX] =
1857 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1858 .features[FEAT_8000_0001_ECX] =
1859 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1860 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1861 .xlevel = 0x80000008,
1862 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1863 },
1864 {
1865 .name = "Opteron_G4",
1866 .level = 0xd,
1867 .vendor = CPUID_VENDOR_AMD,
1868 .family = 21,
1869 .model = 1,
1870 .stepping = 2,
1871 .features[FEAT_1_EDX] =
1872 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1873 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1874 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1875 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1876 CPUID_DE | CPUID_FP87,
1877 .features[FEAT_1_ECX] =
1878 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1879 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1880 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1881 CPUID_EXT_SSE3,
1882 /* Missing: CPUID_EXT2_RDTSCP */
1883 .features[FEAT_8000_0001_EDX] =
1884 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1885 CPUID_EXT2_SYSCALL,
1886 .features[FEAT_8000_0001_ECX] =
1887 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1888 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1889 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1890 CPUID_EXT3_LAHF_LM,
1891 /* no xsaveopt! */
1892 .xlevel = 0x8000001A,
1893 .model_id = "AMD Opteron 62xx class CPU",
1894 },
1895 {
1896 .name = "Opteron_G5",
1897 .level = 0xd,
1898 .vendor = CPUID_VENDOR_AMD,
1899 .family = 21,
1900 .model = 2,
1901 .stepping = 0,
1902 .features[FEAT_1_EDX] =
1903 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1904 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1905 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1906 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1907 CPUID_DE | CPUID_FP87,
1908 .features[FEAT_1_ECX] =
1909 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1910 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1911 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1912 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1913 /* Missing: CPUID_EXT2_RDTSCP */
1914 .features[FEAT_8000_0001_EDX] =
1915 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1916 CPUID_EXT2_SYSCALL,
1917 .features[FEAT_8000_0001_ECX] =
1918 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1919 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1920 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1921 CPUID_EXT3_LAHF_LM,
1922 /* no xsaveopt! */
1923 .xlevel = 0x8000001A,
1924 .model_id = "AMD Opteron 63xx class CPU",
1925 },
1926 {
1927 .name = "EPYC",
1928 .level = 0xd,
1929 .vendor = CPUID_VENDOR_AMD,
1930 .family = 23,
1931 .model = 1,
1932 .stepping = 2,
1933 .features[FEAT_1_EDX] =
1934 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
1935 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
1936 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
1937 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
1938 CPUID_VME | CPUID_FP87,
1939 .features[FEAT_1_ECX] =
1940 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
1941 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
1942 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1943 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
1944 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1945 .features[FEAT_8000_0001_EDX] =
1946 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
1947 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
1948 CPUID_EXT2_SYSCALL,
1949 .features[FEAT_8000_0001_ECX] =
1950 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
1951 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
1952 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1953 .features[FEAT_7_0_EBX] =
1954 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
1955 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
1956 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
1957 CPUID_7_0_EBX_SHA_NI,
1958 /* Missing: XSAVES (not supported by some Linux versions,
1959 * including v4.1 to v4.12).
1960 * KVM doesn't yet expose any XSAVES state save component.
1961 */
1962 .features[FEAT_XSAVE] =
1963 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1964 CPUID_XSAVE_XGETBV1,
1965 .features[FEAT_6_EAX] =
1966 CPUID_6_EAX_ARAT,
1967 .xlevel = 0x8000000A,
1968 .model_id = "AMD EPYC Processor",
1969 },
1970 {
1971 .name = "EPYC-IBPB",
1972 .level = 0xd,
1973 .vendor = CPUID_VENDOR_AMD,
1974 .family = 23,
1975 .model = 1,
1976 .stepping = 2,
1977 .features[FEAT_1_EDX] =
1978 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
1979 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
1980 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
1981 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
1982 CPUID_VME | CPUID_FP87,
1983 .features[FEAT_1_ECX] =
1984 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
1985 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
1986 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1987 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
1988 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1989 .features[FEAT_8000_0001_EDX] =
1990 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
1991 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
1992 CPUID_EXT2_SYSCALL,
1993 .features[FEAT_8000_0001_ECX] =
1994 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
1995 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
1996 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1997 .features[FEAT_8000_0008_EBX] =
1998 CPUID_8000_0008_EBX_IBPB,
1999 .features[FEAT_7_0_EBX] =
2000 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2001 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2002 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2003 CPUID_7_0_EBX_SHA_NI,
2004 /* Missing: XSAVES (not supported by some Linux versions,
2005 * including v4.1 to v4.12).
2006 * KVM doesn't yet expose any XSAVES state save component.
2007 */
2008 .features[FEAT_XSAVE] =
2009 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2010 CPUID_XSAVE_XGETBV1,
2011 .features[FEAT_6_EAX] =
2012 CPUID_6_EAX_ARAT,
2013 .xlevel = 0x8000000A,
2014 .model_id = "AMD EPYC Processor (with IBPB)",
2015 },
2016 };
2017
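/* Simple property name/value pair, used for the accelerator-specific
 * default-property tables below.
 */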
2018 typedef struct PropValue {
2019 const char *prop, *value;
2020 } PropValue;
2021
2022 /* KVM-specific features that are automatically added/removed
2023 * from all CPU models when KVM is enabled.
2024 */
2025 static PropValue kvm_default_props[] = {
2026 { "kvmclock", "on" },
2027 { "kvm-nopiodelay", "on" },
2028 { "kvm-asyncpf", "on" },
2029 { "kvm-steal-time", "on" },
2030 { "kvm-pv-eoi", "on" },
2031 { "kvmclock-stable-bit", "on" },
2032 { "x2apic", "on" },
2033 { "acpi", "off" },
2034 { "monitor", "off" },
2035 { "svm", "off" },
2036 { NULL, NULL },
2037 };
2038
2039 /* TCG-specific defaults that override all CPU models when using TCG
2040 */
2041 static PropValue tcg_default_props[] = {
2042 { "vme", "off" },
2043 { NULL, NULL },
2044 };
2045
2046
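/* Change the default value of a property listed in kvm_default_props above.
 * Only properties already present in that table may be changed (enforced by
 * the assert below); x86_cpu_load_def() uses this to turn "x2apic" off when
 * there is no in-kernel irqchip.
 */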
2047 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2048 {
2049 PropValue *pv;
2050 for (pv = kvm_default_props; pv->prop; pv++) {
2051 if (!strcmp(pv->prop, prop)) {
2052 pv->value = value;
2053 break;
2054 }
2055 }
2056
2057 /* It is valid to call this function only for properties that
2058 * are already present in the kvm_default_props table.
2059 */
2060 assert(pv->prop);
2061 }
2062
2063 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2064 bool migratable_only);
2065
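/* Return true if KVM reports support for LMCE (local machine check
 * exception); always false when KVM support is not compiled in.
 */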
2066 static bool lmce_supported(void)
2067 {
2068 uint64_t mce_cap = 0;
2069
2070 #ifdef CONFIG_KVM
2071 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2072 return false;
2073 }
2074 #endif
2075
2076 return !!(mce_cap & MCG_LMCE_P);
2077 }
2078
2079 #define CPUID_MODEL_ID_SZ 48
2080
2081 /**
2082 * cpu_x86_fill_model_id:
2083 * Get CPUID model ID string from host CPU.
2084 *
2085 * @str should have at least CPUID_MODEL_ID_SZ bytes
2086 *
2087 * The function does NOT add a null terminator to the string
2088 * automatically.
2089 */
2090 static int cpu_x86_fill_model_id(char *str)
2091 {
2092 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2093 int i;
2094
2095 for (i = 0; i < 3; i++) {
2096 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2097 memcpy(str + i * 16 + 0, &eax, 4);
2098 memcpy(str + i * 16 + 4, &ebx, 4);
2099 memcpy(str + i * 16 + 8, &ecx, 4);
2100 memcpy(str + i * 16 + 12, &edx, 4);
2101 }
2102 return 0;
2103 }
2104
2105 static Property max_x86_cpu_properties[] = {
2106 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2107 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2108 DEFINE_PROP_END_OF_LIST()
2109 };
2110
2111 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2112 {
2113 DeviceClass *dc = DEVICE_CLASS(oc);
2114 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2115
2116 xcc->ordering = 9;
2117
2118 xcc->model_description =
2119 "Enables all features supported by the accelerator in the current host";
2120
2121 dc->props = max_x86_cpu_properties;
2122 }
2123
2124 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2125
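/* Instance init for the "max" CPU model: when the accelerator uses host
 * CPUID (KVM/HVF), copy vendor, family/model/stepping, model-id and the
 * minimum CPUID levels from the host; otherwise fall back to a fixed
 * TCG-only model.
 */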
2126 static void max_x86_cpu_initfn(Object *obj)
2127 {
2128 X86CPU *cpu = X86_CPU(obj);
2129 CPUX86State *env = &cpu->env;
2130 KVMState *s = kvm_state;
2131
2132 /* We can't fill the features array here because we don't know yet if
2133 * "migratable" is true or false.
2134 */
2135 cpu->max_features = true;
2136
2137 if (accel_uses_host_cpuid()) {
2138 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2139 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2140 int family, model, stepping;
2141 X86CPUDefinition host_cpudef = { };
2142 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2143
2144 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2145 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2146
2147 host_vendor_fms(vendor, &family, &model, &stepping);
2148
2149 cpu_x86_fill_model_id(model_id);
2150
2151 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2152 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2153 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2154 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2155 &error_abort);
2156 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2157 &error_abort);
2158
2159 if (kvm_enabled()) {
2160 env->cpuid_min_level =
2161 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2162 env->cpuid_min_xlevel =
2163 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2164 env->cpuid_min_xlevel2 =
2165 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2166 } else {
2167 env->cpuid_min_level =
2168 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2169 env->cpuid_min_xlevel =
2170 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2171 env->cpuid_min_xlevel2 =
2172 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2173 }
2174
2175 if (lmce_supported()) {
2176 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2177 }
2178 } else {
2179 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2180 "vendor", &error_abort);
2181 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2182 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2183 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2184 object_property_set_str(OBJECT(cpu),
2185 "QEMU TCG CPU version " QEMU_HW_VERSION,
2186 "model-id", &error_abort);
2187 }
2188
2189 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2190 }
2191
2192 static const TypeInfo max_x86_cpu_type_info = {
2193 .name = X86_CPU_TYPE_NAME("max"),
2194 .parent = TYPE_X86_CPU,
2195 .instance_init = max_x86_cpu_initfn,
2196 .class_init = max_x86_cpu_class_init,
2197 };
2198
2199 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2200 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2201 {
2202 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2203
2204 xcc->host_cpuid_required = true;
2205 xcc->ordering = 8;
2206
2207 if (kvm_enabled()) {
2208 xcc->model_description =
2209 "KVM processor with all supported host features ";
2210 } else if (hvf_enabled()) {
2211 xcc->model_description =
2212 "HVF processor with all supported host features ";
2213 }
2214 }
2215
2216 static const TypeInfo host_x86_cpu_type_info = {
2217 .name = X86_CPU_TYPE_NAME("host"),
2218 .parent = X86_CPU_TYPE_NAME("max"),
2219 .class_init = host_x86_cpu_class_init,
2220 };
2221
2222 #endif
2223
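/* Warn about every feature bit in @mask that the current accelerator
 * (host CPUID or TCG) cannot provide.
 */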
2224 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2225 {
2226 FeatureWordInfo *f = &feature_word_info[w];
2227 int i;
2228
2229 for (i = 0; i < 32; ++i) {
2230 if ((1UL << i) & mask) {
2231 const char *reg = get_register_name_32(f->cpuid_reg);
2232 assert(reg);
2233 warn_report("%s doesn't support requested feature: "
2234 "CPUID.%02XH:%s%s%s [bit %d]",
2235 accel_uses_host_cpuid() ? "host" : "TCG",
2236 f->cpuid_eax, reg,
2237 f->feat_names[i] ? "." : "",
2238 f->feat_names[i] ? f->feat_names[i] : "", i);
2239 }
2240 }
2241 }
2242
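/* QOM getter for the "family" property: base family (CPUID bits 11:8),
 * plus the extended family (bits 27:20) when the base family is 0xf.
 */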
2243 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2244 const char *name, void *opaque,
2245 Error **errp)
2246 {
2247 X86CPU *cpu = X86_CPU(obj);
2248 CPUX86State *env = &cpu->env;
2249 int64_t value;
2250
2251 value = (env->cpuid_version >> 8) & 0xf;
2252 if (value == 0xf) {
2253 value += (env->cpuid_version >> 20) & 0xff;
2254 }
2255 visit_type_int(v, name, &value, errp);
2256 }
2257
2258 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2259 const char *name, void *opaque,
2260 Error **errp)
2261 {
2262 X86CPU *cpu = X86_CPU(obj);
2263 CPUX86State *env = &cpu->env;
2264 const int64_t min = 0;
2265 const int64_t max = 0xff + 0xf;
2266 Error *local_err = NULL;
2267 int64_t value;
2268
2269 visit_type_int(v, name, &value, &local_err);
2270 if (local_err) {
2271 error_propagate(errp, local_err);
2272 return;
2273 }
2274 if (value < min || value > max) {
2275 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2276 name ? name : "null", value, min, max);
2277 return;
2278 }
2279
2280 env->cpuid_version &= ~0xff00f00;
2281 if (value > 0x0f) {
2282 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2283 } else {
2284 env->cpuid_version |= value << 8;
2285 }
2286 }
2287
2288 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2289 const char *name, void *opaque,
2290 Error **errp)
2291 {
2292 X86CPU *cpu = X86_CPU(obj);
2293 CPUX86State *env = &cpu->env;
2294 int64_t value;
2295
2296 value = (env->cpuid_version >> 4) & 0xf;
2297 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2298 visit_type_int(v, name, &value, errp);
2299 }
2300
2301 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2302 const char *name, void *opaque,
2303 Error **errp)
2304 {
2305 X86CPU *cpu = X86_CPU(obj);
2306 CPUX86State *env = &cpu->env;
2307 const int64_t min = 0;
2308 const int64_t max = 0xff;
2309 Error *local_err = NULL;
2310 int64_t value;
2311
2312 visit_type_int(v, name, &value, &local_err);
2313 if (local_err) {
2314 error_propagate(errp, local_err);
2315 return;
2316 }
2317 if (value < min || value > max) {
2318 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2319 name ? name : "null", value, min, max);
2320 return;
2321 }
2322
2323 env->cpuid_version &= ~0xf00f0;
2324 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2325 }
2326
2327 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2328 const char *name, void *opaque,
2329 Error **errp)
2330 {
2331 X86CPU *cpu = X86_CPU(obj);
2332 CPUX86State *env = &cpu->env;
2333 int64_t value;
2334
2335 value = env->cpuid_version & 0xf;
2336 visit_type_int(v, name, &value, errp);
2337 }
2338
2339 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2340 const char *name, void *opaque,
2341 Error **errp)
2342 {
2343 X86CPU *cpu = X86_CPU(obj);
2344 CPUX86State *env = &cpu->env;
2345 const int64_t min = 0;
2346 const int64_t max = 0xf;
2347 Error *local_err = NULL;
2348 int64_t value;
2349
2350 visit_type_int(v, name, &value, &local_err);
2351 if (local_err) {
2352 error_propagate(errp, local_err);
2353 return;
2354 }
2355 if (value < min || value > max) {
2356 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2357 name ? name : "null", value, min, max);
2358 return;
2359 }
2360
2361 env->cpuid_version &= ~0xf;
2362 env->cpuid_version |= value & 0xf;
2363 }
2364
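/* QOM getter for "vendor": reassemble the 12-character CPUID vendor string
 * from the three vendor registers.
 */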
2365 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2366 {
2367 X86CPU *cpu = X86_CPU(obj);
2368 CPUX86State *env = &cpu->env;
2369 char *value;
2370
2371 value = g_malloc(CPUID_VENDOR_SZ + 1);
2372 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2373 env->cpuid_vendor3);
2374 return value;
2375 }
2376
2377 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2378 Error **errp)
2379 {
2380 X86CPU *cpu = X86_CPU(obj);
2381 CPUX86State *env = &cpu->env;
2382 int i;
2383
2384 if (strlen(value) != CPUID_VENDOR_SZ) {
2385 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2386 return;
2387 }
2388
2389 env->cpuid_vendor1 = 0;
2390 env->cpuid_vendor2 = 0;
2391 env->cpuid_vendor3 = 0;
2392 for (i = 0; i < 4; i++) {
2393 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
2394 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2395 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
2396 }
2397 }
2398
2399 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2400 {
2401 X86CPU *cpu = X86_CPU(obj);
2402 CPUX86State *env = &cpu->env;
2403 char *value;
2404 int i;
2405
2406 value = g_malloc(48 + 1);
2407 for (i = 0; i < 48; i++) {
2408 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
2409 }
2410 value[48] = '\0';
2411 return value;
2412 }
2413
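/* QOM setter for "model-id": pack the (up to 48-character) string into
 * env->cpuid_model[], padding with NUL bytes; a NULL model_id is treated
 * as an empty string.
 */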
2414 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2415 Error **errp)
2416 {
2417 X86CPU *cpu = X86_CPU(obj);
2418 CPUX86State *env = &cpu->env;
2419 int c, len, i;
2420
2421 if (model_id == NULL) {
2422 model_id = "";
2423 }
2424 len = strlen(model_id);
2425 memset(env->cpuid_model, 0, 48);
2426 for (i = 0; i < 48; i++) {
2427 if (i >= len) {
2428 c = '\0';
2429 } else {
2430 c = (uint8_t)model_id[i];
2431 }
2432 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
2433 }
2434 }
2435
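/* QOM getter/setter for "tsc-frequency": the property is expressed in Hz,
 * while env->tsc_khz stores kHz internally.
 */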
2436 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2437 void *opaque, Error **errp)
2438 {
2439 X86CPU *cpu = X86_CPU(obj);
2440 int64_t value;
2441
2442 value = cpu->env.tsc_khz * 1000;
2443 visit_type_int(v, name, &value, errp);
2444 }
2445
2446 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2447 void *opaque, Error **errp)
2448 {
2449 X86CPU *cpu = X86_CPU(obj);
2450 const int64_t min = 0;
2451 const int64_t max = INT64_MAX;
2452 Error *local_err = NULL;
2453 int64_t value;
2454
2455 visit_type_int(v, name, &value, &local_err);
2456 if (local_err) {
2457 error_propagate(errp, local_err);
2458 return;
2459 }
2460 if (value < min || value > max) {
2461 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2462 name ? name : "null", value, min, max);
2463 return;
2464 }
2465
2466 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2467 }
2468
2469 /* Generic getter for "feature-words" and "filtered-features" properties */
2470 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2471 const char *name, void *opaque,
2472 Error **errp)
2473 {
2474 uint32_t *array = (uint32_t *)opaque;
2475 FeatureWord w;
2476 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2477 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2478 X86CPUFeatureWordInfoList *list = NULL;
2479
2480 for (w = 0; w < FEATURE_WORDS; w++) {
2481 FeatureWordInfo *wi = &feature_word_info[w];
2482 X86CPUFeatureWordInfo *qwi = &word_infos[w];
2483 qwi->cpuid_input_eax = wi->cpuid_eax;
2484 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2485 qwi->cpuid_input_ecx = wi->cpuid_ecx;
2486 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2487 qwi->features = array[w];
2488
2489 /* List will be in reverse order, but order shouldn't matter */
2490 list_entries[w].next = list;
2491 list_entries[w].value = &word_infos[w];
2492 list = &list_entries[w];
2493 }
2494
2495 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
2496 }
2497
2498 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2499 void *opaque, Error **errp)
2500 {
2501 X86CPU *cpu = X86_CPU(obj);
2502 int64_t value = cpu->hyperv_spinlock_attempts;
2503
2504 visit_type_int(v, name, &value, errp);
2505 }
2506
2507 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2508 void *opaque, Error **errp)
2509 {
2510 const int64_t min = 0xFFF;
2511 const int64_t max = UINT_MAX;
2512 X86CPU *cpu = X86_CPU(obj);
2513 Error *err = NULL;
2514 int64_t value;
2515
2516 visit_type_int(v, name, &value, &err);
2517 if (err) {
2518 error_propagate(errp, err);
2519 return;
2520 }
2521
2522 if (value < min || value > max) {
2523 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2524 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2525 object_get_typename(obj), name ? name : "null",
2526 value, min, max);
2527 return;
2528 }
2529 cpu->hyperv_spinlock_attempts = value;
2530 }
2531
2532 static const PropertyInfo qdev_prop_spinlocks = {
2533 .name = "int",
2534 .get = x86_get_hv_spinlocks,
2535 .set = x86_set_hv_spinlocks,
2536 };
2537
2538 /* Convert all '_' in a feature string option name to '-', to make the
2539 * feature name conform to the QOM property naming rule ('-' instead of '_').
2540 */
2541 static inline void feat2prop(char *s)
2542 {
2543 while ((s = strchr(s, '_'))) {
2544 *s = '-';
2545 }
2546 }
2547
2548 /* Return the feature property name for a feature flag bit */
2549 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2550 {
2551 /* XSAVE components are automatically enabled by other features,
2552 * so return the original feature name instead
2553 */
2554 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2555 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2556
2557 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2558 x86_ext_save_areas[comp].bits) {
2559 w = x86_ext_save_areas[comp].feature;
2560 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2561 }
2562 }
2563
2564 assert(bitnr < 32);
2565 assert(w < FEATURE_WORDS);
2566 return feature_word_info[w].feat_names[bitnr];
2567 }
2568
2569 /* Compatibility hack to maintain the legacy +-feat semantics,
2570 * where +-feat overwrites any feature set by
2571 * feat=on|feat even if the latter is parsed after +-feat
2572 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2573 */
2574 static GList *plus_features, *minus_features;
2575
2576 static gint compare_string(gconstpointer a, gconstpointer b)
2577 {
2578 return g_strcmp0(a, b);
2579 }
2580
2581 /* Parse "+feature,-feature,feature=foo" CPU feature string
2582 */
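/* For illustration, a string such as "+avx2,-hle,tsc-freq=2G" adds two
 * legacy +/- entries and registers a "tsc-frequency" global property
 * (the value accepts metric suffixes via qemu_strtosz_metric()).
 */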
2583 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2584 Error **errp)
2585 {
2586 char *featurestr; /* Single "key=value" string being parsed */
2587 static bool cpu_globals_initialized;
2588 bool ambiguous = false;
2589
2590 if (cpu_globals_initialized) {
2591 return;
2592 }
2593 cpu_globals_initialized = true;
2594
2595 if (!features) {
2596 return;
2597 }
2598
2599 for (featurestr = strtok(features, ",");
2600 featurestr;
2601 featurestr = strtok(NULL, ",")) {
2602 const char *name;
2603 const char *val = NULL;
2604 char *eq = NULL;
2605 char num[32];
2606 GlobalProperty *prop;
2607
2608 /* Compatibility syntax: */
2609 if (featurestr[0] == '+') {
2610 plus_features = g_list_append(plus_features,
2611 g_strdup(featurestr + 1));
2612 continue;
2613 } else if (featurestr[0] == '-') {
2614 minus_features = g_list_append(minus_features,
2615 g_strdup(featurestr + 1));
2616 continue;
2617 }
2618
2619 eq = strchr(featurestr, '=');
2620 if (eq) {
2621 *eq++ = 0;
2622 val = eq;
2623 } else {
2624 val = "on";
2625 }
2626
2627 feat2prop(featurestr);
2628 name = featurestr;
2629
2630 if (g_list_find_custom(plus_features, name, compare_string)) {
2631 warn_report("Ambiguous CPU model string. "
2632 "Don't mix both \"+%s\" and \"%s=%s\"",
2633 name, name, val);
2634 ambiguous = true;
2635 }
2636 if (g_list_find_custom(minus_features, name, compare_string)) {
2637 warn_report("Ambiguous CPU model string. "
2638 "Don't mix both \"-%s\" and \"%s=%s\"",
2639 name, name, val);
2640 ambiguous = true;
2641 }
2642
2643 /* Special case: */
2644 if (!strcmp(name, "tsc-freq")) {
2645 int ret;
2646 uint64_t tsc_freq;
2647
2648 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
2649 if (ret < 0 || tsc_freq > INT64_MAX) {
2650 error_setg(errp, "bad numerical value %s", val);
2651 return;
2652 }
2653 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2654 val = num;
2655 name = "tsc-frequency";
2656 }
2657
2658 prop = g_new0(typeof(*prop), 1);
2659 prop->driver = typename;
2660 prop->property = g_strdup(name);
2661 prop->value = g_strdup(val);
2662 prop->errp = &error_fatal;
2663 qdev_prop_register_global(prop);
2664 }
2665
2666 if (ambiguous) {
2667 warn_report("Compatibility of ambiguous CPU model "
2668 "strings won't be kept on future QEMU versions");
2669 }
2670 }
2671
2672 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2673 static int x86_cpu_filter_features(X86CPU *cpu);
2674
2675 /* Check for missing features that may prevent the CPU class from
2676 * running using the current machine and accelerator.
2677 */
2678 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2679 strList **missing_feats)
2680 {
2681 X86CPU *xc;
2682 FeatureWord w;
2683 Error *err = NULL;
2684 strList **next = missing_feats;
2685
2686 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
2687 strList *new = g_new0(strList, 1);
2688 new->value = g_strdup("kvm");
2689 *missing_feats = new;
2690 return;
2691 }
2692
2693 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2694
2695 x86_cpu_expand_features(xc, &err);
2696 if (err) {
2697 /* Errors at x86_cpu_expand_features should never happen,
2698 * but in case they do, just report the model as not
2699 * runnable at all using the "type" property.
2700 */
2701 strList *new = g_new0(strList, 1);
2702 new->value = g_strdup("type");
2703 *next = new;
2704 next = &new->next;
2705 }
2706
2707 x86_cpu_filter_features(xc);
2708
2709 for (w = 0; w < FEATURE_WORDS; w++) {
2710 uint32_t filtered = xc->filtered_features[w];
2711 int i;
2712 for (i = 0; i < 32; i++) {
2713 if (filtered & (1UL << i)) {
2714 strList *new = g_new0(strList, 1);
2715 new->value = g_strdup(x86_cpu_feature_name(w, i));
2716 *next = new;
2717 next = &new->next;
2718 }
2719 }
2720 }
2721
2722 object_unref(OBJECT(xc));
2723 }
2724
2725 /* Print all CPUID feature names in @featureset
2726 */
2727 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2728 {
2729 int bit;
2730 bool first = true;
2731
2732 for (bit = 0; bit < 32; bit++) {
2733 if (featureset[bit]) {
2734 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2735 first = false;
2736 }
2737 }
2738 }
2739
2740 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2741 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2742 {
2743 ObjectClass *class_a = (ObjectClass *)a;
2744 ObjectClass *class_b = (ObjectClass *)b;
2745 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2746 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2747 const char *name_a, *name_b;
2748
2749 if (cc_a->ordering != cc_b->ordering) {
2750 return cc_a->ordering - cc_b->ordering;
2751 } else {
2752 name_a = object_class_get_name(class_a);
2753 name_b = object_class_get_name(class_b);
2754 return strcmp(name_a, name_b);
2755 }
2756 }
2757
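/* Return the list of all TYPE_X86_CPU classes, sorted with
 * x86_cpu_list_compare().
 */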
2758 static GSList *get_sorted_cpu_model_list(void)
2759 {
2760 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2761 list = g_slist_sort(list, x86_cpu_list_compare);
2762 return list;
2763 }
2764
2765 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2766 {
2767 ObjectClass *oc = data;
2768 X86CPUClass *cc = X86_CPU_CLASS(oc);
2769 CPUListState *s = user_data;
2770 char *name = x86_cpu_class_get_model_name(cc);
2771 const char *desc = cc->model_description;
2772 if (!desc && cc->cpu_def) {
2773 desc = cc->cpu_def->model_id;
2774 }
2775
2776 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2777 name, desc);
2778 g_free(name);
2779 }
2780
2781 /* list available CPU models and flags */
2782 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2783 {
2784 int i;
2785 CPUListState s = {
2786 .file = f,
2787 .cpu_fprintf = cpu_fprintf,
2788 };
2789 GSList *list;
2790
2791 (*cpu_fprintf)(f, "Available CPUs:\n");
2792 list = get_sorted_cpu_model_list();
2793 g_slist_foreach(list, x86_cpu_list_entry, &s);
2794 g_slist_free(list);
2795
2796 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2797 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2798 FeatureWordInfo *fw = &feature_word_info[i];
2799
2800 (*cpu_fprintf)(f, " ");
2801 listflags(f, cpu_fprintf, fw->feat_names);
2802 (*cpu_fprintf)(f, "\n");
2803 }
2804 }
2805
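/* g_slist_foreach() callback: prepend a CpuDefinitionInfo entry for one CPU
 * class to the list pointed to by @user_data.
 */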
2806 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2807 {
2808 ObjectClass *oc = data;
2809 X86CPUClass *cc = X86_CPU_CLASS(oc);
2810 CpuDefinitionInfoList **cpu_list = user_data;
2811 CpuDefinitionInfoList *entry;
2812 CpuDefinitionInfo *info;
2813
2814 info = g_malloc0(sizeof(*info));
2815 info->name = x86_cpu_class_get_model_name(cc);
2816 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2817 info->has_unavailable_features = true;
2818 info->q_typename = g_strdup(object_class_get_name(oc));
2819 info->migration_safe = cc->migration_safe;
2820 info->has_migration_safe = true;
2821 info->q_static = cc->static_model;
2822
2823 entry = g_malloc0(sizeof(*entry));
2824 entry->value = info;
2825 entry->next = *cpu_list;
2826 *cpu_list = entry;
2827 }
2828
2829 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2830 {
2831 CpuDefinitionInfoList *cpu_list = NULL;
2832 GSList *list = get_sorted_cpu_model_list();
2833 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2834 g_slist_free(list);
2835 return cpu_list;
2836 }
2837
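/* Return the bits of feature word @w that the current accelerator can
 * provide (optionally restricted to migratable flags); returns ~0 when
 * neither KVM, HVF nor TCG is enabled.
 */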
2838 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2839 bool migratable_only)
2840 {
2841 FeatureWordInfo *wi = &feature_word_info[w];
2842 uint32_t r;
2843
2844 if (kvm_enabled()) {
2845 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2846 wi->cpuid_ecx,
2847 wi->cpuid_reg);
2848 } else if (hvf_enabled()) {
2849 r = hvf_get_supported_cpuid(wi->cpuid_eax,
2850 wi->cpuid_ecx,
2851 wi->cpuid_reg);
2852 } else if (tcg_enabled()) {
2853 r = wi->tcg_features;
2854 } else {
2855 return ~0;
2856 }
2857 if (migratable_only) {
2858 r &= x86_cpu_get_migratable_flags(w);
2859 }
2860 return r;
2861 }
2862
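/* Warn about all feature bits accumulated in cpu->filtered_features[]. */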
2863 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2864 {
2865 FeatureWord w;
2866
2867 for (w = 0; w < FEATURE_WORDS; w++) {
2868 report_unavailable_features(w, cpu->filtered_features[w]);
2869 }
2870 }
2871
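/* Apply a NULL-terminated array of property/value pairs to @cpu, skipping
 * entries with a NULL value.
 */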
2872 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2873 {
2874 PropValue *pv;
2875 for (pv = props; pv->prop; pv++) {
2876 if (!pv->value) {
2877 continue;
2878 }
2879 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2880 &error_abort);
2881 }
2882 }
2883
2884 /* Load data from an X86CPUDefinition into an X86CPU object
2885 */
2886 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2887 {
2888 CPUX86State *env = &cpu->env;
2889 const char *vendor;
2890 char host_vendor[CPUID_VENDOR_SZ + 1];
2891 FeatureWord w;
2892
2893 /* NOTE: any property set by this function should be returned by
2894 * x86_cpu_static_props(), so static expansion of
2895 * query-cpu-model-expansion is always complete.
2896 */
2897
2898 /* CPU models only set _minimum_ values for level/xlevel: */
2899 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
2900 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2901
2902 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2903 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2904 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2905 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2906 for (w = 0; w < FEATURE_WORDS; w++) {
2907 env->features[w] = def->features[w];
2908 }
2909
2910 /* Special cases not set in the X86CPUDefinition structs: */
2911 /* TODO: in-kernel irqchip for hvf */
2912 if (kvm_enabled()) {
2913 if (!kvm_irqchip_in_kernel()) {
2914 x86_cpu_change_kvm_default("x2apic", "off");
2915 }
2916
2917 x86_cpu_apply_props(cpu, kvm_default_props);
2918 } else if (tcg_enabled()) {
2919 x86_cpu_apply_props(cpu, tcg_default_props);
2920 }
2921
2922 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2923
2924 /* sysenter isn't supported in compatibility mode on AMD,
2925 * syscall isn't supported in compatibility mode on Intel.
2926 * Normally we advertise the actual CPU vendor, but you can
2927 * override this using the 'vendor' property if you want to use
2928 * KVM's sysenter/syscall emulation in compatibility mode and
2929 * when doing cross-vendor migration.
2930 */
2931 vendor = def->vendor;
2932 if (accel_uses_host_cpuid()) {
2933 uint32_t ebx = 0, ecx = 0, edx = 0;
2934 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2935 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2936 vendor = host_vendor;
2937 }
2938
2939 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2940
2941 }
2942
2943 /* Return a QDict containing keys for all properties that can be included
2944 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2945 * must be included in the dictionary.
2946 */
2947 static QDict *x86_cpu_static_props(void)
2948 {
2949 FeatureWord w;
2950 int i;
2951 static const char *props[] = {
2952 "min-level",
2953 "min-xlevel",
2954 "family",
2955 "model",
2956 "stepping",
2957 "model-id",
2958 "vendor",
2959 "lmce",
2960 NULL,
2961 };
2962 static QDict *d;
2963
2964 if (d) {
2965 return d;
2966 }
2967
2968 d = qdict_new();
2969 for (i = 0; props[i]; i++) {
2970 qdict_put_null(d, props[i]);
2971 }
2972
2973 for (w = 0; w < FEATURE_WORDS; w++) {
2974 FeatureWordInfo *fi = &feature_word_info[w];
2975 int bit;
2976 for (bit = 0; bit < 32; bit++) {
2977 if (!fi->feat_names[bit]) {
2978 continue;
2979 }
2980 qdict_put_null(d, fi->feat_names[bit]);
2981 }
2982 }
2983
2984 return d;
2985 }
2986
2987 /* Add an entry to @props dict, with the value for property. */
2988 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
2989 {
2990 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
2991 &error_abort);
2992
2993 qdict_put_obj(props, prop, value);
2994 }
2995
2996 /* Convert CPU model data from X86CPU object to a property dictionary
2997 * that can recreate exactly the same CPU model.
2998 */
2999 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3000 {
3001 QDict *sprops = x86_cpu_static_props();
3002 const QDictEntry *e;
3003
3004 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3005 const char *prop = qdict_entry_key(e);
3006 x86_cpu_expand_prop(cpu, props, prop);
3007 }
3008 }
3009
3010 /* Convert CPU model data from X86CPU object to a property dictionary
3011 * that can recreate exactly the same CPU model, including every
3012 * writeable QOM property.
3013 */
3014 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3015 {
3016 ObjectPropertyIterator iter;
3017 ObjectProperty *prop;
3018
3019 object_property_iter_init(&iter, OBJECT(cpu));
3020 while ((prop = object_property_iter_next(&iter))) {
3021 /* skip read-only or write-only properties */
3022 if (!prop->get || !prop->set) {
3023 continue;
3024 }
3025
3026 /* "hotplugged" is the only property that is configurable
3027 * on the command-line but will be set differently on CPUs
3028 * created using "-cpu ... -smp ..." and on CPUs created
3029 * on the fly by x86_cpu_from_model() for querying. Skip it.
3030 */
3031 if (!strcmp(prop->name, "hotplugged")) {
3032 continue;
3033 }
3034 x86_cpu_expand_prop(cpu, props, prop->name);
3035 }
3036 }
3037
3038 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3039 {
3040 const QDictEntry *prop;
3041 Error *err = NULL;
3042
3043 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3044 object_property_set_qobject(obj, qdict_entry_value(prop),
3045 qdict_entry_key(prop), &err);
3046 if (err) {
3047 break;
3048 }
3049 }
3050
3051 error_propagate(errp, err);
3052 }
3053
3054 /* Create X86CPU object according to model+props specification */
3055 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3056 {
3057 X86CPU *xc = NULL;
3058 X86CPUClass *xcc;
3059 Error *err = NULL;
3060
3061 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3062 if (xcc == NULL) {
3063 error_setg(&err, "CPU model '%s' not found", model);
3064 goto out;
3065 }
3066
3067 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3068 if (props) {
3069 object_apply_props(OBJECT(xc), props, &err);
3070 if (err) {
3071 goto out;
3072 }
3073 }
3074
3075 x86_cpu_expand_features(xc, &err);
3076 if (err) {
3077 goto out;
3078 }
3079
3080 out:
3081 if (err) {
3082 error_propagate(errp, err);
3083 object_unref(OBJECT(xc));
3084 xc = NULL;
3085 }
3086 return xc;
3087 }
3088
3089 CpuModelExpansionInfo *
3090 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3091 CpuModelInfo *model,
3092 Error **errp)
3093 {
3094 X86CPU *xc = NULL;
3095 Error *err = NULL;
3096 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3097 QDict *props = NULL;
3098 const char *base_name;
3099
3100 xc = x86_cpu_from_model(model->name,
3101 model->has_props ?
3102 qobject_to_qdict(model->props) :
3103 NULL, &err);
3104 if (err) {
3105 goto out;
3106 }
3107
3108 props = qdict_new();
3109
3110 switch (type) {
3111 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3112 /* Static expansion will be based on "base" only */
3113 base_name = "base";
3114 x86_cpu_to_dict(xc, props);
3115 break;
3116 case CPU_MODEL_EXPANSION_TYPE_FULL:
3117 /* As we don't return every single property, full expansion needs
3118 * to keep the original model name+props, and add extra
3119 * properties on top of that.
3120 */
3121 base_name = model->name;
3122 x86_cpu_to_dict_full(xc, props);
3123 break;
3124 default:
3125 error_setg(&err, "Unsupported expansion type");
3126 goto out;
3127 }
3128
3129 if (!props) {
3130 props = qdict_new();
3131 }
3132 x86_cpu_to_dict(xc, props);
3133
3134 ret->model = g_new0(CpuModelInfo, 1);
3135 ret->model->name = g_strdup(base_name);
3136 ret->model->props = QOBJECT(props);
3137 ret->model->has_props = true;
3138
3139 out:
3140 object_unref(OBJECT(xc));
3141 if (err) {
3142 error_propagate(errp, err);
3143 qapi_free_CpuModelExpansionInfo(ret);
3144 ret = NULL;
3145 }
3146 return ret;
3147 }
3148
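/* Architecture name reported to the GDB stub. */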
3149 static gchar *x86_gdb_arch_name(CPUState *cs)
3150 {
3151 #ifdef TARGET_X86_64
3152 return g_strdup("i386:x86-64");
3153 #else
3154 return g_strdup("i386");
3155 #endif
3156 }
3157
3158 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3159 {
3160 X86CPUDefinition *cpudef = data;
3161 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3162
3163 xcc->cpu_def = cpudef;
3164 xcc->migration_safe = true;
3165 }
3166
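/* Register a QOM CPU type for one built-in X86CPUDefinition model. */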
3167 static void x86_register_cpudef_type(X86CPUDefinition *def)
3168 {
3169 char *typename = x86_cpu_type_name(def->name);
3170 TypeInfo ti = {
3171 .name = typename,
3172 .parent = TYPE_X86_CPU,
3173 .class_init = x86_cpu_cpudef_class_init,
3174 .class_data = def,
3175 };
3176
3177 /* AMD aliases are handled at runtime based on CPUID vendor, so
3178 * they shouldn't be set on the CPU model table.
3179 */
3180 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3181 /* catch mistakes instead of silently truncating model_id when too long */
3182 assert(def->model_id && strlen(def->model_id) <= 48);
3183
3184
3185 type_register(&ti);
3186 g_free(typename);
3187 }
3188
3189 #if !defined(CONFIG_USER_ONLY)
3190
3191 void cpu_clear_apic_feature(CPUX86State *env)
3192 {
3193 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3194 }
3195
3196 #endif /* !CONFIG_USER_ONLY */
3197
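/* Core CPUID emulation: fill @eax..@edx with the values the guest sees for
 * leaf @index / sub-leaf @count, based on env->features and the cache and
 * topology constants defined earlier in this file.
 */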
3198 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3199 uint32_t *eax, uint32_t *ebx,
3200 uint32_t *ecx, uint32_t *edx)
3201 {
3202 X86CPU *cpu = x86_env_get_cpu(env);
3203 CPUState *cs = CPU(cpu);
3204 uint32_t pkg_offset;
3205 uint32_t limit;
3206 uint32_t signature[3];
3207
3208 /* Calculate & apply limits for different index ranges */
3209 if (index >= 0xC0000000) {
3210 limit = env->cpuid_xlevel2;
3211 } else if (index >= 0x80000000) {
3212 limit = env->cpuid_xlevel;
3213 } else if (index >= 0x40000000) {
3214 limit = 0x40000001;
3215 } else {
3216 limit = env->cpuid_level;
3217 }
3218
3219 if (index > limit) {
3220 /* Intel documentation states that invalid EAX input will
3221 * return the same information as EAX=cpuid_level
3222 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3223 */
3224 index = env->cpuid_level;
3225 }
3226
3227 switch(index) {
3228 case 0:
3229 *eax = env->cpuid_level;
3230 *ebx = env->cpuid_vendor1;
3231 *edx = env->cpuid_vendor2;
3232 *ecx = env->cpuid_vendor3;
3233 break;
3234 case 1:
3235 *eax = env->cpuid_version;
3236 *ebx = (cpu->apic_id << 24) |
3237 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3238 *ecx = env->features[FEAT_1_ECX];
3239 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3240 *ecx |= CPUID_EXT_OSXSAVE;
3241 }
3242 *edx = env->features[FEAT_1_EDX];
3243 if (cs->nr_cores * cs->nr_threads > 1) {
3244 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3245 *edx |= CPUID_HT;
3246 }
3247 break;
3248 case 2:
3249 /* cache info: needed for Pentium Pro compatibility */
3250 if (cpu->cache_info_passthrough) {
3251 host_cpuid(index, 0, eax, ebx, ecx, edx);
3252 break;
3253 }
3254 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3255 *ebx = 0;
3256 if (!cpu->enable_l3_cache) {
3257 *ecx = 0;
3258 } else {
3259 *ecx = L3_N_DESCRIPTOR;
3260 }
3261 *edx = (L1D_DESCRIPTOR << 16) | \
3262 (L1I_DESCRIPTOR << 8) | \
3263 (L2_DESCRIPTOR);
3264 break;
3265 case 4:
3266 /* cache info: needed for Core compatibility */
3267 if (cpu->cache_info_passthrough) {
3268 host_cpuid(index, count, eax, ebx, ecx, edx);
3269 *eax &= ~0xFC000000;
3270 } else {
3271 *eax = 0;
3272 switch (count) {
3273 case 0: /* L1 dcache info */
3274 *eax |= CPUID_4_TYPE_DCACHE | \
3275 CPUID_4_LEVEL(1) | \
3276 CPUID_4_SELF_INIT_LEVEL;
3277 *ebx = (L1D_LINE_SIZE - 1) | \
3278 ((L1D_PARTITIONS - 1) << 12) | \
3279 ((L1D_ASSOCIATIVITY - 1) << 22);
3280 *ecx = L1D_SETS - 1;
3281 *edx = CPUID_4_NO_INVD_SHARING;
3282 break;
3283 case 1: /* L1 icache info */
3284 *eax |= CPUID_4_TYPE_ICACHE | \
3285 CPUID_4_LEVEL(1) | \
3286 CPUID_4_SELF_INIT_LEVEL;
3287 *ebx = (L1I_LINE_SIZE - 1) | \
3288 ((L1I_PARTITIONS - 1) << 12) | \
3289 ((L1I_ASSOCIATIVITY - 1) << 22);
3290 *ecx = L1I_SETS - 1;
3291 *edx = CPUID_4_NO_INVD_SHARING;
3292 break;
3293 case 2: /* L2 cache info */
3294 *eax |= CPUID_4_TYPE_UNIFIED | \
3295 CPUID_4_LEVEL(2) | \
3296 CPUID_4_SELF_INIT_LEVEL;
3297 if (cs->nr_threads > 1) {
3298 *eax |= (cs->nr_threads - 1) << 14;
3299 }
3300 *ebx = (L2_LINE_SIZE - 1) | \
3301 ((L2_PARTITIONS - 1) << 12) | \
3302 ((L2_ASSOCIATIVITY - 1) << 22);
3303 *ecx = L2_SETS - 1;
3304 *edx = CPUID_4_NO_INVD_SHARING;
3305 break;
3306 case 3: /* L3 cache info */
3307 if (!cpu->enable_l3_cache) {
3308 *eax = 0;
3309 *ebx = 0;
3310 *ecx = 0;
3311 *edx = 0;
3312 break;
3313 }
3314 *eax |= CPUID_4_TYPE_UNIFIED | \
3315 CPUID_4_LEVEL(3) | \
3316 CPUID_4_SELF_INIT_LEVEL;
3317 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3318 *eax |= ((1 << pkg_offset) - 1) << 14;
3319 *ebx = (L3_N_LINE_SIZE - 1) | \
3320 ((L3_N_PARTITIONS - 1) << 12) | \
3321 ((L3_N_ASSOCIATIVITY - 1) << 22);
3322 *ecx = L3_N_SETS - 1;
3323 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
3324 break;
3325 default: /* end of info */
3326 *eax = 0;
3327 *ebx = 0;
3328 *ecx = 0;
3329 *edx = 0;
3330 break;
3331 }
3332 }
3333
3334 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3335 if ((*eax & 31) && cs->nr_cores > 1) {
3336 *eax |= (cs->nr_cores - 1) << 26;
3337 }
3338 break;
3339 case 5:
3340 /* mwait info: needed for Core compatibility */
3341 *eax = 0; /* Smallest monitor-line size in bytes */
3342 *ebx = 0; /* Largest monitor-line size in bytes */
3343 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3344 *edx = 0;
3345 break;
3346 case 6:
3347 /* Thermal and Power Leaf */
3348 *eax = env->features[FEAT_6_EAX];
3349 *ebx = 0;
3350 *ecx = 0;
3351 *edx = 0;
3352 break;
3353 case 7:
3354 /* Structured Extended Feature Flags Enumeration Leaf */
3355 if (count == 0) {
3356 *eax = 0; /* Maximum ECX value for sub-leaves */
3357 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3358 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3359 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3360 *ecx |= CPUID_7_0_ECX_OSPKE;
3361 }
3362 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3363 } else {
3364 *eax = 0;
3365 *ebx = 0;
3366 *ecx = 0;
3367 *edx = 0;
3368 }
3369 break;
3370 case 9:
3371 /* Direct Cache Access Information Leaf */
3372 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3373 *ebx = 0;
3374 *ecx = 0;
3375 *edx = 0;
3376 break;
3377 case 0xA:
3378 /* Architectural Performance Monitoring Leaf */
3379 if (kvm_enabled() && cpu->enable_pmu) {
3380 KVMState *s = cs->kvm_state;
3381
3382 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3383 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3384 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3385 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3386 } else if (hvf_enabled() && cpu->enable_pmu) {
3387 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3388 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3389 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3390 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3391 } else {
3392 *eax = 0;
3393 *ebx = 0;
3394 *ecx = 0;
3395 *edx = 0;
3396 }
3397 break;
3398 case 0xB:
3399 /* Extended Topology Enumeration Leaf */
3400 if (!cpu->enable_cpuid_0xb) {
3401 *eax = *ebx = *ecx = *edx = 0;
3402 break;
3403 }
3404
3405 *ecx = count & 0xff;
3406 *edx = cpu->apic_id;
3407
3408 switch (count) {
3409 case 0:
3410 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3411 *ebx = cs->nr_threads;
3412 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
3413 break;
3414 case 1:
3415 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3416 *ebx = cs->nr_cores * cs->nr_threads;
3417 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3418 break;
3419 default:
3420 *eax = 0;
3421 *ebx = 0;
3422 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3423 }
3424
3425 assert(!(*eax & ~0x1f));
3426 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
3427 break;
3428 case 0xD: {
3429 /* Processor Extended State */
3430 *eax = 0;
3431 *ebx = 0;
3432 *ecx = 0;
3433 *edx = 0;
3434 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3435 break;
3436 }
3437
3438 if (count == 0) {
3439 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3440 *eax = env->features[FEAT_XSAVE_COMP_LO];
3441 *edx = env->features[FEAT_XSAVE_COMP_HI];
3442 *ebx = *ecx;
3443 } else if (count == 1) {
3444 *eax = env->features[FEAT_XSAVE];
3445 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3446 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3447 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3448 *eax = esa->size;
3449 *ebx = esa->offset;
3450 }
3451 }
3452 break;
3453 }
3454 case 0x40000000:
3455 /*
3456 * CPUID code in kvm_arch_init_vcpu() ignores anything
3457 * set here, but we still restrict this leaf to TCG.
3458 */
3459 if (tcg_enabled() && cpu->expose_tcg) {
3460 memcpy(signature, "TCGTCGTCGTCG", 12);
3461 *eax = 0x40000001;
3462 *ebx = signature[0];
3463 *ecx = signature[1];
3464 *edx = signature[2];
3465 } else {
3466 *eax = 0;
3467 *ebx = 0;
3468 *ecx = 0;
3469 *edx = 0;
3470 }
3471 break;
3472 case 0x40000001:
3473 *eax = 0;
3474 *ebx = 0;
3475 *ecx = 0;
3476 *edx = 0;
3477 break;
3478 case 0x80000000:
3479 *eax = env->cpuid_xlevel;
3480 *ebx = env->cpuid_vendor1;
3481 *edx = env->cpuid_vendor2;
3482 *ecx = env->cpuid_vendor3;
3483 break;
3484 case 0x80000001:
3485 *eax = env->cpuid_version;
3486 *ebx = 0;
3487 *ecx = env->features[FEAT_8000_0001_ECX];
3488 *edx = env->features[FEAT_8000_0001_EDX];
3489
3490 /* The Linux kernel checks for the CmpLegacy bit and
3491 * discards multi-threading information if it is set.
3492 * So we don't set it here for Intel, to keep Linux guests happy.
3493 */
3494 if (cs->nr_cores * cs->nr_threads > 1) {
3495 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3496 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3497 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3498 *ecx |= 1 << 1; /* CmpLegacy bit */
3499 }
3500 }
3501 break;
3502 case 0x80000002:
3503 case 0x80000003:
3504 case 0x80000004:
3505 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3506 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3507 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3508 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3509 break;
3510 case 0x80000005:
3511 /* cache info (L1 cache) */
3512 if (cpu->cache_info_passthrough) {
3513 host_cpuid(index, 0, eax, ebx, ecx, edx);
3514 break;
3515 }
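/*
 * AMD layout for leaf 0x80000005, as encoded below: EAX/EBX describe the
 * 2M and 4K TLBs, ECX/EDX the L1 data and instruction caches; each register
 * packs one byte per field, e.g. for ECX: (size in KB << 24) |
 * (associativity << 16) | (lines per tag << 8) | line size.
 */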
3516 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3517 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3518 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3519 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3520 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
3521 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
3522 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
3523 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
3524 break;
3525 case 0x80000006:
3526 /* cache info (L2 cache) */
3527 if (cpu->cache_info_passthrough) {
3528 host_cpuid(index, 0, eax, ebx, ecx, edx);
3529 break;
3530 }
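/*
 * Leaf 0x80000006 uses AMD's 4-bit associativity encoding
 * (AMD_ENC_ASSOC) and reports the L3 size in 512 KiB units,
 * hence the division by 512 in the EDX encoding below.
 */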
3531 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3532 (L2_DTLB_2M_ENTRIES << 16) | \
3533 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3534 (L2_ITLB_2M_ENTRIES);
3535 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3536 (L2_DTLB_4K_ENTRIES << 16) | \
3537 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3538 (L2_ITLB_4K_ENTRIES);
3539 *ecx = (L2_SIZE_KB_AMD << 16) | \
3540 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
3541 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
3542 if (!cpu->enable_l3_cache) {
3543 *edx = ((L3_SIZE_KB / 512) << 18) | \
3544 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
3545 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
3546 } else {
3547 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
3548 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
3549 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
3550 }
3551 break;
3552 case 0x80000007:
3553 *eax = 0;
3554 *ebx = 0;
3555 *ecx = 0;
3556 *edx = env->features[FEAT_8000_0007_EDX];
3557 break;
3558 case 0x80000008:
3559 /* Virtual & physical address sizes in the low 2 bytes of EAX. */
3560 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3561 /* 64 bit processor */
3562 *eax = cpu->phys_bits; /* configurable physical bits */
3563 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3564 *eax |= 0x00003900; /* 57 bits virtual */
3565 } else {
3566 *eax |= 0x00003000; /* 48 bits virtual */
3567 }
3568 } else {
3569 *eax = cpu->phys_bits;
3570 }
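/*
 * For example (hypothetical configuration): a 64-bit CPU with phys-bits=40
 * and no LA57 would report EAX = 0x00003028, i.e. 48 virtual and
 * 40 physical address bits.
 */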
3571 *ebx = env->features[FEAT_8000_0008_EBX];
3572 *ecx = 0;
3573 *edx = 0;
3574 if (cs->nr_cores * cs->nr_threads > 1) {
3575 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
3576 }
3577 break;
3578 case 0x8000000A:
3579 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3580 *eax = 0x00000001; /* SVM Revision */
3581 *ebx = 0x00000010; /* nr of ASIDs */
3582 *ecx = 0;
3583 *edx = env->features[FEAT_SVM]; /* optional features */
3584 } else {
3585 *eax = 0;
3586 *ebx = 0;
3587 *ecx = 0;
3588 *edx = 0;
3589 }
3590 break;
3591 case 0xC0000000:
3592 *eax = env->cpuid_xlevel2;
3593 *ebx = 0;
3594 *ecx = 0;
3595 *edx = 0;
3596 break;
3597 case 0xC0000001:
3598 /* Support for VIA CPU's CPUID instruction */
3599 *eax = env->cpuid_version;
3600 *ebx = 0;
3601 *ecx = 0;
3602 *edx = env->features[FEAT_C000_0001_EDX];
3603 break;
3604 case 0xC0000002:
3605 case 0xC0000003:
3606 case 0xC0000004:
3607 /* Reserved for future use; currently filled with zeros */
3608 *eax = 0;
3609 *ebx = 0;
3610 *ecx = 0;
3611 *edx = 0;
3612 break;
3613 default:
3614 /* reserved values: zero */
3615 *eax = 0;
3616 *ebx = 0;
3617 *ecx = 0;
3618 *edx = 0;
3619 break;
3620 }
3621 }
3622
3623 /* CPUClass::reset() */
3624 static void x86_cpu_reset(CPUState *s)
3625 {
3626 X86CPU *cpu = X86_CPU(s);
3627 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
3628 CPUX86State *env = &cpu->env;
3629 target_ulong cr4;
3630 uint64_t xcr0;
3631 int i;
3632
3633 xcc->parent_reset(s);
3634
3635 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
3636
3637 env->old_exception = -1;
3638
3639 /* init to reset state */
3640
3641 env->hflags2 |= HF2_GIF_MASK;
3642
3643 cpu_x86_update_cr0(env, 0x60000010);
3644 env->a20_mask = ~0x0;
3645 env->smbase = 0x30000;
3646
3647 env->idt.limit = 0xffff;
3648 env->gdt.limit = 0xffff;
3649 env->ldt.limit = 0xffff;
3650 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
3651 env->tr.limit = 0xffff;
3652 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
3653
3654 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
3655 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
3656 DESC_R_MASK | DESC_A_MASK);
3657 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
3658 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3659 DESC_A_MASK);
3660 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
3661 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3662 DESC_A_MASK);
3663 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
3664 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3665 DESC_A_MASK);
3666 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
3667 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3668 DESC_A_MASK);
3669 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
3670 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3671 DESC_A_MASK);
3672
3673 env->eip = 0xfff0;
3674 env->regs[R_EDX] = env->cpuid_version;
3675
3676 env->eflags = 0x2;
3677
3678 /* FPU init */
3679 for (i = 0; i < 8; i++) {
3680 env->fptags[i] = 1;
3681 }
3682 cpu_set_fpuc(env, 0x37f);
3683
3684 env->mxcsr = 0x1f80;
3685 /* All units are in INIT state. */
3686 env->xstate_bv = 0;
3687
3688 env->pat = 0x0007040600070406ULL;
3689 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
3690
3691 memset(env->dr, 0, sizeof(env->dr));
3692 env->dr[6] = DR6_FIXED_1;
3693 env->dr[7] = DR7_FIXED_1;
3694 cpu_breakpoint_remove_all(s, BP_CPU);
3695 cpu_watchpoint_remove_all(s, BP_CPU);
3696
3697 cr4 = 0;
3698 xcr0 = XSTATE_FP_MASK;
3699
3700 #ifdef CONFIG_USER_ONLY
3701 /* Enable all the features for user-mode. */
3702 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
3703 xcr0 |= XSTATE_SSE_MASK;
3704 }
3705 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3706 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3707 if (env->features[esa->feature] & esa->bits) {
3708 xcr0 |= 1ull << i;
3709 }
3710 }
3711
3712 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
3713 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
3714 }
3715 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
3716 cr4 |= CR4_FSGSBASE_MASK;
3717 }
3718 #endif
3719
3720 env->xcr0 = xcr0;
3721 cpu_x86_update_cr4(env, cr4);
3722
3723 /*
3724 * SDM 11.11.5 requires:
3725 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3726 * - IA32_MTRR_PHYSMASKn.V = 0
3727 * All other bits are undefined. For simplification, zero it all.
3728 */
3729 env->mtrr_deftype = 0;
3730 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
3731 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
3732
3733 env->interrupt_injected = -1;
3734 env->exception_injected = -1;
3735 env->nmi_injected = false;
3736 #if !defined(CONFIG_USER_ONLY)
3737 /* We hard-wire the BSP to the first CPU. */
3738 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
3739
3740 s->halted = !cpu_is_bsp(cpu);
3741
3742 if (kvm_enabled()) {
3743 kvm_arch_reset_vcpu(cpu);
3744 } else if (hvf_enabled()) {
3746 hvf_reset_vcpu(s);
3747 }
3748 #endif
3749 }
3750
3751 #ifndef CONFIG_USER_ONLY
3752 bool cpu_is_bsp(X86CPU *cpu)
3753 {
3754 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3755 }
3756
3757 /* TODO: remove me when reset over the QOM tree is implemented */
3758 static void x86_cpu_machine_reset_cb(void *opaque)
3759 {
3760 X86CPU *cpu = opaque;
3761 cpu_reset(CPU(cpu));
3762 }
3763 #endif
3764
3765 static void mce_init(X86CPU *cpu)
3766 {
3767 CPUX86State *cenv = &cpu->env;
3768 unsigned int bank;
3769
3770 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3771 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3772 (CPUID_MCE | CPUID_MCA)) {
3773 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3774 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3775 cenv->mcg_ctl = ~(uint64_t)0;
3776 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3777 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3778 }
3779 }
3780 }
3781
3782 #ifndef CONFIG_USER_ONLY
3783 APICCommonClass *apic_get_class(void)
3784 {
3785 const char *apic_type = "apic";
3786
3787 /* TODO: in-kernel irqchip for hvf */
3788 if (kvm_apic_in_kernel()) {
3789 apic_type = "kvm-apic";
3790 } else if (xen_enabled()) {
3791 apic_type = "xen-apic";
3792 }
3793
3794 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3795 }
3796
3797 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
3798 {
3799 APICCommonState *apic;
3800 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
3801
3802 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
3803
3804 object_property_add_child(OBJECT(cpu), "lapic",
3805 OBJECT(cpu->apic_state), &error_abort);
3806 object_unref(OBJECT(cpu->apic_state));
3807
3808 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
3809 /* TODO: convert to link<> */
3810 apic = APIC_COMMON(cpu->apic_state);
3811 apic->cpu = cpu;
3812 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
3813 }
3814
3815 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3816 {
3817 APICCommonState *apic;
3818 static bool apic_mmio_map_once;
3819
3820 if (cpu->apic_state == NULL) {
3821 return;
3822 }
3823 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
3824 errp);
3825
3826 /* Map APIC MMIO area */
3827 apic = APIC_COMMON(cpu->apic_state);
3828 if (!apic_mmio_map_once) {
3829 memory_region_add_subregion_overlap(get_system_memory(),
3830 apic->apicbase &
3831 MSR_IA32_APICBASE_BASE,
3832 &apic->io_memory,
3833 0x1000);
3834 apic_mmio_map_once = true;
3835 }
3836 }
3837
3838 static void x86_cpu_machine_done(Notifier *n, void *unused)
3839 {
3840 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3841 MemoryRegion *smram =
3842 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3843
3844 if (smram) {
3845 cpu->smram = g_new(MemoryRegion, 1);
3846 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3847 smram, 0, 1ull << 32);
3848 memory_region_set_enabled(cpu->smram, true);
3849 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3850 }
3851 }
3852 #else
3853 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3854 {
3855 }
3856 #endif
3857
3858 /* Note: Only safe for use on x86(-64) hosts */
3859 static uint32_t x86_host_phys_bits(void)
3860 {
3861 uint32_t eax;
3862 uint32_t host_phys_bits;
3863
3864 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3865 if (eax >= 0x80000008) {
3866 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3867 /* Note: According to AMD doc 25481 rev 2.34 there is a field
3868 * at bits 23:16 that can specify the maximum physical address bits
3869 * for the guest, overriding this value; but I've not seen
3870 * anything with that set.
3871 */
3872 host_phys_bits = eax & 0xff;
3873 } else {
3874 /* It's an odd 64-bit machine that doesn't have the leaf for
3875 * physical address bits; fall back to 36, which matches most
3876 * older Intel CPUs.
3877 */
3878 host_phys_bits = 36;
3879 }
3880
3881 return host_phys_bits;
3882 }
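/*
 * For example, a host whose CPUID leaf 0x80000008 returns EAX = 0x00003028
 * makes x86_host_phys_bits() above return 40 (0x28).
 */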
3883
3884 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3885 {
3886 if (*min < value) {
3887 *min = value;
3888 }
3889 }
3890
3891 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3892 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3893 {
3894 CPUX86State *env = &cpu->env;
3895 FeatureWordInfo *fi = &feature_word_info[w];
3896 uint32_t eax = fi->cpuid_eax;
3897 uint32_t region = eax & 0xF0000000;
3898
3899 if (!env->features[w]) {
3900 return;
3901 }
3902
3903 switch (region) {
3904 case 0x00000000:
3905 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3906 break;
3907 case 0x80000000:
3908 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3909 break;
3910 case 0xC0000000:
3911 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3912 break;
3913 }
3914 }
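/*
 * For example, a non-zero FEAT_7_0_ECX word (whose feature_word_info entry
 * points at leaf 7) raises cpuid_min_level to at least 7, keeping that
 * leaf visible to the guest.
 */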
3915
3916 /* Calculate XSAVE components based on the configured CPU feature flags */
3917 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3918 {
3919 CPUX86State *env = &cpu->env;
3920 int i;
3921 uint64_t mask;
3922
3923 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3924 return;
3925 }
3926
3927 mask = 0;
3928 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3929 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3930 if (env->features[esa->feature] & esa->bits) {
3931 mask |= (1ULL << i);
3932 }
3933 }
3934
3935 env->features[FEAT_XSAVE_COMP_LO] = mask;
3936 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3937 }
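/*
 * Illustrative example: with only the x87 and SSE components enabled
 * (bits 0 and 1), mask is 0x3, so FEAT_XSAVE_COMP_LO = 3 and
 * FEAT_XSAVE_COMP_HI = 0.
 */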
3938
3939 /***** Steps involved in loading and filtering CPUID data
3940 *
3941 * When initializing and realizing a CPU object, the steps
3942 * involved in setting up CPUID data are:
3943 *
3944 * 1) Loading CPU model definition (X86CPUDefinition). This is
3945 * implemented by x86_cpu_load_def() and should be completely
3946 * transparent, as it is done automatically by instance_init.
3947 * No code should need to look at X86CPUDefinition structs
3948 * outside instance_init.
3949 *
3950 * 2) CPU expansion. This is done by realize before CPUID
3951 * filtering, and will make sure host/accelerator data is
3952 * loaded for CPU models that depend on host capabilities
3953 * (e.g. "host"). Done by x86_cpu_expand_features().
3954 *
3955 * 3) CPUID filtering. This initializes extra data related to
3956 * CPUID, and checks if the host supports all capabilities
3957 * required by the CPU. Runnability of a CPU model is
3958 * determined at this step. Done by x86_cpu_filter_features().
3959 *
3960 * Some operations don't require all steps to be performed.
3961 * More precisely:
3962 *
3963 * - CPU instance creation (instance_init) will run only CPU
3964 * model loading. CPU expansion can't run at instance_init-time
3965 * because host/accelerator data may not be available yet.
3966 * - CPU realization will perform both CPU model expansion and CPUID
3967 * filtering, and return an error in case one of them fails.
3968 * - query-cpu-definitions needs to run all 3 steps. It needs
3969 * to run CPUID filtering, as the 'unavailable-features'
3970 * field is set based on the filtering results.
3971 * - The query-cpu-model-expansion QMP command only needs to run
3972 * CPU model loading and CPU expansion. It should not filter
3973 * any CPUID data based on host capabilities.
3974 */
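/*
 * In code terms, the flow sketched above is roughly:
 * instance_init -> x86_cpu_load_def(), and
 * realize -> x86_cpu_expand_features() -> x86_cpu_filter_features().
 */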
3975
3976 /* Expand CPU configuration data, based on configured features
3977 * and host/accelerator capabilities when appropriate.
3978 */
3979 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
3980 {
3981 CPUX86State *env = &cpu->env;
3982 FeatureWord w;
3983 GList *l;
3984 Error *local_err = NULL;
3985
3986 /* TODO: Now that cpu->max_features doesn't overwrite features
3987 * set using QOM properties, we can convert
3988 * plus_features & minus_features to global properties
3989 * inside x86_cpu_parse_featurestr() too.
3990 */
3991 if (cpu->max_features) {
3992 for (w = 0; w < FEATURE_WORDS; w++) {
3993 /* Override only features that weren't set explicitly
3994 * by the user.
3995 */
3996 env->features[w] |=
3997 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
3998 ~env->user_features[w];
3999 }
4000 }
4001
4002 for (l = plus_features; l; l = l->next) {
4003 const char *prop = l->data;
4004 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4005 if (local_err) {
4006 goto out;
4007 }
4008 }
4009
4010 for (l = minus_features; l; l = l->next) {
4011 const char *prop = l->data;
4012 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4013 if (local_err) {
4014 goto out;
4015 }
4016 }
4017
4018 if (!kvm_enabled() || !cpu->expose_kvm) {
4019 env->features[FEAT_KVM] = 0;
4020 }
4021
4022 x86_cpu_enable_xsave_components(cpu);
4023
4024 /* CPUID[EAX=7,ECX=0].EBX always increases the minimum level automatically: */
4025 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4026 if (cpu->full_cpuid_auto_level) {
4027 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4028 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4029 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4030 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4031 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4032 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4033 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4034 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4035 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4036 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4037 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4038 /* SVM requires CPUID[0x8000000A] */
4039 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4040 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4041 }
4042 }
4043
4044 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4045 if (env->cpuid_level == UINT32_MAX) {
4046 env->cpuid_level = env->cpuid_min_level;
4047 }
4048 if (env->cpuid_xlevel == UINT32_MAX) {
4049 env->cpuid_xlevel = env->cpuid_min_xlevel;
4050 }
4051 if (env->cpuid_xlevel2 == UINT32_MAX) {
4052 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4053 }
4054
4055 out:
4056 if (local_err != NULL) {
4057 error_propagate(errp, local_err);
4058 }
4059 }
4060
4061 /*
4062 * Finishes initialization of CPUID data, filters CPU feature
4063 * words based on host availability of each feature.
4064 *
4065 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4066 */
4067 static int x86_cpu_filter_features(X86CPU *cpu)
4068 {
4069 CPUX86State *env = &cpu->env;
4070 FeatureWord w;
4071 int rv = 0;
4072
4073 for (w = 0; w < FEATURE_WORDS; w++) {
4074 uint32_t host_feat =
4075 x86_cpu_get_supported_feature_word(w, false);
4076 uint32_t requested_features = env->features[w];
4077 env->features[w] &= host_feat;
4078 cpu->filtered_features[w] = requested_features & ~env->features[w];
4079 if (cpu->filtered_features[w]) {
4080 rv = 1;
4081 }
4082 }
4083
4084 return rv;
4085 }
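/*
 * For example, if the configuration requests a feature bit that
 * x86_cpu_get_supported_feature_word() does not report, the bit is cleared
 * from env->features and recorded in filtered_features, and the non-zero
 * return value lets realize warn or fail under "enforce".
 */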
4086
4087 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4088 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4089 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4090 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4091 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4092 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4093 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4094 {
4095 CPUState *cs = CPU(dev);
4096 X86CPU *cpu = X86_CPU(dev);
4097 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4098 CPUX86State *env = &cpu->env;
4099 Error *local_err = NULL;
4100 static bool ht_warned;
4101
4102 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4103 char *name = x86_cpu_class_get_model_name(xcc);
4104 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4105 g_free(name);
4106 goto out;
4107 }
4108
4109 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4110 error_setg(errp, "apic-id property was not initialized properly");
4111 return;
4112 }
4113
4114 x86_cpu_expand_features(cpu, &local_err);
4115 if (local_err) {
4116 goto out;
4117 }
4118
4119 if (x86_cpu_filter_features(cpu) &&
4120 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4121 x86_cpu_report_filtered_features(cpu);
4122 if (cpu->enforce_cpuid) {
4123 error_setg(&local_err,
4124 accel_uses_host_cpuid() ?
4125 "Host doesn't support requested features" :
4126 "TCG doesn't support requested features");
4127 goto out;
4128 }
4129 }
4130
4131 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4132 * CPUID[1].EDX.
4133 */
4134 if (IS_AMD_CPU(env)) {
4135 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4136 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4137 & CPUID_EXT2_AMD_ALIASES);
4138 }
4139
4140 /* For 64-bit systems, think about the number of physical bits to present.
4141 * Ideally this should be the same as the host; anything other than matching
4142 * the host can cause incorrect guest behaviour.
4143 * QEMU used to pick the magic value of 40 bits, which corresponds to
4144 * consumer AMD devices but nothing else.
4145 */
4146 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4147 if (accel_uses_host_cpuid()) {
4148 uint32_t host_phys_bits = x86_host_phys_bits();
4149 static bool warned;
4150
4151 if (cpu->host_phys_bits) {
4152 /* The user asked us to use the host physical bits */
4153 cpu->phys_bits = host_phys_bits;
4154 }
4155
4156 /* Print a warning if the user set it to a value that's not the
4157 * host value.
4158 */
4159 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4160 !warned) {
4161 warn_report("Host physical bits (%u)"
4162 " do not match the phys-bits property (%u)",
4163 host_phys_bits, cpu->phys_bits);
4164 warned = true;
4165 }
4166
4167 if (cpu->phys_bits &&
4168 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4169 cpu->phys_bits < 32)) {
4170 error_setg(errp, "phys-bits should be between 32 and %u "
4171 " (but is %u)",
4172 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4173 return;
4174 }
4175 } else {
4176 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4177 error_setg(errp, "TCG only supports phys-bits=%u",
4178 TCG_PHYS_ADDR_BITS);
4179 return;
4180 }
4181 }
4182 /* 0 means it was not explicitly set by the user (or by machine
4183 * compat_props or by the host code above). In this case, the default
4184 * is the value used by TCG (40).
4185 */
4186 if (cpu->phys_bits == 0) {
4187 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4188 }
4189 } else {
4190 /* For 32-bit systems, don't use the user-set value, but keep
4191 * phys_bits consistent with what we tell the guest.
4192 */
4193 if (cpu->phys_bits != 0) {
4194 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4195 return;
4196 }
4197
4198 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4199 cpu->phys_bits = 36;
4200 } else {
4201 cpu->phys_bits = 32;
4202 }
4203 }
4204 cpu_exec_realizefn(cs, &local_err);
4205 if (local_err != NULL) {
4206 error_propagate(errp, local_err);
4207 return;
4208 }
4209
4210 #ifndef CONFIG_USER_ONLY
4211 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4212
4213 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4214 x86_cpu_apic_create(cpu, &local_err);
4215 if (local_err != NULL) {
4216 goto out;
4217 }
4218 }
4219 #endif
4220
4221 mce_init(cpu);
4222
4223 #ifndef CONFIG_USER_ONLY
4224 if (tcg_enabled()) {
4225 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4226 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4227
4228 /* Outer container... */
4229 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4230 memory_region_set_enabled(cpu->cpu_as_root, true);
4231
4232 /* ... with two regions inside: normal system memory with low
4233 * priority, and...
4234 */
4235 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4236 get_system_memory(), 0, ~0ull);
4237 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4238 memory_region_set_enabled(cpu->cpu_as_mem, true);
4239
4240 cs->num_ases = 2;
4241 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4242 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4243
4244 /* ... SMRAM with higher priority, linked from /machine/smram. */
4245 cpu->machine_done.notify = x86_cpu_machine_done;
4246 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4247 }
4248 #endif
4249
4250 qemu_init_vcpu(cs);
4251
4252 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4253 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4254 * based on inputs (sockets, cores, threads), it is still better to give
4255 * users a warning.
4256 *
4257 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise,
4258 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
4259 */
4260 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4261 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4262 " -smp options properly.");
4263 ht_warned = true;
4264 }
4265
4266 x86_cpu_apic_realize(cpu, &local_err);
4267 if (local_err != NULL) {
4268 goto out;
4269 }
4270 cpu_reset(cs);
4271
4272 xcc->parent_realize(dev, &local_err);
4273
4274 out:
4275 if (local_err != NULL) {
4276 error_propagate(errp, local_err);
4277 return;
4278 }
4279 }
4280
4281 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4282 {
4283 X86CPU *cpu = X86_CPU(dev);
4284 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4285 Error *local_err = NULL;
4286
4287 #ifndef CONFIG_USER_ONLY
4288 cpu_remove_sync(CPU(dev));
4289 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4290 #endif
4291
4292 if (cpu->apic_state) {
4293 object_unparent(OBJECT(cpu->apic_state));
4294 cpu->apic_state = NULL;
4295 }
4296
4297 xcc->parent_unrealize(dev, &local_err);
4298 if (local_err != NULL) {
4299 error_propagate(errp, local_err);
4300 return;
4301 }
4302 }
4303
4304 typedef struct BitProperty {
4305 FeatureWord w;
4306 uint32_t mask;
4307 } BitProperty;
4308
4309 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4310 void *opaque, Error **errp)
4311 {
4312 X86CPU *cpu = X86_CPU(obj);
4313 BitProperty *fp = opaque;
4314 uint32_t f = cpu->env.features[fp->w];
4315 bool value = (f & fp->mask) == fp->mask;
4316 visit_type_bool(v, name, &value, errp);
4317 }
4318
4319 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4320 void *opaque, Error **errp)
4321 {
4322 DeviceState *dev = DEVICE(obj);
4323 X86CPU *cpu = X86_CPU(obj);
4324 BitProperty *fp = opaque;
4325 Error *local_err = NULL;
4326 bool value;
4327
4328 if (dev->realized) {
4329 qdev_prop_set_after_realize(dev, name, errp);
4330 return;
4331 }
4332
4333 visit_type_bool(v, name, &value, &local_err);
4334 if (local_err) {
4335 error_propagate(errp, local_err);
4336 return;
4337 }
4338
4339 if (value) {
4340 cpu->env.features[fp->w] |= fp->mask;
4341 } else {
4342 cpu->env.features[fp->w] &= ~fp->mask;
4343 }
4344 cpu->env.user_features[fp->w] |= fp->mask;
4345 }
4346
4347 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4348 void *opaque)
4349 {
4350 BitProperty *prop = opaque;
4351 g_free(prop);
4352 }
4353
4354 /* Register a boolean property to get/set a single bit in a uint32_t field.
4355 *
4356 * The same property name can be registered multiple times to make it affect
4357 * multiple bits in the same FeatureWord. In that case, the getter will return
4358 * true only if all bits are set.
4359 */
4360 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4361 const char *prop_name,
4362 FeatureWord w,
4363 int bitnr)
4364 {
4365 BitProperty *fp;
4366 ObjectProperty *op;
4367 uint32_t mask = (1UL << bitnr);
4368
4369 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4370 if (op) {
4371 fp = op->opaque;
4372 assert(fp->w == w);
4373 fp->mask |= mask;
4374 } else {
4375 fp = g_new0(BitProperty, 1);
4376 fp->w = w;
4377 fp->mask = mask;
4378 object_property_add(OBJECT(cpu), prop_name, "bool",
4379 x86_cpu_get_bit_prop,
4380 x86_cpu_set_bit_prop,
4381 x86_cpu_release_bit_prop, fp, &error_abort);
4382 }
4383 }
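/*
 * For example, registering the same property name for two bits of one
 * FeatureWord merges their masks, so reading the property returns true
 * only when both bits are set, and writing it sets or clears both.
 */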
4384
4385 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4386 FeatureWord w,
4387 int bitnr)
4388 {
4389 FeatureWordInfo *fi = &feature_word_info[w];
4390 const char *name = fi->feat_names[bitnr];
4391
4392 if (!name) {
4393 return;
4394 }
4395
4396 /* Property names should use "-" instead of "_".
4397 * Old names containing underscores are registered as aliases
4398 * using object_property_add_alias()
4399 */
4400 assert(!strchr(name, '_'));
4401 /* Aliases don't use "|" delimiters anymore; they are registered
4402 * manually using object_property_add_alias(). */
4403 assert(!strchr(name, '|'));
4404 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
4405 }
4406
4407 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
4408 {
4409 X86CPU *cpu = X86_CPU(cs);
4410 CPUX86State *env = &cpu->env;
4411 GuestPanicInformation *panic_info = NULL;
4412
4413 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
4414 panic_info = g_malloc0(sizeof(GuestPanicInformation));
4415
4416 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
4417
4418 assert(HV_CRASH_PARAMS >= 5);
4419 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
4420 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
4421 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
4422 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
4423 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
4424 }
4425
4426 return panic_info;
4427 }
4428 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
4429 const char *name, void *opaque,
4430 Error **errp)
4431 {
4432 CPUState *cs = CPU(obj);
4433 GuestPanicInformation *panic_info;
4434
4435 if (!cs->crash_occurred) {
4436 error_setg(errp, "No crash occurred");
4437 return;
4438 }
4439
4440 panic_info = x86_cpu_get_crash_info(cs);
4441 if (panic_info == NULL) {
4442 error_setg(errp, "No crash information");
4443 return;
4444 }
4445
4446 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
4447 errp);
4448 qapi_free_GuestPanicInformation(panic_info);
4449 }
4450
4451 static void x86_cpu_initfn(Object *obj)
4452 {
4453 CPUState *cs = CPU(obj);
4454 X86CPU *cpu = X86_CPU(obj);
4455 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
4456 CPUX86State *env = &cpu->env;
4457 FeatureWord w;
4458
4459 cs->env_ptr = env;
4460
4461 object_property_add(obj, "family", "int",
4462 x86_cpuid_version_get_family,
4463 x86_cpuid_version_set_family, NULL, NULL, NULL);
4464 object_property_add(obj, "model", "int",
4465 x86_cpuid_version_get_model,
4466 x86_cpuid_version_set_model, NULL, NULL, NULL);
4467 object_property_add(obj, "stepping", "int",
4468 x86_cpuid_version_get_stepping,
4469 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
4470 object_property_add_str(obj, "vendor",
4471 x86_cpuid_get_vendor,
4472 x86_cpuid_set_vendor, NULL);
4473 object_property_add_str(obj, "model-id",
4474 x86_cpuid_get_model_id,
4475 x86_cpuid_set_model_id, NULL);
4476 object_property_add(obj, "tsc-frequency", "int",
4477 x86_cpuid_get_tsc_freq,
4478 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4479 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4480 x86_cpu_get_feature_words,
4481 NULL, NULL, (void *)env->features, NULL);
4482 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4483 x86_cpu_get_feature_words,
4484 NULL, NULL, (void *)cpu->filtered_features, NULL);
4485
4486 object_property_add(obj, "crash-information", "GuestPanicInformation",
4487 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4488
4489 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4490
4491 for (w = 0; w < FEATURE_WORDS; w++) {
4492 int bitnr;
4493
4494 for (bitnr = 0; bitnr < 32; bitnr++) {
4495 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4496 }
4497 }
4498
4499 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4500 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4501 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4502 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4503 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4504 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4505 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4506
4507 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4508 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
4509 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
4510 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
4511 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
4512 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
4513 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
4514 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
4515 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
4516 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
4517 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
4518 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
4519 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
4520 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
4521 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
4522 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
4523 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
4524 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
4525 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
4526 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
4527 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
4528
4529 if (xcc->cpu_def) {
4530 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
4531 }
4532 }
4533
4534 static int64_t x86_cpu_get_arch_id(CPUState *cs)
4535 {
4536 X86CPU *cpu = X86_CPU(cs);
4537
4538 return cpu->apic_id;
4539 }
4540
4541 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
4542 {
4543 X86CPU *cpu = X86_CPU(cs);
4544
4545 return cpu->env.cr[0] & CR0_PG_MASK;
4546 }
4547
4548 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
4549 {
4550 X86CPU *cpu = X86_CPU(cs);
4551
4552 cpu->env.eip = value;
4553 }
4554
4555 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
4556 {
4557 X86CPU *cpu = X86_CPU(cs);
4558
4559 cpu->env.eip = tb->pc - tb->cs_base;
4560 }
4561
4562 static bool x86_cpu_has_work(CPUState *cs)
4563 {
4564 X86CPU *cpu = X86_CPU(cs);
4565 CPUX86State *env = &cpu->env;
4566
4567 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
4568 CPU_INTERRUPT_POLL)) &&
4569 (env->eflags & IF_MASK)) ||
4570 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
4571 CPU_INTERRUPT_INIT |
4572 CPU_INTERRUPT_SIPI |
4573 CPU_INTERRUPT_MCE)) ||
4574 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
4575 !(env->hflags & HF_SMM_MASK));
4576 }
4577
4578 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
4579 {
4580 X86CPU *cpu = X86_CPU(cs);
4581 CPUX86State *env = &cpu->env;
4582
4583 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
4584 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
4585 : bfd_mach_i386_i8086);
4586 info->print_insn = print_insn_i386;
4587
4588 info->cap_arch = CS_ARCH_X86;
4589 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
4590 : env->hflags & HF_CS32_MASK ? CS_MODE_32
4591 : CS_MODE_16);
4592 info->cap_insn_unit = 1;
4593 info->cap_insn_split = 8;
4594 }
4595
4596 void x86_update_hflags(CPUX86State *env)
4597 {
4598 uint32_t hflags;
4599 #define HFLAG_COPY_MASK \
4600 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
4601 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
4602 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
4603 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
4604
4605 hflags = env->hflags & HFLAG_COPY_MASK;
4606 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
4607 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
4608 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
4609 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
4610 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
4611
4612 if (env->cr[4] & CR4_OSFXSR_MASK) {
4613 hflags |= HF_OSFXSR_MASK;
4614 }
4615
4616 if (env->efer & MSR_EFER_LMA) {
4617 hflags |= HF_LMA_MASK;
4618 }
4619
4620 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
4621 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
4622 } else {
4623 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
4624 (DESC_B_SHIFT - HF_CS32_SHIFT);
4625 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
4626 (DESC_B_SHIFT - HF_SS32_SHIFT);
4627 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
4628 !(hflags & HF_CS32_MASK)) {
4629 hflags |= HF_ADDSEG_MASK;
4630 } else {
4631 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
4632 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
4633 }
4634 }
4635 env->hflags = hflags;
4636 }
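/*
 * Example of the derivation above: a long-mode code segment (EFER.LMA and
 * CS.L both set) yields HF_CS64_MASK together with HF_CS32_MASK and
 * HF_SS32_MASK, while real mode (CR0.PE clear) ends up with HF_ADDSEG_MASK.
 */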
4637
4638 static Property x86_cpu_properties[] = {
4639 #ifdef CONFIG_USER_ONLY
4640 /* apic_id = 0 by default for *-user, see commit 9886e834 */
4641 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
4642 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
4643 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
4644 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
4645 #else
4646 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
4647 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
4648 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
4649 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
4650 #endif
4651 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
4652 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
4653 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
4654 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
4655 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
4656 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
4657 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
4658 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
4659 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
4660 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
4661 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
4662 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
4663 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
4664 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
4665 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
4666 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
4667 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
4668 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
4669 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
4670 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
4671 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
4672 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
4673 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
4674 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
4675 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
4676 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
4677 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
4678 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
4679 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
4680 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
4681 false),
4682 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
4683 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
4684
4685 /*
4686 * From "Requirements for Implementing the Microsoft
4687 * Hypervisor Interface":
4688 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
4689 *
4690 * "Starting with Windows Server 2012 and Windows 8, if
4691 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
4692 * the hypervisor imposes no specific limit to the number of VPs.
4693 * In this case, Windows Server 2012 guest VMs may use more than
4694 * 64 VPs, up to the maximum supported number of processors applicable
4695 * to the specific Windows version being used."
4696 */
4697 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
4698 DEFINE_PROP_END_OF_LIST()
4699 };
4700
4701 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
4702 {
4703 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4704 CPUClass *cc = CPU_CLASS(oc);
4705 DeviceClass *dc = DEVICE_CLASS(oc);
4706
4707 device_class_set_parent_realize(dc, x86_cpu_realizefn,
4708 &xcc->parent_realize);
4709 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
4710 &xcc->parent_unrealize);
4711 dc->props = x86_cpu_properties;
4712
4713 xcc->parent_reset = cc->reset;
4714 cc->reset = x86_cpu_reset;
4715 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
4716
4717 cc->class_by_name = x86_cpu_class_by_name;
4718 cc->parse_features = x86_cpu_parse_featurestr;
4719 cc->has_work = x86_cpu_has_work;
4720 #ifdef CONFIG_TCG
4721 cc->do_interrupt = x86_cpu_do_interrupt;
4722 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
4723 #endif
4724 cc->dump_state = x86_cpu_dump_state;
4725 cc->get_crash_info = x86_cpu_get_crash_info;
4726 cc->set_pc = x86_cpu_set_pc;
4727 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
4728 cc->gdb_read_register = x86_cpu_gdb_read_register;
4729 cc->gdb_write_register = x86_cpu_gdb_write_register;
4730 cc->get_arch_id = x86_cpu_get_arch_id;
4731 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
4732 #ifdef CONFIG_USER_ONLY
4733 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
4734 #else
4735 cc->asidx_from_attrs = x86_asidx_from_attrs;
4736 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
4737 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
4738 cc->write_elf64_note = x86_cpu_write_elf64_note;
4739 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
4740 cc->write_elf32_note = x86_cpu_write_elf32_note;
4741 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
4742 cc->vmsd = &vmstate_x86_cpu;
4743 #endif
4744 cc->gdb_arch_name = x86_gdb_arch_name;
4745 #ifdef TARGET_X86_64
4746 cc->gdb_core_xml_file = "i386-64bit.xml";
4747 cc->gdb_num_core_regs = 57;
4748 #else
4749 cc->gdb_core_xml_file = "i386-32bit.xml";
4750 cc->gdb_num_core_regs = 41;
4751 #endif
4752 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
4753 cc->debug_excp_handler = breakpoint_handler;
4754 #endif
4755 cc->cpu_exec_enter = x86_cpu_exec_enter;
4756 cc->cpu_exec_exit = x86_cpu_exec_exit;
4757 #ifdef CONFIG_TCG
4758 cc->tcg_initialize = tcg_x86_init;
4759 #endif
4760 cc->disas_set_info = x86_disas_set_info;
4761
4762 dc->user_creatable = true;
4763 }
4764
4765 static const TypeInfo x86_cpu_type_info = {
4766 .name = TYPE_X86_CPU,
4767 .parent = TYPE_CPU,
4768 .instance_size = sizeof(X86CPU),
4769 .instance_init = x86_cpu_initfn,
4770 .abstract = true,
4771 .class_size = sizeof(X86CPUClass),
4772 .class_init = x86_cpu_common_class_init,
4773 };
4774
4775
4776 /* "base" CPU model, used by query-cpu-model-expansion */
4777 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4778 {
4779 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4780
4781 xcc->static_model = true;
4782 xcc->migration_safe = true;
4783 xcc->model_description = "base CPU model type with no features enabled";
4784 xcc->ordering = 8;
4785 }
4786
4787 static const TypeInfo x86_base_cpu_type_info = {
4788 .name = X86_CPU_TYPE_NAME("base"),
4789 .parent = TYPE_X86_CPU,
4790 .class_init = x86_cpu_base_class_init,
4791 };
4792
4793 static void x86_cpu_register_types(void)
4794 {
4795 int i;
4796
4797 type_register_static(&x86_cpu_type_info);
4798 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4799 x86_register_cpudef_type(&builtin_x86_defs[i]);
4800 }
4801 type_register_static(&max_x86_cpu_type_info);
4802 type_register_static(&x86_base_cpu_type_info);
4803 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
4804 type_register_static(&host_x86_cpu_type_info);
4805 #endif
4806 }
4807
4808 type_init(x86_cpu_register_types)