]> git.proxmox.com Git - mirror_qemu.git/blob - target/i386/cpu.c
i386/kvm: Blacklist TSX on known broken hosts
[mirror_qemu.git] / target / i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32 #include "qapi/qmp/qstring.h"
33 #include "qapi/qmp/qdict.h"
34 #include "qapi/qmp/qbool.h"
35 #include "qapi/qmp/qint.h"
36 #include "qapi/qmp/qfloat.h"
37
38 #include "qapi-types.h"
39 #include "qapi-visit.h"
40 #include "qapi/visitor.h"
41 #include "qom/qom-qobject.h"
42 #include "sysemu/arch_init.h"
43
44 #if defined(CONFIG_KVM)
45 #include <linux/kvm_para.h>
46 #endif
47
48 #include "sysemu/sysemu.h"
49 #include "hw/qdev-properties.h"
50 #include "hw/i386/topology.h"
51 #ifndef CONFIG_USER_ONLY
52 #include "exec/address-spaces.h"
53 #include "hw/hw.h"
54 #include "hw/xen/xen.h"
55 #include "hw/i386/apic_internal.h"
56 #endif
57
58
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B 0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d


/* CPUID Leaf 4 constants: */

/* EAX: */
/* Cache type field (bits 4:0) */
#define CPUID_4_TYPE_DCACHE 1
#define CPUID_4_TYPE_ICACHE 2
#define CPUID_4_TYPE_UNIFIED 3

/* Cache level is encoded in EAX bits 7:5 */
#define CPUID_4_LEVEL(l) ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE (1 << 1)
#define CPUID_4_COMPLEX_IDX (1 << 2)
/* Associativity value meaning "fully associative" */
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * Maps a way count to the 4-bit encoding of the L2/L3 associativity field.
 * Way counts with no defined encoding map to 0 ("disabled/invalid").
 * Every use of the parameter is parenthesized so that argument expressions
 * containing low-precedence operators expand correctly; note the argument
 * is still expanded multiple times, so it must be side-effect free.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)  : \
                          (a) ==   2 ? 0x2  : \
                          (a) ==   4 ? 0x4  : \
                          (a) ==   8 ? 0x6  : \
                          (a) ==  16 ? 0x8  : \
                          (a) ==  32 ? 0xA  : \
                          (a) ==  48 ? 0xB  : \
                          (a) ==  64 ? 0xC  : \
                          (a) ==  96 ? 0xD  : \
                          (a) == 128 ? 0xE  : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
101
102
/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE 64
#define L1D_ASSOCIATIVITY 8
#define L1D_SETS 64
#define L1D_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG 1
#define L1D_SIZE_KB_AMD 64
#define L1D_ASSOCIATIVITY_AMD 2

/* L1 instruction cache: */
#define L1I_LINE_SIZE 64
#define L1I_ASSOCIATIVITY 8
#define L1I_SETS 64
#define L1I_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG 1
#define L1I_SIZE_KB_AMD 64
#define L1I_ASSOCIATIVITY_AMD 2

/* Level 2 unified cache: */
#define L2_LINE_SIZE 64
#define L2_ASSOCIATIVITY 16
#define L2_SETS 4096
#define L2_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG 1
#define L2_SIZE_KB_AMD 512

/* Level 3 unified cache: */
/* L3_* values describe the default (disabled) L3 cache */
#define L3_SIZE_KB 0 /* disabled */
#define L3_ASSOCIATIVITY 0 /* disabled */
#define L3_LINES_PER_TAG 0 /* disabled */
#define L3_LINE_SIZE 0 /* disabled */
/* NOTE(review): L3_N_* appear to describe an enabled 16MiB L3 variant —
 * confirm against the users of these macros which configurations use them. */
#define L3_N_LINE_SIZE 64
#define L3_N_ASSOCIATIVITY 16
#define L3_N_SETS 16384
#define L3_N_PARTITIONS 1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG 1
#define L3_N_SIZE_KB_AMD 16384

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC 1
#define L1_DTLB_2M_ENTRIES 255
#define L1_DTLB_4K_ASSOC 1
#define L1_DTLB_4K_ENTRIES 255

#define L1_ITLB_2M_ASSOC 1
#define L1_ITLB_2M_ENTRIES 255
#define L1_ITLB_4K_ASSOC 1
#define L1_ITLB_4K_ENTRIES 255

#define L2_DTLB_2M_ASSOC 0 /* disabled */
#define L2_DTLB_2M_ENTRIES 0 /* disabled */
#define L2_DTLB_4K_ASSOC 4
#define L2_DTLB_4K_ENTRIES 512

#define L2_ITLB_2M_ASSOC 0 /* disabled */
#define L2_ITLB_2M_ENTRIES 0 /* disabled */
#define L2_ITLB_4K_ASSOC 4
#define L2_ITLB_4K_ENTRIES 512
175
176
177
178 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
179 uint32_t vendor2, uint32_t vendor3)
180 {
181 int i;
182 for (i = 0; i < 4; i++) {
183 dst[i] = vendor1 >> (8 * i);
184 dst[i + 4] = vendor2 >> (8 * i);
185 dst[i + 8] = vendor3 >> (8 * i);
186 }
187 dst[CPUID_VENDOR_SZ] = '\0';
188 }
189
/* Feature-flag sets for the built-in CPU models and for what TCG can
 * actually emulate.  All constituent CPUID_* bits are defined in cpu.h. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* CPUID[1].EDX bits TCG is able to emulate */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
/* CPUID[1].ECX bits TCG is able to emulate */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

/* CPUID[8000_0001].EDX bits TCG is able to emulate; the AMD-alias bits
 * are derived from TCG_FEATURES above */
#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
255
/*
 * Per-FeatureWord metadata: the human-readable name of each of the 32 bits
 * in the word, the CPUID leaf/sub-leaf/register the word is read from, and
 * masks describing TCG support and migratability of the bits.
 */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
271
/* Table describing every FeatureWord: bit names and the CPUID leaf/register
 * each word comes from.  A NULL name means the bit has no user-visible
 * property (either reserved or intentionally not exposed). */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    /* VIA/Centaur padlock features */
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    /* KVM paravirtual features; "kvmclock" intentionally names both
     * clocksource bits (0 and 3) */
    [FEAT_KVM] = {
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    /* XSAVE component bitmap, low/high 32 bits; no user-visible bit names */
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};
534
/* Mapping between a 32-bit x86 register index (R_* constant) and its
 * textual name plus the corresponding QAPI enum value. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Expands to one table entry for register R_<reg> */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
555
/* Descriptor of one XSAVE state component: the feature word/bits that
 * enable it, and its offset and size within the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

/* Indexed by XSTATE_*_BIT; components with offset 0 live in the legacy
 * region of the XSAVE area rather than the extended region. */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
605
606 static uint32_t xsave_area_size(uint64_t mask)
607 {
608 int i;
609 uint64_t ret = 0;
610
611 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
612 const ExtSaveArea *esa = &x86_ext_save_areas[i];
613 if ((mask >> i) & 1) {
614 ret = MAX(ret, esa->offset + esa->size);
615 }
616 }
617 return ret;
618 }
619
620 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
621 {
622 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
623 cpu->env.features[FEAT_XSAVE_COMP_LO];
624 }
625
626 const char *get_register_name_32(unsigned int reg)
627 {
628 if (reg >= CPU_NB_REGS32) {
629 return NULL;
630 }
631 return x86_reg_info_32[reg].name;
632 }
633
634 /*
635 * Returns the set of feature flags that are supported and migratable by
636 * QEMU, for a given FeatureWord.
637 */
638 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
639 {
640 FeatureWordInfo *wi = &feature_word_info[w];
641 uint32_t r = 0;
642 int i;
643
644 for (i = 0; i < 32; i++) {
645 uint32_t f = 1U << i;
646
647 /* If the feature name is known, it is implicitly considered migratable,
648 * unless it is explicitly set in unmigratable_flags */
649 if ((wi->migratable_flags & f) ||
650 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
651 r |= f;
652 }
653 }
654 return r;
655 }
656
657 void host_cpuid(uint32_t function, uint32_t count,
658 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
659 {
660 uint32_t vec[4];
661
662 #ifdef __x86_64__
663 asm volatile("cpuid"
664 : "=a"(vec[0]), "=b"(vec[1]),
665 "=c"(vec[2]), "=d"(vec[3])
666 : "0"(function), "c"(count) : "cc");
667 #elif defined(__i386__)
668 asm volatile("pusha \n\t"
669 "cpuid \n\t"
670 "mov %%eax, 0(%2) \n\t"
671 "mov %%ebx, 4(%2) \n\t"
672 "mov %%ecx, 8(%2) \n\t"
673 "mov %%edx, 12(%2) \n\t"
674 "popa"
675 : : "a"(function), "c"(count), "S"(vec)
676 : "memory", "cc");
677 #else
678 abort();
679 #endif
680
681 if (eax)
682 *eax = vec[0];
683 if (ebx)
684 *ebx = vec[1];
685 if (ecx)
686 *ecx = vec[2];
687 if (edx)
688 *edx = vec[3];
689 }
690
691 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
692 {
693 uint32_t eax, ebx, ecx, edx;
694
695 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
696 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
697
698 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
699 if (family) {
700 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
701 }
702 if (model) {
703 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
704 }
705 if (stepping) {
706 *stepping = eax & 0x0F;
707 }
708 }
709
710 /* CPU class name definitions: */
711
712 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
713 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
714
715 /* Return type name for a given CPU model name
716 * Caller is responsible for freeing the returned string.
717 */
718 static char *x86_cpu_type_name(const char *model_name)
719 {
720 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
721 }
722
723 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
724 {
725 ObjectClass *oc;
726 char *typename;
727
728 if (cpu_model == NULL) {
729 return NULL;
730 }
731
732 typename = x86_cpu_type_name(cpu_model);
733 oc = object_class_by_name(typename);
734 g_free(typename);
735 return oc;
736 }
737
738 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
739 {
740 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
741 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
742 return g_strndup(class_name,
743 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
744 }
745
/* Static description of one built-in CPU model, used to populate
 * builtin_x86_defs[]. */
struct X86CPUDefinition {
    const char *name;          /* CPU model name as used on the command line */
    uint32_t level;            /* maximum basic CPUID leaf */
    uint32_t xlevel;           /* maximum extended CPUID leaf (0x8000xxxx) */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features; /* initial feature-flag values per word */
    char model_id[48];         /* brand string reported to the guest */
};
758
759 static X86CPUDefinition builtin_x86_defs[] = {
760 {
761 .name = "qemu64",
762 .level = 0xd,
763 .vendor = CPUID_VENDOR_AMD,
764 .family = 6,
765 .model = 6,
766 .stepping = 3,
767 .features[FEAT_1_EDX] =
768 PPRO_FEATURES |
769 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
770 CPUID_PSE36,
771 .features[FEAT_1_ECX] =
772 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
773 .features[FEAT_8000_0001_EDX] =
774 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
775 .features[FEAT_8000_0001_ECX] =
776 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
777 .xlevel = 0x8000000A,
778 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
779 },
780 {
781 .name = "phenom",
782 .level = 5,
783 .vendor = CPUID_VENDOR_AMD,
784 .family = 16,
785 .model = 2,
786 .stepping = 3,
787 /* Missing: CPUID_HT */
788 .features[FEAT_1_EDX] =
789 PPRO_FEATURES |
790 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
791 CPUID_PSE36 | CPUID_VME,
792 .features[FEAT_1_ECX] =
793 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
794 CPUID_EXT_POPCNT,
795 .features[FEAT_8000_0001_EDX] =
796 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
797 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
798 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
799 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
800 CPUID_EXT3_CR8LEG,
801 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
802 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
803 .features[FEAT_8000_0001_ECX] =
804 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
805 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
806 /* Missing: CPUID_SVM_LBRV */
807 .features[FEAT_SVM] =
808 CPUID_SVM_NPT,
809 .xlevel = 0x8000001A,
810 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
811 },
812 {
813 .name = "core2duo",
814 .level = 10,
815 .vendor = CPUID_VENDOR_INTEL,
816 .family = 6,
817 .model = 15,
818 .stepping = 11,
819 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
820 .features[FEAT_1_EDX] =
821 PPRO_FEATURES |
822 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
823 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
824 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
825 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
826 .features[FEAT_1_ECX] =
827 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
828 CPUID_EXT_CX16,
829 .features[FEAT_8000_0001_EDX] =
830 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
831 .features[FEAT_8000_0001_ECX] =
832 CPUID_EXT3_LAHF_LM,
833 .xlevel = 0x80000008,
834 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
835 },
836 {
837 .name = "kvm64",
838 .level = 0xd,
839 .vendor = CPUID_VENDOR_INTEL,
840 .family = 15,
841 .model = 6,
842 .stepping = 1,
843 /* Missing: CPUID_HT */
844 .features[FEAT_1_EDX] =
845 PPRO_FEATURES | CPUID_VME |
846 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
847 CPUID_PSE36,
848 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
849 .features[FEAT_1_ECX] =
850 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
851 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
852 .features[FEAT_8000_0001_EDX] =
853 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
854 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
855 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
856 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
857 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
858 .features[FEAT_8000_0001_ECX] =
859 0,
860 .xlevel = 0x80000008,
861 .model_id = "Common KVM processor"
862 },
863 {
864 .name = "qemu32",
865 .level = 4,
866 .vendor = CPUID_VENDOR_INTEL,
867 .family = 6,
868 .model = 6,
869 .stepping = 3,
870 .features[FEAT_1_EDX] =
871 PPRO_FEATURES,
872 .features[FEAT_1_ECX] =
873 CPUID_EXT_SSE3,
874 .xlevel = 0x80000004,
875 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
876 },
877 {
878 .name = "kvm32",
879 .level = 5,
880 .vendor = CPUID_VENDOR_INTEL,
881 .family = 15,
882 .model = 6,
883 .stepping = 1,
884 .features[FEAT_1_EDX] =
885 PPRO_FEATURES | CPUID_VME |
886 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
887 .features[FEAT_1_ECX] =
888 CPUID_EXT_SSE3,
889 .features[FEAT_8000_0001_ECX] =
890 0,
891 .xlevel = 0x80000008,
892 .model_id = "Common 32-bit KVM processor"
893 },
894 {
895 .name = "coreduo",
896 .level = 10,
897 .vendor = CPUID_VENDOR_INTEL,
898 .family = 6,
899 .model = 14,
900 .stepping = 8,
901 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
902 .features[FEAT_1_EDX] =
903 PPRO_FEATURES | CPUID_VME |
904 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
905 CPUID_SS,
906 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
907 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
908 .features[FEAT_1_ECX] =
909 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
910 .features[FEAT_8000_0001_EDX] =
911 CPUID_EXT2_NX,
912 .xlevel = 0x80000008,
913 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
914 },
915 {
916 .name = "486",
917 .level = 1,
918 .vendor = CPUID_VENDOR_INTEL,
919 .family = 4,
920 .model = 8,
921 .stepping = 0,
922 .features[FEAT_1_EDX] =
923 I486_FEATURES,
924 .xlevel = 0,
925 },
926 {
927 .name = "pentium",
928 .level = 1,
929 .vendor = CPUID_VENDOR_INTEL,
930 .family = 5,
931 .model = 4,
932 .stepping = 3,
933 .features[FEAT_1_EDX] =
934 PENTIUM_FEATURES,
935 .xlevel = 0,
936 },
937 {
938 .name = "pentium2",
939 .level = 2,
940 .vendor = CPUID_VENDOR_INTEL,
941 .family = 6,
942 .model = 5,
943 .stepping = 2,
944 .features[FEAT_1_EDX] =
945 PENTIUM2_FEATURES,
946 .xlevel = 0,
947 },
948 {
949 .name = "pentium3",
950 .level = 3,
951 .vendor = CPUID_VENDOR_INTEL,
952 .family = 6,
953 .model = 7,
954 .stepping = 3,
955 .features[FEAT_1_EDX] =
956 PENTIUM3_FEATURES,
957 .xlevel = 0,
958 },
959 {
960 .name = "athlon",
961 .level = 2,
962 .vendor = CPUID_VENDOR_AMD,
963 .family = 6,
964 .model = 2,
965 .stepping = 3,
966 .features[FEAT_1_EDX] =
967 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
968 CPUID_MCA,
969 .features[FEAT_8000_0001_EDX] =
970 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
971 .xlevel = 0x80000008,
972 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
973 },
974 {
975 .name = "n270",
976 .level = 10,
977 .vendor = CPUID_VENDOR_INTEL,
978 .family = 6,
979 .model = 28,
980 .stepping = 2,
981 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
982 .features[FEAT_1_EDX] =
983 PPRO_FEATURES |
984 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
985 CPUID_ACPI | CPUID_SS,
986 /* Some CPUs got no CPUID_SEP */
987 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
988 * CPUID_EXT_XTPR */
989 .features[FEAT_1_ECX] =
990 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
991 CPUID_EXT_MOVBE,
992 .features[FEAT_8000_0001_EDX] =
993 CPUID_EXT2_NX,
994 .features[FEAT_8000_0001_ECX] =
995 CPUID_EXT3_LAHF_LM,
996 .xlevel = 0x80000008,
997 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
998 },
999 {
1000 .name = "Conroe",
1001 .level = 10,
1002 .vendor = CPUID_VENDOR_INTEL,
1003 .family = 6,
1004 .model = 15,
1005 .stepping = 3,
1006 .features[FEAT_1_EDX] =
1007 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1008 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1009 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1010 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1011 CPUID_DE | CPUID_FP87,
1012 .features[FEAT_1_ECX] =
1013 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1014 .features[FEAT_8000_0001_EDX] =
1015 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1016 .features[FEAT_8000_0001_ECX] =
1017 CPUID_EXT3_LAHF_LM,
1018 .xlevel = 0x80000008,
1019 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1020 },
1021 {
1022 .name = "Penryn",
1023 .level = 10,
1024 .vendor = CPUID_VENDOR_INTEL,
1025 .family = 6,
1026 .model = 23,
1027 .stepping = 3,
1028 .features[FEAT_1_EDX] =
1029 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1030 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1031 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1032 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1033 CPUID_DE | CPUID_FP87,
1034 .features[FEAT_1_ECX] =
1035 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1036 CPUID_EXT_SSE3,
1037 .features[FEAT_8000_0001_EDX] =
1038 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1039 .features[FEAT_8000_0001_ECX] =
1040 CPUID_EXT3_LAHF_LM,
1041 .xlevel = 0x80000008,
1042 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1043 },
1044 {
1045 .name = "Nehalem",
1046 .level = 11,
1047 .vendor = CPUID_VENDOR_INTEL,
1048 .family = 6,
1049 .model = 26,
1050 .stepping = 3,
1051 .features[FEAT_1_EDX] =
1052 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1053 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1054 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1055 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1056 CPUID_DE | CPUID_FP87,
1057 .features[FEAT_1_ECX] =
1058 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1059 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1060 .features[FEAT_8000_0001_EDX] =
1061 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1062 .features[FEAT_8000_0001_ECX] =
1063 CPUID_EXT3_LAHF_LM,
1064 .xlevel = 0x80000008,
1065 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1066 },
1067 {
1068 .name = "Westmere",
1069 .level = 11,
1070 .vendor = CPUID_VENDOR_INTEL,
1071 .family = 6,
1072 .model = 44,
1073 .stepping = 1,
1074 .features[FEAT_1_EDX] =
1075 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1076 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1077 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1078 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1079 CPUID_DE | CPUID_FP87,
1080 .features[FEAT_1_ECX] =
1081 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1082 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1083 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1084 .features[FEAT_8000_0001_EDX] =
1085 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1086 .features[FEAT_8000_0001_ECX] =
1087 CPUID_EXT3_LAHF_LM,
1088 .features[FEAT_6_EAX] =
1089 CPUID_6_EAX_ARAT,
1090 .xlevel = 0x80000008,
1091 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1092 },
1093 {
1094 .name = "SandyBridge",
1095 .level = 0xd,
1096 .vendor = CPUID_VENDOR_INTEL,
1097 .family = 6,
1098 .model = 42,
1099 .stepping = 1,
1100 .features[FEAT_1_EDX] =
1101 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1102 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1103 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1104 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1105 CPUID_DE | CPUID_FP87,
1106 .features[FEAT_1_ECX] =
1107 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1108 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1109 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1110 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1111 CPUID_EXT_SSE3,
1112 .features[FEAT_8000_0001_EDX] =
1113 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1114 CPUID_EXT2_SYSCALL,
1115 .features[FEAT_8000_0001_ECX] =
1116 CPUID_EXT3_LAHF_LM,
1117 .features[FEAT_XSAVE] =
1118 CPUID_XSAVE_XSAVEOPT,
1119 .features[FEAT_6_EAX] =
1120 CPUID_6_EAX_ARAT,
1121 .xlevel = 0x80000008,
1122 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1123 },
1124 {
1125 .name = "IvyBridge",
1126 .level = 0xd,
1127 .vendor = CPUID_VENDOR_INTEL,
1128 .family = 6,
1129 .model = 58,
1130 .stepping = 9,
1131 .features[FEAT_1_EDX] =
1132 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1133 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1134 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1135 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1136 CPUID_DE | CPUID_FP87,
1137 .features[FEAT_1_ECX] =
1138 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1139 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1140 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1141 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1142 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1143 .features[FEAT_7_0_EBX] =
1144 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1145 CPUID_7_0_EBX_ERMS,
1146 .features[FEAT_8000_0001_EDX] =
1147 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1148 CPUID_EXT2_SYSCALL,
1149 .features[FEAT_8000_0001_ECX] =
1150 CPUID_EXT3_LAHF_LM,
1151 .features[FEAT_XSAVE] =
1152 CPUID_XSAVE_XSAVEOPT,
1153 .features[FEAT_6_EAX] =
1154 CPUID_6_EAX_ARAT,
1155 .xlevel = 0x80000008,
1156 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1157 },
1158 {
1159 .name = "Haswell-noTSX",
1160 .level = 0xd,
1161 .vendor = CPUID_VENDOR_INTEL,
1162 .family = 6,
1163 .model = 60,
1164 .stepping = 1,
1165 .features[FEAT_1_EDX] =
1166 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1167 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1168 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1169 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1170 CPUID_DE | CPUID_FP87,
1171 .features[FEAT_1_ECX] =
1172 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1173 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1174 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1175 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1176 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1177 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1178 .features[FEAT_8000_0001_EDX] =
1179 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1180 CPUID_EXT2_SYSCALL,
1181 .features[FEAT_8000_0001_ECX] =
1182 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1183 .features[FEAT_7_0_EBX] =
1184 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1185 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1186 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1187 .features[FEAT_XSAVE] =
1188 CPUID_XSAVE_XSAVEOPT,
1189 .features[FEAT_6_EAX] =
1190 CPUID_6_EAX_ARAT,
1191 .xlevel = 0x80000008,
1192 .model_id = "Intel Core Processor (Haswell, no TSX)",
1193 }, {
1194 .name = "Haswell",
1195 .level = 0xd,
1196 .vendor = CPUID_VENDOR_INTEL,
1197 .family = 6,
1198 .model = 60,
1199 .stepping = 1,
1200 .features[FEAT_1_EDX] =
1201 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1202 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1203 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1204 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1205 CPUID_DE | CPUID_FP87,
1206 .features[FEAT_1_ECX] =
1207 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1208 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1209 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1210 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1211 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1212 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1213 .features[FEAT_8000_0001_EDX] =
1214 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1215 CPUID_EXT2_SYSCALL,
1216 .features[FEAT_8000_0001_ECX] =
1217 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1218 .features[FEAT_7_0_EBX] =
1219 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1220 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1221 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1222 CPUID_7_0_EBX_RTM,
1223 .features[FEAT_XSAVE] =
1224 CPUID_XSAVE_XSAVEOPT,
1225 .features[FEAT_6_EAX] =
1226 CPUID_6_EAX_ARAT,
1227 .xlevel = 0x80000008,
1228 .model_id = "Intel Core Processor (Haswell)",
1229 },
1230 {
1231 .name = "Broadwell-noTSX",
1232 .level = 0xd,
1233 .vendor = CPUID_VENDOR_INTEL,
1234 .family = 6,
1235 .model = 61,
1236 .stepping = 2,
1237 .features[FEAT_1_EDX] =
1238 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1239 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1240 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1241 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1242 CPUID_DE | CPUID_FP87,
1243 .features[FEAT_1_ECX] =
1244 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1245 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1246 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1247 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1248 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1249 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1250 .features[FEAT_8000_0001_EDX] =
1251 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1252 CPUID_EXT2_SYSCALL,
1253 .features[FEAT_8000_0001_ECX] =
1254 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1255 .features[FEAT_7_0_EBX] =
1256 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1257 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1258 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1259 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1260 CPUID_7_0_EBX_SMAP,
1261 .features[FEAT_XSAVE] =
1262 CPUID_XSAVE_XSAVEOPT,
1263 .features[FEAT_6_EAX] =
1264 CPUID_6_EAX_ARAT,
1265 .xlevel = 0x80000008,
1266 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1267 },
1268 {
1269 .name = "Broadwell",
1270 .level = 0xd,
1271 .vendor = CPUID_VENDOR_INTEL,
1272 .family = 6,
1273 .model = 61,
1274 .stepping = 2,
1275 .features[FEAT_1_EDX] =
1276 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1277 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1278 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1279 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1280 CPUID_DE | CPUID_FP87,
1281 .features[FEAT_1_ECX] =
1282 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1283 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1284 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1285 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1286 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1287 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1288 .features[FEAT_8000_0001_EDX] =
1289 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1290 CPUID_EXT2_SYSCALL,
1291 .features[FEAT_8000_0001_ECX] =
1292 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1293 .features[FEAT_7_0_EBX] =
1294 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1295 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1296 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1297 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1298 CPUID_7_0_EBX_SMAP,
1299 .features[FEAT_XSAVE] =
1300 CPUID_XSAVE_XSAVEOPT,
1301 .features[FEAT_6_EAX] =
1302 CPUID_6_EAX_ARAT,
1303 .xlevel = 0x80000008,
1304 .model_id = "Intel Core Processor (Broadwell)",
1305 },
1306 {
1307 .name = "Skylake-Client",
1308 .level = 0xd,
1309 .vendor = CPUID_VENDOR_INTEL,
1310 .family = 6,
1311 .model = 94,
1312 .stepping = 3,
1313 .features[FEAT_1_EDX] =
1314 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1315 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1316 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1317 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1318 CPUID_DE | CPUID_FP87,
1319 .features[FEAT_1_ECX] =
1320 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1321 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1322 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1323 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1324 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1325 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1326 .features[FEAT_8000_0001_EDX] =
1327 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1328 CPUID_EXT2_SYSCALL,
1329 .features[FEAT_8000_0001_ECX] =
1330 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1331 .features[FEAT_7_0_EBX] =
1332 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1333 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1334 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1335 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1336 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1337 /* Missing: XSAVES (not supported by some Linux versions,
1338 * including v4.1 to v4.6).
1339 * KVM doesn't yet expose any XSAVES state save component,
1340 * and the only one defined in Skylake (processor tracing)
1341 * probably will block migration anyway.
1342 */
1343 .features[FEAT_XSAVE] =
1344 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1345 CPUID_XSAVE_XGETBV1,
1346 .features[FEAT_6_EAX] =
1347 CPUID_6_EAX_ARAT,
1348 .xlevel = 0x80000008,
1349 .model_id = "Intel Core Processor (Skylake)",
1350 },
1351 {
1352 .name = "Opteron_G1",
1353 .level = 5,
1354 .vendor = CPUID_VENDOR_AMD,
1355 .family = 15,
1356 .model = 6,
1357 .stepping = 1,
1358 .features[FEAT_1_EDX] =
1359 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1360 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1361 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1362 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1363 CPUID_DE | CPUID_FP87,
1364 .features[FEAT_1_ECX] =
1365 CPUID_EXT_SSE3,
1366 .features[FEAT_8000_0001_EDX] =
1367 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1368 .xlevel = 0x80000008,
1369 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1370 },
1371 {
1372 .name = "Opteron_G2",
1373 .level = 5,
1374 .vendor = CPUID_VENDOR_AMD,
1375 .family = 15,
1376 .model = 6,
1377 .stepping = 1,
1378 .features[FEAT_1_EDX] =
1379 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1380 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1381 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1382 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1383 CPUID_DE | CPUID_FP87,
1384 .features[FEAT_1_ECX] =
1385 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1386 /* Missing: CPUID_EXT2_RDTSCP */
1387 .features[FEAT_8000_0001_EDX] =
1388 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1389 .features[FEAT_8000_0001_ECX] =
1390 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1391 .xlevel = 0x80000008,
1392 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1393 },
1394 {
1395 .name = "Opteron_G3",
1396 .level = 5,
1397 .vendor = CPUID_VENDOR_AMD,
1398 .family = 16,
1399 .model = 2,
1400 .stepping = 3,
1401 .features[FEAT_1_EDX] =
1402 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1403 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1404 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1405 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1406 CPUID_DE | CPUID_FP87,
1407 .features[FEAT_1_ECX] =
1408 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1409 CPUID_EXT_SSE3,
1410 /* Missing: CPUID_EXT2_RDTSCP */
1411 .features[FEAT_8000_0001_EDX] =
1412 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1413 .features[FEAT_8000_0001_ECX] =
1414 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1415 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1416 .xlevel = 0x80000008,
1417 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1418 },
1419 {
1420 .name = "Opteron_G4",
1421 .level = 0xd,
1422 .vendor = CPUID_VENDOR_AMD,
1423 .family = 21,
1424 .model = 1,
1425 .stepping = 2,
1426 .features[FEAT_1_EDX] =
1427 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1428 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1429 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1430 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1431 CPUID_DE | CPUID_FP87,
1432 .features[FEAT_1_ECX] =
1433 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1434 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1435 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1436 CPUID_EXT_SSE3,
1437 /* Missing: CPUID_EXT2_RDTSCP */
1438 .features[FEAT_8000_0001_EDX] =
1439 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1440 CPUID_EXT2_SYSCALL,
1441 .features[FEAT_8000_0001_ECX] =
1442 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1443 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1444 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1445 CPUID_EXT3_LAHF_LM,
1446 /* no xsaveopt! */
1447 .xlevel = 0x8000001A,
1448 .model_id = "AMD Opteron 62xx class CPU",
1449 },
1450 {
1451 .name = "Opteron_G5",
1452 .level = 0xd,
1453 .vendor = CPUID_VENDOR_AMD,
1454 .family = 21,
1455 .model = 2,
1456 .stepping = 0,
1457 .features[FEAT_1_EDX] =
1458 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1459 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1460 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1461 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1462 CPUID_DE | CPUID_FP87,
1463 .features[FEAT_1_ECX] =
1464 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1465 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1466 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1467 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1468 /* Missing: CPUID_EXT2_RDTSCP */
1469 .features[FEAT_8000_0001_EDX] =
1470 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1471 CPUID_EXT2_SYSCALL,
1472 .features[FEAT_8000_0001_ECX] =
1473 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1474 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1475 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1476 CPUID_EXT3_LAHF_LM,
1477 /* no xsaveopt! */
1478 .xlevel = 0x8000001A,
1479 .model_id = "AMD Opteron 63xx class CPU",
1480 },
1481 };
1482
/* A (property name, value) pair; used for tables of accelerator-specific
 * default property overrides applied to CPU models.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1486
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * The table is NULL-terminated.  Entries may be overridden at runtime by
 * x86_cpu_change_kvm_default(), e.g. by machine-type compat code.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
1503
/* TCG-specific defaults that override all CPU models when using TCG.
 * NULL-terminated, same format as kvm_default_props.
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
1510
1511
1512 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1513 {
1514 PropValue *pv;
1515 for (pv = kvm_default_props; pv->prop; pv++) {
1516 if (!strcmp(pv->prop, prop)) {
1517 pv->value = value;
1518 break;
1519 }
1520 }
1521
1522 /* It is valid to call this function only for properties that
1523 * are already present in the kvm_default_props table.
1524 */
1525 assert(pv->prop);
1526 }
1527
1528 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1529 bool migratable_only);
1530
1531 static bool lmce_supported(void)
1532 {
1533 uint64_t mce_cap = 0;
1534
1535 #ifdef CONFIG_KVM
1536 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1537 return false;
1538 }
1539 #endif
1540
1541 return !!(mce_cap & MCG_LMCE_P);
1542 }
1543
/* Fill @str (at least 48 bytes) with the host CPU's brand string, read
 * 16 bytes at a time from CPUID leaves 0x80000002..0x80000004.
 * Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* EAX..EDX are laid out consecutively in the brand string */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
1558
/* qdev properties specific to the "max" CPU model */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1564
1565 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
1566 {
1567 DeviceClass *dc = DEVICE_CLASS(oc);
1568 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1569
1570 xcc->ordering = 9;
1571
1572 xcc->model_description =
1573 "Enables all features supported by the accelerator in the current host";
1574
1575 dc->props = max_x86_cpu_properties;
1576 }
1577
1578 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
1579
/* Instance initializer for the "max" CPU model.
 *
 * Under KVM, the model mirrors the host CPU's identification (vendor,
 * family/model/stepping, brand string) and minimum CPUID levels.  Under
 * TCG, a fixed generic AMD identification is used instead.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (kvm_enabled()) {
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* Leaf 0: vendor string is returned in EBX, EDX, ECX order */
        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        /* Leaf 1 EAX: decode family/model/stepping including the
         * extended family (bits 27:20) and extended model (bits 19:16)
         * fields.
         */
        host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
        host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
        host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
        host_cpudef.stepping = eax & 0x0F;

        cpu_x86_fill_model_id(host_cpudef.model_id);

        x86_cpu_load_def(cpu, &host_cpudef, &error_abort);

        /* Raise the CPUID level floors to what KVM itself supports */
        env->cpuid_min_level =
            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_min_xlevel =
            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_min_xlevel2 =
            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: fixed generic identification, not tied to the host */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1630
/* QOM type registration for the "max" CPU model */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
1637
1638 #ifdef CONFIG_KVM
1639
1640 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1641 {
1642 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1643
1644 xcc->kvm_required = true;
1645 xcc->ordering = 8;
1646
1647 xcc->model_description =
1648 "KVM processor with all supported host features "
1649 "(only available in KVM mode)";
1650 }
1651
/* QOM type registration for the "host" CPU model; inherits the
 * instance_init of its parent type, "max".
 */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
1657
1658 #endif
1659
1660 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1661 {
1662 FeatureWordInfo *f = &feature_word_info[w];
1663 int i;
1664
1665 for (i = 0; i < 32; ++i) {
1666 if ((1UL << i) & mask) {
1667 const char *reg = get_register_name_32(f->cpuid_reg);
1668 assert(reg);
1669 fprintf(stderr, "warning: %s doesn't support requested feature: "
1670 "CPUID.%02XH:%s%s%s [bit %d]\n",
1671 kvm_enabled() ? "host" : "TCG",
1672 f->cpuid_eax, reg,
1673 f->feat_names[i] ? "." : "",
1674 f->feat_names[i] ? f->feat_names[i] : "", i);
1675 }
1676 }
1677 }
1678
1679 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1680 const char *name, void *opaque,
1681 Error **errp)
1682 {
1683 X86CPU *cpu = X86_CPU(obj);
1684 CPUX86State *env = &cpu->env;
1685 int64_t value;
1686
1687 value = (env->cpuid_version >> 8) & 0xf;
1688 if (value == 0xf) {
1689 value += (env->cpuid_version >> 20) & 0xff;
1690 }
1691 visit_type_int(v, name, &value, errp);
1692 }
1693
1694 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1695 const char *name, void *opaque,
1696 Error **errp)
1697 {
1698 X86CPU *cpu = X86_CPU(obj);
1699 CPUX86State *env = &cpu->env;
1700 const int64_t min = 0;
1701 const int64_t max = 0xff + 0xf;
1702 Error *local_err = NULL;
1703 int64_t value;
1704
1705 visit_type_int(v, name, &value, &local_err);
1706 if (local_err) {
1707 error_propagate(errp, local_err);
1708 return;
1709 }
1710 if (value < min || value > max) {
1711 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1712 name ? name : "null", value, min, max);
1713 return;
1714 }
1715
1716 env->cpuid_version &= ~0xff00f00;
1717 if (value > 0x0f) {
1718 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1719 } else {
1720 env->cpuid_version |= value << 8;
1721 }
1722 }
1723
1724 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1725 const char *name, void *opaque,
1726 Error **errp)
1727 {
1728 X86CPU *cpu = X86_CPU(obj);
1729 CPUX86State *env = &cpu->env;
1730 int64_t value;
1731
1732 value = (env->cpuid_version >> 4) & 0xf;
1733 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1734 visit_type_int(v, name, &value, errp);
1735 }
1736
1737 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1738 const char *name, void *opaque,
1739 Error **errp)
1740 {
1741 X86CPU *cpu = X86_CPU(obj);
1742 CPUX86State *env = &cpu->env;
1743 const int64_t min = 0;
1744 const int64_t max = 0xff;
1745 Error *local_err = NULL;
1746 int64_t value;
1747
1748 visit_type_int(v, name, &value, &local_err);
1749 if (local_err) {
1750 error_propagate(errp, local_err);
1751 return;
1752 }
1753 if (value < min || value > max) {
1754 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1755 name ? name : "null", value, min, max);
1756 return;
1757 }
1758
1759 env->cpuid_version &= ~0xf00f0;
1760 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1761 }
1762
1763 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1764 const char *name, void *opaque,
1765 Error **errp)
1766 {
1767 X86CPU *cpu = X86_CPU(obj);
1768 CPUX86State *env = &cpu->env;
1769 int64_t value;
1770
1771 value = env->cpuid_version & 0xf;
1772 visit_type_int(v, name, &value, errp);
1773 }
1774
1775 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1776 const char *name, void *opaque,
1777 Error **errp)
1778 {
1779 X86CPU *cpu = X86_CPU(obj);
1780 CPUX86State *env = &cpu->env;
1781 const int64_t min = 0;
1782 const int64_t max = 0xf;
1783 Error *local_err = NULL;
1784 int64_t value;
1785
1786 visit_type_int(v, name, &value, &local_err);
1787 if (local_err) {
1788 error_propagate(errp, local_err);
1789 return;
1790 }
1791 if (value < min || value > max) {
1792 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1793 name ? name : "null", value, min, max);
1794 return;
1795 }
1796
1797 env->cpuid_version &= ~0xf;
1798 env->cpuid_version |= value & 0xf;
1799 }
1800
1801 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1802 {
1803 X86CPU *cpu = X86_CPU(obj);
1804 CPUX86State *env = &cpu->env;
1805 char *value;
1806
1807 value = g_malloc(CPUID_VENDOR_SZ + 1);
1808 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1809 env->cpuid_vendor3);
1810 return value;
1811 }
1812
1813 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1814 Error **errp)
1815 {
1816 X86CPU *cpu = X86_CPU(obj);
1817 CPUX86State *env = &cpu->env;
1818 int i;
1819
1820 if (strlen(value) != CPUID_VENDOR_SZ) {
1821 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1822 return;
1823 }
1824
1825 env->cpuid_vendor1 = 0;
1826 env->cpuid_vendor2 = 0;
1827 env->cpuid_vendor3 = 0;
1828 for (i = 0; i < 4; i++) {
1829 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1830 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1831 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1832 }
1833 }
1834
1835 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1836 {
1837 X86CPU *cpu = X86_CPU(obj);
1838 CPUX86State *env = &cpu->env;
1839 char *value;
1840 int i;
1841
1842 value = g_malloc(48 + 1);
1843 for (i = 0; i < 48; i++) {
1844 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1845 }
1846 value[48] = '\0';
1847 return value;
1848 }
1849
1850 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1851 Error **errp)
1852 {
1853 X86CPU *cpu = X86_CPU(obj);
1854 CPUX86State *env = &cpu->env;
1855 int c, len, i;
1856
1857 if (model_id == NULL) {
1858 model_id = "";
1859 }
1860 len = strlen(model_id);
1861 memset(env->cpuid_model, 0, 48);
1862 for (i = 0; i < 48; i++) {
1863 if (i >= len) {
1864 c = '\0';
1865 } else {
1866 c = (uint8_t)model_id[i];
1867 }
1868 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1869 }
1870 }
1871
1872 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1873 void *opaque, Error **errp)
1874 {
1875 X86CPU *cpu = X86_CPU(obj);
1876 int64_t value;
1877
1878 value = cpu->env.tsc_khz * 1000;
1879 visit_type_int(v, name, &value, errp);
1880 }
1881
1882 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1883 void *opaque, Error **errp)
1884 {
1885 X86CPU *cpu = X86_CPU(obj);
1886 const int64_t min = 0;
1887 const int64_t max = INT64_MAX;
1888 Error *local_err = NULL;
1889 int64_t value;
1890
1891 visit_type_int(v, name, &value, &local_err);
1892 if (local_err) {
1893 error_propagate(errp, local_err);
1894 return;
1895 }
1896 if (value < min || value > max) {
1897 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1898 name ? name : "null", value, min, max);
1899 return;
1900 }
1901
1902 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1903 }
1904
/* Generic getter for "feature-words" and "filtered-features" properties.
 *
 * @opaque points at an array of FEATURE_WORDS uint32_t feature words
 * (e.g. env->features or cpu->filtered_features).  The result is emitted
 * as an X86CPUFeatureWordInfoList; the list nodes live on the stack, which
 * is safe because the visitor consumes them before this function returns.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
1933
1934 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1935 void *opaque, Error **errp)
1936 {
1937 X86CPU *cpu = X86_CPU(obj);
1938 int64_t value = cpu->hyperv_spinlock_attempts;
1939
1940 visit_type_int(v, name, &value, errp);
1941 }
1942
1943 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1944 void *opaque, Error **errp)
1945 {
1946 const int64_t min = 0xFFF;
1947 const int64_t max = UINT_MAX;
1948 X86CPU *cpu = X86_CPU(obj);
1949 Error *err = NULL;
1950 int64_t value;
1951
1952 visit_type_int(v, name, &value, &err);
1953 if (err) {
1954 error_propagate(errp, err);
1955 return;
1956 }
1957
1958 if (value < min || value > max) {
1959 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1960 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1961 object_get_typename(obj), name ? name : "null",
1962 value, min, max);
1963 return;
1964 }
1965 cpu->hyperv_spinlock_attempts = value;
1966 }
1967
/* qdev property type for "hv-spinlocks": an integer whose range
 * checking is done by the setter (x86_set_hv_spinlocks).
 */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1973
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    for (char *p = s; *p != '\0'; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
1983
/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* Bit position within the full 64-bit XSAVE component bitmap */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            /* Redirect to the feature word/bit that controls this
             * save area; ctz32 picks the lowest controlling bit.
             */
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    /* May be NULL for unnamed bits */
    return feature_word_info[w].feat_names[bitnr];
}
2004
/* Compatibility hack to maintain legacy +-feat semantics,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
2010 static GList *plus_features, *minus_features;
2011
2012 static gint compare_string(gconstpointer a, gconstpointer b)
2013 {
2014 return g_strcmp0(a, b);
2015 }
2016
/* Parse "+feature,-feature,feature=foo" CPU feature string
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Feature strings are turned into global properties exactly once
     * per process; later calls are silently ignored.
     */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    /* NOTE(review): strtok() is not reentrant; presumably acceptable here
     * because this runs once during startup — confirm no concurrent use.
     */
    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            /* legacy "+feat": remember for later override handling */
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            /* legacy "-feat": remember for later override handling */
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "name=value"; a bare "name" means "name=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Mixing "+feat"/"-feat" with "feat=..." is ambiguous: warn */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"+%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"-%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }

        /* Special case: */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            /* Accept metric suffixes (e.g. "2G") and map to the
             * "tsc-frequency" property in plain Hz.
             */
            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        /* Register as a global property applied to every CPU of @typename */
        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        error_report("warning: Compatibility of ambiguous CPU model "
                     "strings won't be kept on future QEMU versions");
    }
}
2107
2108 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2109 static int x86_cpu_filter_features(X86CPU *cpu);
2110
2111 /* Check for missing features that may prevent the CPU class from
2112 * running using the current machine and accelerator.
2113 */
2114 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2115 strList **missing_feats)
2116 {
2117 X86CPU *xc;
2118 FeatureWord w;
2119 Error *err = NULL;
2120 strList **next = missing_feats;
2121
2122 if (xcc->kvm_required && !kvm_enabled()) {
2123 strList *new = g_new0(strList, 1);
2124 new->value = g_strdup("kvm");;
2125 *missing_feats = new;
2126 return;
2127 }
2128
2129 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2130
2131 x86_cpu_expand_features(xc, &err);
2132 if (err) {
2133 /* Errors at x86_cpu_expand_features should never happen,
2134 * but in case it does, just report the model as not
2135 * runnable at all using the "type" property.
2136 */
2137 strList *new = g_new0(strList, 1);
2138 new->value = g_strdup("type");
2139 *next = new;
2140 next = &new->next;
2141 }
2142
2143 x86_cpu_filter_features(xc);
2144
2145 for (w = 0; w < FEATURE_WORDS; w++) {
2146 uint32_t filtered = xc->filtered_features[w];
2147 int i;
2148 for (i = 0; i < 32; i++) {
2149 if (filtered & (1UL << i)) {
2150 strList *new = g_new0(strList, 1);
2151 new->value = g_strdup(x86_cpu_feature_name(w, i));
2152 *next = new;
2153 next = &new->next;
2154 }
2155 }
2156 }
2157
2158 object_unref(OBJECT(xc));
2159 }
2160
2161 /* Print all cpuid feature names in featureset
2162 */
2163 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2164 {
2165 int bit;
2166 bool first = true;
2167
2168 for (bit = 0; bit < 32; bit++) {
2169 if (featureset[bit]) {
2170 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2171 first = false;
2172 }
2173 }
2174 }
2175
2176 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2177 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2178 {
2179 ObjectClass *class_a = (ObjectClass *)a;
2180 ObjectClass *class_b = (ObjectClass *)b;
2181 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2182 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2183 const char *name_a, *name_b;
2184
2185 if (cc_a->ordering != cc_b->ordering) {
2186 return cc_a->ordering - cc_b->ordering;
2187 } else {
2188 name_a = object_class_get_name(class_a);
2189 name_b = object_class_get_name(class_b);
2190 return strcmp(name_a, name_b);
2191 }
2192 }
2193
2194 static GSList *get_sorted_cpu_model_list(void)
2195 {
2196 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2197 list = g_slist_sort(list, x86_cpu_list_compare);
2198 return list;
2199 }
2200
2201 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2202 {
2203 ObjectClass *oc = data;
2204 X86CPUClass *cc = X86_CPU_CLASS(oc);
2205 CPUListState *s = user_data;
2206 char *name = x86_cpu_class_get_model_name(cc);
2207 const char *desc = cc->model_description;
2208 if (!desc && cc->cpu_def) {
2209 desc = cc->cpu_def->model_id;
2210 }
2211
2212 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2213 name, desc);
2214 g_free(name);
2215 }
2216
2217 /* list available CPU models and flags */
2218 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2219 {
2220 int i;
2221 CPUListState s = {
2222 .file = f,
2223 .cpu_fprintf = cpu_fprintf,
2224 };
2225 GSList *list;
2226
2227 (*cpu_fprintf)(f, "Available CPUs:\n");
2228 list = get_sorted_cpu_model_list();
2229 g_slist_foreach(list, x86_cpu_list_entry, &s);
2230 g_slist_free(list);
2231
2232 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2233 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2234 FeatureWordInfo *fw = &feature_word_info[i];
2235
2236 (*cpu_fprintf)(f, " ");
2237 listflags(f, cpu_fprintf, fw->feat_names);
2238 (*cpu_fprintf)(f, "\n");
2239 }
2240 }
2241
2242 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2243 {
2244 ObjectClass *oc = data;
2245 X86CPUClass *cc = X86_CPU_CLASS(oc);
2246 CpuDefinitionInfoList **cpu_list = user_data;
2247 CpuDefinitionInfoList *entry;
2248 CpuDefinitionInfo *info;
2249
2250 info = g_malloc0(sizeof(*info));
2251 info->name = x86_cpu_class_get_model_name(cc);
2252 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2253 info->has_unavailable_features = true;
2254 info->q_typename = g_strdup(object_class_get_name(oc));
2255 info->migration_safe = cc->migration_safe;
2256 info->has_migration_safe = true;
2257 info->q_static = cc->static_model;
2258
2259 entry = g_malloc0(sizeof(*entry));
2260 entry->value = info;
2261 entry->next = *cpu_list;
2262 *cpu_list = entry;
2263 }
2264
2265 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2266 {
2267 CpuDefinitionInfoList *cpu_list = NULL;
2268 GSList *list = get_sorted_cpu_model_list();
2269 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2270 g_slist_free(list);
2271 return cpu_list;
2272 }
2273
2274 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2275 bool migratable_only)
2276 {
2277 FeatureWordInfo *wi = &feature_word_info[w];
2278 uint32_t r;
2279
2280 if (kvm_enabled()) {
2281 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2282 wi->cpuid_ecx,
2283 wi->cpuid_reg);
2284 } else if (tcg_enabled()) {
2285 r = wi->tcg_features;
2286 } else {
2287 return ~0;
2288 }
2289 if (migratable_only) {
2290 r &= x86_cpu_get_migratable_flags(w);
2291 }
2292 return r;
2293 }
2294
2295 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2296 {
2297 FeatureWord w;
2298
2299 for (w = 0; w < FEATURE_WORDS; w++) {
2300 report_unavailable_features(w, cpu->filtered_features[w]);
2301 }
2302 }
2303
2304 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2305 {
2306 PropValue *pv;
2307 for (pv = props; pv->prop; pv++) {
2308 if (!pv->value) {
2309 continue;
2310 }
2311 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2312 &error_abort);
2313 }
2314 }
2315
/* Load data from X86CPUDefinition into a X86CPU object
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    /* NOTE(review): errp is passed repeatedly without checking between
     * calls — presumably safe because these setters cannot fail for
     * table-provided values; confirm before changing this pattern.
     */
    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise that the guest runs under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        /* Under KVM, default to the host's vendor string */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2373
2374 /* Return a QDict containing keys for all properties that can be included
2375 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2376 * must be included in the dictionary.
2377 */
2378 static QDict *x86_cpu_static_props(void)
2379 {
2380 FeatureWord w;
2381 int i;
2382 static const char *props[] = {
2383 "min-level",
2384 "min-xlevel",
2385 "family",
2386 "model",
2387 "stepping",
2388 "model-id",
2389 "vendor",
2390 "lmce",
2391 NULL,
2392 };
2393 static QDict *d;
2394
2395 if (d) {
2396 return d;
2397 }
2398
2399 d = qdict_new();
2400 for (i = 0; props[i]; i++) {
2401 qdict_put_obj(d, props[i], qnull());
2402 }
2403
2404 for (w = 0; w < FEATURE_WORDS; w++) {
2405 FeatureWordInfo *fi = &feature_word_info[w];
2406 int bit;
2407 for (bit = 0; bit < 32; bit++) {
2408 if (!fi->feat_names[bit]) {
2409 continue;
2410 }
2411 qdict_put_obj(d, fi->feat_names[bit], qnull());
2412 }
2413 }
2414
2415 return d;
2416 }
2417
2418 /* Add an entry to @props dict, with the value for property. */
2419 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
2420 {
2421 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
2422 &error_abort);
2423
2424 qdict_put_obj(props, prop, value);
2425 }
2426
2427 /* Convert CPU model data from X86CPU object to a property dictionary
2428 * that can recreate exactly the same CPU model.
2429 */
2430 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
2431 {
2432 QDict *sprops = x86_cpu_static_props();
2433 const QDictEntry *e;
2434
2435 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
2436 const char *prop = qdict_entry_key(e);
2437 x86_cpu_expand_prop(cpu, props, prop);
2438 }
2439 }
2440
2441 /* Convert CPU model data from X86CPU object to a property dictionary
2442 * that can recreate exactly the same CPU model, including every
2443 * writeable QOM property.
2444 */
2445 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
2446 {
2447 ObjectPropertyIterator iter;
2448 ObjectProperty *prop;
2449
2450 object_property_iter_init(&iter, OBJECT(cpu));
2451 while ((prop = object_property_iter_next(&iter))) {
2452 /* skip read-only or write-only properties */
2453 if (!prop->get || !prop->set) {
2454 continue;
2455 }
2456
2457 /* "hotplugged" is the only property that is configurable
2458 * on the command-line but will be set differently on CPUs
2459 * created using "-cpu ... -smp ..." and by CPUs created
2460 * on the fly by x86_cpu_from_model() for querying. Skip it.
2461 */
2462 if (!strcmp(prop->name, "hotplugged")) {
2463 continue;
2464 }
2465 x86_cpu_expand_prop(cpu, props, prop->name);
2466 }
2467 }
2468
2469 static void object_apply_props(Object *obj, QDict *props, Error **errp)
2470 {
2471 const QDictEntry *prop;
2472 Error *err = NULL;
2473
2474 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
2475 object_property_set_qobject(obj, qdict_entry_value(prop),
2476 qdict_entry_key(prop), &err);
2477 if (err) {
2478 break;
2479 }
2480 }
2481
2482 error_propagate(errp, err);
2483 }
2484
2485 /* Create X86CPU object according to model+props specification */
2486 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
2487 {
2488 X86CPU *xc = NULL;
2489 X86CPUClass *xcc;
2490 Error *err = NULL;
2491
2492 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
2493 if (xcc == NULL) {
2494 error_setg(&err, "CPU model '%s' not found", model);
2495 goto out;
2496 }
2497
2498 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2499 if (props) {
2500 object_apply_props(OBJECT(xc), props, &err);
2501 if (err) {
2502 goto out;
2503 }
2504 }
2505
2506 x86_cpu_expand_features(xc, &err);
2507 if (err) {
2508 goto out;
2509 }
2510
2511 out:
2512 if (err) {
2513 error_propagate(errp, err);
2514 object_unref(OBJECT(xc));
2515 xc = NULL;
2516 }
2517 return xc;
2518 }
2519
2520 CpuModelExpansionInfo *
2521 arch_query_cpu_model_expansion(CpuModelExpansionType type,
2522 CpuModelInfo *model,
2523 Error **errp)
2524 {
2525 X86CPU *xc = NULL;
2526 Error *err = NULL;
2527 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
2528 QDict *props = NULL;
2529 const char *base_name;
2530
2531 xc = x86_cpu_from_model(model->name,
2532 model->has_props ?
2533 qobject_to_qdict(model->props) :
2534 NULL, &err);
2535 if (err) {
2536 goto out;
2537 }
2538
2539 props = qdict_new();
2540
2541 switch (type) {
2542 case CPU_MODEL_EXPANSION_TYPE_STATIC:
2543 /* Static expansion will be based on "base" only */
2544 base_name = "base";
2545 x86_cpu_to_dict(xc, props);
2546 break;
2547 case CPU_MODEL_EXPANSION_TYPE_FULL:
2548 /* As we don't return every single property, full expansion needs
2549 * to keep the original model name+props, and add extra
2550 * properties on top of that.
2551 */
2552 base_name = model->name;
2553 x86_cpu_to_dict_full(xc, props);
2554 break;
2555 default:
2556 error_setg(&err, "Unsupportted expansion type");
2557 goto out;
2558 }
2559
2560 if (!props) {
2561 props = qdict_new();
2562 }
2563 x86_cpu_to_dict(xc, props);
2564
2565 ret->model = g_new0(CpuModelInfo, 1);
2566 ret->model->name = g_strdup(base_name);
2567 ret->model->props = QOBJECT(props);
2568 ret->model->has_props = true;
2569
2570 out:
2571 object_unref(OBJECT(xc));
2572 if (err) {
2573 error_propagate(errp, err);
2574 qapi_free_CpuModelExpansionInfo(ret);
2575 ret = NULL;
2576 }
2577 return ret;
2578 }
2579
/* Convenience wrapper: create and realize an X86CPU from a "-cpu"
 * style model string via the generic CPU creation helper.
 */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
2584
2585 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2586 {
2587 X86CPUDefinition *cpudef = data;
2588 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2589
2590 xcc->cpu_def = cpudef;
2591 xcc->migration_safe = true;
2592 }
2593
2594 static void x86_register_cpudef_type(X86CPUDefinition *def)
2595 {
2596 char *typename = x86_cpu_type_name(def->name);
2597 TypeInfo ti = {
2598 .name = typename,
2599 .parent = TYPE_X86_CPU,
2600 .class_init = x86_cpu_cpudef_class_init,
2601 .class_data = def,
2602 };
2603
2604 /* AMD aliases are handled at runtime based on CPUID vendor, so
2605 * they shouldn't be set on the CPU model table.
2606 */
2607 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
2608
2609 type_register(&ti);
2610 g_free(typename);
2611 }
2612
2613 #if !defined(CONFIG_USER_ONLY)
2614
/* Clear the CPUID_APIC bit in the guest-visible CPUID[EAX=1].EDX
 * feature word; callers decide when the APIC must be hidden.
 */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2619
2620 #endif /* !CONFIG_USER_ONLY */
2621
/* Guest CPUID emulation: fill *eax/*ebx/*ecx/*edx with the values the
 * guest should see for CPUID leaf @index, subleaf @count, based on the
 * feature words and topology configured in @env.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Maximum basic leaf and vendor identification string */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, feature flags, APIC ID and logical CPU count */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE, so compute it on the fly */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR << 8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* Mask off the host's core-count bits (31..26) */
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    /* L3 disabled: report "no more caches" */
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                /* L3 is shared by the whole package */
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE, so compute it on the fly */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            /* Pass through whatever the host KVM supports */
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            /* SMT level: bits to shift APIC ID to get the core ID */
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            /* Core level: bits to shift APIC ID to get the package ID */
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            /* Supported XSAVE components and required buffer size */
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            /* XSAVEOPT/XSAVEC/XGETBV1/XSAVES feature flags */
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Size and offset of each enabled save area component */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        /* Maximum extended leaf and vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        /* Advanced Power Management (invtsc lives here) */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM information leaf (AMD) */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        /* Maximum Centaur/VIA leaf */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
3019
/* CPUClass::reset()
 *
 * Put the vCPU into its architectural power-on/RESET state: real mode,
 * execution starting at the reset vector (CS base 0xffff0000, EIP 0xfff0),
 * FPU/SSE/XSAVE state cleared, debug registers at their fixed values and
 * MTRRs disabled.  Fields past CPUX86State.end_reset_fields are preserved.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Clear only the reset-sensitive prefix of the state structure */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    /* Global interrupt flag: set so interrupts are deliverable after reset */
    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + EIP 0xfff0 = the architectural reset vector */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* EDX holds the CPU signature (family/model/stepping) after reset */
    env->regs[R_EDX] = env->cpuid_version;

    /* Bit 1 of EFLAGS is architecturally always 1 */
    env->eflags = 0x2;

    /* FPU init: tag value 1 marks each FPU register as empty */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    /* MXCSR reset value: all exceptions masked */
    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    /* Architectural power-on PAT value */
    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* APs start halted, waiting for INIT/SIPI from the BSP */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
3141
3142 #ifndef CONFIG_USER_ONLY
3143 bool cpu_is_bsp(X86CPU *cpu)
3144 {
3145 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3146 }
3147
3148 /* TODO: remove me, when reset over QOM tree is implemented */
3149 static void x86_cpu_machine_reset_cb(void *opaque)
3150 {
3151 X86CPU *cpu = opaque;
3152 cpu_reset(CPU(cpu));
3153 }
3154 #endif
3155
3156 static void mce_init(X86CPU *cpu)
3157 {
3158 CPUX86State *cenv = &cpu->env;
3159 unsigned int bank;
3160
3161 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3162 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3163 (CPUID_MCE | CPUID_MCA)) {
3164 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3165 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3166 cenv->mcg_ctl = ~(uint64_t)0;
3167 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3168 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3169 }
3170 }
3171 }
3172
3173 #ifndef CONFIG_USER_ONLY
3174 APICCommonClass *apic_get_class(void)
3175 {
3176 const char *apic_type = "apic";
3177
3178 if (kvm_apic_in_kernel()) {
3179 apic_type = "kvm-apic";
3180 } else if (xen_enabled()) {
3181 apic_type = "xen-apic";
3182 }
3183
3184 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3185 }
3186
/* Create the local APIC device for @cpu and attach it as the "lapic"
 * QOM child.  The APIC is not realized here; see x86_cpu_apic_realize().
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    /* Parent the APIC under the CPU and drop the reference taken by
     * object_new(), so the child relationship owns the object.
     */
    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
3204
/* Realize the APIC created by x86_cpu_apic_create() and map its MMIO
 * window.  The MMIO region is mapped only once, by the first CPU to get
 * here — the region is shared by all APICs.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        /* CPU was configured without an APIC; nothing to do */
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
3227
/* Machine-init-done notifier: if the machine exposes /machine/smram,
 * alias it into this CPU's address-space root at high priority.  The
 * alias starts disabled; SMM code elsewhere toggles it.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        /* Alias the first 4GiB of the machine's SMRAM region */
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        /* Priority 1: overrides normal memory (priority 0) when enabled */
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
3242 #else
/* User-mode emulation has no APIC device to realize */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
3246 #endif
3247
/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax < 0x80000008) {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        return 36;
    }

    host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
    /* Note: According to AMD doc 25481 rev 2.34 they have a field
     * at 23:16 that can specify a maximum physical address bits for
     * the guest that can override this value; but I've not seen
     * anything with that set.
     */
    return eax & 0xff;
}
3273
3274 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3275 {
3276 if (*min < value) {
3277 *min = value;
3278 }
3279 }
3280
3281 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3282 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3283 {
3284 CPUX86State *env = &cpu->env;
3285 FeatureWordInfo *fi = &feature_word_info[w];
3286 uint32_t eax = fi->cpuid_eax;
3287 uint32_t region = eax & 0xF0000000;
3288
3289 if (!env->features[w]) {
3290 return;
3291 }
3292
3293 switch (region) {
3294 case 0x00000000:
3295 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3296 break;
3297 case 0x80000000:
3298 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3299 break;
3300 case 0xC0000000:
3301 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3302 break;
3303 }
3304 }
3305
3306 /* Calculate XSAVE components based on the configured CPU feature flags */
3307 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3308 {
3309 CPUX86State *env = &cpu->env;
3310 int i;
3311 uint64_t mask;
3312
3313 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3314 return;
3315 }
3316
3317 mask = 0;
3318 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3319 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3320 if (env->features[esa->feature] & esa->bits) {
3321 mask |= (1ULL << i);
3322 }
3323 }
3324
3325 env->features[FEAT_XSAVE_COMP_LO] = mask;
3326 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3327 }
3328
3329 /***** Steps involved on loading and filtering CPUID data
3330 *
3331 * When initializing and realizing a CPU object, the steps
3332 * involved in setting up CPUID data are:
3333 *
3334 * 1) Loading CPU model definition (X86CPUDefinition). This is
3335 * implemented by x86_cpu_load_def() and should be completely
3336 * transparent, as it is done automatically by instance_init.
3337 * No code should need to look at X86CPUDefinition structs
3338 * outside instance_init.
3339 *
3340 * 2) CPU expansion. This is done by realize before CPUID
3341 * filtering, and will make sure host/accelerator data is
3342 * loaded for CPU models that depend on host capabilities
3343 * (e.g. "host"). Done by x86_cpu_expand_features().
3344 *
3345 * 3) CPUID filtering. This initializes extra data related to
3346 * CPUID, and checks if the host supports all capabilities
3347 * required by the CPU. Runnability of a CPU model is
3348 * determined at this step. Done by x86_cpu_filter_features().
3349 *
3350 * Some operations don't require all steps to be performed.
3351 * More precisely:
3352 *
3353 * - CPU instance creation (instance_init) will run only CPU
3354 * model loading. CPU expansion can't run at instance_init-time
 * because host/accelerator data may not be available yet.
3356 * - CPU realization will perform both CPU model expansion and CPUID
3357 * filtering, and return an error in case one of them fails.
3358 * - query-cpu-definitions needs to run all 3 steps. It needs
3359 * to run CPUID filtering, as the 'unavailable-features'
3360 * field is set based on the filtering results.
3361 * - The query-cpu-model-expansion QMP command only needs to run
3362 * CPU model loading and CPU expansion. It should not filter
3363 * any CPUID data based on host capabilities.
3364 */
3365
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 *
 * On error, *errp is set and the CPU's feature state may be partially
 * updated; callers treat that as fatal for realization.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: cpu->max_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Apply "+feat" requests as boolean QOM property writes */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* Apply "-feat" requests as boolean QOM property writes */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are only meaningful under KVM with kvm=on */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
3445
3446 /*
3447 * Finishes initialization of CPUID data, filters CPU feature
3448 * words based on host availability of each feature.
3449 *
3450 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
3451 */
3452 static int x86_cpu_filter_features(X86CPU *cpu)
3453 {
3454 CPUX86State *env = &cpu->env;
3455 FeatureWord w;
3456 int rv = 0;
3457
3458 for (w = 0; w < FEATURE_WORDS; w++) {
3459 uint32_t host_feat =
3460 x86_cpu_get_supported_feature_word(w, false);
3461 uint32_t requested_features = env->features[w];
3462 env->features[w] &= host_feat;
3463 cpu->filtered_features[w] = requested_features & ~env->features[w];
3464 if (cpu->filtered_features[w]) {
3465 rv = 1;
3466 }
3467 }
3468
3469 return rv;
3470 }
3471
/* Vendor checks against the *configured* guest CPUID vendor string,
 * not the host's vendor.
 */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize() for X86CPU.
 *
 * Performs feature expansion and host filtering, validates phys-bits,
 * creates/realizes the APIC, sets up the CPU address spaces (TCG), and
 * finally resets the CPU.  Errors are reported through @errp.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Filtering only fails realization when "enforce" is set; with
     * "check" the filtered features are merely reported.
     */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                                 " does not match phys-bits property (%u)",
                                 host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            /* TCG always models exactly TCG_PHYS_ADDR_BITS physical bits */
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed whenever the CPU advertises one or SMP is in use */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3670
/* DeviceClass::unrealize() for X86CPU: stop the vCPU thread, drop the
 * reset callback, tear down the APIC child, then chain to the parent.
 */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        /* Unparenting releases the "lapic" child added at create time */
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3693
/* Opaque state for the per-feature-bit boolean QOM properties */
typedef struct BitProperty {
    uint32_t *ptr;   /* feature-word field the property reads/writes */
    uint32_t mask;   /* bit(s) within *ptr controlled by the property */
} BitProperty;
3698
3699 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3700 void *opaque, Error **errp)
3701 {
3702 BitProperty *fp = opaque;
3703 bool value = (*fp->ptr & fp->mask) == fp->mask;
3704 visit_type_bool(v, name, &value, errp);
3705 }
3706
3707 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3708 void *opaque, Error **errp)
3709 {
3710 DeviceState *dev = DEVICE(obj);
3711 BitProperty *fp = opaque;
3712 Error *local_err = NULL;
3713 bool value;
3714
3715 if (dev->realized) {
3716 qdev_prop_set_after_realize(dev, name, errp);
3717 return;
3718 }
3719
3720 visit_type_bool(v, name, &value, &local_err);
3721 if (local_err) {
3722 error_propagate(errp, local_err);
3723 return;
3724 }
3725
3726 if (value) {
3727 *fp->ptr |= fp->mask;
3728 } else {
3729 *fp->ptr &= ~fp->mask;
3730 }
3731 }
3732
3733 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3734 void *opaque)
3735 {
3736 BitProperty *prop = opaque;
3737 g_free(prop);
3738 }
3739
/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      uint32_t *field,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Already registered: widen the existing property's mask.
         * It must point at the same feature-word field.
         */
        fp = op->opaque;
        assert(fp->ptr == field);
        fp->mask |= mask;
    } else {
        /* First registration: fp is freed by x86_cpu_release_bit_prop() */
        fp = g_new0(BitProperty, 1);
        fp->ptr = field;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}
3770
3771 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3772 FeatureWord w,
3773 int bitnr)
3774 {
3775 FeatureWordInfo *fi = &feature_word_info[w];
3776 const char *name = fi->feat_names[bitnr];
3777
3778 if (!name) {
3779 return;
3780 }
3781
3782 /* Property names should use "-" instead of "_".
3783 * Old names containing underscores are registered as aliases
3784 * using object_property_add_alias()
3785 */
3786 assert(!strchr(name, '_'));
3787 /* aliases don't use "|" delimiters anymore, they are registered
3788 * manually using object_property_add_alias() */
3789 assert(!strchr(name, '|'));
3790 x86_cpu_register_bit_prop(cpu, name, &cpu->env.features[w], bitnr);
3791 }
3792
/* Build guest-crash information from the Hyper-V crash MSRs.
 *
 * Returns a newly allocated GuestPanicInformation (caller frees with
 * qapi_free_GuestPanicInformation()), or NULL when the CPU does not
 * expose the Hyper-V guest-crash MSRs.
 */
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_X64_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        /* The five crash parameter MSRs P0..P4 must all exist */
        assert(HV_X64_MSR_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}
3814 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
3815 const char *name, void *opaque,
3816 Error **errp)
3817 {
3818 CPUState *cs = CPU(obj);
3819 GuestPanicInformation *panic_info;
3820
3821 if (!cs->crash_occurred) {
3822 error_setg(errp, "No crash occured");
3823 return;
3824 }
3825
3826 panic_info = x86_cpu_get_crash_info(cs);
3827 if (panic_info == NULL) {
3828 error_setg(errp, "No crash information");
3829 return;
3830 }
3831
3832 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
3833 errp);
3834 qapi_free_GuestPanicInformation(panic_info);
3835 }
3836
/* QOM instance_init for X86CPU: register versioned-ID, feature-word and
 * per-feature-bit properties plus legacy-name aliases, then load the CPU
 * model definition (if any).  No host/accelerator data is consulted here;
 * that happens later in x86_cpu_expand_features() at realize time.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only views of the enabled and host-filtered feature words */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Aliases for alternate spellings of feature names */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Legacy underscore-separated names kept for compatibility */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
3919
3920 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3921 {
3922 X86CPU *cpu = X86_CPU(cs);
3923
3924 return cpu->apic_id;
3925 }
3926
3927 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3928 {
3929 X86CPU *cpu = X86_CPU(cs);
3930
3931 return cpu->env.cr[0] & CR0_PG_MASK;
3932 }
3933
3934 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3935 {
3936 X86CPU *cpu = X86_CPU(cs);
3937
3938 cpu->env.eip = value;
3939 }
3940
3941 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3942 {
3943 X86CPU *cpu = X86_CPU(cs);
3944
3945 cpu->env.eip = tb->pc - tb->cs_base;
3946 }
3947
/* CPUClass::has_work(): decide whether the vCPU has a deliverable event.
 *
 * True when:
 *  - a hard interrupt (or APIC poll) is pending and EFLAGS.IF is set, or
 *  - an NMI, INIT, SIPI or MCE is pending (not gated by IF), or
 *  - an SMI is pending and we are not already in SMM.
 */
static bool x86_cpu_has_work(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
                                      CPU_INTERRUPT_POLL)) &&
            (env->eflags & IF_MASK)) ||
           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
                                     CPU_INTERRUPT_INIT |
                                     CPU_INTERRUPT_SIPI |
                                     CPU_INTERRUPT_MCE)) ||
           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK));
}
3963
/* qdev properties exposed by every X86CPU instance */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* System emulation: topology IDs must be assigned before realize */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments (all off by default) */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* Feature-filtering behavior; see x86_cpu_filter_features() */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "derive from the min-* values at expand time" */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_END_OF_LIST()
};
4010
4011 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
4012 {
4013 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4014 CPUClass *cc = CPU_CLASS(oc);
4015 DeviceClass *dc = DEVICE_CLASS(oc);
4016
4017 xcc->parent_realize = dc->realize;
4018 xcc->parent_unrealize = dc->unrealize;
4019 dc->realize = x86_cpu_realizefn;
4020 dc->unrealize = x86_cpu_unrealizefn;
4021 dc->props = x86_cpu_properties;
4022
4023 xcc->parent_reset = cc->reset;
4024 cc->reset = x86_cpu_reset;
4025 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
4026
4027 cc->class_by_name = x86_cpu_class_by_name;
4028 cc->parse_features = x86_cpu_parse_featurestr;
4029 cc->has_work = x86_cpu_has_work;
4030 cc->do_interrupt = x86_cpu_do_interrupt;
4031 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
4032 cc->dump_state = x86_cpu_dump_state;
4033 cc->get_crash_info = x86_cpu_get_crash_info;
4034 cc->set_pc = x86_cpu_set_pc;
4035 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
4036 cc->gdb_read_register = x86_cpu_gdb_read_register;
4037 cc->gdb_write_register = x86_cpu_gdb_write_register;
4038 cc->get_arch_id = x86_cpu_get_arch_id;
4039 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
4040 #ifdef CONFIG_USER_ONLY
4041 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
4042 #else
4043 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
4044 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
4045 cc->write_elf64_note = x86_cpu_write_elf64_note;
4046 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
4047 cc->write_elf32_note = x86_cpu_write_elf32_note;
4048 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
4049 cc->vmsd = &vmstate_x86_cpu;
4050 #endif
4051 /* CPU_NB_REGS * 2 = general regs + xmm regs
4052 * 25 = eip, eflags, 6 seg regs, st[0-7], fctrl,...,fop, mxcsr.
4053 */
4054 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
4055 #ifndef CONFIG_USER_ONLY
4056 cc->debug_excp_handler = breakpoint_handler;
4057 #endif
4058 cc->cpu_exec_enter = x86_cpu_exec_enter;
4059 cc->cpu_exec_exit = x86_cpu_exec_exit;
4060
4061 dc->cannot_instantiate_with_device_add_yet = false;
4062 }
4063
/*
 * Abstract base QOM type for all x86 CPU models; concrete model types
 * registered in x86_cpu_register_types() derive from this.
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,   /* never instantiated directly, only via subtypes */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
4073
4074
4075 /* "base" CPU model, used by query-cpu-model-expansion */
4076 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4077 {
4078 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4079
4080 xcc->static_model = true;
4081 xcc->migration_safe = true;
4082 xcc->model_description = "base CPU model type with no features enabled";
4083 xcc->ordering = 8;
4084 }
4085
/* QOM type for the "base" CPU model described in x86_cpu_base_class_init(). */
static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};
4091
4092 static void x86_cpu_register_types(void)
4093 {
4094 int i;
4095
4096 type_register_static(&x86_cpu_type_info);
4097 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4098 x86_register_cpudef_type(&builtin_x86_defs[i]);
4099 }
4100 type_register_static(&max_x86_cpu_type_info);
4101 type_register_static(&x86_base_cpu_type_info);
4102 #ifdef CONFIG_KVM
4103 type_register_static(&host_x86_cpu_type_info);
4104 #endif
4105 }
4106
4107 type_init(x86_cpu_register_types)