]> git.proxmox.com Git - mirror_qemu.git/blob - target/i386/cpu.c
target/i386: Define CPUID_MODEL_ID_SZ macro
[mirror_qemu.git] / target / i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32 #include "qapi/qmp/types.h"
33
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "qom/qom-qobject.h"
38 #include "sysemu/arch_init.h"
39
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
43
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/i386/topology.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "exec/address-spaces.h"
49 #include "hw/hw.h"
50 #include "hw/xen/xen.h"
51 #include "hw/i386/apic_internal.h"
52 #endif
53
54
55 /* Cache topology CPUID constants: */
56
57 /* CPUID Leaf 2 Descriptors */
58
59 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
60 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
61 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
63
64
65 /* CPUID Leaf 4 constants: */
66
67 /* EAX: */
68 #define CPUID_4_TYPE_DCACHE 1
69 #define CPUID_4_TYPE_ICACHE 2
70 #define CPUID_4_TYPE_UNIFIED 3
71
72 #define CPUID_4_LEVEL(l) ((l) << 5)
73
74 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
75 #define CPUID_4_FULLY_ASSOC (1 << 9)
76
77 /* EDX: */
78 #define CPUID_4_NO_INVD_SHARING (1 << 0)
79 #define CPUID_4_INCLUSIVE (1 << 1)
80 #define CPUID_4_COMPLEX_IDX (1 << 2)
81
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a ways-of-associativity count to the 4-bit field encoding
 * defined by AMD's CPUID specification (0xF == fully associative).
 * Note: every use of the argument and the whole expansion are
 * parenthesized, so passing a non-trivial expression (e.g. a
 * conditional or comparison) cannot be mis-parsed.  The argument is
 * evaluated multiple times; callers pass only constants.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a) : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
97
98
99 /* Definitions of the hardcoded cache entries we expose: */
100
101 /* L1 data cache: */
102 #define L1D_LINE_SIZE 64
103 #define L1D_ASSOCIATIVITY 8
104 #define L1D_SETS 64
105 #define L1D_PARTITIONS 1
106 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
107 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
108 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
109 #define L1D_LINES_PER_TAG 1
110 #define L1D_SIZE_KB_AMD 64
111 #define L1D_ASSOCIATIVITY_AMD 2
112
113 /* L1 instruction cache: */
114 #define L1I_LINE_SIZE 64
115 #define L1I_ASSOCIATIVITY 8
116 #define L1I_SETS 64
117 #define L1I_PARTITIONS 1
118 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
119 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
120 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
121 #define L1I_LINES_PER_TAG 1
122 #define L1I_SIZE_KB_AMD 64
123 #define L1I_ASSOCIATIVITY_AMD 2
124
125 /* Level 2 unified cache: */
126 #define L2_LINE_SIZE 64
127 #define L2_ASSOCIATIVITY 16
128 #define L2_SETS 4096
129 #define L2_PARTITIONS 1
130 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
131 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
132 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
133 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
134 #define L2_LINES_PER_TAG 1
135 #define L2_SIZE_KB_AMD 512
136
137 /* Level 3 unified cache: */
138 #define L3_SIZE_KB 0 /* disabled */
139 #define L3_ASSOCIATIVITY 0 /* disabled */
140 #define L3_LINES_PER_TAG 0 /* disabled */
141 #define L3_LINE_SIZE 0 /* disabled */
142 #define L3_N_LINE_SIZE 64
143 #define L3_N_ASSOCIATIVITY 16
144 #define L3_N_SETS 16384
145 #define L3_N_PARTITIONS 1
146 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
147 #define L3_N_LINES_PER_TAG 1
148 #define L3_N_SIZE_KB_AMD 16384
149
150 /* TLB definitions: */
151
152 #define L1_DTLB_2M_ASSOC 1
153 #define L1_DTLB_2M_ENTRIES 255
154 #define L1_DTLB_4K_ASSOC 1
155 #define L1_DTLB_4K_ENTRIES 255
156
157 #define L1_ITLB_2M_ASSOC 1
158 #define L1_ITLB_2M_ENTRIES 255
159 #define L1_ITLB_4K_ASSOC 1
160 #define L1_ITLB_4K_ENTRIES 255
161
162 #define L2_DTLB_2M_ASSOC 0 /* disabled */
163 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
164 #define L2_DTLB_4K_ASSOC 4
165 #define L2_DTLB_4K_ENTRIES 512
166
167 #define L2_ITLB_2M_ASSOC 0 /* disabled */
168 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
169 #define L2_ITLB_4K_ASSOC 4
170 #define L2_ITLB_4K_ENTRIES 512
171
172
173
174 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
175 uint32_t vendor2, uint32_t vendor3)
176 {
177 int i;
178 for (i = 0; i < 4; i++) {
179 dst[i] = vendor1 >> (8 * i);
180 dst[i + 4] = vendor2 >> (8 * i);
181 dst[i + 8] = vendor3 >> (8 * i);
182 }
183 dst[CPUID_VENDOR_SZ] = '\0';
184 }
185
186 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
187 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
188 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
189 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
190 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
191 CPUID_PSE36 | CPUID_FXSR)
192 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
193 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
194 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
195 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
196 CPUID_PAE | CPUID_SEP | CPUID_APIC)
197
198 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
199 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
200 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
201 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
202 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
203 /* partly implemented:
204 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
205 /* missing:
206 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
207 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
208 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
209 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
210 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
211 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
212 /* missing:
213 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
214 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
215 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
216 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
217 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
218
219 #ifdef TARGET_X86_64
220 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
221 #else
222 #define TCG_EXT2_X86_64_FEATURES 0
223 #endif
224
225 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
226 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
227 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
228 TCG_EXT2_X86_64_FEATURES)
229 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
230 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
231 #define TCG_EXT4_FEATURES 0
232 #define TCG_SVM_FEATURES 0
233 #define TCG_KVM_FEATURES 0
234 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
235 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
236 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
237 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
238 CPUID_7_0_EBX_ERMS)
239 /* missing:
240 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
241 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
242 CPUID_7_0_EBX_RDSEED */
243 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
244 CPUID_7_0_ECX_LA57)
245 #define TCG_7_0_EDX_FEATURES 0
246 #define TCG_APM_FEATURES 0
247 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
248 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
249 /* missing:
250 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
251
/* Static metadata describing one CPUID feature word: its bit names,
 * how to query it via CPUID, and migratability/TCG support masks. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
267
/* Table describing every FeatureWord: bit names, the CPUID leaf/register
 * that reports the word, which flags TCG implements, and which flags are
 * known (un)migratable.  Indexed by the FeatureWord enum.  A NULL name
 * means the bit has no user-visible property name.
 */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    /* VIA/Centaur padlock feature leaf */
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        /* NOTE(review): bits 0 and 3 are both named "kvmclock" —
         * presumably KVM's two clocksource features share one
         * property name; confirm against linux/kvm_para.h. */
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    /* Hyper-V enlightenment words below have no user-settable names;
     * the bits are set via hv-* CPU properties elsewhere. */
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    /* Low/high halves of the CPUID[0xD,0].EDX:EAX supported-components
     * mask; the bits have no names of their own. */
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};
530
/* Pairing of a 32-bit x86 register's printable name with its QAPI enum */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
537
/* Expand to one table entry mapping the R_<reg> index to its name and
 * QAPI enum value. */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* 32-bit general-purpose register descriptions, indexed by R_* constant */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
551
/* Description of one XSAVE state component: the feature word/bits that
 * enable it, and its byte offset and size within the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;  /* gating FeatureWord index and bit mask */
    uint32_t offset, size;   /* location/extent in the XSAVE area */
} ExtSaveArea;
556
/* XSAVE state components, indexed by XSTATE_*_BIT.  Entries with
 * offset 0 live in the legacy (FXSAVE-compatible) region of the
 * XSAVE area rather than in an extended region. */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
601
602 static uint32_t xsave_area_size(uint64_t mask)
603 {
604 int i;
605 uint64_t ret = 0;
606
607 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
608 const ExtSaveArea *esa = &x86_ext_save_areas[i];
609 if ((mask >> i) & 1) {
610 ret = MAX(ret, esa->offset + esa->size);
611 }
612 }
613 return ret;
614 }
615
616 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
617 {
618 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
619 cpu->env.features[FEAT_XSAVE_COMP_LO];
620 }
621
622 const char *get_register_name_32(unsigned int reg)
623 {
624 if (reg >= CPU_NB_REGS32) {
625 return NULL;
626 }
627 return x86_reg_info_32[reg].name;
628 }
629
630 /*
631 * Returns the set of feature flags that are supported and migratable by
632 * QEMU, for a given FeatureWord.
633 */
634 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
635 {
636 FeatureWordInfo *wi = &feature_word_info[w];
637 uint32_t r = 0;
638 int i;
639
640 for (i = 0; i < 32; i++) {
641 uint32_t f = 1U << i;
642
643 /* If the feature name is known, it is implicitly considered migratable,
644 * unless it is explicitly set in unmigratable_flags */
645 if ((wi->migratable_flags & f) ||
646 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
647 r |= f;
648 }
649 }
650 return r;
651 }
652
653 void host_cpuid(uint32_t function, uint32_t count,
654 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
655 {
656 uint32_t vec[4];
657
658 #ifdef __x86_64__
659 asm volatile("cpuid"
660 : "=a"(vec[0]), "=b"(vec[1]),
661 "=c"(vec[2]), "=d"(vec[3])
662 : "0"(function), "c"(count) : "cc");
663 #elif defined(__i386__)
664 asm volatile("pusha \n\t"
665 "cpuid \n\t"
666 "mov %%eax, 0(%2) \n\t"
667 "mov %%ebx, 4(%2) \n\t"
668 "mov %%ecx, 8(%2) \n\t"
669 "mov %%edx, 12(%2) \n\t"
670 "popa"
671 : : "a"(function), "c"(count), "S"(vec)
672 : "memory", "cc");
673 #else
674 abort();
675 #endif
676
677 if (eax)
678 *eax = vec[0];
679 if (ebx)
680 *ebx = vec[1];
681 if (ecx)
682 *ecx = vec[2];
683 if (edx)
684 *edx = vec[3];
685 }
686
/* Read the host CPU's vendor string, family, model and stepping using
 * CPUID leaves 0 and 1.  @vendor must have room for a 12-character
 * string plus NUL; @family/@model/@stepping may each be NULL. */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family != NULL) {
        /* base family (bits 11:8) plus extended family (bits 27:20) */
        *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    }
    if (model != NULL) {
        /* base model (bits 7:4) with extended model (bits 19:16) above it */
        *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    }
    if (stepping != NULL) {
        /* stepping is bits 3:0 */
        *stepping = eax & 0x0F;
    }
}
705
/* CPU class name definitions: */

/* Suffix appended to a model name to form its QOM type name */
#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
718
719 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
720 {
721 ObjectClass *oc;
722 char *typename;
723
724 if (cpu_model == NULL) {
725 return NULL;
726 }
727
728 typename = x86_cpu_type_name(cpu_model);
729 oc = object_class_by_name(typename);
730 g_free(typename);
731 return oc;
732 }
733
734 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
735 {
736 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
737 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
738 return g_strndup(class_name,
739 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
740 }
741
742 struct X86CPUDefinition {
743 const char *name;
744 uint32_t level;
745 uint32_t xlevel;
746 /* vendor is zero-terminated, 12 character ASCII string */
747 char vendor[CPUID_VENDOR_SZ + 1];
748 int family;
749 int model;
750 int stepping;
751 FeatureWordArray features;
752 char model_id[48];
753 };
754
755 static X86CPUDefinition builtin_x86_defs[] = {
756 {
757 .name = "qemu64",
758 .level = 0xd,
759 .vendor = CPUID_VENDOR_AMD,
760 .family = 6,
761 .model = 6,
762 .stepping = 3,
763 .features[FEAT_1_EDX] =
764 PPRO_FEATURES |
765 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
766 CPUID_PSE36,
767 .features[FEAT_1_ECX] =
768 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
769 .features[FEAT_8000_0001_EDX] =
770 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
771 .features[FEAT_8000_0001_ECX] =
772 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
773 .xlevel = 0x8000000A,
774 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
775 },
776 {
777 .name = "phenom",
778 .level = 5,
779 .vendor = CPUID_VENDOR_AMD,
780 .family = 16,
781 .model = 2,
782 .stepping = 3,
783 /* Missing: CPUID_HT */
784 .features[FEAT_1_EDX] =
785 PPRO_FEATURES |
786 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
787 CPUID_PSE36 | CPUID_VME,
788 .features[FEAT_1_ECX] =
789 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
790 CPUID_EXT_POPCNT,
791 .features[FEAT_8000_0001_EDX] =
792 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
793 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
794 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
795 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
796 CPUID_EXT3_CR8LEG,
797 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
798 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
799 .features[FEAT_8000_0001_ECX] =
800 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
801 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
802 /* Missing: CPUID_SVM_LBRV */
803 .features[FEAT_SVM] =
804 CPUID_SVM_NPT,
805 .xlevel = 0x8000001A,
806 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
807 },
808 {
809 .name = "core2duo",
810 .level = 10,
811 .vendor = CPUID_VENDOR_INTEL,
812 .family = 6,
813 .model = 15,
814 .stepping = 11,
815 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
816 .features[FEAT_1_EDX] =
817 PPRO_FEATURES |
818 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
819 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
820 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
821 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
822 .features[FEAT_1_ECX] =
823 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
824 CPUID_EXT_CX16,
825 .features[FEAT_8000_0001_EDX] =
826 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
827 .features[FEAT_8000_0001_ECX] =
828 CPUID_EXT3_LAHF_LM,
829 .xlevel = 0x80000008,
830 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
831 },
832 {
833 .name = "kvm64",
834 .level = 0xd,
835 .vendor = CPUID_VENDOR_INTEL,
836 .family = 15,
837 .model = 6,
838 .stepping = 1,
839 /* Missing: CPUID_HT */
840 .features[FEAT_1_EDX] =
841 PPRO_FEATURES | CPUID_VME |
842 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
843 CPUID_PSE36,
844 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
845 .features[FEAT_1_ECX] =
846 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
847 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
848 .features[FEAT_8000_0001_EDX] =
849 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
850 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
851 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
852 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
853 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
854 .features[FEAT_8000_0001_ECX] =
855 0,
856 .xlevel = 0x80000008,
857 .model_id = "Common KVM processor"
858 },
859 {
860 .name = "qemu32",
861 .level = 4,
862 .vendor = CPUID_VENDOR_INTEL,
863 .family = 6,
864 .model = 6,
865 .stepping = 3,
866 .features[FEAT_1_EDX] =
867 PPRO_FEATURES,
868 .features[FEAT_1_ECX] =
869 CPUID_EXT_SSE3,
870 .xlevel = 0x80000004,
871 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
872 },
873 {
874 .name = "kvm32",
875 .level = 5,
876 .vendor = CPUID_VENDOR_INTEL,
877 .family = 15,
878 .model = 6,
879 .stepping = 1,
880 .features[FEAT_1_EDX] =
881 PPRO_FEATURES | CPUID_VME |
882 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
883 .features[FEAT_1_ECX] =
884 CPUID_EXT_SSE3,
885 .features[FEAT_8000_0001_ECX] =
886 0,
887 .xlevel = 0x80000008,
888 .model_id = "Common 32-bit KVM processor"
889 },
890 {
891 .name = "coreduo",
892 .level = 10,
893 .vendor = CPUID_VENDOR_INTEL,
894 .family = 6,
895 .model = 14,
896 .stepping = 8,
897 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
898 .features[FEAT_1_EDX] =
899 PPRO_FEATURES | CPUID_VME |
900 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
901 CPUID_SS,
902 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
903 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
904 .features[FEAT_1_ECX] =
905 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
906 .features[FEAT_8000_0001_EDX] =
907 CPUID_EXT2_NX,
908 .xlevel = 0x80000008,
909 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
910 },
911 {
912 .name = "486",
913 .level = 1,
914 .vendor = CPUID_VENDOR_INTEL,
915 .family = 4,
916 .model = 8,
917 .stepping = 0,
918 .features[FEAT_1_EDX] =
919 I486_FEATURES,
920 .xlevel = 0,
921 },
922 {
923 .name = "pentium",
924 .level = 1,
925 .vendor = CPUID_VENDOR_INTEL,
926 .family = 5,
927 .model = 4,
928 .stepping = 3,
929 .features[FEAT_1_EDX] =
930 PENTIUM_FEATURES,
931 .xlevel = 0,
932 },
933 {
934 .name = "pentium2",
935 .level = 2,
936 .vendor = CPUID_VENDOR_INTEL,
937 .family = 6,
938 .model = 5,
939 .stepping = 2,
940 .features[FEAT_1_EDX] =
941 PENTIUM2_FEATURES,
942 .xlevel = 0,
943 },
944 {
945 .name = "pentium3",
946 .level = 3,
947 .vendor = CPUID_VENDOR_INTEL,
948 .family = 6,
949 .model = 7,
950 .stepping = 3,
951 .features[FEAT_1_EDX] =
952 PENTIUM3_FEATURES,
953 .xlevel = 0,
954 },
955 {
956 .name = "athlon",
957 .level = 2,
958 .vendor = CPUID_VENDOR_AMD,
959 .family = 6,
960 .model = 2,
961 .stepping = 3,
962 .features[FEAT_1_EDX] =
963 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
964 CPUID_MCA,
965 .features[FEAT_8000_0001_EDX] =
966 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
967 .xlevel = 0x80000008,
968 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
969 },
970 {
971 .name = "n270",
972 .level = 10,
973 .vendor = CPUID_VENDOR_INTEL,
974 .family = 6,
975 .model = 28,
976 .stepping = 2,
977 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
978 .features[FEAT_1_EDX] =
979 PPRO_FEATURES |
980 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
981 CPUID_ACPI | CPUID_SS,
982 /* Some CPUs got no CPUID_SEP */
983 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
984 * CPUID_EXT_XTPR */
985 .features[FEAT_1_ECX] =
986 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
987 CPUID_EXT_MOVBE,
988 .features[FEAT_8000_0001_EDX] =
989 CPUID_EXT2_NX,
990 .features[FEAT_8000_0001_ECX] =
991 CPUID_EXT3_LAHF_LM,
992 .xlevel = 0x80000008,
993 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
994 },
995 {
996 .name = "Conroe",
997 .level = 10,
998 .vendor = CPUID_VENDOR_INTEL,
999 .family = 6,
1000 .model = 15,
1001 .stepping = 3,
1002 .features[FEAT_1_EDX] =
1003 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1004 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1005 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1006 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1007 CPUID_DE | CPUID_FP87,
1008 .features[FEAT_1_ECX] =
1009 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1010 .features[FEAT_8000_0001_EDX] =
1011 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1012 .features[FEAT_8000_0001_ECX] =
1013 CPUID_EXT3_LAHF_LM,
1014 .xlevel = 0x80000008,
1015 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1016 },
1017 {
1018 .name = "Penryn",
1019 .level = 10,
1020 .vendor = CPUID_VENDOR_INTEL,
1021 .family = 6,
1022 .model = 23,
1023 .stepping = 3,
1024 .features[FEAT_1_EDX] =
1025 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1026 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1027 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1028 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1029 CPUID_DE | CPUID_FP87,
1030 .features[FEAT_1_ECX] =
1031 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1032 CPUID_EXT_SSE3,
1033 .features[FEAT_8000_0001_EDX] =
1034 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1035 .features[FEAT_8000_0001_ECX] =
1036 CPUID_EXT3_LAHF_LM,
1037 .xlevel = 0x80000008,
1038 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1039 },
1040 {
1041 .name = "Nehalem",
1042 .level = 11,
1043 .vendor = CPUID_VENDOR_INTEL,
1044 .family = 6,
1045 .model = 26,
1046 .stepping = 3,
1047 .features[FEAT_1_EDX] =
1048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1052 CPUID_DE | CPUID_FP87,
1053 .features[FEAT_1_ECX] =
1054 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1055 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1056 .features[FEAT_8000_0001_EDX] =
1057 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1058 .features[FEAT_8000_0001_ECX] =
1059 CPUID_EXT3_LAHF_LM,
1060 .xlevel = 0x80000008,
1061 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1062 },
1063 {
1064 .name = "Westmere",
1065 .level = 11,
1066 .vendor = CPUID_VENDOR_INTEL,
1067 .family = 6,
1068 .model = 44,
1069 .stepping = 1,
1070 .features[FEAT_1_EDX] =
1071 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1072 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1073 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1074 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1075 CPUID_DE | CPUID_FP87,
1076 .features[FEAT_1_ECX] =
1077 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1078 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1079 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1080 .features[FEAT_8000_0001_EDX] =
1081 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1082 .features[FEAT_8000_0001_ECX] =
1083 CPUID_EXT3_LAHF_LM,
1084 .features[FEAT_6_EAX] =
1085 CPUID_6_EAX_ARAT,
1086 .xlevel = 0x80000008,
1087 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1088 },
1089 {
1090 .name = "SandyBridge",
1091 .level = 0xd,
1092 .vendor = CPUID_VENDOR_INTEL,
1093 .family = 6,
1094 .model = 42,
1095 .stepping = 1,
1096 .features[FEAT_1_EDX] =
1097 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1098 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1099 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1100 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1101 CPUID_DE | CPUID_FP87,
1102 .features[FEAT_1_ECX] =
1103 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1104 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1105 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1106 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1107 CPUID_EXT_SSE3,
1108 .features[FEAT_8000_0001_EDX] =
1109 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1110 CPUID_EXT2_SYSCALL,
1111 .features[FEAT_8000_0001_ECX] =
1112 CPUID_EXT3_LAHF_LM,
1113 .features[FEAT_XSAVE] =
1114 CPUID_XSAVE_XSAVEOPT,
1115 .features[FEAT_6_EAX] =
1116 CPUID_6_EAX_ARAT,
1117 .xlevel = 0x80000008,
1118 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1119 },
1120 {
1121 .name = "IvyBridge",
1122 .level = 0xd,
1123 .vendor = CPUID_VENDOR_INTEL,
1124 .family = 6,
1125 .model = 58,
1126 .stepping = 9,
1127 .features[FEAT_1_EDX] =
1128 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1129 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1130 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1131 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1132 CPUID_DE | CPUID_FP87,
1133 .features[FEAT_1_ECX] =
1134 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1135 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1136 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1137 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1138 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1139 .features[FEAT_7_0_EBX] =
1140 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1141 CPUID_7_0_EBX_ERMS,
1142 .features[FEAT_8000_0001_EDX] =
1143 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1144 CPUID_EXT2_SYSCALL,
1145 .features[FEAT_8000_0001_ECX] =
1146 CPUID_EXT3_LAHF_LM,
1147 .features[FEAT_XSAVE] =
1148 CPUID_XSAVE_XSAVEOPT,
1149 .features[FEAT_6_EAX] =
1150 CPUID_6_EAX_ARAT,
1151 .xlevel = 0x80000008,
1152 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1153 },
1154 {
1155 .name = "Haswell-noTSX",
1156 .level = 0xd,
1157 .vendor = CPUID_VENDOR_INTEL,
1158 .family = 6,
1159 .model = 60,
1160 .stepping = 1,
1161 .features[FEAT_1_EDX] =
1162 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1163 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1164 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1165 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1166 CPUID_DE | CPUID_FP87,
1167 .features[FEAT_1_ECX] =
1168 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1169 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1170 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1171 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1172 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1173 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1174 .features[FEAT_8000_0001_EDX] =
1175 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1176 CPUID_EXT2_SYSCALL,
1177 .features[FEAT_8000_0001_ECX] =
1178 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1179 .features[FEAT_7_0_EBX] =
1180 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1181 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1182 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1183 .features[FEAT_XSAVE] =
1184 CPUID_XSAVE_XSAVEOPT,
1185 .features[FEAT_6_EAX] =
1186 CPUID_6_EAX_ARAT,
1187 .xlevel = 0x80000008,
1188 .model_id = "Intel Core Processor (Haswell, no TSX)",
1189 }, {
1190 .name = "Haswell",
1191 .level = 0xd,
1192 .vendor = CPUID_VENDOR_INTEL,
1193 .family = 6,
1194 .model = 60,
1195 .stepping = 4,
1196 .features[FEAT_1_EDX] =
1197 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1198 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1199 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1200 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1201 CPUID_DE | CPUID_FP87,
1202 .features[FEAT_1_ECX] =
1203 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1204 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1205 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1206 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1207 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1208 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1209 .features[FEAT_8000_0001_EDX] =
1210 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1211 CPUID_EXT2_SYSCALL,
1212 .features[FEAT_8000_0001_ECX] =
1213 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1214 .features[FEAT_7_0_EBX] =
1215 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1216 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1217 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1218 CPUID_7_0_EBX_RTM,
1219 .features[FEAT_XSAVE] =
1220 CPUID_XSAVE_XSAVEOPT,
1221 .features[FEAT_6_EAX] =
1222 CPUID_6_EAX_ARAT,
1223 .xlevel = 0x80000008,
1224 .model_id = "Intel Core Processor (Haswell)",
1225 },
1226 {
1227 .name = "Broadwell-noTSX",
1228 .level = 0xd,
1229 .vendor = CPUID_VENDOR_INTEL,
1230 .family = 6,
1231 .model = 61,
1232 .stepping = 2,
1233 .features[FEAT_1_EDX] =
1234 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1235 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1236 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1237 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1238 CPUID_DE | CPUID_FP87,
1239 .features[FEAT_1_ECX] =
1240 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1241 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1242 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1243 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1244 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1245 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1246 .features[FEAT_8000_0001_EDX] =
1247 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1248 CPUID_EXT2_SYSCALL,
1249 .features[FEAT_8000_0001_ECX] =
1250 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1251 .features[FEAT_7_0_EBX] =
1252 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1253 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1254 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1255 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1256 CPUID_7_0_EBX_SMAP,
1257 .features[FEAT_XSAVE] =
1258 CPUID_XSAVE_XSAVEOPT,
1259 .features[FEAT_6_EAX] =
1260 CPUID_6_EAX_ARAT,
1261 .xlevel = 0x80000008,
1262 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1263 },
1264 {
1265 .name = "Broadwell",
1266 .level = 0xd,
1267 .vendor = CPUID_VENDOR_INTEL,
1268 .family = 6,
1269 .model = 61,
1270 .stepping = 2,
1271 .features[FEAT_1_EDX] =
1272 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1273 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1274 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1275 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1276 CPUID_DE | CPUID_FP87,
1277 .features[FEAT_1_ECX] =
1278 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1279 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1280 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1281 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1282 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1283 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1284 .features[FEAT_8000_0001_EDX] =
1285 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1286 CPUID_EXT2_SYSCALL,
1287 .features[FEAT_8000_0001_ECX] =
1288 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1289 .features[FEAT_7_0_EBX] =
1290 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1291 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1292 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1293 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1294 CPUID_7_0_EBX_SMAP,
1295 .features[FEAT_XSAVE] =
1296 CPUID_XSAVE_XSAVEOPT,
1297 .features[FEAT_6_EAX] =
1298 CPUID_6_EAX_ARAT,
1299 .xlevel = 0x80000008,
1300 .model_id = "Intel Core Processor (Broadwell)",
1301 },
1302 {
1303 .name = "Skylake-Client",
1304 .level = 0xd,
1305 .vendor = CPUID_VENDOR_INTEL,
1306 .family = 6,
1307 .model = 94,
1308 .stepping = 3,
1309 .features[FEAT_1_EDX] =
1310 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1311 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1312 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1313 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1314 CPUID_DE | CPUID_FP87,
1315 .features[FEAT_1_ECX] =
1316 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1317 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1318 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1319 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1320 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1321 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1322 .features[FEAT_8000_0001_EDX] =
1323 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1324 CPUID_EXT2_SYSCALL,
1325 .features[FEAT_8000_0001_ECX] =
1326 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1327 .features[FEAT_7_0_EBX] =
1328 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1329 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1330 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1331 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1332 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1333 /* Missing: XSAVES (not supported by some Linux versions,
1334 * including v4.1 to v4.12).
1335 * KVM doesn't yet expose any XSAVES state save component,
1336 * and the only one defined in Skylake (processor tracing)
1337 * probably will block migration anyway.
1338 */
1339 .features[FEAT_XSAVE] =
1340 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1341 CPUID_XSAVE_XGETBV1,
1342 .features[FEAT_6_EAX] =
1343 CPUID_6_EAX_ARAT,
1344 .xlevel = 0x80000008,
1345 .model_id = "Intel Core Processor (Skylake)",
1346 },
1347 {
1348 .name = "Skylake-Server",
1349 .level = 0xd,
1350 .vendor = CPUID_VENDOR_INTEL,
1351 .family = 6,
1352 .model = 85,
1353 .stepping = 4,
1354 .features[FEAT_1_EDX] =
1355 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1356 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1357 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1358 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1359 CPUID_DE | CPUID_FP87,
1360 .features[FEAT_1_ECX] =
1361 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1362 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1363 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1364 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1365 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1366 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1367 .features[FEAT_8000_0001_EDX] =
1368 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1369 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1370 .features[FEAT_8000_0001_ECX] =
1371 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1372 .features[FEAT_7_0_EBX] =
1373 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1374 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1375 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1376 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1377 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1378 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1379 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1380 CPUID_7_0_EBX_AVX512VL,
1381 /* Missing: XSAVES (not supported by some Linux versions,
1382 * including v4.1 to v4.12).
1383 * KVM doesn't yet expose any XSAVES state save component,
1384 * and the only one defined in Skylake (processor tracing)
1385 * probably will block migration anyway.
1386 */
1387 .features[FEAT_XSAVE] =
1388 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1389 CPUID_XSAVE_XGETBV1,
1390 .features[FEAT_6_EAX] =
1391 CPUID_6_EAX_ARAT,
1392 .xlevel = 0x80000008,
1393 .model_id = "Intel Xeon Processor (Skylake)",
1394 },
1395 {
1396 .name = "Opteron_G1",
1397 .level = 5,
1398 .vendor = CPUID_VENDOR_AMD,
1399 .family = 15,
1400 .model = 6,
1401 .stepping = 1,
1402 .features[FEAT_1_EDX] =
1403 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1404 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1405 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1406 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1407 CPUID_DE | CPUID_FP87,
1408 .features[FEAT_1_ECX] =
1409 CPUID_EXT_SSE3,
1410 .features[FEAT_8000_0001_EDX] =
1411 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1412 .xlevel = 0x80000008,
1413 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1414 },
1415 {
1416 .name = "Opteron_G2",
1417 .level = 5,
1418 .vendor = CPUID_VENDOR_AMD,
1419 .family = 15,
1420 .model = 6,
1421 .stepping = 1,
1422 .features[FEAT_1_EDX] =
1423 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1424 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1425 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1426 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1427 CPUID_DE | CPUID_FP87,
1428 .features[FEAT_1_ECX] =
1429 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1430 /* Missing: CPUID_EXT2_RDTSCP */
1431 .features[FEAT_8000_0001_EDX] =
1432 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1433 .features[FEAT_8000_0001_ECX] =
1434 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1435 .xlevel = 0x80000008,
1436 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1437 },
1438 {
1439 .name = "Opteron_G3",
1440 .level = 5,
1441 .vendor = CPUID_VENDOR_AMD,
1442 .family = 16,
1443 .model = 2,
1444 .stepping = 3,
1445 .features[FEAT_1_EDX] =
1446 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1447 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1448 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1449 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1450 CPUID_DE | CPUID_FP87,
1451 .features[FEAT_1_ECX] =
1452 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1453 CPUID_EXT_SSE3,
1454 /* Missing: CPUID_EXT2_RDTSCP */
1455 .features[FEAT_8000_0001_EDX] =
1456 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1457 .features[FEAT_8000_0001_ECX] =
1458 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1459 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1460 .xlevel = 0x80000008,
1461 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1462 },
1463 {
1464 .name = "Opteron_G4",
1465 .level = 0xd,
1466 .vendor = CPUID_VENDOR_AMD,
1467 .family = 21,
1468 .model = 1,
1469 .stepping = 2,
1470 .features[FEAT_1_EDX] =
1471 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1472 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1473 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1474 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1475 CPUID_DE | CPUID_FP87,
1476 .features[FEAT_1_ECX] =
1477 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1478 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1479 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1480 CPUID_EXT_SSE3,
1481 /* Missing: CPUID_EXT2_RDTSCP */
1482 .features[FEAT_8000_0001_EDX] =
1483 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1484 CPUID_EXT2_SYSCALL,
1485 .features[FEAT_8000_0001_ECX] =
1486 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1487 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1488 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1489 CPUID_EXT3_LAHF_LM,
1490 /* no xsaveopt! */
1491 .xlevel = 0x8000001A,
1492 .model_id = "AMD Opteron 62xx class CPU",
1493 },
1494 {
1495 .name = "Opteron_G5",
1496 .level = 0xd,
1497 .vendor = CPUID_VENDOR_AMD,
1498 .family = 21,
1499 .model = 2,
1500 .stepping = 0,
1501 .features[FEAT_1_EDX] =
1502 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1503 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1504 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1505 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1506 CPUID_DE | CPUID_FP87,
1507 .features[FEAT_1_ECX] =
1508 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1509 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1510 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1511 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1512 /* Missing: CPUID_EXT2_RDTSCP */
1513 .features[FEAT_8000_0001_EDX] =
1514 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1515 CPUID_EXT2_SYSCALL,
1516 .features[FEAT_8000_0001_ECX] =
1517 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1518 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1519 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1520 CPUID_EXT3_LAHF_LM,
1521 /* no xsaveopt! */
1522 .xlevel = 0x8000001A,
1523 .model_id = "AMD Opteron 63xx class CPU",
1524 },
1525 };
1526
/* Property name/value pair used to apply default settings to CPU models.
 * Tables of these entries are terminated by a { NULL, NULL } sentinel.
 * Field order matters: tables below use positional initializers.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1530
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * Entries may be overridden at runtime via x86_cpu_change_kvm_default();
 * the table is terminated by the { NULL, NULL } sentinel.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
1547
/* TCG-specific defaults that override all CPU models when using TCG.
 * Terminated by a { NULL, NULL } sentinel.
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
1554
1555
1556 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1557 {
1558 PropValue *pv;
1559 for (pv = kvm_default_props; pv->prop; pv++) {
1560 if (!strcmp(pv->prop, prop)) {
1561 pv->value = value;
1562 break;
1563 }
1564 }
1565
1566 /* It is valid to call this function only for properties that
1567 * are already present in the kvm_default_props table.
1568 */
1569 assert(pv->prop);
1570 }
1571
1572 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1573 bool migratable_only);
1574
1575 static bool lmce_supported(void)
1576 {
1577 uint64_t mce_cap = 0;
1578
1579 #ifdef CONFIG_KVM
1580 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1581 return false;
1582 }
1583 #endif
1584
1585 return !!(mce_cap & MCG_LMCE_P);
1586 }
1587
1588 #define CPUID_MODEL_ID_SZ 48
1589
1590 /**
1591 * cpu_x86_fill_model_id:
1592 * Get CPUID model ID string from host CPU.
1593 *
1594 * @str should have at least CPUID_MODEL_ID_SZ bytes
1595 *
1596 * The function does NOT add a null terminator to the string
1597 * automatically.
1598 */
1599 static int cpu_x86_fill_model_id(char *str)
1600 {
1601 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1602 int i;
1603
1604 for (i = 0; i < 3; i++) {
1605 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1606 memcpy(str + i * 16 + 0, &eax, 4);
1607 memcpy(str + i * 16 + 4, &ebx, 4);
1608 memcpy(str + i * 16 + 8, &ecx, 4);
1609 memcpy(str + i * 16 + 12, &edx, 4);
1610 }
1611 return 0;
1612 }
1613
/* Properties specific to the "max" CPU model. */
static Property max_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1619
/* Class-level initialization for the "max" CPU model: sets its
 * description, device properties and list-ordering value.
 */
static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* presumably controls sort position in CPU model listings —
     * confirm against X86CPUClass ("host" uses 8 below) */
    xcc->ordering = 9;

    xcc->model_description =
        "Enables all features supported by the accelerator in the current host";

    dc->props = max_x86_cpu_properties;
}
1632
1633 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
1634
1635 static void max_x86_cpu_initfn(Object *obj)
1636 {
1637 X86CPU *cpu = X86_CPU(obj);
1638 CPUX86State *env = &cpu->env;
1639 KVMState *s = kvm_state;
1640
1641 /* We can't fill the features array here because we don't know yet if
1642 * "migratable" is true or false.
1643 */
1644 cpu->max_features = true;
1645
1646 if (kvm_enabled()) {
1647 X86CPUDefinition host_cpudef = { };
1648 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1649
1650 host_vendor_fms(host_cpudef.vendor, &host_cpudef.family,
1651 &host_cpudef.model, &host_cpudef.stepping);
1652
1653 cpu_x86_fill_model_id(host_cpudef.model_id);
1654
1655 x86_cpu_load_def(cpu, &host_cpudef, &error_abort);
1656
1657 env->cpuid_min_level =
1658 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1659 env->cpuid_min_xlevel =
1660 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1661 env->cpuid_min_xlevel2 =
1662 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1663
1664 if (lmce_supported()) {
1665 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1666 }
1667 } else {
1668 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
1669 "vendor", &error_abort);
1670 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
1671 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
1672 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
1673 object_property_set_str(OBJECT(cpu),
1674 "QEMU TCG CPU version " QEMU_HW_VERSION,
1675 "model-id", &error_abort);
1676 }
1677
1678 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1679 }
1680
/* QOM type registration for the "max" CPU model. */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
1687
1688 #ifdef CONFIG_KVM
1689
/* Class-level initialization for the "host" CPU model.
 * "host" inherits from "max" (see host_x86_cpu_type_info below) and
 * additionally requires KVM to be enabled.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->kvm_required = true;
    xcc->ordering = 8;

    xcc->model_description =
        "KVM processor with all supported host features "
        "(only available in KVM mode)";
}
1701
/* QOM type registration for the "host" CPU model (child of "max"). */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
1707
1708 #endif
1709
1710 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1711 {
1712 FeatureWordInfo *f = &feature_word_info[w];
1713 int i;
1714
1715 for (i = 0; i < 32; ++i) {
1716 if ((1UL << i) & mask) {
1717 const char *reg = get_register_name_32(f->cpuid_reg);
1718 assert(reg);
1719 fprintf(stderr, "warning: %s doesn't support requested feature: "
1720 "CPUID.%02XH:%s%s%s [bit %d]\n",
1721 kvm_enabled() ? "host" : "TCG",
1722 f->cpuid_eax, reg,
1723 f->feat_names[i] ? "." : "",
1724 f->feat_names[i] ? f->feat_names[i] : "", i);
1725 }
1726 }
1727 }
1728
1729 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1730 const char *name, void *opaque,
1731 Error **errp)
1732 {
1733 X86CPU *cpu = X86_CPU(obj);
1734 CPUX86State *env = &cpu->env;
1735 int64_t value;
1736
1737 value = (env->cpuid_version >> 8) & 0xf;
1738 if (value == 0xf) {
1739 value += (env->cpuid_version >> 20) & 0xff;
1740 }
1741 visit_type_int(v, name, &value, errp);
1742 }
1743
/* Property setter for "family": encodes the value into the family and
 * extended-family fields of CPUID[1].EAX.  Accepts 0 .. 0xff + 0xf;
 * values above 0x0f saturate the base field at 0xf and store the
 * remainder in the extended field.
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear family (bits 11..8) and extended family (bits 27..20). */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        /* Base field saturates at 0xf; remainder goes in bits 27..20. */
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
1773
1774 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1775 const char *name, void *opaque,
1776 Error **errp)
1777 {
1778 X86CPU *cpu = X86_CPU(obj);
1779 CPUX86State *env = &cpu->env;
1780 int64_t value;
1781
1782 value = (env->cpuid_version >> 4) & 0xf;
1783 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1784 visit_type_int(v, name, &value, errp);
1785 }
1786
/* Property setter for "model": encodes an 8-bit model number into the
 * model (bits 7..4) and extended model (bits 19..16) fields of
 * CPUID[1].EAX.  Accepts 0 .. 0xff.
 */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear model (bits 7..4) and extended model (bits 19..16),
     * then split the value: low nibble -> model, high nibble -> ext. */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
1812
1813 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1814 const char *name, void *opaque,
1815 Error **errp)
1816 {
1817 X86CPU *cpu = X86_CPU(obj);
1818 CPUX86State *env = &cpu->env;
1819 int64_t value;
1820
1821 value = env->cpuid_version & 0xf;
1822 visit_type_int(v, name, &value, errp);
1823 }
1824
/* Property setter for "stepping": stores the value in the low nibble of
 * CPUID[1].EAX.  Accepts 0 .. 0xf.
 */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
1850
/* Property getter for "vendor": returns the 12-character CPUID vendor
 * string decoded from cpuid_vendor1/2/3.  The caller owns (and must
 * free) the returned buffer.
 */
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}
1862
/* Property setter for "vendor": packs a 12-character vendor string into
 * the three little-endian CPUID vendor registers.  Rejects strings whose
 * length differs from CPUID_VENDOR_SZ.
 */
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    /* 4 characters per register, least-significant byte first. */
    for (i = 0; i < 4; i++) {
        env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}
1884
1885 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1886 {
1887 X86CPU *cpu = X86_CPU(obj);
1888 CPUX86State *env = &cpu->env;
1889 char *value;
1890 int i;
1891
1892 value = g_malloc(48 + 1);
1893 for (i = 0; i < 48; i++) {
1894 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1895 }
1896 value[48] = '\0';
1897 return value;
1898 }
1899
1900 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1901 Error **errp)
1902 {
1903 X86CPU *cpu = X86_CPU(obj);
1904 CPUX86State *env = &cpu->env;
1905 int c, len, i;
1906
1907 if (model_id == NULL) {
1908 model_id = "";
1909 }
1910 len = strlen(model_id);
1911 memset(env->cpuid_model, 0, 48);
1912 for (i = 0; i < 48; i++) {
1913 if (i >= len) {
1914 c = '\0';
1915 } else {
1916 c = (uint8_t)model_id[i];
1917 }
1918 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1919 }
1920 }
1921
1922 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1923 void *opaque, Error **errp)
1924 {
1925 X86CPU *cpu = X86_CPU(obj);
1926 int64_t value;
1927
1928 value = cpu->env.tsc_khz * 1000;
1929 visit_type_int(v, name, &value, errp);
1930 }
1931
/* Property setter for "tsc-frequency": accepts a value in Hz and stores
 * it in kHz.  Note: the Hz value is truncated to kHz granularity, so a
 * value not divisible by 1000 will not round-trip through the getter.
 */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* user_tsc_khz records that the value was user-provided. */
    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
1954
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* @opaque points at an array of FEATURE_WORDS uint32_t feature
     * bitmaps (e.g. env->features or cpu->filtered_features).
     */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    /* Both arrays live on the stack; the list below only borrows
     * pointers into them, which is safe because the visit completes
     * before this function returns.
     */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
1983
1984 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1985 void *opaque, Error **errp)
1986 {
1987 X86CPU *cpu = X86_CPU(obj);
1988 int64_t value = cpu->hyperv_spinlock_attempts;
1989
1990 visit_type_int(v, name, &value, errp);
1991 }
1992
1993 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1994 void *opaque, Error **errp)
1995 {
1996 const int64_t min = 0xFFF;
1997 const int64_t max = UINT_MAX;
1998 X86CPU *cpu = X86_CPU(obj);
1999 Error *err = NULL;
2000 int64_t value;
2001
2002 visit_type_int(v, name, &value, &err);
2003 if (err) {
2004 error_propagate(errp, err);
2005 return;
2006 }
2007
2008 if (value < min || value > max) {
2009 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2010 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2011 object_get_typename(obj), name ? name : "null",
2012 value, min, max);
2013 return;
2014 }
2015 cpu->hyperv_spinlock_attempts = value;
2016 }
2017
/* qdev property type backing "hv-spinlocks", wired to the getter and
 * setter above.
 */
static const PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
2023
/* Rewrite every '_' in @s to '-', in place, so that legacy feature
 * spellings match QOM property names (which use '-' as separator).
 */
static inline void feat2prop(char *s)
{
    for (char *p = strchr(s, '_'); p != NULL; p = strchr(p, '_')) {
        *p = '-';
    }
}
2033
2034 /* Return the feature property name for a feature flag bit */
2035 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2036 {
2037 /* XSAVE components are automatically enabled by other features,
2038 * so return the original feature name instead
2039 */
2040 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2041 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2042
2043 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2044 x86_ext_save_areas[comp].bits) {
2045 w = x86_ext_save_areas[comp].feature;
2046 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2047 }
2048 }
2049
2050 assert(bitnr < 32);
2051 assert(w < FEATURE_WORDS);
2052 return feature_word_info[w].feat_names[bitnr];
2053 }
2054
/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;

/* GCompareFunc wrapper around g_strcmp0(), for g_list_find_custom(). */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}
2066
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Each parsed feature is registered as a global property on
 * @typename, so it applies to every CPU of that type created later.
 * Errors are reported through @errp.  Only the first call does any
 * work: globals must be registered exactly once.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Globals are process-wide; registering them twice would be wrong. */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    /* NOTE: strtok() modifies @features in place and keeps hidden
     * global state between calls.
     */
    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: "+feat"/"-feat" are recorded and applied
         * later so that they override "feat=on|off" regardless of order.
         */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "name=value"; a bare "name" means "name=on". */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature also appeared as +feat/-feat. */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" takes a size suffix (e.g. 2.5G) and
         * maps to the "tsc-frequency" property in plain Hz.
         */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
2157
2158 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2159 static int x86_cpu_filter_features(X86CPU *cpu);
2160
2161 /* Check for missing features that may prevent the CPU class from
2162 * running using the current machine and accelerator.
2163 */
2164 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2165 strList **missing_feats)
2166 {
2167 X86CPU *xc;
2168 FeatureWord w;
2169 Error *err = NULL;
2170 strList **next = missing_feats;
2171
2172 if (xcc->kvm_required && !kvm_enabled()) {
2173 strList *new = g_new0(strList, 1);
2174 new->value = g_strdup("kvm");;
2175 *missing_feats = new;
2176 return;
2177 }
2178
2179 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2180
2181 x86_cpu_expand_features(xc, &err);
2182 if (err) {
2183 /* Errors at x86_cpu_expand_features should never happen,
2184 * but in case it does, just report the model as not
2185 * runnable at all using the "type" property.
2186 */
2187 strList *new = g_new0(strList, 1);
2188 new->value = g_strdup("type");
2189 *next = new;
2190 next = &new->next;
2191 }
2192
2193 x86_cpu_filter_features(xc);
2194
2195 for (w = 0; w < FEATURE_WORDS; w++) {
2196 uint32_t filtered = xc->filtered_features[w];
2197 int i;
2198 for (i = 0; i < 32; i++) {
2199 if (filtered & (1UL << i)) {
2200 strList *new = g_new0(strList, 1);
2201 new->value = g_strdup(x86_cpu_feature_name(w, i));
2202 *next = new;
2203 next = &new->next;
2204 }
2205 }
2206 }
2207
2208 object_unref(OBJECT(xc));
2209 }
2210
2211 /* Print all cpuid feature names in featureset
2212 */
2213 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2214 {
2215 int bit;
2216 bool first = true;
2217
2218 for (bit = 0; bit < 32; bit++) {
2219 if (featureset[bit]) {
2220 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2221 first = false;
2222 }
2223 }
2224 }
2225
2226 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2227 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2228 {
2229 ObjectClass *class_a = (ObjectClass *)a;
2230 ObjectClass *class_b = (ObjectClass *)b;
2231 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2232 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2233 const char *name_a, *name_b;
2234
2235 if (cc_a->ordering != cc_b->ordering) {
2236 return cc_a->ordering - cc_b->ordering;
2237 } else {
2238 name_a = object_class_get_name(class_a);
2239 name_b = object_class_get_name(class_b);
2240 return strcmp(name_a, name_b);
2241 }
2242 }
2243
2244 static GSList *get_sorted_cpu_model_list(void)
2245 {
2246 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2247 list = g_slist_sort(list, x86_cpu_list_compare);
2248 return list;
2249 }
2250
2251 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2252 {
2253 ObjectClass *oc = data;
2254 X86CPUClass *cc = X86_CPU_CLASS(oc);
2255 CPUListState *s = user_data;
2256 char *name = x86_cpu_class_get_model_name(cc);
2257 const char *desc = cc->model_description;
2258 if (!desc && cc->cpu_def) {
2259 desc = cc->cpu_def->model_id;
2260 }
2261
2262 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2263 name, desc);
2264 g_free(name);
2265 }
2266
2267 /* list available CPU models and flags */
2268 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2269 {
2270 int i;
2271 CPUListState s = {
2272 .file = f,
2273 .cpu_fprintf = cpu_fprintf,
2274 };
2275 GSList *list;
2276
2277 (*cpu_fprintf)(f, "Available CPUs:\n");
2278 list = get_sorted_cpu_model_list();
2279 g_slist_foreach(list, x86_cpu_list_entry, &s);
2280 g_slist_free(list);
2281
2282 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2283 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2284 FeatureWordInfo *fw = &feature_word_info[i];
2285
2286 (*cpu_fprintf)(f, " ");
2287 listflags(f, cpu_fprintf, fw->feat_names);
2288 (*cpu_fprintf)(f, "\n");
2289 }
2290 }
2291
2292 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2293 {
2294 ObjectClass *oc = data;
2295 X86CPUClass *cc = X86_CPU_CLASS(oc);
2296 CpuDefinitionInfoList **cpu_list = user_data;
2297 CpuDefinitionInfoList *entry;
2298 CpuDefinitionInfo *info;
2299
2300 info = g_malloc0(sizeof(*info));
2301 info->name = x86_cpu_class_get_model_name(cc);
2302 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2303 info->has_unavailable_features = true;
2304 info->q_typename = g_strdup(object_class_get_name(oc));
2305 info->migration_safe = cc->migration_safe;
2306 info->has_migration_safe = true;
2307 info->q_static = cc->static_model;
2308
2309 entry = g_malloc0(sizeof(*entry));
2310 entry->value = info;
2311 entry->next = *cpu_list;
2312 *cpu_list = entry;
2313 }
2314
2315 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2316 {
2317 CpuDefinitionInfoList *cpu_list = NULL;
2318 GSList *list = get_sorted_cpu_model_list();
2319 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2320 g_slist_free(list);
2321 return cpu_list;
2322 }
2323
2324 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2325 bool migratable_only)
2326 {
2327 FeatureWordInfo *wi = &feature_word_info[w];
2328 uint32_t r;
2329
2330 if (kvm_enabled()) {
2331 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2332 wi->cpuid_ecx,
2333 wi->cpuid_reg);
2334 } else if (tcg_enabled()) {
2335 r = wi->tcg_features;
2336 } else {
2337 return ~0;
2338 }
2339 if (migratable_only) {
2340 r &= x86_cpu_get_migratable_flags(w);
2341 }
2342 return r;
2343 }
2344
2345 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2346 {
2347 FeatureWord w;
2348
2349 for (w = 0; w < FEATURE_WORDS; w++) {
2350 report_unavailable_features(w, cpu->filtered_features[w]);
2351 }
2352 }
2353
2354 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2355 {
2356 PropValue *pv;
2357 for (pv = props; pv->prop; pv++) {
2358 if (!pv->value) {
2359 continue;
2360 }
2361 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2362 &error_abort);
2363 }
2364 }
2365
/* Load data from X86CPUDefinition into a X86CPU object
 *
 * Copies the model table entry @def (family/model/stepping, feature
 * words, vendor, ...) onto @cpu via its QOM properties, then applies
 * accelerator-specific defaults.  Errors from the property setters
 * accumulate into @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise the "running under a hypervisor" CPUID bit. */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2423
2424 /* Return a QDict containing keys for all properties that can be included
2425 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2426 * must be included in the dictionary.
2427 */
2428 static QDict *x86_cpu_static_props(void)
2429 {
2430 FeatureWord w;
2431 int i;
2432 static const char *props[] = {
2433 "min-level",
2434 "min-xlevel",
2435 "family",
2436 "model",
2437 "stepping",
2438 "model-id",
2439 "vendor",
2440 "lmce",
2441 NULL,
2442 };
2443 static QDict *d;
2444
2445 if (d) {
2446 return d;
2447 }
2448
2449 d = qdict_new();
2450 for (i = 0; props[i]; i++) {
2451 qdict_put(d, props[i], qnull());
2452 }
2453
2454 for (w = 0; w < FEATURE_WORDS; w++) {
2455 FeatureWordInfo *fi = &feature_word_info[w];
2456 int bit;
2457 for (bit = 0; bit < 32; bit++) {
2458 if (!fi->feat_names[bit]) {
2459 continue;
2460 }
2461 qdict_put(d, fi->feat_names[bit], qnull());
2462 }
2463 }
2464
2465 return d;
2466 }
2467
2468 /* Add an entry to @props dict, with the value for property. */
2469 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
2470 {
2471 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
2472 &error_abort);
2473
2474 qdict_put_obj(props, prop, value);
2475 }
2476
2477 /* Convert CPU model data from X86CPU object to a property dictionary
2478 * that can recreate exactly the same CPU model.
2479 */
2480 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
2481 {
2482 QDict *sprops = x86_cpu_static_props();
2483 const QDictEntry *e;
2484
2485 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
2486 const char *prop = qdict_entry_key(e);
2487 x86_cpu_expand_prop(cpu, props, prop);
2488 }
2489 }
2490
2491 /* Convert CPU model data from X86CPU object to a property dictionary
2492 * that can recreate exactly the same CPU model, including every
2493 * writeable QOM property.
2494 */
2495 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
2496 {
2497 ObjectPropertyIterator iter;
2498 ObjectProperty *prop;
2499
2500 object_property_iter_init(&iter, OBJECT(cpu));
2501 while ((prop = object_property_iter_next(&iter))) {
2502 /* skip read-only or write-only properties */
2503 if (!prop->get || !prop->set) {
2504 continue;
2505 }
2506
2507 /* "hotplugged" is the only property that is configurable
2508 * on the command-line but will be set differently on CPUs
2509 * created using "-cpu ... -smp ..." and by CPUs created
2510 * on the fly by x86_cpu_from_model() for querying. Skip it.
2511 */
2512 if (!strcmp(prop->name, "hotplugged")) {
2513 continue;
2514 }
2515 x86_cpu_expand_prop(cpu, props, prop->name);
2516 }
2517 }
2518
2519 static void object_apply_props(Object *obj, QDict *props, Error **errp)
2520 {
2521 const QDictEntry *prop;
2522 Error *err = NULL;
2523
2524 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
2525 object_property_set_qobject(obj, qdict_entry_value(prop),
2526 qdict_entry_key(prop), &err);
2527 if (err) {
2528 break;
2529 }
2530 }
2531
2532 error_propagate(errp, err);
2533 }
2534
/* Create X86CPU object according to model+props specification */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

 out:
    if (err) {
        error_propagate(errp, err);
        /* NOTE(review): when the class lookup above fails, xc is still
         * NULL here — this assumes object_unref() tolerates NULL;
         * verify against the QOM implementation in use.
         */
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
2569
2570 CpuModelExpansionInfo *
2571 arch_query_cpu_model_expansion(CpuModelExpansionType type,
2572 CpuModelInfo *model,
2573 Error **errp)
2574 {
2575 X86CPU *xc = NULL;
2576 Error *err = NULL;
2577 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
2578 QDict *props = NULL;
2579 const char *base_name;
2580
2581 xc = x86_cpu_from_model(model->name,
2582 model->has_props ?
2583 qobject_to_qdict(model->props) :
2584 NULL, &err);
2585 if (err) {
2586 goto out;
2587 }
2588
2589 props = qdict_new();
2590
2591 switch (type) {
2592 case CPU_MODEL_EXPANSION_TYPE_STATIC:
2593 /* Static expansion will be based on "base" only */
2594 base_name = "base";
2595 x86_cpu_to_dict(xc, props);
2596 break;
2597 case CPU_MODEL_EXPANSION_TYPE_FULL:
2598 /* As we don't return every single property, full expansion needs
2599 * to keep the original model name+props, and add extra
2600 * properties on top of that.
2601 */
2602 base_name = model->name;
2603 x86_cpu_to_dict_full(xc, props);
2604 break;
2605 default:
2606 error_setg(&err, "Unsupportted expansion type");
2607 goto out;
2608 }
2609
2610 if (!props) {
2611 props = qdict_new();
2612 }
2613 x86_cpu_to_dict(xc, props);
2614
2615 ret->model = g_new0(CpuModelInfo, 1);
2616 ret->model->name = g_strdup(base_name);
2617 ret->model->props = QOBJECT(props);
2618 ret->model->has_props = true;
2619
2620 out:
2621 object_unref(OBJECT(xc));
2622 if (err) {
2623 error_propagate(errp, err);
2624 qapi_free_CpuModelExpansionInfo(ret);
2625 ret = NULL;
2626 }
2627 return ret;
2628 }
2629
/* Return the gdbstub architecture name for this build of the target
 * (caller frees the returned string).
 */
static gchar *x86_gdb_arch_name(CPUState *cs)
{
#ifdef TARGET_X86_64
    return g_strdup("i386:x86-64");
#else
    return g_strdup("i386");
#endif
}
2638
/* Create and realize an X86CPU from a "-cpu"-style model string. */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
2643
2644 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2645 {
2646 X86CPUDefinition *cpudef = data;
2647 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2648
2649 xcc->cpu_def = cpudef;
2650 xcc->migration_safe = true;
2651 }
2652
2653 static void x86_register_cpudef_type(X86CPUDefinition *def)
2654 {
2655 char *typename = x86_cpu_type_name(def->name);
2656 TypeInfo ti = {
2657 .name = typename,
2658 .parent = TYPE_X86_CPU,
2659 .class_init = x86_cpu_cpudef_class_init,
2660 .class_data = def,
2661 };
2662
2663 /* AMD aliases are handled at runtime based on CPUID vendor, so
2664 * they shouldn't be set on the CPU model table.
2665 */
2666 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
2667
2668 type_register(&ti);
2669 g_free(typename);
2670 }
2671
2672 #if !defined(CONFIG_USER_ONLY)
2673
/* Mask the APIC bit out of the guest-visible CPUID[1].EDX features. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2678
2679 #endif /* !CONFIG_USER_ONLY */
2680
/* Compute the guest-visible CPUID result for leaf @index, subleaf
 * @count, storing the four result registers in *eax..*edx.
 *
 * Out-of-range leaves are clamped first: an index above the limit for
 * its range (cpuid_level / cpuid_xlevel / cpuid_xlevel2 / 0x40000001)
 * falls back to leaf cpuid_level, matching Intel hardware behavior.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;
    uint32_t limit;
    uint32_t signature[3];

    /* Calculate & apply limits for different index ranges */
    if (index >= 0xC0000000) {
        limit = env->cpuid_xlevel2;
    } else if (index >= 0x80000000) {
        limit = env->cpuid_xlevel;
    } else if (index >= 0x40000000) {
        limit = 0x40000001;
    } else {
        limit = env->cpuid_level;
    }

    if (index > limit) {
        /* Intel documentation states that invalid EAX input will
         * return the same information as EAX=cpuid_level
         * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
         */
        index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Basic leaf: max basic level + vendor string in EBX:EDX:ECX */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version/feature information */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE, so compute it dynamically. */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR << 8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            /* Subleaf selects which cache is being described. */
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                /* L3 is shared by the whole package. */
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE, so compute it dynamically. */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        /* Subleaf 0 describes the SMT level, subleaf 1 the core level. */
        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            /* Subleaf 0: supported XCR0 bits + total save area size. */
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Subleaves >= 2: size/offset of each enabled component. */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x40000000:
        /*
         * CPUID code in kvm_arch_init_vcpu() ignores stuff
         * set here, but we restrict to TCG none the less.
         */
        if (tcg_enabled() && cpu->expose_tcg) {
            memcpy(signature, "TCGTCGTCGTCG", 12);
            *eax = 0x40000001;
            *ebx = signature[0];
            *ecx = signature[1];
            *edx = signature[2];
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0x40000001:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0x80000000:
        /* Extended leaf: max extended level + vendor string again */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf. */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if  (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        /* Centaur/VIA leaf range: report the maximum supported level. */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
3100
/* CPUClass::reset()
 *
 * Bring the vCPU to its architectural power-on/RESET state: real mode
 * with CS:IP = F000:FFF0, FPU/SSE units in INIT state, debug registers,
 * PAT and MTRRs at their defined reset values.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero everything up to (but excluding) the end_reset_fields marker;
     * fields placed after the marker in CPUX86State survive reset. */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;  /* SVM global interrupt flag starts set */

    cpu_x86_update_cr0(env, 0x60000010);  /* CD and NW set; PE/PG clear */
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segment caches.  CS base 0xffff0000 makes the first
     * instruction fetch land at the reset vector just below 4GB. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;  /* EDX holds CPU signature */

    env->eflags = 0x2;  /* only the always-one reserved bit */

    /* FPU init: mark all x87 stack slots empty */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    /* Components 0/1 were handled above; enable each further XSAVE
     * component whose gating CPUID feature is present. */
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* APs start halted, waiting for INIT/SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
3222
3223 #ifndef CONFIG_USER_ONLY
3224 bool cpu_is_bsp(X86CPU *cpu)
3225 {
3226 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3227 }
3228
/* TODO: remove me, when reset over QOM tree is implemented */
/* Machine-reset callback registered via qemu_register_reset();
 * @opaque is the X86CPU passed at registration time. */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
3235 #endif
3236
3237 static void mce_init(X86CPU *cpu)
3238 {
3239 CPUX86State *cenv = &cpu->env;
3240 unsigned int bank;
3241
3242 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3243 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3244 (CPUID_MCE | CPUID_MCA)) {
3245 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3246 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3247 cenv->mcg_ctl = ~(uint64_t)0;
3248 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3249 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3250 }
3251 }
3252 }
3253
3254 #ifndef CONFIG_USER_ONLY
3255 APICCommonClass *apic_get_class(void)
3256 {
3257 const char *apic_type = "apic";
3258
3259 if (kvm_apic_in_kernel()) {
3260 apic_type = "kvm-apic";
3261 } else if (xen_enabled()) {
3262 apic_type = "xen-apic";
3263 }
3264
3265 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3266 }
3267
/* Create the local APIC device for @cpu and attach it as the "lapic"
 * child property.  @errp is currently unused: internal failures abort
 * via &error_abort. */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* Drop our reference: the "lapic" child property now keeps the
     * APIC object alive. */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
3285
/* Realize the per-CPU APIC (if one was created) and map its MMIO
 * region.  The region is mapped only once for the whole machine, since
 * every CPU shares the same APIC base address. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    /* No APIC was created for this CPU (e.g. no CPUID_APIC, single CPU) */
    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
3308
/* machine-init-done notifier: if the machine exposes /machine/smram,
 * alias it into this CPU's address space container at high priority so
 * SMM code sees SMRAM instead of normal memory. */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        /* Alias the first 4GB of the machine's SMRAM region. */
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, true);
        /* Priority 1: overrides the normal-memory alias at priority 0. */
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
3323 #else
/* User-mode emulation has no APIC device model; nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
3327 #endif
3328
3329 /* Note: Only safe for use on x86(-64) hosts */
3330 static uint32_t x86_host_phys_bits(void)
3331 {
3332 uint32_t eax;
3333 uint32_t host_phys_bits;
3334
3335 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3336 if (eax >= 0x80000008) {
3337 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3338 /* Note: According to AMD doc 25481 rev 2.34 they have a field
3339 * at 23:16 that can specify a maximum physical address bits for
3340 * the guest that can override this value; but I've not seen
3341 * anything with that set.
3342 */
3343 host_phys_bits = eax & 0xff;
3344 } else {
3345 /* It's an odd 64 bit machine that doesn't have the leaf for
3346 * physical address bits; fall back to 36 that's most older
3347 * Intel.
3348 */
3349 host_phys_bits = 36;
3350 }
3351
3352 return host_phys_bits;
3353 }
3354
3355 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3356 {
3357 if (*min < value) {
3358 *min = value;
3359 }
3360 }
3361
3362 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3363 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3364 {
3365 CPUX86State *env = &cpu->env;
3366 FeatureWordInfo *fi = &feature_word_info[w];
3367 uint32_t eax = fi->cpuid_eax;
3368 uint32_t region = eax & 0xF0000000;
3369
3370 if (!env->features[w]) {
3371 return;
3372 }
3373
3374 switch (region) {
3375 case 0x00000000:
3376 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3377 break;
3378 case 0x80000000:
3379 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3380 break;
3381 case 0xC0000000:
3382 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3383 break;
3384 }
3385 }
3386
3387 /* Calculate XSAVE components based on the configured CPU feature flags */
3388 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3389 {
3390 CPUX86State *env = &cpu->env;
3391 int i;
3392 uint64_t mask;
3393
3394 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3395 return;
3396 }
3397
3398 mask = 0;
3399 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3400 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3401 if (env->features[esa->feature] & esa->bits) {
3402 mask |= (1ULL << i);
3403 }
3404 }
3405
3406 env->features[FEAT_XSAVE_COMP_LO] = mask;
3407 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3408 }
3409
3410 /***** Steps involved on loading and filtering CPUID data
3411 *
3412 * When initializing and realizing a CPU object, the steps
3413 * involved in setting up CPUID data are:
3414 *
3415 * 1) Loading CPU model definition (X86CPUDefinition). This is
3416 * implemented by x86_cpu_load_def() and should be completely
3417 * transparent, as it is done automatically by instance_init.
3418 * No code should need to look at X86CPUDefinition structs
3419 * outside instance_init.
3420 *
3421 * 2) CPU expansion. This is done by realize before CPUID
3422 * filtering, and will make sure host/accelerator data is
3423 * loaded for CPU models that depend on host capabilities
3424 * (e.g. "host"). Done by x86_cpu_expand_features().
3425 *
3426 * 3) CPUID filtering. This initializes extra data related to
3427 * CPUID, and checks if the host supports all capabilities
3428 * required by the CPU. Runnability of a CPU model is
3429 * determined at this step. Done by x86_cpu_filter_features().
3430 *
3431 * Some operations don't require all steps to be performed.
3432 * More precisely:
3433 *
3434 * - CPU instance creation (instance_init) will run only CPU
3435 * model loading. CPU expansion can't run at instance_init-time
 * because host/accelerator data may not be available yet.
3437 * - CPU realization will perform both CPU model expansion and CPUID
3438 * filtering, and return an error in case one of them fails.
3439 * - query-cpu-definitions needs to run all 3 steps. It needs
3440 * to run CPUID filtering, as the 'unavailable-features'
3441 * field is set based on the filtering results.
3442 * - The query-cpu-model-expansion QMP command only needs to run
3443 * CPU model loading and CPU expansion. It should not filter
3444 * any CPUID data based on host capabilities.
3445 */
3446
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 *
 * Applies "-cpu max" host defaults, the global +/-feature lists, XSAVE
 * component calculation, and automatic CPUID level adjustment.  Errors
 * from property setters are reported through @errp.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w];
        }
    }

    /* Apply the legacy "+feature" / "-feature" lists as property sets. */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features only make sense under KVM with kvm=on. */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
3530
3531 /*
3532 * Finishes initialization of CPUID data, filters CPU feature
3533 * words based on host availability of each feature.
3534 *
3535 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
3536 */
3537 static int x86_cpu_filter_features(X86CPU *cpu)
3538 {
3539 CPUX86State *env = &cpu->env;
3540 FeatureWord w;
3541 int rv = 0;
3542
3543 for (w = 0; w < FEATURE_WORDS; w++) {
3544 uint32_t host_feat =
3545 x86_cpu_get_supported_feature_word(w, false);
3546 uint32_t requested_features = env->features[w];
3547 env->features[w] &= host_feat;
3548 cpu->filtered_features[w] = requested_features & ~env->features[w];
3549 if (cpu->filtered_features[w]) {
3550 rv = 1;
3551 }
3552 }
3553
3554 return rv;
3555 }
3556
/* True iff the guest CPUID vendor string is "GenuineIntel". */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
/* True iff the guest CPUID vendor string is "AuthenticAMD". */
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU.
 *
 * Expands and filters CPUID features, fixes up AMD feature aliases and
 * phys-bits, creates the exec state, APIC and (for TCG) the SMM address
 * spaces, then resets the CPU.  Errors are reported through @errp.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Report (and with enforce=on, reject) features the host can't do. */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *as_normal = address_space_init_shareable(cs->memory,
                                                               "cpu-memory");
        AddressSpace *as_smm = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(as_smm, cpu->cpu_as_root, "CPU");

        cs->num_ases = 2;
        cpu_address_space_init(cs, as_normal, 0);
        cpu_address_space_init(cs, as_smm, 1);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3759
/* DeviceClass::unrealize for X86CPU: stop the vCPU thread, drop the
 * machine-reset callback, tear down the APIC, then chain to the parent
 * unrealize. */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3782
/* Opaque state for a boolean "feature bit" QOM property: the feature
 * word it lives in and the bit mask it controls within that word. */
typedef struct BitProperty {
    FeatureWord w;     /* index into env->features[] */
    uint32_t mask;     /* one or more bits of that feature word */
} BitProperty;
3787
3788 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3789 void *opaque, Error **errp)
3790 {
3791 X86CPU *cpu = X86_CPU(obj);
3792 BitProperty *fp = opaque;
3793 uint32_t f = cpu->env.features[fp->w];
3794 bool value = (f & fp->mask) == fp->mask;
3795 visit_type_bool(v, name, &value, errp);
3796 }
3797
/* QOM setter for a feature-bit property: sets or clears the property's
 * mask in the feature word and records the bits as user-set so later
 * automatic expansion won't override them.  Rejected after realize. */
static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    DeviceState *dev = DEVICE(obj);
    X86CPU *cpu = X86_CPU(obj);
    BitProperty *fp = opaque;
    Error *local_err = NULL;
    bool value;

    /* Feature bits are frozen once the CPU is realized. */
    if (dev->realized) {
        qdev_prop_set_after_realize(dev, name, errp);
        return;
    }

    visit_type_bool(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (value) {
        cpu->env.features[fp->w] |= fp->mask;
    } else {
        cpu->env.features[fp->w] &= ~fp->mask;
    }
    /* Remember the bits were set explicitly by the user. */
    cpu->env.user_features[fp->w] |= fp->mask;
}
3825
3826 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3827 void *opaque)
3828 {
3829 BitProperty *prop = opaque;
3830 g_free(prop);
3831 }
3832
3833 /* Register a boolean property to get/set a single bit in a uint32_t field.
3834 *
3835 * The same property name can be registered multiple times to make it affect
3836 * multiple bits in the same FeatureWord. In that case, the getter will return
3837 * true only if all bits are set.
3838 */
3839 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3840 const char *prop_name,
3841 FeatureWord w,
3842 int bitnr)
3843 {
3844 BitProperty *fp;
3845 ObjectProperty *op;
3846 uint32_t mask = (1UL << bitnr);
3847
3848 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3849 if (op) {
3850 fp = op->opaque;
3851 assert(fp->w == w);
3852 fp->mask |= mask;
3853 } else {
3854 fp = g_new0(BitProperty, 1);
3855 fp->w = w;
3856 fp->mask = mask;
3857 object_property_add(OBJECT(cpu), prop_name, "bool",
3858 x86_cpu_get_bit_prop,
3859 x86_cpu_set_bit_prop,
3860 x86_cpu_release_bit_prop, fp, &error_abort);
3861 }
3862 }
3863
3864 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3865 FeatureWord w,
3866 int bitnr)
3867 {
3868 FeatureWordInfo *fi = &feature_word_info[w];
3869 const char *name = fi->feat_names[bitnr];
3870
3871 if (!name) {
3872 return;
3873 }
3874
3875 /* Property names should use "-" instead of "_".
3876 * Old names containing underscores are registered as aliases
3877 * using object_property_add_alias()
3878 */
3879 assert(!strchr(name, '_'));
3880 /* aliases don't use "|" delimiters anymore, they are registered
3881 * manually using object_property_add_alias() */
3882 assert(!strchr(name, '|'));
3883 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
3884 }
3885
/* Build a GuestPanicInformation from the Hyper-V crash MSRs, or return
 * NULL when the guest has no crash-MSR support.  The caller owns the
 * result and frees it with qapi_free_GuestPanicInformation(). */
static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    GuestPanicInformation *panic_info = NULL;

    if (env->features[FEAT_HYPERV_EDX] & HV_X64_GUEST_CRASH_MSR_AVAILABLE) {
        panic_info = g_malloc0(sizeof(GuestPanicInformation));

        panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;

        /* We copy five crash parameters below; make sure they exist. */
        assert(HV_X64_MSR_CRASH_PARAMS >= 5);
        panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
        panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
        panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
        panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
        panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
    }

    return panic_info;
}
3907 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
3908 const char *name, void *opaque,
3909 Error **errp)
3910 {
3911 CPUState *cs = CPU(obj);
3912 GuestPanicInformation *panic_info;
3913
3914 if (!cs->crash_occurred) {
3915 error_setg(errp, "No crash occured");
3916 return;
3917 }
3918
3919 panic_info = x86_cpu_get_crash_info(cs);
3920 if (panic_info == NULL) {
3921 error_setg(errp, "No crash information");
3922 return;
3923 }
3924
3925 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
3926 errp);
3927 qapi_free_GuestPanicInformation(panic_info);
3928 }
3929
/* TypeInfo::instance_init for X86CPU.
 *
 * Registers all QOM properties (version fields, vendor, model-id,
 * feature words, one bool property per named feature bit plus legacy
 * aliases) and loads the class's CPU model definition, if any. */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* CPUID version fields and identification strings */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only views of the enabled and host-filtered feature words */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Legacy alternate spellings of feature names */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Underscore spellings kept for backward compatibility */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    /* Load the model definition attached to this class (if any). */
    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
4012
4013 static int64_t x86_cpu_get_arch_id(CPUState *cs)
4014 {
4015 X86CPU *cpu = X86_CPU(cs);
4016
4017 return cpu->apic_id;
4018 }
4019
4020 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
4021 {
4022 X86CPU *cpu = X86_CPU(cs);
4023
4024 return cpu->env.cr[0] & CR0_PG_MASK;
4025 }
4026
4027 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
4028 {
4029 X86CPU *cpu = X86_CPU(cs);
4030
4031 cpu->env.eip = value;
4032 }
4033
4034 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
4035 {
4036 X86CPU *cpu = X86_CPU(cs);
4037
4038 cpu->env.eip = tb->pc - tb->cs_base;
4039 }
4040
4041 static bool x86_cpu_has_work(CPUState *cs)
4042 {
4043 X86CPU *cpu = X86_CPU(cs);
4044 CPUX86State *env = &cpu->env;
4045
4046 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
4047 CPU_INTERRUPT_POLL)) &&
4048 (env->eflags & IF_MASK)) ||
4049 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
4050 CPU_INTERRUPT_INIT |
4051 CPU_INTERRUPT_SIPI |
4052 CPU_INTERRUPT_MCE)) ||
4053 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
4054 !(env->hflags & HF_SMM_MASK));
4055 }
4056
/* qdev properties common to all X86CPU subclasses. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* System emulation: topology IDs start unassigned and are filled in
     * during realize. */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenment knobs (KVM only). */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* Feature-checking policy: "check" warns, "enforce" errors out. */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* CPUID level overrides; UINT32_MAX means "auto". */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    DEFINE_PROP_END_OF_LIST()
};
4105
/*
 * QOM class_init for the abstract TYPE_X86_CPU base class: wires the
 * generic CPUClass/DeviceClass hooks to their x86 implementations and
 * saves the parent handlers so subclasses/realize can chain to them.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Save the parent's realize/unrealize so ours can call them. */
    xcc->parent_realize = dc->realize;
    xcc->parent_unrealize = dc->unrealize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    /* Interrupt handling hooks only exist for the TCG accelerator. */
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System-emulation-only hooks: memory mapping, ELF dump notes,
     * migration state. */
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 57;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 41;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    dc->user_creatable = true;
}
4165
/* Abstract base type all concrete x86 CPU models derive from. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,                 /* only subclasses are instantiable */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
4175
4176
/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* Static and migration-safe: feature set never depends on host/accel. */
    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    /* Sort after the regular CPU models in query-cpu-definitions output. */
    xcc->ordering = 8;
}
4187
/* QOM registration for the "base" CPU model. */
static const TypeInfo x86_base_cpu_type_info = {
        .name = X86_CPU_TYPE_NAME("base"),
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_base_class_init,
};
4193
4194 static void x86_cpu_register_types(void)
4195 {
4196 int i;
4197
4198 type_register_static(&x86_cpu_type_info);
4199 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4200 x86_register_cpudef_type(&builtin_x86_defs[i]);
4201 }
4202 type_register_static(&max_x86_cpu_type_info);
4203 type_register_static(&x86_base_cpu_type_info);
4204 #ifdef CONFIG_KVM
4205 type_register_static(&host_x86_cpu_type_info);
4206 #endif
4207 }
4208
/* Run x86_cpu_register_types() during QEMU's module-init phase. */
type_init(x86_cpu_register_types)