target/i386/cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32 #include "qapi/qmp/types.h"
33
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "qom/qom-qobject.h"
38 #include "sysemu/arch_init.h"
39
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
43
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/i386/topology.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "exec/address-spaces.h"
49 #include "hw/hw.h"
50 #include "hw/xen/xen.h"
51 #include "hw/i386/apic_internal.h"
52 #endif
53
54
55 /* Cache topology CPUID constants: */
56
57 /* CPUID Leaf 2 Descriptors */
58
59 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
60 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
61 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
62 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
63
64
65 /* CPUID Leaf 4 constants: */
66
67 /* EAX: */
68 #define CPUID_4_TYPE_DCACHE 1
69 #define CPUID_4_TYPE_ICACHE 2
70 #define CPUID_4_TYPE_UNIFIED 3
71
72 #define CPUID_4_LEVEL(l) ((l) << 5)
73
74 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
75 #define CPUID_4_FULLY_ASSOC (1 << 9)
76
77 /* EDX: */
78 #define CPUID_4_NO_INVD_SHARING (1 << 0)
79 #define CPUID_4_INCLUSIVE (1 << 1)
80 #define CPUID_4_COMPLEX_IDX (1 << 2)
81
82 #define ASSOC_FULL 0xFF
83
84 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
85 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
86 a == 2 ? 0x2 : \
87 a == 4 ? 0x4 : \
88 a == 8 ? 0x6 : \
89 a == 16 ? 0x8 : \
90 a == 32 ? 0xA : \
91 a == 48 ? 0xB : \
92 a == 64 ? 0xC : \
93 a == 96 ? 0xD : \
94 a == 128 ? 0xE : \
95 a == ASSOC_FULL ? 0xF : \
96 0 /* invalid value */)
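/*
 * Worked example (illustrative): for the hardcoded L2 cache below,
 * AMD_ENC_ASSOC(L2_ASSOCIATIVITY) == AMD_ENC_ASSOC(16) == 0x8, the value
 * reported in CPUID[0x80000006].ECX bits 15:12. Any associativity not
 * listed above encodes as 0 (invalid).
 */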
97
98
99 /* Definitions of the hardcoded cache entries we expose: */
100
101 /* L1 data cache: */
102 #define L1D_LINE_SIZE 64
103 #define L1D_ASSOCIATIVITY 8
104 #define L1D_SETS 64
105 #define L1D_PARTITIONS 1
106 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
107 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
108 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
109 #define L1D_LINES_PER_TAG 1
110 #define L1D_SIZE_KB_AMD 64
111 #define L1D_ASSOCIATIVITY_AMD 2
112
113 /* L1 instruction cache: */
114 #define L1I_LINE_SIZE 64
115 #define L1I_ASSOCIATIVITY 8
116 #define L1I_SETS 64
117 #define L1I_PARTITIONS 1
118 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
119 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
120 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
121 #define L1I_LINES_PER_TAG 1
122 #define L1I_SIZE_KB_AMD 64
123 #define L1I_ASSOCIATIVITY_AMD 2
124
125 /* Level 2 unified cache: */
126 #define L2_LINE_SIZE 64
127 #define L2_ASSOCIATIVITY 16
128 #define L2_SETS 4096
129 #define L2_PARTITIONS 1
130 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
131 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
132 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
133 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
134 #define L2_LINES_PER_TAG 1
135 #define L2_SIZE_KB_AMD 512
136
137 /* Level 3 unified cache: */
138 #define L3_SIZE_KB 0 /* disabled */
139 #define L3_ASSOCIATIVITY 0 /* disabled */
140 #define L3_LINES_PER_TAG 0 /* disabled */
141 #define L3_LINE_SIZE 0 /* disabled */
142 #define L3_N_LINE_SIZE 64
143 #define L3_N_ASSOCIATIVITY 16
144 #define L3_N_SETS 16384
145 #define L3_N_PARTITIONS 1
146 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
147 #define L3_N_LINES_PER_TAG 1
148 #define L3_N_SIZE_KB_AMD 16384
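/*
 * Illustrative size check for the constants above:
 *   L1D/L1I: 64 B line *  8 ways *    64 sets * 1 partition = 32 KiB
 *   L2:      64 B line * 16 ways *  4096 sets * 1 partition =  4 MiB
 *   L3 (-N): 64 B line * 16 ways * 16384 sets * 1 partition = 16 MiB
 */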
149
150 /* TLB definitions: */
151
152 #define L1_DTLB_2M_ASSOC 1
153 #define L1_DTLB_2M_ENTRIES 255
154 #define L1_DTLB_4K_ASSOC 1
155 #define L1_DTLB_4K_ENTRIES 255
156
157 #define L1_ITLB_2M_ASSOC 1
158 #define L1_ITLB_2M_ENTRIES 255
159 #define L1_ITLB_4K_ASSOC 1
160 #define L1_ITLB_4K_ENTRIES 255
161
162 #define L2_DTLB_2M_ASSOC 0 /* disabled */
163 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
164 #define L2_DTLB_4K_ASSOC 4
165 #define L2_DTLB_4K_ENTRIES 512
166
167 #define L2_ITLB_2M_ASSOC 0 /* disabled */
168 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
169 #define L2_ITLB_4K_ASSOC 4
170 #define L2_ITLB_4K_ENTRIES 512
171
172
173
174 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
175 uint32_t vendor2, uint32_t vendor3)
176 {
177 int i;
178 for (i = 0; i < 4; i++) {
179 dst[i] = vendor1 >> (8 * i);
180 dst[i + 4] = vendor2 >> (8 * i);
181 dst[i + 8] = vendor3 >> (8 * i);
182 }
183 dst[CPUID_VENDOR_SZ] = '\0';
184 }
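/*
 * Illustrative example: on a "GenuineIntel" part, CPUID[0] returns the
 * vendor string as the little-endian words "Genu" (EBX), "ineI" (EDX) and
 * "ntel" (ECX), so x86_cpu_vendor_words2str(dst, ebx, edx, ecx) yields the
 * 12-character string "GenuineIntel".
 */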
185
186 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
187 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
188 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
189 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
190 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
191 CPUID_PSE36 | CPUID_FXSR)
192 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
193 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
194 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
195 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
196 CPUID_PAE | CPUID_SEP | CPUID_APIC)
197
198 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
199 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
200 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
201 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
202 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
203 /* partly implemented:
204 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
205 /* missing:
206 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
207 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
208 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
209 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
210 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
211 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
212 /* missing:
213 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
214 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
215 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
216 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
217 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
218
219 #ifdef TARGET_X86_64
220 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
221 #else
222 #define TCG_EXT2_X86_64_FEATURES 0
223 #endif
224
225 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
226 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
227 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
228 TCG_EXT2_X86_64_FEATURES)
229 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
230 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
231 #define TCG_EXT4_FEATURES 0
232 #define TCG_SVM_FEATURES 0
233 #define TCG_KVM_FEATURES 0
234 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
235 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
236 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
237 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
238 CPUID_7_0_EBX_ERMS)
239 /* missing:
240 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
241 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
242 CPUID_7_0_EBX_RDSEED */
243 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
244 CPUID_7_0_ECX_LA57)
245 #define TCG_7_0_EDX_FEATURES 0
246 #define TCG_APM_FEATURES 0
247 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
248 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
249 /* missing:
250 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
251
252 typedef struct FeatureWordInfo {
253 /* feature flag names are taken from "Intel Processor Identification and
254 * the CPUID Instruction" and AMD's "CPUID Specification".
255 * In cases of disagreement between feature naming conventions,
256 * aliases may be added.
257 */
258 const char *feat_names[32];
259 uint32_t cpuid_eax; /* Input EAX for CPUID */
260 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
261 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
262 int cpuid_reg; /* output register (R_* constant) */
263 uint32_t tcg_features; /* Feature flags supported by TCG */
264 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
265 uint32_t migratable_flags; /* Feature flags known to be migratable */
266 } FeatureWordInfo;
267
268 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
269 [FEAT_1_EDX] = {
270 .feat_names = {
271 "fpu", "vme", "de", "pse",
272 "tsc", "msr", "pae", "mce",
273 "cx8", "apic", NULL, "sep",
274 "mtrr", "pge", "mca", "cmov",
275 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
276 NULL, "ds" /* Intel dts */, "acpi", "mmx",
277 "fxsr", "sse", "sse2", "ss",
278 "ht" /* Intel htt */, "tm", "ia64", "pbe",
279 },
280 .cpuid_eax = 1, .cpuid_reg = R_EDX,
281 .tcg_features = TCG_FEATURES,
282 },
283 [FEAT_1_ECX] = {
284 .feat_names = {
285 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
286 "ds-cpl", "vmx", "smx", "est",
287 "tm2", "ssse3", "cid", NULL,
288 "fma", "cx16", "xtpr", "pdcm",
289 NULL, "pcid", "dca", "sse4.1",
290 "sse4.2", "x2apic", "movbe", "popcnt",
291 "tsc-deadline", "aes", "xsave", "osxsave",
292 "avx", "f16c", "rdrand", "hypervisor",
293 },
294 .cpuid_eax = 1, .cpuid_reg = R_ECX,
295 .tcg_features = TCG_EXT_FEATURES,
296 },
297 /* Feature names that are already defined in feature_name[] but are
298 * also set in CPUID[8000_0001].EDX on AMD CPUs do not have their names
299 * repeated in feat_names below. They are copied automatically
300 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
301 */
302 [FEAT_8000_0001_EDX] = {
303 .feat_names = {
304 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
305 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
306 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
307 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
308 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
309 "nx", NULL, "mmxext", NULL /* mmx */,
310 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
311 NULL, "lm", "3dnowext", "3dnow",
312 },
313 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
314 .tcg_features = TCG_EXT2_FEATURES,
315 },
316 [FEAT_8000_0001_ECX] = {
317 .feat_names = {
318 "lahf-lm", "cmp-legacy", "svm", "extapic",
319 "cr8legacy", "abm", "sse4a", "misalignsse",
320 "3dnowprefetch", "osvw", "ibs", "xop",
321 "skinit", "wdt", NULL, "lwp",
322 "fma4", "tce", NULL, "nodeid-msr",
323 NULL, "tbm", "topoext", "perfctr-core",
324 "perfctr-nb", NULL, NULL, NULL,
325 NULL, NULL, NULL, NULL,
326 },
327 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
328 .tcg_features = TCG_EXT3_FEATURES,
329 },
330 [FEAT_C000_0001_EDX] = {
331 .feat_names = {
332 NULL, NULL, "xstore", "xstore-en",
333 NULL, NULL, "xcrypt", "xcrypt-en",
334 "ace2", "ace2-en", "phe", "phe-en",
335 "pmm", "pmm-en", NULL, NULL,
336 NULL, NULL, NULL, NULL,
337 NULL, NULL, NULL, NULL,
338 NULL, NULL, NULL, NULL,
339 NULL, NULL, NULL, NULL,
340 },
341 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
342 .tcg_features = TCG_EXT4_FEATURES,
343 },
344 [FEAT_KVM] = {
345 .feat_names = {
346 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
347 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
348 NULL, NULL, NULL, NULL,
349 NULL, NULL, NULL, NULL,
350 NULL, NULL, NULL, NULL,
351 NULL, NULL, NULL, NULL,
352 "kvmclock-stable-bit", NULL, NULL, NULL,
353 NULL, NULL, NULL, NULL,
354 },
355 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
356 .tcg_features = TCG_KVM_FEATURES,
357 },
358 [FEAT_HYPERV_EAX] = {
359 .feat_names = {
360 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
361 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
362 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
363 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
364 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
365 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
366 NULL, NULL, NULL, NULL,
367 NULL, NULL, NULL, NULL,
368 NULL, NULL, NULL, NULL,
369 NULL, NULL, NULL, NULL,
370 NULL, NULL, NULL, NULL,
371 },
372 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
373 },
374 [FEAT_HYPERV_EBX] = {
375 .feat_names = {
376 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
377 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
378 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
379 NULL /* hv_create_port */, NULL /* hv_connect_port */,
380 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
381 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
382 NULL, NULL,
383 NULL, NULL, NULL, NULL,
384 NULL, NULL, NULL, NULL,
385 NULL, NULL, NULL, NULL,
386 NULL, NULL, NULL, NULL,
387 },
388 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
389 },
390 [FEAT_HYPERV_EDX] = {
391 .feat_names = {
392 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
393 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
394 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
395 NULL, NULL,
396 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
397 NULL, NULL, NULL, NULL,
398 NULL, NULL, NULL, NULL,
399 NULL, NULL, NULL, NULL,
400 NULL, NULL, NULL, NULL,
401 NULL, NULL, NULL, NULL,
402 },
403 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
404 },
405 [FEAT_SVM] = {
406 .feat_names = {
407 "npt", "lbrv", "svm-lock", "nrip-save",
408 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
409 NULL, NULL, "pause-filter", NULL,
410 "pfthreshold", NULL, NULL, NULL,
411 NULL, NULL, NULL, NULL,
412 NULL, NULL, NULL, NULL,
413 NULL, NULL, NULL, NULL,
414 NULL, NULL, NULL, NULL,
415 },
416 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
417 .tcg_features = TCG_SVM_FEATURES,
418 },
419 [FEAT_7_0_EBX] = {
420 .feat_names = {
421 "fsgsbase", "tsc-adjust", NULL, "bmi1",
422 "hle", "avx2", NULL, "smep",
423 "bmi2", "erms", "invpcid", "rtm",
424 NULL, NULL, "mpx", NULL,
425 "avx512f", "avx512dq", "rdseed", "adx",
426 "smap", "avx512ifma", "pcommit", "clflushopt",
427 "clwb", NULL, "avx512pf", "avx512er",
428 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
429 },
430 .cpuid_eax = 7,
431 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
432 .cpuid_reg = R_EBX,
433 .tcg_features = TCG_7_0_EBX_FEATURES,
434 },
435 [FEAT_7_0_ECX] = {
436 .feat_names = {
437 NULL, "avx512vbmi", "umip", "pku",
438 "ospke", NULL, NULL, NULL,
439 NULL, NULL, NULL, NULL,
440 NULL, NULL, "avx512-vpopcntdq", NULL,
441 "la57", NULL, NULL, NULL,
442 NULL, NULL, "rdpid", NULL,
443 NULL, NULL, NULL, NULL,
444 NULL, NULL, NULL, NULL,
445 },
446 .cpuid_eax = 7,
447 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
448 .cpuid_reg = R_ECX,
449 .tcg_features = TCG_7_0_ECX_FEATURES,
450 },
451 [FEAT_7_0_EDX] = {
452 .feat_names = {
453 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
454 NULL, NULL, NULL, NULL,
455 NULL, NULL, NULL, NULL,
456 NULL, NULL, NULL, NULL,
457 NULL, NULL, NULL, NULL,
458 NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL,
461 },
462 .cpuid_eax = 7,
463 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
464 .cpuid_reg = R_EDX,
465 .tcg_features = TCG_7_0_EDX_FEATURES,
466 },
467 [FEAT_8000_0007_EDX] = {
468 .feat_names = {
469 NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL,
471 "invtsc", NULL, NULL, NULL,
472 NULL, NULL, NULL, NULL,
473 NULL, NULL, NULL, NULL,
474 NULL, NULL, NULL, NULL,
475 NULL, NULL, NULL, NULL,
476 NULL, NULL, NULL, NULL,
477 },
478 .cpuid_eax = 0x80000007,
479 .cpuid_reg = R_EDX,
480 .tcg_features = TCG_APM_FEATURES,
481 .unmigratable_flags = CPUID_APM_INVTSC,
482 },
483 [FEAT_XSAVE] = {
484 .feat_names = {
485 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
486 NULL, NULL, NULL, NULL,
487 NULL, NULL, NULL, NULL,
488 NULL, NULL, NULL, NULL,
489 NULL, NULL, NULL, NULL,
490 NULL, NULL, NULL, NULL,
491 NULL, NULL, NULL, NULL,
492 NULL, NULL, NULL, NULL,
493 },
494 .cpuid_eax = 0xd,
495 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
496 .cpuid_reg = R_EAX,
497 .tcg_features = TCG_XSAVE_FEATURES,
498 },
499 [FEAT_6_EAX] = {
500 .feat_names = {
501 NULL, NULL, "arat", NULL,
502 NULL, NULL, NULL, NULL,
503 NULL, NULL, NULL, NULL,
504 NULL, NULL, NULL, NULL,
505 NULL, NULL, NULL, NULL,
506 NULL, NULL, NULL, NULL,
507 NULL, NULL, NULL, NULL,
508 NULL, NULL, NULL, NULL,
509 },
510 .cpuid_eax = 6, .cpuid_reg = R_EAX,
511 .tcg_features = TCG_6_EAX_FEATURES,
512 },
513 [FEAT_XSAVE_COMP_LO] = {
514 .cpuid_eax = 0xD,
515 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
516 .cpuid_reg = R_EAX,
517 .tcg_features = ~0U,
518 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
519 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
520 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
521 XSTATE_PKRU_MASK,
522 },
523 [FEAT_XSAVE_COMP_HI] = {
524 .cpuid_eax = 0xD,
525 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
526 .cpuid_reg = R_EDX,
527 .tcg_features = ~0U,
528 },
529 };
530
531 typedef struct X86RegisterInfo32 {
532 /* Name of register */
533 const char *name;
534 /* QAPI enum value register */
535 X86CPURegister32 qapi_enum;
536 } X86RegisterInfo32;
537
538 #define REGISTER(reg) \
539 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
540 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
541 REGISTER(EAX),
542 REGISTER(ECX),
543 REGISTER(EDX),
544 REGISTER(EBX),
545 REGISTER(ESP),
546 REGISTER(EBP),
547 REGISTER(ESI),
548 REGISTER(EDI),
549 };
550 #undef REGISTER
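/*
 * Illustrative expansion: REGISTER(EAX) above becomes
 *   [R_EAX] = { .name = "EAX", .qapi_enum = X86_CPU_REGISTER32_EAX }
 */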
551
552 typedef struct ExtSaveArea {
553 uint32_t feature, bits;
554 uint32_t offset, size;
555 } ExtSaveArea;
556
557 static const ExtSaveArea x86_ext_save_areas[] = {
558 [XSTATE_FP_BIT] = {
559 /* x87 FP state component is always enabled if XSAVE is supported */
560 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
561 /* x87 state is in the legacy region of the XSAVE area */
562 .offset = 0,
563 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
564 },
565 [XSTATE_SSE_BIT] = {
566 /* SSE state component is always enabled if XSAVE is supported */
567 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
568 /* SSE state is in the legacy region of the XSAVE area */
569 .offset = 0,
570 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
571 },
572 [XSTATE_YMM_BIT] =
573 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
574 .offset = offsetof(X86XSaveArea, avx_state),
575 .size = sizeof(XSaveAVX) },
576 [XSTATE_BNDREGS_BIT] =
577 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
578 .offset = offsetof(X86XSaveArea, bndreg_state),
579 .size = sizeof(XSaveBNDREG) },
580 [XSTATE_BNDCSR_BIT] =
581 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
582 .offset = offsetof(X86XSaveArea, bndcsr_state),
583 .size = sizeof(XSaveBNDCSR) },
584 [XSTATE_OPMASK_BIT] =
585 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
586 .offset = offsetof(X86XSaveArea, opmask_state),
587 .size = sizeof(XSaveOpmask) },
588 [XSTATE_ZMM_Hi256_BIT] =
589 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
590 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
591 .size = sizeof(XSaveZMM_Hi256) },
592 [XSTATE_Hi16_ZMM_BIT] =
593 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
594 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
595 .size = sizeof(XSaveHi16_ZMM) },
596 [XSTATE_PKRU_BIT] =
597 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
598 .offset = offsetof(X86XSaveArea, pkru_state),
599 .size = sizeof(XSavePKRU) },
600 };
601
602 static uint32_t xsave_area_size(uint64_t mask)
603 {
604 int i;
605 uint64_t ret = 0;
606
607 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
608 const ExtSaveArea *esa = &x86_ext_save_areas[i];
609 if ((mask >> i) & 1) {
610 ret = MAX(ret, esa->offset + esa->size);
611 }
612 }
613 return ret;
614 }
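/*
 * Illustrative example: with mask = XSTATE_FP_MASK | XSTATE_SSE_MASK |
 * XSTATE_YMM_MASK, the loop above returns
 * offsetof(X86XSaveArea, avx_state) + sizeof(XSaveAVX), i.e. the end
 * offset of the highest enabled component.
 */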
615
616 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
617 {
618 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
619 cpu->env.features[FEAT_XSAVE_COMP_LO];
620 }
621
622 const char *get_register_name_32(unsigned int reg)
623 {
624 if (reg >= CPU_NB_REGS32) {
625 return NULL;
626 }
627 return x86_reg_info_32[reg].name;
628 }
629
630 /*
631 * Returns the set of feature flags that are supported and migratable by
632 * QEMU, for a given FeatureWord.
633 */
634 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
635 {
636 FeatureWordInfo *wi = &feature_word_info[w];
637 uint32_t r = 0;
638 int i;
639
640 for (i = 0; i < 32; i++) {
641 uint32_t f = 1U << i;
642
643 /* If the feature name is known, it is implicitly considered migratable,
644 * unless it is explicitly set in unmigratable_flags */
645 if ((wi->migratable_flags & f) ||
646 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
647 r |= f;
648 }
649 }
650 return r;
651 }
652
653 void host_cpuid(uint32_t function, uint32_t count,
654 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
655 {
656 uint32_t vec[4];
657
658 #ifdef __x86_64__
659 asm volatile("cpuid"
660 : "=a"(vec[0]), "=b"(vec[1]),
661 "=c"(vec[2]), "=d"(vec[3])
662 : "0"(function), "c"(count) : "cc");
663 #elif defined(__i386__)
664 asm volatile("pusha \n\t"
665 "cpuid \n\t"
666 "mov %%eax, 0(%2) \n\t"
667 "mov %%ebx, 4(%2) \n\t"
668 "mov %%ecx, 8(%2) \n\t"
669 "mov %%edx, 12(%2) \n\t"
670 "popa"
671 : : "a"(function), "c"(count), "S"(vec)
672 : "memory", "cc");
673 #else
674 abort();
675 #endif
676
677 if (eax)
678 *eax = vec[0];
679 if (ebx)
680 *ebx = vec[1];
681 if (ecx)
682 *ecx = vec[2];
683 if (edx)
684 *edx = vec[3];
685 }
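/*
 * Typical usage sketch (illustrative):
 *
 *     uint32_t eax, ebx, ecx, edx;
 *     host_cpuid(1, 0, &eax, &ebx, &ecx, &edx);
 *     // edx/ecx now hold the host's CPUID[1].EDX/ECX feature bits
 */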
686
687 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
688 {
689 uint32_t eax, ebx, ecx, edx;
690
691 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
692 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
693
694 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
695 if (family) {
696 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
697 }
698 if (model) {
699 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
700 }
701 if (stepping) {
702 *stepping = eax & 0x0F;
703 }
704 }
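/*
 * Worked example (illustrative): with CPUID[1].EAX = 0x000306a9,
 * family = 0x6 + 0x00 = 6, model = 0xa | 0x30 = 0x3a (58), stepping = 9,
 * i.e. an Ivy Bridge class part (compare the "IvyBridge" definition below).
 */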
705
706 /* CPU class name definitions: */
707
708 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
709 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
710
711 /* Return type name for a given CPU model name
712 * Caller is responsible for freeing the returned string.
713 */
714 static char *x86_cpu_type_name(const char *model_name)
715 {
716 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
717 }
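/*
 * Illustrative example, assuming TYPE_X86_CPU expands to "x86_64-cpu" on a
 * 64-bit target: x86_cpu_type_name("qemu64") returns the newly allocated
 * string "qemu64-x86_64-cpu", which the caller must g_free().
 */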
718
719 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
720 {
721 ObjectClass *oc;
722 char *typename;
723
724 if (cpu_model == NULL) {
725 return NULL;
726 }
727
728 typename = x86_cpu_type_name(cpu_model);
729 oc = object_class_by_name(typename);
730 g_free(typename);
731 return oc;
732 }
733
734 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
735 {
736 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
737 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
738 return g_strndup(class_name,
739 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
740 }
741
742 struct X86CPUDefinition {
743 const char *name;
744 uint32_t level;
745 uint32_t xlevel;
746 /* vendor is a zero-terminated, 12-character ASCII string */
747 char vendor[CPUID_VENDOR_SZ + 1];
748 int family;
749 int model;
750 int stepping;
751 FeatureWordArray features;
752 char model_id[48];
753 };
754
755 static X86CPUDefinition builtin_x86_defs[] = {
756 {
757 .name = "qemu64",
758 .level = 0xd,
759 .vendor = CPUID_VENDOR_AMD,
760 .family = 6,
761 .model = 6,
762 .stepping = 3,
763 .features[FEAT_1_EDX] =
764 PPRO_FEATURES |
765 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
766 CPUID_PSE36,
767 .features[FEAT_1_ECX] =
768 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
769 .features[FEAT_8000_0001_EDX] =
770 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
771 .features[FEAT_8000_0001_ECX] =
772 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
773 .xlevel = 0x8000000A,
774 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
775 },
776 {
777 .name = "phenom",
778 .level = 5,
779 .vendor = CPUID_VENDOR_AMD,
780 .family = 16,
781 .model = 2,
782 .stepping = 3,
783 /* Missing: CPUID_HT */
784 .features[FEAT_1_EDX] =
785 PPRO_FEATURES |
786 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
787 CPUID_PSE36 | CPUID_VME,
788 .features[FEAT_1_ECX] =
789 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
790 CPUID_EXT_POPCNT,
791 .features[FEAT_8000_0001_EDX] =
792 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
793 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
794 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
795 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
796 CPUID_EXT3_CR8LEG,
797 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
798 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
799 .features[FEAT_8000_0001_ECX] =
800 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
801 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
802 /* Missing: CPUID_SVM_LBRV */
803 .features[FEAT_SVM] =
804 CPUID_SVM_NPT,
805 .xlevel = 0x8000001A,
806 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
807 },
808 {
809 .name = "core2duo",
810 .level = 10,
811 .vendor = CPUID_VENDOR_INTEL,
812 .family = 6,
813 .model = 15,
814 .stepping = 11,
815 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
816 .features[FEAT_1_EDX] =
817 PPRO_FEATURES |
818 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
819 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
820 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
821 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
822 .features[FEAT_1_ECX] =
823 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
824 CPUID_EXT_CX16,
825 .features[FEAT_8000_0001_EDX] =
826 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
827 .features[FEAT_8000_0001_ECX] =
828 CPUID_EXT3_LAHF_LM,
829 .xlevel = 0x80000008,
830 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
831 },
832 {
833 .name = "kvm64",
834 .level = 0xd,
835 .vendor = CPUID_VENDOR_INTEL,
836 .family = 15,
837 .model = 6,
838 .stepping = 1,
839 /* Missing: CPUID_HT */
840 .features[FEAT_1_EDX] =
841 PPRO_FEATURES | CPUID_VME |
842 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
843 CPUID_PSE36,
844 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
845 .features[FEAT_1_ECX] =
846 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
847 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
848 .features[FEAT_8000_0001_EDX] =
849 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
850 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
851 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
852 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
853 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
854 .features[FEAT_8000_0001_ECX] =
855 0,
856 .xlevel = 0x80000008,
857 .model_id = "Common KVM processor"
858 },
859 {
860 .name = "qemu32",
861 .level = 4,
862 .vendor = CPUID_VENDOR_INTEL,
863 .family = 6,
864 .model = 6,
865 .stepping = 3,
866 .features[FEAT_1_EDX] =
867 PPRO_FEATURES,
868 .features[FEAT_1_ECX] =
869 CPUID_EXT_SSE3,
870 .xlevel = 0x80000004,
871 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
872 },
873 {
874 .name = "kvm32",
875 .level = 5,
876 .vendor = CPUID_VENDOR_INTEL,
877 .family = 15,
878 .model = 6,
879 .stepping = 1,
880 .features[FEAT_1_EDX] =
881 PPRO_FEATURES | CPUID_VME |
882 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
883 .features[FEAT_1_ECX] =
884 CPUID_EXT_SSE3,
885 .features[FEAT_8000_0001_ECX] =
886 0,
887 .xlevel = 0x80000008,
888 .model_id = "Common 32-bit KVM processor"
889 },
890 {
891 .name = "coreduo",
892 .level = 10,
893 .vendor = CPUID_VENDOR_INTEL,
894 .family = 6,
895 .model = 14,
896 .stepping = 8,
897 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
898 .features[FEAT_1_EDX] =
899 PPRO_FEATURES | CPUID_VME |
900 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
901 CPUID_SS,
902 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
903 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
904 .features[FEAT_1_ECX] =
905 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
906 .features[FEAT_8000_0001_EDX] =
907 CPUID_EXT2_NX,
908 .xlevel = 0x80000008,
909 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
910 },
911 {
912 .name = "486",
913 .level = 1,
914 .vendor = CPUID_VENDOR_INTEL,
915 .family = 4,
916 .model = 8,
917 .stepping = 0,
918 .features[FEAT_1_EDX] =
919 I486_FEATURES,
920 .xlevel = 0,
921 },
922 {
923 .name = "pentium",
924 .level = 1,
925 .vendor = CPUID_VENDOR_INTEL,
926 .family = 5,
927 .model = 4,
928 .stepping = 3,
929 .features[FEAT_1_EDX] =
930 PENTIUM_FEATURES,
931 .xlevel = 0,
932 },
933 {
934 .name = "pentium2",
935 .level = 2,
936 .vendor = CPUID_VENDOR_INTEL,
937 .family = 6,
938 .model = 5,
939 .stepping = 2,
940 .features[FEAT_1_EDX] =
941 PENTIUM2_FEATURES,
942 .xlevel = 0,
943 },
944 {
945 .name = "pentium3",
946 .level = 3,
947 .vendor = CPUID_VENDOR_INTEL,
948 .family = 6,
949 .model = 7,
950 .stepping = 3,
951 .features[FEAT_1_EDX] =
952 PENTIUM3_FEATURES,
953 .xlevel = 0,
954 },
955 {
956 .name = "athlon",
957 .level = 2,
958 .vendor = CPUID_VENDOR_AMD,
959 .family = 6,
960 .model = 2,
961 .stepping = 3,
962 .features[FEAT_1_EDX] =
963 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
964 CPUID_MCA,
965 .features[FEAT_8000_0001_EDX] =
966 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
967 .xlevel = 0x80000008,
968 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
969 },
970 {
971 .name = "n270",
972 .level = 10,
973 .vendor = CPUID_VENDOR_INTEL,
974 .family = 6,
975 .model = 28,
976 .stepping = 2,
977 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
978 .features[FEAT_1_EDX] =
979 PPRO_FEATURES |
980 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
981 CPUID_ACPI | CPUID_SS,
982 /* Some CPUs lack CPUID_SEP */
983 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
984 * CPUID_EXT_XTPR */
985 .features[FEAT_1_ECX] =
986 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
987 CPUID_EXT_MOVBE,
988 .features[FEAT_8000_0001_EDX] =
989 CPUID_EXT2_NX,
990 .features[FEAT_8000_0001_ECX] =
991 CPUID_EXT3_LAHF_LM,
992 .xlevel = 0x80000008,
993 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
994 },
995 {
996 .name = "Conroe",
997 .level = 10,
998 .vendor = CPUID_VENDOR_INTEL,
999 .family = 6,
1000 .model = 15,
1001 .stepping = 3,
1002 .features[FEAT_1_EDX] =
1003 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1004 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1005 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1006 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1007 CPUID_DE | CPUID_FP87,
1008 .features[FEAT_1_ECX] =
1009 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1010 .features[FEAT_8000_0001_EDX] =
1011 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1012 .features[FEAT_8000_0001_ECX] =
1013 CPUID_EXT3_LAHF_LM,
1014 .xlevel = 0x80000008,
1015 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1016 },
1017 {
1018 .name = "Penryn",
1019 .level = 10,
1020 .vendor = CPUID_VENDOR_INTEL,
1021 .family = 6,
1022 .model = 23,
1023 .stepping = 3,
1024 .features[FEAT_1_EDX] =
1025 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1026 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1027 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1028 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1029 CPUID_DE | CPUID_FP87,
1030 .features[FEAT_1_ECX] =
1031 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1032 CPUID_EXT_SSE3,
1033 .features[FEAT_8000_0001_EDX] =
1034 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1035 .features[FEAT_8000_0001_ECX] =
1036 CPUID_EXT3_LAHF_LM,
1037 .xlevel = 0x80000008,
1038 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1039 },
1040 {
1041 .name = "Nehalem",
1042 .level = 11,
1043 .vendor = CPUID_VENDOR_INTEL,
1044 .family = 6,
1045 .model = 26,
1046 .stepping = 3,
1047 .features[FEAT_1_EDX] =
1048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1052 CPUID_DE | CPUID_FP87,
1053 .features[FEAT_1_ECX] =
1054 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1055 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1056 .features[FEAT_8000_0001_EDX] =
1057 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1058 .features[FEAT_8000_0001_ECX] =
1059 CPUID_EXT3_LAHF_LM,
1060 .xlevel = 0x80000008,
1061 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1062 },
1063 {
1064 .name = "Westmere",
1065 .level = 11,
1066 .vendor = CPUID_VENDOR_INTEL,
1067 .family = 6,
1068 .model = 44,
1069 .stepping = 1,
1070 .features[FEAT_1_EDX] =
1071 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1072 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1073 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1074 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1075 CPUID_DE | CPUID_FP87,
1076 .features[FEAT_1_ECX] =
1077 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1078 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1079 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1080 .features[FEAT_8000_0001_EDX] =
1081 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1082 .features[FEAT_8000_0001_ECX] =
1083 CPUID_EXT3_LAHF_LM,
1084 .features[FEAT_6_EAX] =
1085 CPUID_6_EAX_ARAT,
1086 .xlevel = 0x80000008,
1087 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1088 },
1089 {
1090 .name = "SandyBridge",
1091 .level = 0xd,
1092 .vendor = CPUID_VENDOR_INTEL,
1093 .family = 6,
1094 .model = 42,
1095 .stepping = 1,
1096 .features[FEAT_1_EDX] =
1097 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1098 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1099 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1100 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1101 CPUID_DE | CPUID_FP87,
1102 .features[FEAT_1_ECX] =
1103 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1104 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1105 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1106 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1107 CPUID_EXT_SSE3,
1108 .features[FEAT_8000_0001_EDX] =
1109 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1110 CPUID_EXT2_SYSCALL,
1111 .features[FEAT_8000_0001_ECX] =
1112 CPUID_EXT3_LAHF_LM,
1113 .features[FEAT_XSAVE] =
1114 CPUID_XSAVE_XSAVEOPT,
1115 .features[FEAT_6_EAX] =
1116 CPUID_6_EAX_ARAT,
1117 .xlevel = 0x80000008,
1118 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1119 },
1120 {
1121 .name = "IvyBridge",
1122 .level = 0xd,
1123 .vendor = CPUID_VENDOR_INTEL,
1124 .family = 6,
1125 .model = 58,
1126 .stepping = 9,
1127 .features[FEAT_1_EDX] =
1128 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1129 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1130 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1131 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1132 CPUID_DE | CPUID_FP87,
1133 .features[FEAT_1_ECX] =
1134 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1135 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1136 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1137 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1138 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1139 .features[FEAT_7_0_EBX] =
1140 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1141 CPUID_7_0_EBX_ERMS,
1142 .features[FEAT_8000_0001_EDX] =
1143 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1144 CPUID_EXT2_SYSCALL,
1145 .features[FEAT_8000_0001_ECX] =
1146 CPUID_EXT3_LAHF_LM,
1147 .features[FEAT_XSAVE] =
1148 CPUID_XSAVE_XSAVEOPT,
1149 .features[FEAT_6_EAX] =
1150 CPUID_6_EAX_ARAT,
1151 .xlevel = 0x80000008,
1152 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1153 },
1154 {
1155 .name = "Haswell-noTSX",
1156 .level = 0xd,
1157 .vendor = CPUID_VENDOR_INTEL,
1158 .family = 6,
1159 .model = 60,
1160 .stepping = 1,
1161 .features[FEAT_1_EDX] =
1162 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1163 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1164 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1165 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1166 CPUID_DE | CPUID_FP87,
1167 .features[FEAT_1_ECX] =
1168 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1169 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1170 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1171 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1172 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1173 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1174 .features[FEAT_8000_0001_EDX] =
1175 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1176 CPUID_EXT2_SYSCALL,
1177 .features[FEAT_8000_0001_ECX] =
1178 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1179 .features[FEAT_7_0_EBX] =
1180 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1181 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1182 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1183 .features[FEAT_XSAVE] =
1184 CPUID_XSAVE_XSAVEOPT,
1185 .features[FEAT_6_EAX] =
1186 CPUID_6_EAX_ARAT,
1187 .xlevel = 0x80000008,
1188 .model_id = "Intel Core Processor (Haswell, no TSX)",
1189 }, {
1190 .name = "Haswell",
1191 .level = 0xd,
1192 .vendor = CPUID_VENDOR_INTEL,
1193 .family = 6,
1194 .model = 60,
1195 .stepping = 4,
1196 .features[FEAT_1_EDX] =
1197 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1198 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1199 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1200 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1201 CPUID_DE | CPUID_FP87,
1202 .features[FEAT_1_ECX] =
1203 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1204 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1205 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1206 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1207 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1208 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1209 .features[FEAT_8000_0001_EDX] =
1210 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1211 CPUID_EXT2_SYSCALL,
1212 .features[FEAT_8000_0001_ECX] =
1213 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1214 .features[FEAT_7_0_EBX] =
1215 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1216 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1217 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1218 CPUID_7_0_EBX_RTM,
1219 .features[FEAT_XSAVE] =
1220 CPUID_XSAVE_XSAVEOPT,
1221 .features[FEAT_6_EAX] =
1222 CPUID_6_EAX_ARAT,
1223 .xlevel = 0x80000008,
1224 .model_id = "Intel Core Processor (Haswell)",
1225 },
1226 {
1227 .name = "Broadwell-noTSX",
1228 .level = 0xd,
1229 .vendor = CPUID_VENDOR_INTEL,
1230 .family = 6,
1231 .model = 61,
1232 .stepping = 2,
1233 .features[FEAT_1_EDX] =
1234 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1235 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1236 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1237 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1238 CPUID_DE | CPUID_FP87,
1239 .features[FEAT_1_ECX] =
1240 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1241 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1242 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1243 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1244 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1245 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1246 .features[FEAT_8000_0001_EDX] =
1247 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1248 CPUID_EXT2_SYSCALL,
1249 .features[FEAT_8000_0001_ECX] =
1250 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1251 .features[FEAT_7_0_EBX] =
1252 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1253 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1254 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1255 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1256 CPUID_7_0_EBX_SMAP,
1257 .features[FEAT_XSAVE] =
1258 CPUID_XSAVE_XSAVEOPT,
1259 .features[FEAT_6_EAX] =
1260 CPUID_6_EAX_ARAT,
1261 .xlevel = 0x80000008,
1262 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1263 },
1264 {
1265 .name = "Broadwell",
1266 .level = 0xd,
1267 .vendor = CPUID_VENDOR_INTEL,
1268 .family = 6,
1269 .model = 61,
1270 .stepping = 2,
1271 .features[FEAT_1_EDX] =
1272 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1273 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1274 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1275 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1276 CPUID_DE | CPUID_FP87,
1277 .features[FEAT_1_ECX] =
1278 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1279 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1280 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1281 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1282 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1283 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1284 .features[FEAT_8000_0001_EDX] =
1285 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1286 CPUID_EXT2_SYSCALL,
1287 .features[FEAT_8000_0001_ECX] =
1288 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1289 .features[FEAT_7_0_EBX] =
1290 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1291 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1292 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1293 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1294 CPUID_7_0_EBX_SMAP,
1295 .features[FEAT_XSAVE] =
1296 CPUID_XSAVE_XSAVEOPT,
1297 .features[FEAT_6_EAX] =
1298 CPUID_6_EAX_ARAT,
1299 .xlevel = 0x80000008,
1300 .model_id = "Intel Core Processor (Broadwell)",
1301 },
1302 {
1303 .name = "Skylake-Client",
1304 .level = 0xd,
1305 .vendor = CPUID_VENDOR_INTEL,
1306 .family = 6,
1307 .model = 94,
1308 .stepping = 3,
1309 .features[FEAT_1_EDX] =
1310 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1311 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1312 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1313 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1314 CPUID_DE | CPUID_FP87,
1315 .features[FEAT_1_ECX] =
1316 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1317 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1318 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1319 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1320 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1321 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1322 .features[FEAT_8000_0001_EDX] =
1323 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1324 CPUID_EXT2_SYSCALL,
1325 .features[FEAT_8000_0001_ECX] =
1326 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1327 .features[FEAT_7_0_EBX] =
1328 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1329 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1330 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1331 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1332 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1333 /* Missing: XSAVES (not supported by some Linux versions,
1334 * including v4.1 to v4.12).
1335 * KVM doesn't yet expose any XSAVES state save component,
1336 * and the only one defined in Skylake (processor tracing)
1337 * probably will block migration anyway.
1338 */
1339 .features[FEAT_XSAVE] =
1340 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1341 CPUID_XSAVE_XGETBV1,
1342 .features[FEAT_6_EAX] =
1343 CPUID_6_EAX_ARAT,
1344 .xlevel = 0x80000008,
1345 .model_id = "Intel Core Processor (Skylake)",
1346 },
1347 {
1348 .name = "Skylake-Server",
1349 .level = 0xd,
1350 .vendor = CPUID_VENDOR_INTEL,
1351 .family = 6,
1352 .model = 85,
1353 .stepping = 4,
1354 .features[FEAT_1_EDX] =
1355 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1356 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1357 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1358 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1359 CPUID_DE | CPUID_FP87,
1360 .features[FEAT_1_ECX] =
1361 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1362 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1363 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1364 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1365 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1366 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1367 .features[FEAT_8000_0001_EDX] =
1368 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1369 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1370 .features[FEAT_8000_0001_ECX] =
1371 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1372 .features[FEAT_7_0_EBX] =
1373 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1374 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1375 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1376 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1377 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1378 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1379 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1380 CPUID_7_0_EBX_AVX512VL,
1381 /* Missing: XSAVES (not supported by some Linux versions,
1382 * including v4.1 to v4.12).
1383 * KVM doesn't yet expose any XSAVES state save component,
1384 * and the only one defined in Skylake (processor tracing)
1385 * probably will block migration anyway.
1386 */
1387 .features[FEAT_XSAVE] =
1388 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1389 CPUID_XSAVE_XGETBV1,
1390 .features[FEAT_6_EAX] =
1391 CPUID_6_EAX_ARAT,
1392 .xlevel = 0x80000008,
1393 .model_id = "Intel Xeon Processor (Skylake)",
1394 },
1395 {
1396 .name = "Opteron_G1",
1397 .level = 5,
1398 .vendor = CPUID_VENDOR_AMD,
1399 .family = 15,
1400 .model = 6,
1401 .stepping = 1,
1402 .features[FEAT_1_EDX] =
1403 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1404 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1405 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1406 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1407 CPUID_DE | CPUID_FP87,
1408 .features[FEAT_1_ECX] =
1409 CPUID_EXT_SSE3,
1410 .features[FEAT_8000_0001_EDX] =
1411 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1412 .xlevel = 0x80000008,
1413 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1414 },
1415 {
1416 .name = "Opteron_G2",
1417 .level = 5,
1418 .vendor = CPUID_VENDOR_AMD,
1419 .family = 15,
1420 .model = 6,
1421 .stepping = 1,
1422 .features[FEAT_1_EDX] =
1423 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1424 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1425 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1426 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1427 CPUID_DE | CPUID_FP87,
1428 .features[FEAT_1_ECX] =
1429 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1430 /* Missing: CPUID_EXT2_RDTSCP */
1431 .features[FEAT_8000_0001_EDX] =
1432 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1433 .features[FEAT_8000_0001_ECX] =
1434 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1435 .xlevel = 0x80000008,
1436 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1437 },
1438 {
1439 .name = "Opteron_G3",
1440 .level = 5,
1441 .vendor = CPUID_VENDOR_AMD,
1442 .family = 16,
1443 .model = 2,
1444 .stepping = 3,
1445 .features[FEAT_1_EDX] =
1446 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1447 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1448 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1449 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1450 CPUID_DE | CPUID_FP87,
1451 .features[FEAT_1_ECX] =
1452 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1453 CPUID_EXT_SSE3,
1454 /* Missing: CPUID_EXT2_RDTSCP */
1455 .features[FEAT_8000_0001_EDX] =
1456 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1457 .features[FEAT_8000_0001_ECX] =
1458 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1459 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1460 .xlevel = 0x80000008,
1461 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1462 },
1463 {
1464 .name = "Opteron_G4",
1465 .level = 0xd,
1466 .vendor = CPUID_VENDOR_AMD,
1467 .family = 21,
1468 .model = 1,
1469 .stepping = 2,
1470 .features[FEAT_1_EDX] =
1471 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1472 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1473 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1474 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1475 CPUID_DE | CPUID_FP87,
1476 .features[FEAT_1_ECX] =
1477 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1478 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1479 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1480 CPUID_EXT_SSE3,
1481 /* Missing: CPUID_EXT2_RDTSCP */
1482 .features[FEAT_8000_0001_EDX] =
1483 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1484 CPUID_EXT2_SYSCALL,
1485 .features[FEAT_8000_0001_ECX] =
1486 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1487 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1488 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1489 CPUID_EXT3_LAHF_LM,
1490 /* no xsaveopt! */
1491 .xlevel = 0x8000001A,
1492 .model_id = "AMD Opteron 62xx class CPU",
1493 },
1494 {
1495 .name = "Opteron_G5",
1496 .level = 0xd,
1497 .vendor = CPUID_VENDOR_AMD,
1498 .family = 21,
1499 .model = 2,
1500 .stepping = 0,
1501 .features[FEAT_1_EDX] =
1502 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1503 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1504 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1505 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1506 CPUID_DE | CPUID_FP87,
1507 .features[FEAT_1_ECX] =
1508 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1509 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1510 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1511 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1512 /* Missing: CPUID_EXT2_RDTSCP */
1513 .features[FEAT_8000_0001_EDX] =
1514 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1515 CPUID_EXT2_SYSCALL,
1516 .features[FEAT_8000_0001_ECX] =
1517 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1518 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1519 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1520 CPUID_EXT3_LAHF_LM,
1521 /* no xsaveopt! */
1522 .xlevel = 0x8000001A,
1523 .model_id = "AMD Opteron 63xx class CPU",
1524 },
1525 };
1526
1527 typedef struct PropValue {
1528 const char *prop, *value;
1529 } PropValue;
1530
1531 /* KVM-specific features that are automatically added/removed
1532 * from all CPU models when KVM is enabled.
1533 */
1534 static PropValue kvm_default_props[] = {
1535 { "kvmclock", "on" },
1536 { "kvm-nopiodelay", "on" },
1537 { "kvm-asyncpf", "on" },
1538 { "kvm-steal-time", "on" },
1539 { "kvm-pv-eoi", "on" },
1540 { "kvmclock-stable-bit", "on" },
1541 { "x2apic", "on" },
1542 { "acpi", "off" },
1543 { "monitor", "off" },
1544 { "svm", "off" },
1545 { NULL, NULL },
1546 };
1547
1548 /* TCG-specific defaults that override all CPU models when using TCG
1549 */
1550 static PropValue tcg_default_props[] = {
1551 { "vme", "off" },
1552 { NULL, NULL },
1553 };
1554
1555
1556 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1557 {
1558 PropValue *pv;
1559 for (pv = kvm_default_props; pv->prop; pv++) {
1560 if (!strcmp(pv->prop, prop)) {
1561 pv->value = value;
1562 break;
1563 }
1564 }
1565
1566 /* It is valid to call this function only for properties that
1567 * are already present in the kvm_default_props table.
1568 */
1569 assert(pv->prop);
1570 }
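/*
 * Hypothetical usage sketch: a machine-type compat hook could call
 *
 *     x86_cpu_change_kvm_default("kvm-pv-eoi", "off");
 *
 * The property must already be present in kvm_default_props, otherwise the
 * assert above fires.
 */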
1571
1572 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1573 bool migratable_only);
1574
1575 static bool lmce_supported(void)
1576 {
1577 uint64_t mce_cap = 0;
1578
1579 #ifdef CONFIG_KVM
1580 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1581 return false;
1582 }
1583 #endif
1584
1585 return !!(mce_cap & MCG_LMCE_P);
1586 }
1587
1588 #define CPUID_MODEL_ID_SZ 48
1589
1590 /**
1591 * cpu_x86_fill_model_id:
1592 * Get CPUID model ID string from host CPU.
1593 *
1594 * @str should have at least CPUID_MODEL_ID_SZ bytes
1595 *
1596 * The function does NOT add a null terminator to the string
1597 * automatically.
1598 */
1599 static int cpu_x86_fill_model_id(char *str)
1600 {
1601 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1602 int i;
1603
1604 for (i = 0; i < 3; i++) {
1605 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1606 memcpy(str + i * 16 + 0, &eax, 4);
1607 memcpy(str + i * 16 + 4, &ebx, 4);
1608 memcpy(str + i * 16 + 8, &ecx, 4);
1609 memcpy(str + i * 16 + 12, &edx, 4);
1610 }
1611 return 0;
1612 }
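/*
 * Illustrative note: leaves 0x80000002..0x80000004 each return 16 bytes of
 * the brand string in EAX/EBX/ECX/EDX, so the loop above fills exactly
 * CPUID_MODEL_ID_SZ (48) bytes; the caller is responsible for NUL
 * termination.
 */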
1613
1614 static Property max_x86_cpu_properties[] = {
1615 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1616 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1617 DEFINE_PROP_END_OF_LIST()
1618 };
1619
1620 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
1621 {
1622 DeviceClass *dc = DEVICE_CLASS(oc);
1623 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1624
1625 xcc->ordering = 9;
1626
1627 xcc->model_description =
1628 "Enables all features supported by the accelerator in the current host";
1629
1630 dc->props = max_x86_cpu_properties;
1631 }
1632
1633 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
1634
1635 static void max_x86_cpu_initfn(Object *obj)
1636 {
1637 X86CPU *cpu = X86_CPU(obj);
1638 CPUX86State *env = &cpu->env;
1639 KVMState *s = kvm_state;
1640
1641 /* We can't fill the features array here because we don't know yet if
1642 * "migratable" is true or false.
1643 */
1644 cpu->max_features = true;
1645
1646 if (kvm_enabled()) {
1647 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
1648 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
1649 int family, model, stepping;
1650
1651 host_vendor_fms(vendor, &family, &model, &stepping);
1652
1653 cpu_x86_fill_model_id(model_id);
1654
1655 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
1656 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
1657 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
1658 object_property_set_int(OBJECT(cpu), stepping, "stepping",
1659 &error_abort);
1660 object_property_set_str(OBJECT(cpu), model_id, "model-id",
1661 &error_abort);
1662
1663 env->cpuid_min_level =
1664 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1665 env->cpuid_min_xlevel =
1666 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1667 env->cpuid_min_xlevel2 =
1668 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1669
1670 if (lmce_supported()) {
1671 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1672 }
1673 } else {
1674 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
1675 "vendor", &error_abort);
1676 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
1677 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
1678 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
1679 object_property_set_str(OBJECT(cpu),
1680 "QEMU TCG CPU version " QEMU_HW_VERSION,
1681 "model-id", &error_abort);
1682 }
1683
1684 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1685 }
1686
1687 static const TypeInfo max_x86_cpu_type_info = {
1688 .name = X86_CPU_TYPE_NAME("max"),
1689 .parent = TYPE_X86_CPU,
1690 .instance_init = max_x86_cpu_initfn,
1691 .class_init = max_x86_cpu_class_init,
1692 };
1693
1694 #ifdef CONFIG_KVM
1695
1696 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1697 {
1698 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1699
1700 xcc->kvm_required = true;
1701 xcc->ordering = 8;
1702
1703 xcc->model_description =
1704 "KVM processor with all supported host features "
1705 "(only available in KVM mode)";
1706 }
1707
1708 static const TypeInfo host_x86_cpu_type_info = {
1709 .name = X86_CPU_TYPE_NAME("host"),
1710 .parent = X86_CPU_TYPE_NAME("max"),
1711 .class_init = host_x86_cpu_class_init,
1712 };
1713
1714 #endif
1715
1716 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1717 {
1718 FeatureWordInfo *f = &feature_word_info[w];
1719 int i;
1720
1721 for (i = 0; i < 32; ++i) {
1722 if ((1UL << i) & mask) {
1723 const char *reg = get_register_name_32(f->cpuid_reg);
1724 assert(reg);
1725 fprintf(stderr, "warning: %s doesn't support requested feature: "
1726 "CPUID.%02XH:%s%s%s [bit %d]\n",
1727 kvm_enabled() ? "host" : "TCG",
1728 f->cpuid_eax, reg,
1729 f->feat_names[i] ? "." : "",
1730 f->feat_names[i] ? f->feat_names[i] : "", i);
1731 }
1732 }
1733 }
1734
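/*
 * For instance (illustrative, assuming "avx512f" is requested on a host
 * that lacks it under KVM), the loop above prints a line like:
 *
 *   warning: host doesn't support requested feature: CPUID.07H:EBX.avx512f [bit 16]
 */
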
1735 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1736 const char *name, void *opaque,
1737 Error **errp)
1738 {
1739 X86CPU *cpu = X86_CPU(obj);
1740 CPUX86State *env = &cpu->env;
1741 int64_t value;
1742
1743 value = (env->cpuid_version >> 8) & 0xf;
1744 if (value == 0xf) {
1745 value += (env->cpuid_version >> 20) & 0xff;
1746 }
1747 visit_type_int(v, name, &value, errp);
1748 }
1749
1750 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1751 const char *name, void *opaque,
1752 Error **errp)
1753 {
1754 X86CPU *cpu = X86_CPU(obj);
1755 CPUX86State *env = &cpu->env;
1756 const int64_t min = 0;
1757 const int64_t max = 0xff + 0xf;
1758 Error *local_err = NULL;
1759 int64_t value;
1760
1761 visit_type_int(v, name, &value, &local_err);
1762 if (local_err) {
1763 error_propagate(errp, local_err);
1764 return;
1765 }
1766 if (value < min || value > max) {
1767 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1768 name ? name : "null", value, min, max);
1769 return;
1770 }
1771
1772 env->cpuid_version &= ~0xff00f00;
1773 if (value > 0x0f) {
1774 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1775 } else {
1776 env->cpuid_version |= value << 8;
1777 }
1778 }
1779
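/*
 * Worked example (added for illustration): the getter/setter pair above
 * implements the usual family/extended-family split of CPUID[1].EAX.
 * Setting family=0x17 stores 0xf in bits 11:8 and (0x17 - 0xf) = 0x8 in
 * bits 27:20, i.e. cpuid_version |= 0x00800f00; the getter then returns
 * 0xf + 0x8 = 0x17. A family of 6 fits in the base field and is stored
 * directly as 6 << 8.
 */
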
1780 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1781 const char *name, void *opaque,
1782 Error **errp)
1783 {
1784 X86CPU *cpu = X86_CPU(obj);
1785 CPUX86State *env = &cpu->env;
1786 int64_t value;
1787
1788 value = (env->cpuid_version >> 4) & 0xf;
1789 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1790 visit_type_int(v, name, &value, errp);
1791 }
1792
1793 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1794 const char *name, void *opaque,
1795 Error **errp)
1796 {
1797 X86CPU *cpu = X86_CPU(obj);
1798 CPUX86State *env = &cpu->env;
1799 const int64_t min = 0;
1800 const int64_t max = 0xff;
1801 Error *local_err = NULL;
1802 int64_t value;
1803
1804 visit_type_int(v, name, &value, &local_err);
1805 if (local_err) {
1806 error_propagate(errp, local_err);
1807 return;
1808 }
1809 if (value < min || value > max) {
1810 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1811 name ? name : "null", value, min, max);
1812 return;
1813 }
1814
1815 env->cpuid_version &= ~0xf00f0;
1816 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1817 }
1818
1819 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1820 const char *name, void *opaque,
1821 Error **errp)
1822 {
1823 X86CPU *cpu = X86_CPU(obj);
1824 CPUX86State *env = &cpu->env;
1825 int64_t value;
1826
1827 value = env->cpuid_version & 0xf;
1828 visit_type_int(v, name, &value, errp);
1829 }
1830
1831 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1832 const char *name, void *opaque,
1833 Error **errp)
1834 {
1835 X86CPU *cpu = X86_CPU(obj);
1836 CPUX86State *env = &cpu->env;
1837 const int64_t min = 0;
1838 const int64_t max = 0xf;
1839 Error *local_err = NULL;
1840 int64_t value;
1841
1842 visit_type_int(v, name, &value, &local_err);
1843 if (local_err) {
1844 error_propagate(errp, local_err);
1845 return;
1846 }
1847 if (value < min || value > max) {
1848 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1849 name ? name : "null", value, min, max);
1850 return;
1851 }
1852
1853 env->cpuid_version &= ~0xf;
1854 env->cpuid_version |= value & 0xf;
1855 }
1856
1857 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1858 {
1859 X86CPU *cpu = X86_CPU(obj);
1860 CPUX86State *env = &cpu->env;
1861 char *value;
1862
1863 value = g_malloc(CPUID_VENDOR_SZ + 1);
1864 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1865 env->cpuid_vendor3);
1866 return value;
1867 }
1868
1869 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1870 Error **errp)
1871 {
1872 X86CPU *cpu = X86_CPU(obj);
1873 CPUX86State *env = &cpu->env;
1874 int i;
1875
1876 if (strlen(value) != CPUID_VENDOR_SZ) {
1877 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1878 return;
1879 }
1880
1881 env->cpuid_vendor1 = 0;
1882 env->cpuid_vendor2 = 0;
1883 env->cpuid_vendor3 = 0;
1884 for (i = 0; i < 4; i++) {
1885 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1886 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1887 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1888 }
1889 }
1890
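/*
 * Worked example (added for illustration): the vendor string is packed
 * little-endian, four characters per register, in the EBX/EDX/ECX order
 * used by CPUID leaf 0. For "GenuineIntel" this yields:
 *
 *   cpuid_vendor1 (EBX) = 0x756e6547  "Genu"
 *   cpuid_vendor2 (EDX) = 0x49656e69  "ineI"
 *   cpuid_vendor3 (ECX) = 0x6c65746e  "ntel"
 */
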
1891 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1892 {
1893 X86CPU *cpu = X86_CPU(obj);
1894 CPUX86State *env = &cpu->env;
1895 char *value;
1896 int i;
1897
1898 value = g_malloc(48 + 1);
1899 for (i = 0; i < 48; i++) {
1900 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1901 }
1902 value[48] = '\0';
1903 return value;
1904 }
1905
1906 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1907 Error **errp)
1908 {
1909 X86CPU *cpu = X86_CPU(obj);
1910 CPUX86State *env = &cpu->env;
1911 int c, len, i;
1912
1913 if (model_id == NULL) {
1914 model_id = "";
1915 }
1916 len = strlen(model_id);
1917 memset(env->cpuid_model, 0, 48);
1918 for (i = 0; i < 48; i++) {
1919 if (i >= len) {
1920 c = '\0';
1921 } else {
1922 c = (uint8_t)model_id[i];
1923 }
1924 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1925 }
1926 }
1927
1928 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1929 void *opaque, Error **errp)
1930 {
1931 X86CPU *cpu = X86_CPU(obj);
1932 int64_t value;
1933
1934 value = cpu->env.tsc_khz * 1000;
1935 visit_type_int(v, name, &value, errp);
1936 }
1937
1938 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1939 void *opaque, Error **errp)
1940 {
1941 X86CPU *cpu = X86_CPU(obj);
1942 const int64_t min = 0;
1943 const int64_t max = INT64_MAX;
1944 Error *local_err = NULL;
1945 int64_t value;
1946
1947 visit_type_int(v, name, &value, &local_err);
1948 if (local_err) {
1949 error_propagate(errp, local_err);
1950 return;
1951 }
1952 if (value < min || value > max) {
1953 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1954 name ? name : "null", value, min, max);
1955 return;
1956 }
1957
1958 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1959 }
1960
1961 /* Generic getter for "feature-words" and "filtered-features" properties */
1962 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1963 const char *name, void *opaque,
1964 Error **errp)
1965 {
1966 uint32_t *array = (uint32_t *)opaque;
1967 FeatureWord w;
1968 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1969 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1970 X86CPUFeatureWordInfoList *list = NULL;
1971
1972 for (w = 0; w < FEATURE_WORDS; w++) {
1973 FeatureWordInfo *wi = &feature_word_info[w];
1974 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1975 qwi->cpuid_input_eax = wi->cpuid_eax;
1976 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1977 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1978 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1979 qwi->features = array[w];
1980
1981 /* List will be in reverse order, but order shouldn't matter */
1982 list_entries[w].next = list;
1983 list_entries[w].value = &word_infos[w];
1984 list = &list_entries[w];
1985 }
1986
1987 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
1988 }
1989
1990 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1991 void *opaque, Error **errp)
1992 {
1993 X86CPU *cpu = X86_CPU(obj);
1994 int64_t value = cpu->hyperv_spinlock_attempts;
1995
1996 visit_type_int(v, name, &value, errp);
1997 }
1998
1999 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2000 void *opaque, Error **errp)
2001 {
2002 const int64_t min = 0xFFF;
2003 const int64_t max = UINT_MAX;
2004 X86CPU *cpu = X86_CPU(obj);
2005 Error *err = NULL;
2006 int64_t value;
2007
2008 visit_type_int(v, name, &value, &err);
2009 if (err) {
2010 error_propagate(errp, err);
2011 return;
2012 }
2013
2014 if (value < min || value > max) {
2015 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2016 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2017 object_get_typename(obj), name ? name : "null",
2018 value, min, max);
2019 return;
2020 }
2021 cpu->hyperv_spinlock_attempts = value;
2022 }
2023
2024 static const PropertyInfo qdev_prop_spinlocks = {
2025 .name = "int",
2026 .get = x86_get_hv_spinlocks,
2027 .set = x86_set_hv_spinlocks,
2028 };
2029
2030 /* Convert all '_' in a feature string option name to '-', to make the feature
2031  * name conform to the QOM property naming rule, which uses '-' instead of '_'.
2032 */
2033 static inline void feat2prop(char *s)
2034 {
2035 while ((s = strchr(s, '_'))) {
2036 *s = '-';
2037 }
2038 }
2039
2040 /* Return the feature property name for a feature flag bit */
2041 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2042 {
2043 /* XSAVE components are automatically enabled by other features,
2044 * so return the original feature name instead
2045 */
2046 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2047 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2048
2049 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2050 x86_ext_save_areas[comp].bits) {
2051 w = x86_ext_save_areas[comp].feature;
2052 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2053 }
2054 }
2055
2056 assert(bitnr < 32);
2057 assert(w < FEATURE_WORDS);
2058 return feature_word_info[w].feat_names[bitnr];
2059 }
2060
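/*
 * Example (added for illustration, assuming the usual x86_ext_save_areas
 * layout where component 2 is the AVX state enabled by CPUID.1:ECX.AVX):
 * x86_cpu_feature_name(FEAT_XSAVE_COMP_LO, 2) reports "avx" rather than a
 * raw XSAVE component bit, so warnings point at the feature name that
 * users actually configure.
 */
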
2061 /* Compatibility hack to maintain the legacy +feat/-feat semantics,
2062  * where +feat/-feat overwrites any feature set by
2063  * feat=on|off even if the latter is parsed after +feat/-feat
2064  * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2065  */
2066 static GList *plus_features, *minus_features;
2067
2068 static gint compare_string(gconstpointer a, gconstpointer b)
2069 {
2070 return g_strcmp0(a, b);
2071 }
2072
2073 /* Parse "+feature,-feature,feature=foo" CPU feature string
2074 */
2075 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2076 Error **errp)
2077 {
2078 char *featurestr; /* Single "key=value" string being parsed */
2079 static bool cpu_globals_initialized;
2080 bool ambiguous = false;
2081
2082 if (cpu_globals_initialized) {
2083 return;
2084 }
2085 cpu_globals_initialized = true;
2086
2087 if (!features) {
2088 return;
2089 }
2090
2091 for (featurestr = strtok(features, ",");
2092 featurestr;
2093 featurestr = strtok(NULL, ",")) {
2094 const char *name;
2095 const char *val = NULL;
2096 char *eq = NULL;
2097 char num[32];
2098 GlobalProperty *prop;
2099
2100 /* Compatibility syntax: */
2101 if (featurestr[0] == '+') {
2102 plus_features = g_list_append(plus_features,
2103 g_strdup(featurestr + 1));
2104 continue;
2105 } else if (featurestr[0] == '-') {
2106 minus_features = g_list_append(minus_features,
2107 g_strdup(featurestr + 1));
2108 continue;
2109 }
2110
2111 eq = strchr(featurestr, '=');
2112 if (eq) {
2113 *eq++ = 0;
2114 val = eq;
2115 } else {
2116 val = "on";
2117 }
2118
2119 feat2prop(featurestr);
2120 name = featurestr;
2121
2122 if (g_list_find_custom(plus_features, name, compare_string)) {
2123 warn_report("Ambiguous CPU model string. "
2124 "Don't mix both \"+%s\" and \"%s=%s\"",
2125 name, name, val);
2126 ambiguous = true;
2127 }
2128 if (g_list_find_custom(minus_features, name, compare_string)) {
2129 warn_report("Ambiguous CPU model string. "
2130 "Don't mix both \"-%s\" and \"%s=%s\"",
2131 name, name, val);
2132 ambiguous = true;
2133 }
2134
2135 /* Special case: */
2136 if (!strcmp(name, "tsc-freq")) {
2137 int ret;
2138 uint64_t tsc_freq;
2139
2140 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
2141 if (ret < 0 || tsc_freq > INT64_MAX) {
2142 error_setg(errp, "bad numerical value %s", val);
2143 return;
2144 }
2145 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2146 val = num;
2147 name = "tsc-frequency";
2148 }
2149
2150 prop = g_new0(typeof(*prop), 1);
2151 prop->driver = typename;
2152 prop->property = g_strdup(name);
2153 prop->value = g_strdup(val);
2154 prop->errp = &error_fatal;
2155 qdev_prop_register_global(prop);
2156 }
2157
2158 if (ambiguous) {
2159 warn_report("Compatibility of ambiguous CPU model "
2160 "strings won't be kept on future QEMU versions");
2161 }
2162 }
2163
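/*
 * Illustrative example (added in this edit): given
 * "-cpu Haswell,+vmx,pmu=on,tsc_freq=2.5G", the loop above queues "vmx" on
 * plus_features, registers a global property "pmu"="on", and rewrites the
 * legacy "tsc_freq" entry: feat2prop() turns it into "tsc-freq", the
 * special case expands the metric suffix (2.5G -> 2500000000) and registers
 * it as the "tsc-frequency" property.
 */
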
2164 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2165 static int x86_cpu_filter_features(X86CPU *cpu);
2166
2167 /* Check for missing features that may prevent the CPU class from
2168  * running on the current machine and accelerator.
2169  */
2170 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2171 strList **missing_feats)
2172 {
2173 X86CPU *xc;
2174 FeatureWord w;
2175 Error *err = NULL;
2176 strList **next = missing_feats;
2177
2178 if (xcc->kvm_required && !kvm_enabled()) {
2179 strList *new = g_new0(strList, 1);
2180 new->value = g_strdup("kvm");
2181 *missing_feats = new;
2182 return;
2183 }
2184
2185 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2186
2187 x86_cpu_expand_features(xc, &err);
2188 if (err) {
2189 /* Errors at x86_cpu_expand_features should never happen,
2190  * but in case they do, just report the model as not
2191  * runnable at all using the "type" property.
2192  */
2193 strList *new = g_new0(strList, 1);
2194 new->value = g_strdup("type");
2195 *next = new;
2196 next = &new->next;
2197 }
2198
2199 x86_cpu_filter_features(xc);
2200
2201 for (w = 0; w < FEATURE_WORDS; w++) {
2202 uint32_t filtered = xc->filtered_features[w];
2203 int i;
2204 for (i = 0; i < 32; i++) {
2205 if (filtered & (1UL << i)) {
2206 strList *new = g_new0(strList, 1);
2207 new->value = g_strdup(x86_cpu_feature_name(w, i));
2208 *next = new;
2209 next = &new->next;
2210 }
2211 }
2212 }
2213
2214 object_unref(OBJECT(xc));
2215 }
2216
2217 /* Print all cpuid feature names in featureset
2218 */
2219 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2220 {
2221 int bit;
2222 bool first = true;
2223
2224 for (bit = 0; bit < 32; bit++) {
2225 if (featureset[bit]) {
2226 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2227 first = false;
2228 }
2229 }
2230 }
2231
2232 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2233 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2234 {
2235 ObjectClass *class_a = (ObjectClass *)a;
2236 ObjectClass *class_b = (ObjectClass *)b;
2237 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2238 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2239 const char *name_a, *name_b;
2240
2241 if (cc_a->ordering != cc_b->ordering) {
2242 return cc_a->ordering - cc_b->ordering;
2243 } else {
2244 name_a = object_class_get_name(class_a);
2245 name_b = object_class_get_name(class_b);
2246 return strcmp(name_a, name_b);
2247 }
2248 }
2249
2250 static GSList *get_sorted_cpu_model_list(void)
2251 {
2252 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2253 list = g_slist_sort(list, x86_cpu_list_compare);
2254 return list;
2255 }
2256
2257 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2258 {
2259 ObjectClass *oc = data;
2260 X86CPUClass *cc = X86_CPU_CLASS(oc);
2261 CPUListState *s = user_data;
2262 char *name = x86_cpu_class_get_model_name(cc);
2263 const char *desc = cc->model_description;
2264 if (!desc && cc->cpu_def) {
2265 desc = cc->cpu_def->model_id;
2266 }
2267
2268 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2269 name, desc);
2270 g_free(name);
2271 }
2272
2273 /* list available CPU models and flags */
2274 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2275 {
2276 int i;
2277 CPUListState s = {
2278 .file = f,
2279 .cpu_fprintf = cpu_fprintf,
2280 };
2281 GSList *list;
2282
2283 (*cpu_fprintf)(f, "Available CPUs:\n");
2284 list = get_sorted_cpu_model_list();
2285 g_slist_foreach(list, x86_cpu_list_entry, &s);
2286 g_slist_free(list);
2287
2288 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2289 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2290 FeatureWordInfo *fw = &feature_word_info[i];
2291
2292 (*cpu_fprintf)(f, " ");
2293 listflags(f, cpu_fprintf, fw->feat_names);
2294 (*cpu_fprintf)(f, "\n");
2295 }
2296 }
2297
2298 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2299 {
2300 ObjectClass *oc = data;
2301 X86CPUClass *cc = X86_CPU_CLASS(oc);
2302 CpuDefinitionInfoList **cpu_list = user_data;
2303 CpuDefinitionInfoList *entry;
2304 CpuDefinitionInfo *info;
2305
2306 info = g_malloc0(sizeof(*info));
2307 info->name = x86_cpu_class_get_model_name(cc);
2308 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2309 info->has_unavailable_features = true;
2310 info->q_typename = g_strdup(object_class_get_name(oc));
2311 info->migration_safe = cc->migration_safe;
2312 info->has_migration_safe = true;
2313 info->q_static = cc->static_model;
2314
2315 entry = g_malloc0(sizeof(*entry));
2316 entry->value = info;
2317 entry->next = *cpu_list;
2318 *cpu_list = entry;
2319 }
2320
2321 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2322 {
2323 CpuDefinitionInfoList *cpu_list = NULL;
2324 GSList *list = get_sorted_cpu_model_list();
2325 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2326 g_slist_free(list);
2327 return cpu_list;
2328 }
2329
2330 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2331 bool migratable_only)
2332 {
2333 FeatureWordInfo *wi = &feature_word_info[w];
2334 uint32_t r;
2335
2336 if (kvm_enabled()) {
2337 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2338 wi->cpuid_ecx,
2339 wi->cpuid_reg);
2340 } else if (tcg_enabled()) {
2341 r = wi->tcg_features;
2342 } else {
2343 return ~0;
2344 }
2345 if (migratable_only) {
2346 r &= x86_cpu_get_migratable_flags(w);
2347 }
2348 return r;
2349 }
2350
2351 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2352 {
2353 FeatureWord w;
2354
2355 for (w = 0; w < FEATURE_WORDS; w++) {
2356 report_unavailable_features(w, cpu->filtered_features[w]);
2357 }
2358 }
2359
2360 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2361 {
2362 PropValue *pv;
2363 for (pv = props; pv->prop; pv++) {
2364 if (!pv->value) {
2365 continue;
2366 }
2367 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2368 &error_abort);
2369 }
2370 }
2371
2372 /* Load data from X86CPUDefinition into a X86CPU object
2373 */
2374 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2375 {
2376 CPUX86State *env = &cpu->env;
2377 const char *vendor;
2378 char host_vendor[CPUID_VENDOR_SZ + 1];
2379 FeatureWord w;
2380
2381 /* NOTE: any property set by this function should be returned by
2382 * x86_cpu_static_props(), so static expansion of
2383 * query-cpu-model-expansion is always complete.
2384 */
2385
2386 /* CPU models only set _minimum_ values for level/xlevel: */
2387 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
2388 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2389
2390 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2391 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2392 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2393 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2394 for (w = 0; w < FEATURE_WORDS; w++) {
2395 env->features[w] = def->features[w];
2396 }
2397
2398 /* Special cases not set in the X86CPUDefinition structs: */
2399 if (kvm_enabled()) {
2400 if (!kvm_irqchip_in_kernel()) {
2401 x86_cpu_change_kvm_default("x2apic", "off");
2402 }
2403
2404 x86_cpu_apply_props(cpu, kvm_default_props);
2405 } else if (tcg_enabled()) {
2406 x86_cpu_apply_props(cpu, tcg_default_props);
2407 }
2408
2409 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2410
2411 /* sysenter isn't supported in compatibility mode on AMD,
2412 * syscall isn't supported in compatibility mode on Intel.
2413 * Normally we advertise the actual CPU vendor, but you can
2414 * override this using the 'vendor' property if you want to use
2415 * KVM's sysenter/syscall emulation in compatibility mode and
2416  * when doing cross-vendor migration.
2417 */
2418 vendor = def->vendor;
2419 if (kvm_enabled()) {
2420 uint32_t ebx = 0, ecx = 0, edx = 0;
2421 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2422 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2423 vendor = host_vendor;
2424 }
2425
2426 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2427
2428 }
2429
2430 /* Return a QDict containing keys for all properties that can be included
2431 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2432 * must be included in the dictionary.
2433 */
2434 static QDict *x86_cpu_static_props(void)
2435 {
2436 FeatureWord w;
2437 int i;
2438 static const char *props[] = {
2439 "min-level",
2440 "min-xlevel",
2441 "family",
2442 "model",
2443 "stepping",
2444 "model-id",
2445 "vendor",
2446 "lmce",
2447 NULL,
2448 };
2449 static QDict *d;
2450
2451 if (d) {
2452 return d;
2453 }
2454
2455 d = qdict_new();
2456 for (i = 0; props[i]; i++) {
2457 qdict_put(d, props[i], qnull());
2458 }
2459
2460 for (w = 0; w < FEATURE_WORDS; w++) {
2461 FeatureWordInfo *fi = &feature_word_info[w];
2462 int bit;
2463 for (bit = 0; bit < 32; bit++) {
2464 if (!fi->feat_names[bit]) {
2465 continue;
2466 }
2467 qdict_put(d, fi->feat_names[bit], qnull());
2468 }
2469 }
2470
2471 return d;
2472 }
2473
2474 /* Add an entry to @props dict, with the value for property. */
2475 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
2476 {
2477 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
2478 &error_abort);
2479
2480 qdict_put_obj(props, prop, value);
2481 }
2482
2483 /* Convert CPU model data from X86CPU object to a property dictionary
2484 * that can recreate exactly the same CPU model.
2485 */
2486 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
2487 {
2488 QDict *sprops = x86_cpu_static_props();
2489 const QDictEntry *e;
2490
2491 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
2492 const char *prop = qdict_entry_key(e);
2493 x86_cpu_expand_prop(cpu, props, prop);
2494 }
2495 }
2496
2497 /* Convert CPU model data from X86CPU object to a property dictionary
2498 * that can recreate exactly the same CPU model, including every
2499 * writeable QOM property.
2500 */
2501 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
2502 {
2503 ObjectPropertyIterator iter;
2504 ObjectProperty *prop;
2505
2506 object_property_iter_init(&iter, OBJECT(cpu));
2507 while ((prop = object_property_iter_next(&iter))) {
2508 /* skip read-only or write-only properties */
2509 if (!prop->get || !prop->set) {
2510 continue;
2511 }
2512
2513 /* "hotplugged" is the only property that is configurable
2514 * on the command-line but will be set differently on CPUs
2515 * created using "-cpu ... -smp ..." and by CPUs created
2516 * on the fly by x86_cpu_from_model() for querying. Skip it.
2517 */
2518 if (!strcmp(prop->name, "hotplugged")) {
2519 continue;
2520 }
2521 x86_cpu_expand_prop(cpu, props, prop->name);
2522 }
2523 }
2524
2525 static void object_apply_props(Object *obj, QDict *props, Error **errp)
2526 {
2527 const QDictEntry *prop;
2528 Error *err = NULL;
2529
2530 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
2531 object_property_set_qobject(obj, qdict_entry_value(prop),
2532 qdict_entry_key(prop), &err);
2533 if (err) {
2534 break;
2535 }
2536 }
2537
2538 error_propagate(errp, err);
2539 }
2540
2541 /* Create X86CPU object according to model+props specification */
2542 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
2543 {
2544 X86CPU *xc = NULL;
2545 X86CPUClass *xcc;
2546 Error *err = NULL;
2547
2548 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
2549 if (xcc == NULL) {
2550 error_setg(&err, "CPU model '%s' not found", model);
2551 goto out;
2552 }
2553
2554 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2555 if (props) {
2556 object_apply_props(OBJECT(xc), props, &err);
2557 if (err) {
2558 goto out;
2559 }
2560 }
2561
2562 x86_cpu_expand_features(xc, &err);
2563 if (err) {
2564 goto out;
2565 }
2566
2567 out:
2568 if (err) {
2569 error_propagate(errp, err);
2570 object_unref(OBJECT(xc));
2571 xc = NULL;
2572 }
2573 return xc;
2574 }
2575
2576 CpuModelExpansionInfo *
2577 arch_query_cpu_model_expansion(CpuModelExpansionType type,
2578 CpuModelInfo *model,
2579 Error **errp)
2580 {
2581 X86CPU *xc = NULL;
2582 Error *err = NULL;
2583 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
2584 QDict *props = NULL;
2585 const char *base_name;
2586
2587 xc = x86_cpu_from_model(model->name,
2588 model->has_props ?
2589 qobject_to_qdict(model->props) :
2590 NULL, &err);
2591 if (err) {
2592 goto out;
2593 }
2594
2595 props = qdict_new();
2596
2597 switch (type) {
2598 case CPU_MODEL_EXPANSION_TYPE_STATIC:
2599 /* Static expansion will be based on "base" only */
2600 base_name = "base";
2601 x86_cpu_to_dict(xc, props);
2602 break;
2603 case CPU_MODEL_EXPANSION_TYPE_FULL:
2604 /* As we don't return every single property, full expansion needs
2605 * to keep the original model name+props, and add extra
2606 * properties on top of that.
2607 */
2608 base_name = model->name;
2609 x86_cpu_to_dict_full(xc, props);
2610 break;
2611 default:
2612 error_setg(&err, "Unsupported expansion type");
2613 goto out;
2614 }
2615
2616 if (!props) {
2617 props = qdict_new();
2618 }
2619 x86_cpu_to_dict(xc, props);
2620
2621 ret->model = g_new0(CpuModelInfo, 1);
2622 ret->model->name = g_strdup(base_name);
2623 ret->model->props = QOBJECT(props);
2624 ret->model->has_props = true;
2625
2626 out:
2627 object_unref(OBJECT(xc));
2628 if (err) {
2629 error_propagate(errp, err);
2630 qapi_free_CpuModelExpansionInfo(ret);
2631 ret = NULL;
2632 }
2633 return ret;
2634 }
2635
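/*
 * Illustrative QMP exchange (abridged, added for this edit): a static
 * expansion request such as
 *
 *   { "execute": "query-cpu-model-expansion",
 *     "arguments": { "type": "static",
 *                    "model": { "name": "Haswell" } } }
 *
 * comes back with the model renamed to "base" plus the property dictionary
 * built by x86_cpu_to_dict(), e.g. "family": 6, "model": 60, "avx2": true, ...
 */
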
2636 static gchar *x86_gdb_arch_name(CPUState *cs)
2637 {
2638 #ifdef TARGET_X86_64
2639 return g_strdup("i386:x86-64");
2640 #else
2641 return g_strdup("i386");
2642 #endif
2643 }
2644
2645 X86CPU *cpu_x86_init(const char *cpu_model)
2646 {
2647 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2648 }
2649
2650 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2651 {
2652 X86CPUDefinition *cpudef = data;
2653 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2654
2655 xcc->cpu_def = cpudef;
2656 xcc->migration_safe = true;
2657 }
2658
2659 static void x86_register_cpudef_type(X86CPUDefinition *def)
2660 {
2661 char *typename = x86_cpu_type_name(def->name);
2662 TypeInfo ti = {
2663 .name = typename,
2664 .parent = TYPE_X86_CPU,
2665 .class_init = x86_cpu_cpudef_class_init,
2666 .class_data = def,
2667 };
2668
2669 /* AMD aliases are handled at runtime based on CPUID vendor, so
2670  * they shouldn't be set in the CPU model table.
2671 */
2672 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
2673
2674 type_register(&ti);
2675 g_free(typename);
2676 }
2677
2678 #if !defined(CONFIG_USER_ONLY)
2679
2680 void cpu_clear_apic_feature(CPUX86State *env)
2681 {
2682 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2683 }
2684
2685 #endif /* !CONFIG_USER_ONLY */
2686
2687 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2688 uint32_t *eax, uint32_t *ebx,
2689 uint32_t *ecx, uint32_t *edx)
2690 {
2691 X86CPU *cpu = x86_env_get_cpu(env);
2692 CPUState *cs = CPU(cpu);
2693 uint32_t pkg_offset;
2694 uint32_t limit;
2695 uint32_t signature[3];
2696
2697 /* Calculate & apply limits for different index ranges */
2698 if (index >= 0xC0000000) {
2699 limit = env->cpuid_xlevel2;
2700 } else if (index >= 0x80000000) {
2701 limit = env->cpuid_xlevel;
2702 } else if (index >= 0x40000000) {
2703 limit = 0x40000001;
2704 } else {
2705 limit = env->cpuid_level;
2706 }
2707
2708 if (index > limit) {
2709 /* Intel documentation states that invalid EAX input will
2710 * return the same information as EAX=cpuid_level
2711 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2712 */
2713 index = env->cpuid_level;
2714 }
2715
2716 switch(index) {
2717 case 0:
2718 *eax = env->cpuid_level;
2719 *ebx = env->cpuid_vendor1;
2720 *edx = env->cpuid_vendor2;
2721 *ecx = env->cpuid_vendor3;
2722 break;
2723 case 1:
2724 *eax = env->cpuid_version;
2725 *ebx = (cpu->apic_id << 24) |
2726 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2727 *ecx = env->features[FEAT_1_ECX];
2728 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2729 *ecx |= CPUID_EXT_OSXSAVE;
2730 }
2731 *edx = env->features[FEAT_1_EDX];
2732 if (cs->nr_cores * cs->nr_threads > 1) {
2733 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2734 *edx |= CPUID_HT;
2735 }
2736 break;
2737 case 2:
2738 /* cache info: needed for Pentium Pro compatibility */
2739 if (cpu->cache_info_passthrough) {
2740 host_cpuid(index, 0, eax, ebx, ecx, edx);
2741 break;
2742 }
2743 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2744 *ebx = 0;
2745 if (!cpu->enable_l3_cache) {
2746 *ecx = 0;
2747 } else {
2748 *ecx = L3_N_DESCRIPTOR;
2749 }
2750 *edx = (L1D_DESCRIPTOR << 16) | \
2751 (L1I_DESCRIPTOR << 8) | \
2752 (L2_DESCRIPTOR);
2753 break;
2754 case 4:
2755 /* cache info: needed for Core compatibility */
2756 if (cpu->cache_info_passthrough) {
2757 host_cpuid(index, count, eax, ebx, ecx, edx);
2758 *eax &= ~0xFC000000;
2759 } else {
2760 *eax = 0;
2761 switch (count) {
2762 case 0: /* L1 dcache info */
2763 *eax |= CPUID_4_TYPE_DCACHE | \
2764 CPUID_4_LEVEL(1) | \
2765 CPUID_4_SELF_INIT_LEVEL;
2766 *ebx = (L1D_LINE_SIZE - 1) | \
2767 ((L1D_PARTITIONS - 1) << 12) | \
2768 ((L1D_ASSOCIATIVITY - 1) << 22);
2769 *ecx = L1D_SETS - 1;
2770 *edx = CPUID_4_NO_INVD_SHARING;
2771 break;
2772 case 1: /* L1 icache info */
2773 *eax |= CPUID_4_TYPE_ICACHE | \
2774 CPUID_4_LEVEL(1) | \
2775 CPUID_4_SELF_INIT_LEVEL;
2776 *ebx = (L1I_LINE_SIZE - 1) | \
2777 ((L1I_PARTITIONS - 1) << 12) | \
2778 ((L1I_ASSOCIATIVITY - 1) << 22);
2779 *ecx = L1I_SETS - 1;
2780 *edx = CPUID_4_NO_INVD_SHARING;
2781 break;
2782 case 2: /* L2 cache info */
2783 *eax |= CPUID_4_TYPE_UNIFIED | \
2784 CPUID_4_LEVEL(2) | \
2785 CPUID_4_SELF_INIT_LEVEL;
2786 if (cs->nr_threads > 1) {
2787 *eax |= (cs->nr_threads - 1) << 14;
2788 }
2789 *ebx = (L2_LINE_SIZE - 1) | \
2790 ((L2_PARTITIONS - 1) << 12) | \
2791 ((L2_ASSOCIATIVITY - 1) << 22);
2792 *ecx = L2_SETS - 1;
2793 *edx = CPUID_4_NO_INVD_SHARING;
2794 break;
2795 case 3: /* L3 cache info */
2796 if (!cpu->enable_l3_cache) {
2797 *eax = 0;
2798 *ebx = 0;
2799 *ecx = 0;
2800 *edx = 0;
2801 break;
2802 }
2803 *eax |= CPUID_4_TYPE_UNIFIED | \
2804 CPUID_4_LEVEL(3) | \
2805 CPUID_4_SELF_INIT_LEVEL;
2806 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2807 *eax |= ((1 << pkg_offset) - 1) << 14;
2808 *ebx = (L3_N_LINE_SIZE - 1) | \
2809 ((L3_N_PARTITIONS - 1) << 12) | \
2810 ((L3_N_ASSOCIATIVITY - 1) << 22);
2811 *ecx = L3_N_SETS - 1;
2812 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
2813 break;
2814 default: /* end of info */
2815 *eax = 0;
2816 *ebx = 0;
2817 *ecx = 0;
2818 *edx = 0;
2819 break;
2820 }
2821 }
2822
2823 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2824 if ((*eax & 31) && cs->nr_cores > 1) {
2825 *eax |= (cs->nr_cores - 1) << 26;
2826 }
2827 break;
2828 case 5:
2829 /* mwait info: needed for Core compatibility */
2830 *eax = 0; /* Smallest monitor-line size in bytes */
2831 *ebx = 0; /* Largest monitor-line size in bytes */
2832 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2833 *edx = 0;
2834 break;
2835 case 6:
2836 /* Thermal and Power Leaf */
2837 *eax = env->features[FEAT_6_EAX];
2838 *ebx = 0;
2839 *ecx = 0;
2840 *edx = 0;
2841 break;
2842 case 7:
2843 /* Structured Extended Feature Flags Enumeration Leaf */
2844 if (count == 0) {
2845 *eax = 0; /* Maximum ECX value for sub-leaves */
2846 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2847 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2848 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2849 *ecx |= CPUID_7_0_ECX_OSPKE;
2850 }
2851 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
2852 } else {
2853 *eax = 0;
2854 *ebx = 0;
2855 *ecx = 0;
2856 *edx = 0;
2857 }
2858 break;
2859 case 9:
2860 /* Direct Cache Access Information Leaf */
2861 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2862 *ebx = 0;
2863 *ecx = 0;
2864 *edx = 0;
2865 break;
2866 case 0xA:
2867 /* Architectural Performance Monitoring Leaf */
2868 if (kvm_enabled() && cpu->enable_pmu) {
2869 KVMState *s = cs->kvm_state;
2870
2871 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2872 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2873 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2874 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2875 } else {
2876 *eax = 0;
2877 *ebx = 0;
2878 *ecx = 0;
2879 *edx = 0;
2880 }
2881 break;
2882 case 0xB:
2883 /* Extended Topology Enumeration Leaf */
2884 if (!cpu->enable_cpuid_0xb) {
2885 *eax = *ebx = *ecx = *edx = 0;
2886 break;
2887 }
2888
2889 *ecx = count & 0xff;
2890 *edx = cpu->apic_id;
2891
2892 switch (count) {
2893 case 0:
2894 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
2895 *ebx = cs->nr_threads;
2896 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2897 break;
2898 case 1:
2899 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2900 *ebx = cs->nr_cores * cs->nr_threads;
2901 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2902 break;
2903 default:
2904 *eax = 0;
2905 *ebx = 0;
2906 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2907 }
2908
2909 assert(!(*eax & ~0x1f));
2910 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2911 break;
2912 case 0xD: {
2913 /* Processor Extended State */
2914 *eax = 0;
2915 *ebx = 0;
2916 *ecx = 0;
2917 *edx = 0;
2918 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2919 break;
2920 }
2921
2922 if (count == 0) {
2923 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
2924 *eax = env->features[FEAT_XSAVE_COMP_LO];
2925 *edx = env->features[FEAT_XSAVE_COMP_HI];
2926 *ebx = *ecx;
2927 } else if (count == 1) {
2928 *eax = env->features[FEAT_XSAVE];
2929 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2930 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
2931 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2932 *eax = esa->size;
2933 *ebx = esa->offset;
2934 }
2935 }
2936 break;
2937 }
2938 case 0x40000000:
2939 /*
2940  * CPUID code in kvm_arch_init_vcpu() ignores anything
2941  * set here, but we restrict this leaf to TCG nonetheless.
2942  */
2943 if (tcg_enabled() && cpu->expose_tcg) {
2944 memcpy(signature, "TCGTCGTCGTCG", 12);
2945 *eax = 0x40000001;
2946 *ebx = signature[0];
2947 *ecx = signature[1];
2948 *edx = signature[2];
2949 } else {
2950 *eax = 0;
2951 *ebx = 0;
2952 *ecx = 0;
2953 *edx = 0;
2954 }
2955 break;
2956 case 0x40000001:
2957 *eax = 0;
2958 *ebx = 0;
2959 *ecx = 0;
2960 *edx = 0;
2961 break;
2962 case 0x80000000:
2963 *eax = env->cpuid_xlevel;
2964 *ebx = env->cpuid_vendor1;
2965 *edx = env->cpuid_vendor2;
2966 *ecx = env->cpuid_vendor3;
2967 break;
2968 case 0x80000001:
2969 *eax = env->cpuid_version;
2970 *ebx = 0;
2971 *ecx = env->features[FEAT_8000_0001_ECX];
2972 *edx = env->features[FEAT_8000_0001_EDX];
2973
2974 /* The Linux kernel checks for the CMPLegacy bit and
2975 * discards multiple thread information if it is set.
2976 * So don't set it here for Intel to make Linux guests happy.
2977 */
2978 if (cs->nr_cores * cs->nr_threads > 1) {
2979 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2980 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2981 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2982 *ecx |= 1 << 1; /* CmpLegacy bit */
2983 }
2984 }
2985 break;
2986 case 0x80000002:
2987 case 0x80000003:
2988 case 0x80000004:
2989 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2990 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2991 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2992 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2993 break;
2994 case 0x80000005:
2995 /* cache info (L1 cache) */
2996 if (cpu->cache_info_passthrough) {
2997 host_cpuid(index, 0, eax, ebx, ecx, edx);
2998 break;
2999 }
3000 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3001 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3002 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3003 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3004 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
3005 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
3006 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
3007 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
3008 break;
3009 case 0x80000006:
3010 /* cache info (L2 cache) */
3011 if (cpu->cache_info_passthrough) {
3012 host_cpuid(index, 0, eax, ebx, ecx, edx);
3013 break;
3014 }
3015 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3016 (L2_DTLB_2M_ENTRIES << 16) | \
3017 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3018 (L2_ITLB_2M_ENTRIES);
3019 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3020 (L2_DTLB_4K_ENTRIES << 16) | \
3021 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3022 (L2_ITLB_4K_ENTRIES);
3023 *ecx = (L2_SIZE_KB_AMD << 16) | \
3024 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
3025 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
3026 if (!cpu->enable_l3_cache) {
3027 *edx = ((L3_SIZE_KB / 512) << 18) | \
3028 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
3029 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
3030 } else {
3031 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
3032 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
3033 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
3034 }
3035 break;
3036 case 0x80000007:
3037 *eax = 0;
3038 *ebx = 0;
3039 *ecx = 0;
3040 *edx = env->features[FEAT_8000_0007_EDX];
3041 break;
3042 case 0x80000008:
3043 /* virtual & phys address size in low 2 bytes. */
3044 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3045 /* 64 bit processor */
3046 *eax = cpu->phys_bits; /* configurable physical bits */
3047 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3048 *eax |= 0x00003900; /* 57 bits virtual */
3049 } else {
3050 *eax |= 0x00003000; /* 48 bits virtual */
3051 }
3052 } else {
3053 *eax = cpu->phys_bits;
3054 }
3055 *ebx = 0;
3056 *ecx = 0;
3057 *edx = 0;
3058 if (cs->nr_cores * cs->nr_threads > 1) {
3059 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
3060 }
3061 break;
3062 case 0x8000000A:
3063 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3064 *eax = 0x00000001; /* SVM Revision */
3065 *ebx = 0x00000010; /* nr of ASIDs */
3066 *ecx = 0;
3067 *edx = env->features[FEAT_SVM]; /* optional features */
3068 } else {
3069 *eax = 0;
3070 *ebx = 0;
3071 *ecx = 0;
3072 *edx = 0;
3073 }
3074 break;
3075 case 0xC0000000:
3076 *eax = env->cpuid_xlevel2;
3077 *ebx = 0;
3078 *ecx = 0;
3079 *edx = 0;
3080 break;
3081 case 0xC0000001:
3082 /* Support for VIA CPU's CPUID instruction */
3083 *eax = env->cpuid_version;
3084 *ebx = 0;
3085 *ecx = 0;
3086 *edx = env->features[FEAT_C000_0001_EDX];
3087 break;
3088 case 0xC0000002:
3089 case 0xC0000003:
3090 case 0xC0000004:
3091 /* Reserved for future use; currently filled with zeros */
3092 *eax = 0;
3093 *ebx = 0;
3094 *ecx = 0;
3095 *edx = 0;
3096 break;
3097 default:
3098 /* reserved values: zero */
3099 *eax = 0;
3100 *ebx = 0;
3101 *ecx = 0;
3102 *edx = 0;
3103 break;
3104 }
3105 }
3106
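/*
 * Worked example (added for illustration): for leaf 1 on a guest with
 * apic_id 0 and nr_cores * nr_threads = 4, the code above reports
 * EBX = (0 << 24) | (4 << 16) | (8 << 8) = 0x00040800, i.e. initial APIC ID
 * 0, 4 logical processors per package, and a CLFLUSH line size of 8
 * quadwords (64 bytes), with CPUID_HT also set in EDX.
 */
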
3107 /* CPUClass::reset() */
3108 static void x86_cpu_reset(CPUState *s)
3109 {
3110 X86CPU *cpu = X86_CPU(s);
3111 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
3112 CPUX86State *env = &cpu->env;
3113 target_ulong cr4;
3114 uint64_t xcr0;
3115 int i;
3116
3117 xcc->parent_reset(s);
3118
3119 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
3120
3121 env->old_exception = -1;
3122
3123 /* init to reset state */
3124
3125 env->hflags2 |= HF2_GIF_MASK;
3126
3127 cpu_x86_update_cr0(env, 0x60000010);
3128 env->a20_mask = ~0x0;
3129 env->smbase = 0x30000;
3130
3131 env->idt.limit = 0xffff;
3132 env->gdt.limit = 0xffff;
3133 env->ldt.limit = 0xffff;
3134 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
3135 env->tr.limit = 0xffff;
3136 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
3137
3138 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
3139 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
3140 DESC_R_MASK | DESC_A_MASK);
3141 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
3142 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3143 DESC_A_MASK);
3144 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
3145 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3146 DESC_A_MASK);
3147 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
3148 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3149 DESC_A_MASK);
3150 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
3151 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3152 DESC_A_MASK);
3153 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
3154 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3155 DESC_A_MASK);
3156
3157 env->eip = 0xfff0;
3158 env->regs[R_EDX] = env->cpuid_version;
3159
3160 env->eflags = 0x2;
3161
3162 /* FPU init */
3163 for (i = 0; i < 8; i++) {
3164 env->fptags[i] = 1;
3165 }
3166 cpu_set_fpuc(env, 0x37f);
3167
3168 env->mxcsr = 0x1f80;
3169 /* All units are in INIT state. */
3170 env->xstate_bv = 0;
3171
3172 env->pat = 0x0007040600070406ULL;
3173 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
3174
3175 memset(env->dr, 0, sizeof(env->dr));
3176 env->dr[6] = DR6_FIXED_1;
3177 env->dr[7] = DR7_FIXED_1;
3178 cpu_breakpoint_remove_all(s, BP_CPU);
3179 cpu_watchpoint_remove_all(s, BP_CPU);
3180
3181 cr4 = 0;
3182 xcr0 = XSTATE_FP_MASK;
3183
3184 #ifdef CONFIG_USER_ONLY
3185 /* Enable all the features for user-mode. */
3186 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
3187 xcr0 |= XSTATE_SSE_MASK;
3188 }
3189 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3190 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3191 if (env->features[esa->feature] & esa->bits) {
3192 xcr0 |= 1ull << i;
3193 }
3194 }
3195
3196 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
3197 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
3198 }
3199 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
3200 cr4 |= CR4_FSGSBASE_MASK;
3201 }
3202 #endif
3203
3204 env->xcr0 = xcr0;
3205 cpu_x86_update_cr4(env, cr4);
3206
3207 /*
3208 * SDM 11.11.5 requires:
3209 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3210 * - IA32_MTRR_PHYSMASKn.V = 0
3211 * All other bits are undefined. For simplification, zero it all.
3212 */
3213 env->mtrr_deftype = 0;
3214 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
3215 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
3216
3217 #if !defined(CONFIG_USER_ONLY)
3218 /* We hard-wire the BSP to the first CPU. */
3219 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
3220
3221 s->halted = !cpu_is_bsp(cpu);
3222
3223 if (kvm_enabled()) {
3224 kvm_arch_reset_vcpu(cpu);
3225 }
3226 #endif
3227 }
3228
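/*
 * Note added for illustration: the reset state above places CS at selector
 * 0xf000 with base 0xffff0000 and EIP at 0xfff0, so the first instruction
 * is fetched from physical address 0xffff0000 + 0xfff0 = 0xfffffff0, the
 * architectural x86 reset vector just below 4 GiB.
 */
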
3229 #ifndef CONFIG_USER_ONLY
3230 bool cpu_is_bsp(X86CPU *cpu)
3231 {
3232 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3233 }
3234
3235 /* TODO: remove me when reset over QOM tree is implemented */
3236 static void x86_cpu_machine_reset_cb(void *opaque)
3237 {
3238 X86CPU *cpu = opaque;
3239 cpu_reset(CPU(cpu));
3240 }
3241 #endif
3242
3243 static void mce_init(X86CPU *cpu)
3244 {
3245 CPUX86State *cenv = &cpu->env;
3246 unsigned int bank;
3247
3248 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3249 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3250 (CPUID_MCE | CPUID_MCA)) {
3251 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3252 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3253 cenv->mcg_ctl = ~(uint64_t)0;
3254 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3255 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3256 }
3257 }
3258 }
3259
3260 #ifndef CONFIG_USER_ONLY
3261 APICCommonClass *apic_get_class(void)
3262 {
3263 const char *apic_type = "apic";
3264
3265 if (kvm_apic_in_kernel()) {
3266 apic_type = "kvm-apic";
3267 } else if (xen_enabled()) {
3268 apic_type = "xen-apic";
3269 }
3270
3271 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3272 }
3273
3274 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
3275 {
3276 APICCommonState *apic;
3277 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
3278
3279 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
3280
3281 object_property_add_child(OBJECT(cpu), "lapic",
3282 OBJECT(cpu->apic_state), &error_abort);
3283 object_unref(OBJECT(cpu->apic_state));
3284
3285 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
3286 /* TODO: convert to link<> */
3287 apic = APIC_COMMON(cpu->apic_state);
3288 apic->cpu = cpu;
3289 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
3290 }
3291
3292 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3293 {
3294 APICCommonState *apic;
3295 static bool apic_mmio_map_once;
3296
3297 if (cpu->apic_state == NULL) {
3298 return;
3299 }
3300 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
3301 errp);
3302
3303 /* Map APIC MMIO area */
3304 apic = APIC_COMMON(cpu->apic_state);
3305 if (!apic_mmio_map_once) {
3306 memory_region_add_subregion_overlap(get_system_memory(),
3307 apic->apicbase &
3308 MSR_IA32_APICBASE_BASE,
3309 &apic->io_memory,
3310 0x1000);
3311 apic_mmio_map_once = true;
3312 }
3313 }
3314
3315 static void x86_cpu_machine_done(Notifier *n, void *unused)
3316 {
3317 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3318 MemoryRegion *smram =
3319 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3320
3321 if (smram) {
3322 cpu->smram = g_new(MemoryRegion, 1);
3323 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3324 smram, 0, 1ull << 32);
3325 memory_region_set_enabled(cpu->smram, true);
3326 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3327 }
3328 }
3329 #else
3330 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3331 {
3332 }
3333 #endif
3334
3335 /* Note: Only safe for use on x86(-64) hosts */
3336 static uint32_t x86_host_phys_bits(void)
3337 {
3338 uint32_t eax;
3339 uint32_t host_phys_bits;
3340
3341 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3342 if (eax >= 0x80000008) {
3343 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3344 /* Note: According to AMD doc 25481 rev 2.34 there is a field
3345  * at bits 23:16 that can specify the maximum physical address bits
3346  * for the guest, overriding this value; but I've not seen
3347  * anything with that field set.
3348  */
3349 host_phys_bits = eax & 0xff;
3350 } else {
3351 /* It's an odd 64-bit machine that doesn't have the leaf for
3352  * physical address bits; fall back to 36, which matches most
3353  * older Intel CPUs.
3354  */
3355 host_phys_bits = 36;
3356 }
3357
3358 return host_phys_bits;
3359 }
3360
3361 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3362 {
3363 if (*min < value) {
3364 *min = value;
3365 }
3366 }
3367
3368 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3369 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3370 {
3371 CPUX86State *env = &cpu->env;
3372 FeatureWordInfo *fi = &feature_word_info[w];
3373 uint32_t eax = fi->cpuid_eax;
3374 uint32_t region = eax & 0xF0000000;
3375
3376 if (!env->features[w]) {
3377 return;
3378 }
3379
3380 switch (region) {
3381 case 0x00000000:
3382 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3383 break;
3384 case 0x80000000:
3385 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3386 break;
3387 case 0xC0000000:
3388 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3389 break;
3390 }
3391 }
3392
3393 /* Calculate XSAVE components based on the configured CPU feature flags */
3394 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3395 {
3396 CPUX86State *env = &cpu->env;
3397 int i;
3398 uint64_t mask;
3399
3400 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3401 return;
3402 }
3403
3404 mask = 0;
3405 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3406 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3407 if (env->features[esa->feature] & esa->bits) {
3408 mask |= (1ULL << i);
3409 }
3410 }
3411
3412 env->features[FEAT_XSAVE_COMP_LO] = mask;
3413 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3414 }
3415
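/*
 * Worked example (added for illustration, assuming the standard XSAVE
 * component numbering where x87 is bit 0, SSE bit 1 and AVX bit 2): a CPU
 * model exposing x87, SSE and AVX ends up with mask = 0x7, so
 * FEAT_XSAVE_COMP_LO = 0x7 and FEAT_XSAVE_COMP_HI = 0, which is what
 * CPUID[0xD].EAX/EDX report in cpu_x86_cpuid() above.
 */
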
3416 /***** Steps involved in loading and filtering CPUID data
3417 *
3418 * When initializing and realizing a CPU object, the steps
3419 * involved in setting up CPUID data are:
3420 *
3421 * 1) Loading CPU model definition (X86CPUDefinition). This is
3422 * implemented by x86_cpu_load_def() and should be completely
3423 * transparent, as it is done automatically by instance_init.
3424 * No code should need to look at X86CPUDefinition structs
3425 * outside instance_init.
3426 *
3427 * 2) CPU expansion. This is done by realize before CPUID
3428 * filtering, and will make sure host/accelerator data is
3429 * loaded for CPU models that depend on host capabilities
3430 * (e.g. "host"). Done by x86_cpu_expand_features().
3431 *
3432 * 3) CPUID filtering. This initializes extra data related to
3433 * CPUID, and checks if the host supports all capabilities
3434 * required by the CPU. Runnability of a CPU model is
3435 * determined at this step. Done by x86_cpu_filter_features().
3436 *
3437 * Some operations don't require all steps to be performed.
3438 * More precisely:
3439 *
3440 * - CPU instance creation (instance_init) will run only CPU
3441 * model loading. CPU expansion can't run at instance_init-time
3442  *   because host/accelerator data may not be available yet.
3443 * - CPU realization will perform both CPU model expansion and CPUID
3444 * filtering, and return an error in case one of them fails.
3445 * - query-cpu-definitions needs to run all 3 steps. It needs
3446 * to run CPUID filtering, as the 'unavailable-features'
3447 * field is set based on the filtering results.
3448 * - The query-cpu-model-expansion QMP command only needs to run
3449 * CPU model loading and CPU expansion. It should not filter
3450 * any CPUID data based on host capabilities.
3451 */
3452
3453 /* Expand CPU configuration data, based on configured features
3454 * and host/accelerator capabilities when appropriate.
3455 */
3456 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
3457 {
3458 CPUX86State *env = &cpu->env;
3459 FeatureWord w;
3460 GList *l;
3461 Error *local_err = NULL;
3462
3463 /* TODO: Now that cpu->max_features doesn't overwrite features
3464  * set using QOM properties, we can convert
3465  * plus_features & minus_features to global properties
3466  * inside x86_cpu_parse_featurestr() too.
3467  */
3468 if (cpu->max_features) {
3469 for (w = 0; w < FEATURE_WORDS; w++) {
3470 /* Override only features that weren't set explicitly
3471 * by the user.
3472 */
3473 env->features[w] |=
3474 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
3475 ~env->user_features[w];
3476 }
3477 }
3478
3479 for (l = plus_features; l; l = l->next) {
3480 const char *prop = l->data;
3481 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
3482 if (local_err) {
3483 goto out;
3484 }
3485 }
3486
3487 for (l = minus_features; l; l = l->next) {
3488 const char *prop = l->data;
3489 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
3490 if (local_err) {
3491 goto out;
3492 }
3493 }
3494
3495 if (!kvm_enabled() || !cpu->expose_kvm) {
3496 env->features[FEAT_KVM] = 0;
3497 }
3498
3499 x86_cpu_enable_xsave_components(cpu);
3500
3501 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
3502 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
3503 if (cpu->full_cpuid_auto_level) {
3504 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
3505 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
3506 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
3507 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
3508 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
3509 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
3510 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
3511 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
3512 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
3513 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
3514 /* SVM requires CPUID[0x8000000A] */
3515 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3516 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
3517 }
3518 }
3519
3520 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
3521 if (env->cpuid_level == UINT32_MAX) {
3522 env->cpuid_level = env->cpuid_min_level;
3523 }
3524 if (env->cpuid_xlevel == UINT32_MAX) {
3525 env->cpuid_xlevel = env->cpuid_min_xlevel;
3526 }
3527 if (env->cpuid_xlevel2 == UINT32_MAX) {
3528 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
3529 }
3530
3531 out:
3532 if (local_err != NULL) {
3533 error_propagate(errp, local_err);
3534 }
3535 }
3536
3537 /*
3538 * Finishes initialization of CPUID data, filters CPU feature
3539 * words based on host availability of each feature.
3540 *
3541 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
3542 */
3543 static int x86_cpu_filter_features(X86CPU *cpu)
3544 {
3545 CPUX86State *env = &cpu->env;
3546 FeatureWord w;
3547 int rv = 0;
3548
3549 for (w = 0; w < FEATURE_WORDS; w++) {
3550 uint32_t host_feat =
3551 x86_cpu_get_supported_feature_word(w, false);
3552 uint32_t requested_features = env->features[w];
3553 env->features[w] &= host_feat;
3554 cpu->filtered_features[w] = requested_features & ~env->features[w];
3555 if (cpu->filtered_features[w]) {
3556 rv = 1;
3557 }
3558 }
3559
3560 return rv;
3561 }
3562
3563 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
3564 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
3565 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
3566 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
3567 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
3568 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
3569 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
3570 {
3571 CPUState *cs = CPU(dev);
3572 X86CPU *cpu = X86_CPU(dev);
3573 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3574 CPUX86State *env = &cpu->env;
3575 Error *local_err = NULL;
3576 static bool ht_warned;
3577
3578 if (xcc->kvm_required && !kvm_enabled()) {
3579 char *name = x86_cpu_class_get_model_name(xcc);
3580 error_setg(&local_err, "CPU model '%s' requires KVM", name);
3581 g_free(name);
3582 goto out;
3583 }
3584
3585 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
3586 error_setg(errp, "apic-id property was not initialized properly");
3587 return;
3588 }
3589
3590 x86_cpu_expand_features(cpu, &local_err);
3591 if (local_err) {
3592 goto out;
3593 }
3594
3595 if (x86_cpu_filter_features(cpu) &&
3596 (cpu->check_cpuid || cpu->enforce_cpuid)) {
3597 x86_cpu_report_filtered_features(cpu);
3598 if (cpu->enforce_cpuid) {
3599 error_setg(&local_err,
3600 kvm_enabled() ?
3601 "Host doesn't support requested features" :
3602 "TCG doesn't support requested features");
3603 goto out;
3604 }
3605 }
3606
3607 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
3608 * CPUID[1].EDX.
3609 */
3610 if (IS_AMD_CPU(env)) {
3611 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
3612 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
3613 & CPUID_EXT2_AMD_ALIASES);
3614 }
3615
3616 /* For 64-bit systems, think about the number of physical bits to present.
3617  * Ideally this should be the same as the host; anything other than matching
3618  * the host can cause incorrect guest behaviour.
3619  * QEMU used to pick the magic value of 40 bits, which corresponds to
3620  * consumer AMD devices but nothing else.
3621  */
3622 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3623 if (kvm_enabled()) {
3624 uint32_t host_phys_bits = x86_host_phys_bits();
3625 static bool warned;
3626
3627 if (cpu->host_phys_bits) {
3628 /* The user asked for us to use the host physical bits */
3629 cpu->phys_bits = host_phys_bits;
3630 }
3631
3632 /* Print a warning if the user set it to a value that's not the
3633 * host value.
3634 */
3635 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
3636 !warned) {
3637 warn_report("Host physical bits (%u)"
3638 " does not match phys-bits property (%u)",
3639 host_phys_bits, cpu->phys_bits);
3640 warned = true;
3641 }
3642
3643 if (cpu->phys_bits &&
3644 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3645 cpu->phys_bits < 32)) {
3646 error_setg(errp, "phys-bits should be between 32 and %u "
3647 " (but is %u)",
3648 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
3649 return;
3650 }
3651 } else {
3652 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3653 error_setg(errp, "TCG only supports phys-bits=%u",
3654 TCG_PHYS_ADDR_BITS);
3655 return;
3656 }
3657 }
3658 /* 0 means it was not explicitly set by the user (or by machine
3659 * compat_props or by the host code above). In this case, the default
3660 * is the value used by TCG (40).
3661 */
3662 if (cpu->phys_bits == 0) {
3663 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3664 }
3665 } else {
3666 /* For 32-bit systems, don't use a user-set value, but keep
3667 * phys_bits consistent with what we tell the guest.
3668 */
3669 if (cpu->phys_bits != 0) {
3670 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
3671 return;
3672 }
3673
3674 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3675 cpu->phys_bits = 36;
3676 } else {
3677 cpu->phys_bits = 32;
3678 }
3679 }
3680 cpu_exec_realizefn(cs, &local_err);
3681 if (local_err != NULL) {
3682 error_propagate(errp, local_err);
3683 return;
3684 }
3685
3686 if (tcg_enabled()) {
3687 tcg_x86_init();
3688 }
3689
3690 #ifndef CONFIG_USER_ONLY
3691 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3692
3693 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3694 x86_cpu_apic_create(cpu, &local_err);
3695 if (local_err != NULL) {
3696 goto out;
3697 }
3698 }
3699 #endif
3700
3701 mce_init(cpu);
3702
3703 #ifndef CONFIG_USER_ONLY
3704 if (tcg_enabled()) {
3705 AddressSpace *as_normal = address_space_init_shareable(cs->memory,
3706 "cpu-memory");
3707 AddressSpace *as_smm = g_new(AddressSpace, 1);
3708
3709 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3710 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3711
3712 /* Outer container... */
3713 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3714 memory_region_set_enabled(cpu->cpu_as_root, true);
3715
3716 /* ... with two regions inside: normal system memory with low
3717 * priority, and...
3718 */
3719 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3720 get_system_memory(), 0, ~0ull);
3721 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3722 memory_region_set_enabled(cpu->cpu_as_mem, true);
3723 address_space_init(as_smm, cpu->cpu_as_root, "CPU");
3724
3725 cs->num_ases = 2;
3726 cpu_address_space_init(cs, as_normal, 0);
3727 cpu_address_space_init(cs, as_smm, 1);
3728
3729 /* ... SMRAM with higher priority; it is mapped in later, from
3730 * x86_cpu_machine_done(), once /machine/smram exists. */
3730 cpu->machine_done.notify = x86_cpu_machine_done;
3731 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3732 }
3733 #endif
3734
3735 qemu_init_vcpu(cs);
3736
3737 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3738 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3739 * based on inputs (sockets, cores, threads), it is still better to give
3740 * users a warning.
3741 *
3742 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3743 * cs->nr_threads hasn't been populated yet and the check is incorrect.
3744 */
3745 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3746 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3747 " -smp options properly.");
3748 ht_warned = true;
3749 }
3750
3751 x86_cpu_apic_realize(cpu, &local_err);
3752 if (local_err != NULL) {
3753 goto out;
3754 }
3755 cpu_reset(cs);
3756
3757 xcc->parent_realize(dev, &local_err);
3758
3759 out:
3760 if (local_err != NULL) {
3761 error_propagate(errp, local_err);
3762 return;
3763 }
3764 }
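/*
 * Illustrative sketch (not part of QEMU): the phys-bits policy implemented in
 * x86_cpu_realizefn() above, condensed into a pure function with hypothetical
 * names.  For long-mode-capable guests, the host value wins when
 * "host-phys-bits" is set (KVM only), then an explicit "phys-bits" value,
 * then the TCG default of 40; 32-bit-only guests get 36 (PSE36) or 32.
 */
#if 0
#include <stdbool.h>

static unsigned pick_phys_bits(bool long_mode, bool pse36,
                               unsigned user_bits, unsigned host_bits,
                               bool use_host_bits, unsigned tcg_default)
{
    if (!long_mode) {
        return pse36 ? 36 : 32;      /* a user value is rejected earlier */
    }
    if (use_host_bits) {
        return host_bits;            /* "host-phys-bits" property */
    }
    if (user_bits != 0) {
        return user_bits;            /* explicit "phys-bits" property */
    }
    return tcg_default;              /* historical default, 40 bits */
}
#endif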
3765
3766 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3767 {
3768 X86CPU *cpu = X86_CPU(dev);
3769 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3770 Error *local_err = NULL;
3771
3772 #ifndef CONFIG_USER_ONLY
3773 cpu_remove_sync(CPU(dev));
3774 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3775 #endif
3776
3777 if (cpu->apic_state) {
3778 object_unparent(OBJECT(cpu->apic_state));
3779 cpu->apic_state = NULL;
3780 }
3781
3782 xcc->parent_unrealize(dev, &local_err);
3783 if (local_err != NULL) {
3784 error_propagate(errp, local_err);
3785 return;
3786 }
3787 }
3788
3789 typedef struct BitProperty {
3790 FeatureWord w;
3791 uint32_t mask;
3792 } BitProperty;
3793
3794 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3795 void *opaque, Error **errp)
3796 {
3797 X86CPU *cpu = X86_CPU(obj);
3798 BitProperty *fp = opaque;
3799 uint32_t f = cpu->env.features[fp->w];
3800 bool value = (f & fp->mask) == fp->mask;
3801 visit_type_bool(v, name, &value, errp);
3802 }
3803
3804 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3805 void *opaque, Error **errp)
3806 {
3807 DeviceState *dev = DEVICE(obj);
3808 X86CPU *cpu = X86_CPU(obj);
3809 BitProperty *fp = opaque;
3810 Error *local_err = NULL;
3811 bool value;
3812
3813 if (dev->realized) {
3814 qdev_prop_set_after_realize(dev, name, errp);
3815 return;
3816 }
3817
3818 visit_type_bool(v, name, &value, &local_err);
3819 if (local_err) {
3820 error_propagate(errp, local_err);
3821 return;
3822 }
3823
3824 if (value) {
3825 cpu->env.features[fp->w] |= fp->mask;
3826 } else {
3827 cpu->env.features[fp->w] &= ~fp->mask;
3828 }
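/* Record that the user explicitly set this bit (whether on or off), so
 * later feature expansion can tell a user choice apart from a CPU model
 * default. */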
3829 cpu->env.user_features[fp->w] |= fp->mask;
3830 }
3831
3832 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3833 void *opaque)
3834 {
3835 BitProperty *prop = opaque;
3836 g_free(prop);
3837 }
3838
3839 /* Register a boolean property to get/set a single bit in a uint32_t field.
3840 *
3841 * The same property name can be registered multiple times to make it affect
3842 * multiple bits in the same FeatureWord. In that case, the getter will return
3843 * true only if all bits are set.
3844 */
3845 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3846 const char *prop_name,
3847 FeatureWord w,
3848 int bitnr)
3849 {
3850 BitProperty *fp;
3851 ObjectProperty *op;
3852 uint32_t mask = (1UL << bitnr);
3853
3854 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3855 if (op) {
3856 fp = op->opaque;
3857 assert(fp->w == w);
3858 fp->mask |= mask;
3859 } else {
3860 fp = g_new0(BitProperty, 1);
3861 fp->w = w;
3862 fp->mask = mask;
3863 object_property_add(OBJECT(cpu), prop_name, "bool",
3864 x86_cpu_get_bit_prop,
3865 x86_cpu_set_bit_prop,
3866 x86_cpu_release_bit_prop, fp, &error_abort);
3867 }
3868 }
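/*
 * Illustrative sketch (not part of QEMU): when the same property name is
 * registered for several bits, the masks are OR-ed together, and the getter
 * above reports true only when every bit in the accumulated mask is set.  A
 * stand-alone version of that test, with hypothetical names:
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool all_mask_bits_set(uint32_t feature_word, uint32_t mask)
{
    return (feature_word & mask) == mask;
}

/* e.g. mask = (1u << 3) | (1u << 7): the property reads as "on" only if
 * both bit 3 and bit 7 are set in the feature word. */
#endif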
3869
3870 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3871 FeatureWord w,
3872 int bitnr)
3873 {
3874 FeatureWordInfo *fi = &feature_word_info[w];
3875 const char *name = fi->feat_names[bitnr];
3876
3877 if (!name) {
3878 return;
3879 }
3880
3881 /* Property names should use "-" instead of "_".
3882 * Old names containing underscores are registered as aliases
3883 * using object_property_add_alias()
3884 */
3885 assert(!strchr(name, '_'));
3886 /* aliases don't use "|" delimiters anymore; they are registered
3887 * manually using object_property_add_alias() */
3888 assert(!strchr(name, '|'));
3889 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
3890 }
3891
3892 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
3893 {
3894 X86CPU *cpu = X86_CPU(cs);
3895 CPUX86State *env = &cpu->env;
3896 GuestPanicInformation *panic_info = NULL;
3897
3898 if (env->features[FEAT_HYPERV_EDX] & HV_X64_GUEST_CRASH_MSR_AVAILABLE) {
3899 panic_info = g_malloc0(sizeof(GuestPanicInformation));
3900
3901 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
3902
3903 assert(HV_X64_MSR_CRASH_PARAMS >= 5);
3904 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
3905 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
3906 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
3907 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
3908 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
3909 }
3910
3911 return panic_info;
3912 }
3913 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
3914 const char *name, void *opaque,
3915 Error **errp)
3916 {
3917 CPUState *cs = CPU(obj);
3918 GuestPanicInformation *panic_info;
3919
3920 if (!cs->crash_occurred) {
3921 error_setg(errp, "No crash occured");
3922 return;
3923 }
3924
3925 panic_info = x86_cpu_get_crash_info(cs);
3926 if (panic_info == NULL) {
3927 error_setg(errp, "No crash information");
3928 return;
3929 }
3930
3931 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
3932 errp);
3933 qapi_free_GuestPanicInformation(panic_info);
3934 }
3935
3936 static void x86_cpu_initfn(Object *obj)
3937 {
3938 CPUState *cs = CPU(obj);
3939 X86CPU *cpu = X86_CPU(obj);
3940 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3941 CPUX86State *env = &cpu->env;
3942 FeatureWord w;
3943
3944 cs->env_ptr = env;
3945
3946 object_property_add(obj, "family", "int",
3947 x86_cpuid_version_get_family,
3948 x86_cpuid_version_set_family, NULL, NULL, NULL);
3949 object_property_add(obj, "model", "int",
3950 x86_cpuid_version_get_model,
3951 x86_cpuid_version_set_model, NULL, NULL, NULL);
3952 object_property_add(obj, "stepping", "int",
3953 x86_cpuid_version_get_stepping,
3954 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3955 object_property_add_str(obj, "vendor",
3956 x86_cpuid_get_vendor,
3957 x86_cpuid_set_vendor, NULL);
3958 object_property_add_str(obj, "model-id",
3959 x86_cpuid_get_model_id,
3960 x86_cpuid_set_model_id, NULL);
3961 object_property_add(obj, "tsc-frequency", "int",
3962 x86_cpuid_get_tsc_freq,
3963 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3964 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3965 x86_cpu_get_feature_words,
3966 NULL, NULL, (void *)env->features, NULL);
3967 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3968 x86_cpu_get_feature_words,
3969 NULL, NULL, (void *)cpu->filtered_features, NULL);
3970
3971 object_property_add(obj, "crash-information", "GuestPanicInformation",
3972 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
3973
3974 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3975
3976 for (w = 0; w < FEATURE_WORDS; w++) {
3977 int bitnr;
3978
3979 for (bitnr = 0; bitnr < 32; bitnr++) {
3980 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3981 }
3982 }
3983
3984 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
3985 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
3986 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
3987 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
3988 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
3989 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
3990 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
3991
3992 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
3993 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
3994 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
3995 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
3996 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
3997 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
3998 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
3999 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
4000 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
4001 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
4002 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
4003 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
4004 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
4005 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
4006 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
4007 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
4008 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
4009 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
4010 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
4011 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
4012 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
4013
4014 if (xcc->cpu_def) {
4015 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
4016 }
4017 }
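/*
 * Illustrative sketch (not part of QEMU): "-" is the canonical separator in
 * property names, and most of the legacy aliases registered above differ
 * only in spelling "_" instead of "-".  A hypothetical helper that derives
 * the canonical form for that common case:
 */
#if 0
/* Rewrite '_' to '-' in place, e.g. "lahf_lm" -> "lahf-lm". */
static void canonicalize_prop_name(char *name)
{
    for (char *p = name; *p; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
#endif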
4018
4019 static int64_t x86_cpu_get_arch_id(CPUState *cs)
4020 {
4021 X86CPU *cpu = X86_CPU(cs);
4022
4023 return cpu->apic_id;
4024 }
4025
4026 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
4027 {
4028 X86CPU *cpu = X86_CPU(cs);
4029
4030 return cpu->env.cr[0] & CR0_PG_MASK;
4031 }
4032
4033 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
4034 {
4035 X86CPU *cpu = X86_CPU(cs);
4036
4037 cpu->env.eip = value;
4038 }
4039
4040 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
4041 {
4042 X86CPU *cpu = X86_CPU(cs);
4043
4044 cpu->env.eip = tb->pc - tb->cs_base;
4045 }
4046
4047 static bool x86_cpu_has_work(CPUState *cs)
4048 {
4049 X86CPU *cpu = X86_CPU(cs);
4050 CPUX86State *env = &cpu->env;
4051
4052 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
4053 CPU_INTERRUPT_POLL)) &&
4054 (env->eflags & IF_MASK)) ||
4055 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
4056 CPU_INTERRUPT_INIT |
4057 CPU_INTERRUPT_SIPI |
4058 CPU_INTERRUPT_MCE)) ||
4059 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
4060 !(env->hflags & HF_SMM_MASK));
4061 }
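/*
 * Illustrative sketch (not part of QEMU): the predicate above groups the
 * pending-interrupt bits into three classes - maskable interrupts gated by
 * EFLAGS.IF, events that are always delivered, and SMIs that are blocked
 * while already in SMM.  A simplified stand-alone version with hypothetical
 * bit definitions:
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

enum {
    IRQ_MASKABLE = 1u << 0,   /* hard interrupt / APIC poll */
    IRQ_ALWAYS   = 1u << 1,   /* NMI, INIT, SIPI, MCE */
    IRQ_SMI      = 1u << 2,   /* system management interrupt */
};

static bool cpu_has_pending_work(uint32_t pending, bool if_set, bool in_smm)
{
    return ((pending & IRQ_MASKABLE) && if_set) ||
           (pending & IRQ_ALWAYS) ||
           ((pending & IRQ_SMI) && !in_smm);
}
#endif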
4062
4063 static Property x86_cpu_properties[] = {
4064 #ifdef CONFIG_USER_ONLY
4065 /* apic_id = 0 by default for *-user, see commit 9886e834 */
4066 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
4067 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
4068 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
4069 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
4070 #else
4071 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
4072 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
4073 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
4074 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
4075 #endif
4076 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
4077 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
4078 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
4079 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
4080 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
4081 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
4082 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
4083 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
4084 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
4085 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
4086 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
4087 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
4088 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
4089 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
4090 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
4091 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
4092 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
4093 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
4094 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
4095 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
4096 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
4097 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
4098 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
4099 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
4100 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
4101 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
4102 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
4103 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
4104 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
4105 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
4106 false),
4107 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
4108 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
4109 DEFINE_PROP_END_OF_LIST()
4110 };
4111
4112 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
4113 {
4114 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4115 CPUClass *cc = CPU_CLASS(oc);
4116 DeviceClass *dc = DEVICE_CLASS(oc);
4117
4118 xcc->parent_realize = dc->realize;
4119 xcc->parent_unrealize = dc->unrealize;
4120 dc->realize = x86_cpu_realizefn;
4121 dc->unrealize = x86_cpu_unrealizefn;
4122 dc->props = x86_cpu_properties;
4123
4124 xcc->parent_reset = cc->reset;
4125 cc->reset = x86_cpu_reset;
4126 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
4127
4128 cc->class_by_name = x86_cpu_class_by_name;
4129 cc->parse_features = x86_cpu_parse_featurestr;
4130 cc->has_work = x86_cpu_has_work;
4131 #ifdef CONFIG_TCG
4132 cc->do_interrupt = x86_cpu_do_interrupt;
4133 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
4134 #endif
4135 cc->dump_state = x86_cpu_dump_state;
4136 cc->get_crash_info = x86_cpu_get_crash_info;
4137 cc->set_pc = x86_cpu_set_pc;
4138 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
4139 cc->gdb_read_register = x86_cpu_gdb_read_register;
4140 cc->gdb_write_register = x86_cpu_gdb_write_register;
4141 cc->get_arch_id = x86_cpu_get_arch_id;
4142 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
4143 #ifdef CONFIG_USER_ONLY
4144 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
4145 #else
4146 cc->asidx_from_attrs = x86_asidx_from_attrs;
4147 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
4148 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
4149 cc->write_elf64_note = x86_cpu_write_elf64_note;
4150 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
4151 cc->write_elf32_note = x86_cpu_write_elf32_note;
4152 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
4153 cc->vmsd = &vmstate_x86_cpu;
4154 #endif
4155 cc->gdb_arch_name = x86_gdb_arch_name;
4156 #ifdef TARGET_X86_64
4157 cc->gdb_core_xml_file = "i386-64bit.xml";
4158 cc->gdb_num_core_regs = 57;
4159 #else
4160 cc->gdb_core_xml_file = "i386-32bit.xml";
4161 cc->gdb_num_core_regs = 41;
4162 #endif
4163 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
4164 cc->debug_excp_handler = breakpoint_handler;
4165 #endif
4166 cc->cpu_exec_enter = x86_cpu_exec_enter;
4167 cc->cpu_exec_exit = x86_cpu_exec_exit;
4168
4169 dc->user_creatable = true;
4170 }
4171
4172 static const TypeInfo x86_cpu_type_info = {
4173 .name = TYPE_X86_CPU,
4174 .parent = TYPE_CPU,
4175 .instance_size = sizeof(X86CPU),
4176 .instance_init = x86_cpu_initfn,
4177 .abstract = true,
4178 .class_size = sizeof(X86CPUClass),
4179 .class_init = x86_cpu_common_class_init,
4180 };
4181
4182
4183 /* "base" CPU model, used by query-cpu-model-expansion */
4184 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4185 {
4186 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4187
4188 xcc->static_model = true;
4189 xcc->migration_safe = true;
4190 xcc->model_description = "base CPU model type with no features enabled";
4191 xcc->ordering = 8;
4192 }
4193
4194 static const TypeInfo x86_base_cpu_type_info = {
4195 .name = X86_CPU_TYPE_NAME("base"),
4196 .parent = TYPE_X86_CPU,
4197 .class_init = x86_cpu_base_class_init,
4198 };
4199
4200 static void x86_cpu_register_types(void)
4201 {
4202 int i;
4203
4204 type_register_static(&x86_cpu_type_info);
4205 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4206 x86_register_cpudef_type(&builtin_x86_defs[i]);
4207 }
4208 type_register_static(&max_x86_cpu_type_info);
4209 type_register_static(&x86_base_cpu_type_info);
4210 #ifdef CONFIG_KVM
4211 type_register_static(&host_x86_cpu_type_info);
4212 #endif
4213 }
4214
4215 type_init(x86_cpu_register_types)