git.proxmox.com Git - mirror_qemu.git/blob - target/i386/cpu.c
i386: Don't set CPUClass::cpu_def on "max" model
[mirror_qemu.git] / target / i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

/* Cache level is reported in EAX bits 7:5 */
#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
/* NOTE: function-like macro that evaluates 'a' multiple times; only pass
 * side-effect-free expressions. */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)


/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* Level 3 unified cache: */
/* Plain L3_* values describe a disabled L3; the L3_N_* values are the
 * 16MiB/16-way L3 actually exposed when an L3 is present. */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
#define L3_N_LINE_SIZE         64
#define L3_N_ASSOCIATIVITY     16
#define L3_N_SETS           16384
#define L3_N_PARTITIONS         1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG      1
#define L3_N_SIZE_KB_AMD    16384

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
169
170
171
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
174 {
175 int i;
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
180 }
181 dst[CPUID_VENDOR_SZ] = '\0';
182 }
183
/* Feature-word contents for the built-in CPU models, plus the TCG_* sets
 * describing which bits the TCG emulation backend can actually provide. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
249
/* Per-feature-word metadata: how to query the word via CPUID and which
 * bits are supported by TCG or known (un)migratable. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32]; /* one name per bit; NULL = unnamed bit */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
265
/* Metadata table for every FeatureWord, indexed by the FEAT_* enum.
 * See FeatureWordInfo above for the meaning of each field. */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = {
            /* NOTE(review): bits 0 and 3 deliberately share the name
             * "kvmclock" — KVM exposes two clocksource feature bits;
             * confirm against linux/kvm_para.h before renaming. */
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    /* XSAVE component bitmap (CPUID[0xD,0].EAX/EDX); bits are unnamed and
     * derived from other features, so no feat_names here. */
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};
528
/* Mapping from R_* register index to its printable name and QAPI enum. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* REGISTER(EAX) expands to [R_EAX] = { "EAX", X86_CPU_REGISTER32_EAX } */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
549
/* Description of one XSAVE state component: the feature bit that enables
 * it and its location/size within the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

/* XSAVE state components, indexed by XSTATE_*_BIT. */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
599
600 static uint32_t xsave_area_size(uint64_t mask)
601 {
602 int i;
603 uint64_t ret = 0;
604
605 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
606 const ExtSaveArea *esa = &x86_ext_save_areas[i];
607 if ((mask >> i) & 1) {
608 ret = MAX(ret, esa->offset + esa->size);
609 }
610 }
611 return ret;
612 }
613
614 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
615 {
616 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
617 cpu->env.features[FEAT_XSAVE_COMP_LO];
618 }
619
620 const char *get_register_name_32(unsigned int reg)
621 {
622 if (reg >= CPU_NB_REGS32) {
623 return NULL;
624 }
625 return x86_reg_info_32[reg].name;
626 }
627
628 /*
629 * Returns the set of feature flags that are supported and migratable by
630 * QEMU, for a given FeatureWord.
631 */
632 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
633 {
634 FeatureWordInfo *wi = &feature_word_info[w];
635 uint32_t r = 0;
636 int i;
637
638 for (i = 0; i < 32; i++) {
639 uint32_t f = 1U << i;
640
641 /* If the feature name is known, it is implicitly considered migratable,
642 * unless it is explicitly set in unmigratable_flags */
643 if ((wi->migratable_flags & f) ||
644 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
645 r |= f;
646 }
647 }
648 return r;
649 }
650
/* Execute the CPUID instruction on the host with the given EAX (function)
 * and ECX (count) inputs and return the four result registers.  Any of
 * eax/ebx/ecx/edx may be NULL if the caller does not want that value.
 * Aborts on non-x86 hosts. */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* No register outputs here: save/restore everything with pusha/popa
     * and store the results through %esi instead (presumably to avoid
     * declaring %ebx, which can be the PIC register, as clobbered —
     * NOTE(review): confirm). */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
684
/* CPU class name definitions: */

/* QOM type name for a CPU model is the model name plus "-" TYPE_X86_CPU */
#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
697
698 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
699 {
700 ObjectClass *oc;
701 char *typename;
702
703 if (cpu_model == NULL) {
704 return NULL;
705 }
706
707 typename = x86_cpu_type_name(cpu_model);
708 oc = object_class_by_name(typename);
709 g_free(typename);
710 return oc;
711 }
712
713 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
714 {
715 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
716 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
717 return g_strndup(class_name,
718 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
719 }
720
/* Static definition of one built-in CPU model (see builtin_x86_defs). */
struct X86CPUDefinition {
    const char *name;      /* model name as used with -cpu */
    uint32_t level;        /* highest basic CPUID leaf */
    uint32_t xlevel;       /* highest extended CPUID leaf (0x8000xxxx) */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;  /* initial value of every feature word */
    char model_id[48];          /* CPUID model-ID/brand string */
};
733
734 static X86CPUDefinition builtin_x86_defs[] = {
735 {
736 .name = "qemu64",
737 .level = 0xd,
738 .vendor = CPUID_VENDOR_AMD,
739 .family = 6,
740 .model = 6,
741 .stepping = 3,
742 .features[FEAT_1_EDX] =
743 PPRO_FEATURES |
744 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
745 CPUID_PSE36,
746 .features[FEAT_1_ECX] =
747 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
748 .features[FEAT_8000_0001_EDX] =
749 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
750 .features[FEAT_8000_0001_ECX] =
751 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
752 .xlevel = 0x8000000A,
753 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
754 },
755 {
756 .name = "phenom",
757 .level = 5,
758 .vendor = CPUID_VENDOR_AMD,
759 .family = 16,
760 .model = 2,
761 .stepping = 3,
762 /* Missing: CPUID_HT */
763 .features[FEAT_1_EDX] =
764 PPRO_FEATURES |
765 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
766 CPUID_PSE36 | CPUID_VME,
767 .features[FEAT_1_ECX] =
768 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
769 CPUID_EXT_POPCNT,
770 .features[FEAT_8000_0001_EDX] =
771 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
772 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
773 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
774 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
775 CPUID_EXT3_CR8LEG,
776 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
777 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
778 .features[FEAT_8000_0001_ECX] =
779 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
780 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
781 /* Missing: CPUID_SVM_LBRV */
782 .features[FEAT_SVM] =
783 CPUID_SVM_NPT,
784 .xlevel = 0x8000001A,
785 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
786 },
787 {
788 .name = "core2duo",
789 .level = 10,
790 .vendor = CPUID_VENDOR_INTEL,
791 .family = 6,
792 .model = 15,
793 .stepping = 11,
794 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
795 .features[FEAT_1_EDX] =
796 PPRO_FEATURES |
797 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
798 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
799 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
800 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
801 .features[FEAT_1_ECX] =
802 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
803 CPUID_EXT_CX16,
804 .features[FEAT_8000_0001_EDX] =
805 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
806 .features[FEAT_8000_0001_ECX] =
807 CPUID_EXT3_LAHF_LM,
808 .xlevel = 0x80000008,
809 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
810 },
811 {
812 .name = "kvm64",
813 .level = 0xd,
814 .vendor = CPUID_VENDOR_INTEL,
815 .family = 15,
816 .model = 6,
817 .stepping = 1,
818 /* Missing: CPUID_HT */
819 .features[FEAT_1_EDX] =
820 PPRO_FEATURES | CPUID_VME |
821 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
822 CPUID_PSE36,
823 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
824 .features[FEAT_1_ECX] =
825 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
826 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
827 .features[FEAT_8000_0001_EDX] =
828 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
829 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
830 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
831 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
832 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
833 .features[FEAT_8000_0001_ECX] =
834 0,
835 .xlevel = 0x80000008,
836 .model_id = "Common KVM processor"
837 },
838 {
839 .name = "qemu32",
840 .level = 4,
841 .vendor = CPUID_VENDOR_INTEL,
842 .family = 6,
843 .model = 6,
844 .stepping = 3,
845 .features[FEAT_1_EDX] =
846 PPRO_FEATURES,
847 .features[FEAT_1_ECX] =
848 CPUID_EXT_SSE3,
849 .xlevel = 0x80000004,
850 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
851 },
852 {
853 .name = "kvm32",
854 .level = 5,
855 .vendor = CPUID_VENDOR_INTEL,
856 .family = 15,
857 .model = 6,
858 .stepping = 1,
859 .features[FEAT_1_EDX] =
860 PPRO_FEATURES | CPUID_VME |
861 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
862 .features[FEAT_1_ECX] =
863 CPUID_EXT_SSE3,
864 .features[FEAT_8000_0001_ECX] =
865 0,
866 .xlevel = 0x80000008,
867 .model_id = "Common 32-bit KVM processor"
868 },
869 {
870 .name = "coreduo",
871 .level = 10,
872 .vendor = CPUID_VENDOR_INTEL,
873 .family = 6,
874 .model = 14,
875 .stepping = 8,
876 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
877 .features[FEAT_1_EDX] =
878 PPRO_FEATURES | CPUID_VME |
879 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
880 CPUID_SS,
881 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
882 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
883 .features[FEAT_1_ECX] =
884 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
885 .features[FEAT_8000_0001_EDX] =
886 CPUID_EXT2_NX,
887 .xlevel = 0x80000008,
888 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
889 },
890 {
891 .name = "486",
892 .level = 1,
893 .vendor = CPUID_VENDOR_INTEL,
894 .family = 4,
895 .model = 8,
896 .stepping = 0,
897 .features[FEAT_1_EDX] =
898 I486_FEATURES,
899 .xlevel = 0,
900 },
901 {
902 .name = "pentium",
903 .level = 1,
904 .vendor = CPUID_VENDOR_INTEL,
905 .family = 5,
906 .model = 4,
907 .stepping = 3,
908 .features[FEAT_1_EDX] =
909 PENTIUM_FEATURES,
910 .xlevel = 0,
911 },
912 {
913 .name = "pentium2",
914 .level = 2,
915 .vendor = CPUID_VENDOR_INTEL,
916 .family = 6,
917 .model = 5,
918 .stepping = 2,
919 .features[FEAT_1_EDX] =
920 PENTIUM2_FEATURES,
921 .xlevel = 0,
922 },
923 {
924 .name = "pentium3",
925 .level = 3,
926 .vendor = CPUID_VENDOR_INTEL,
927 .family = 6,
928 .model = 7,
929 .stepping = 3,
930 .features[FEAT_1_EDX] =
931 PENTIUM3_FEATURES,
932 .xlevel = 0,
933 },
934 {
935 .name = "athlon",
936 .level = 2,
937 .vendor = CPUID_VENDOR_AMD,
938 .family = 6,
939 .model = 2,
940 .stepping = 3,
941 .features[FEAT_1_EDX] =
942 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
943 CPUID_MCA,
944 .features[FEAT_8000_0001_EDX] =
945 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
946 .xlevel = 0x80000008,
947 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
948 },
949 {
950 .name = "n270",
951 .level = 10,
952 .vendor = CPUID_VENDOR_INTEL,
953 .family = 6,
954 .model = 28,
955 .stepping = 2,
956 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
957 .features[FEAT_1_EDX] =
958 PPRO_FEATURES |
959 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
960 CPUID_ACPI | CPUID_SS,
961 /* Some CPUs got no CPUID_SEP */
962 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
963 * CPUID_EXT_XTPR */
964 .features[FEAT_1_ECX] =
965 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
966 CPUID_EXT_MOVBE,
967 .features[FEAT_8000_0001_EDX] =
968 CPUID_EXT2_NX,
969 .features[FEAT_8000_0001_ECX] =
970 CPUID_EXT3_LAHF_LM,
971 .xlevel = 0x80000008,
972 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
973 },
974 {
975 .name = "Conroe",
976 .level = 10,
977 .vendor = CPUID_VENDOR_INTEL,
978 .family = 6,
979 .model = 15,
980 .stepping = 3,
981 .features[FEAT_1_EDX] =
982 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
983 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
984 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
985 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
986 CPUID_DE | CPUID_FP87,
987 .features[FEAT_1_ECX] =
988 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
989 .features[FEAT_8000_0001_EDX] =
990 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
991 .features[FEAT_8000_0001_ECX] =
992 CPUID_EXT3_LAHF_LM,
993 .xlevel = 0x80000008,
994 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
995 },
996 {
997 .name = "Penryn",
998 .level = 10,
999 .vendor = CPUID_VENDOR_INTEL,
1000 .family = 6,
1001 .model = 23,
1002 .stepping = 3,
1003 .features[FEAT_1_EDX] =
1004 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1005 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1006 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1007 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1008 CPUID_DE | CPUID_FP87,
1009 .features[FEAT_1_ECX] =
1010 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1011 CPUID_EXT_SSE3,
1012 .features[FEAT_8000_0001_EDX] =
1013 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1014 .features[FEAT_8000_0001_ECX] =
1015 CPUID_EXT3_LAHF_LM,
1016 .xlevel = 0x80000008,
1017 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1018 },
1019 {
1020 .name = "Nehalem",
1021 .level = 11,
1022 .vendor = CPUID_VENDOR_INTEL,
1023 .family = 6,
1024 .model = 26,
1025 .stepping = 3,
1026 .features[FEAT_1_EDX] =
1027 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1028 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1029 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1030 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1031 CPUID_DE | CPUID_FP87,
1032 .features[FEAT_1_ECX] =
1033 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1034 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1035 .features[FEAT_8000_0001_EDX] =
1036 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1037 .features[FEAT_8000_0001_ECX] =
1038 CPUID_EXT3_LAHF_LM,
1039 .xlevel = 0x80000008,
1040 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1041 },
1042 {
1043 .name = "Westmere",
1044 .level = 11,
1045 .vendor = CPUID_VENDOR_INTEL,
1046 .family = 6,
1047 .model = 44,
1048 .stepping = 1,
1049 .features[FEAT_1_EDX] =
1050 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1051 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1052 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1053 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1054 CPUID_DE | CPUID_FP87,
1055 .features[FEAT_1_ECX] =
1056 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1057 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1058 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1059 .features[FEAT_8000_0001_EDX] =
1060 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1061 .features[FEAT_8000_0001_ECX] =
1062 CPUID_EXT3_LAHF_LM,
1063 .features[FEAT_6_EAX] =
1064 CPUID_6_EAX_ARAT,
1065 .xlevel = 0x80000008,
1066 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1067 },
1068 {
1069 .name = "SandyBridge",
1070 .level = 0xd,
1071 .vendor = CPUID_VENDOR_INTEL,
1072 .family = 6,
1073 .model = 42,
1074 .stepping = 1,
1075 .features[FEAT_1_EDX] =
1076 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1077 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1078 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1079 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1080 CPUID_DE | CPUID_FP87,
1081 .features[FEAT_1_ECX] =
1082 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1083 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1084 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1085 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1086 CPUID_EXT_SSE3,
1087 .features[FEAT_8000_0001_EDX] =
1088 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1089 CPUID_EXT2_SYSCALL,
1090 .features[FEAT_8000_0001_ECX] =
1091 CPUID_EXT3_LAHF_LM,
1092 .features[FEAT_XSAVE] =
1093 CPUID_XSAVE_XSAVEOPT,
1094 .features[FEAT_6_EAX] =
1095 CPUID_6_EAX_ARAT,
1096 .xlevel = 0x80000008,
1097 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1098 },
1099 {
1100 .name = "IvyBridge",
1101 .level = 0xd,
1102 .vendor = CPUID_VENDOR_INTEL,
1103 .family = 6,
1104 .model = 58,
1105 .stepping = 9,
1106 .features[FEAT_1_EDX] =
1107 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1108 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1109 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1110 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1111 CPUID_DE | CPUID_FP87,
1112 .features[FEAT_1_ECX] =
1113 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1114 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1115 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1116 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1117 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1118 .features[FEAT_7_0_EBX] =
1119 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1120 CPUID_7_0_EBX_ERMS,
1121 .features[FEAT_8000_0001_EDX] =
1122 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1123 CPUID_EXT2_SYSCALL,
1124 .features[FEAT_8000_0001_ECX] =
1125 CPUID_EXT3_LAHF_LM,
1126 .features[FEAT_XSAVE] =
1127 CPUID_XSAVE_XSAVEOPT,
1128 .features[FEAT_6_EAX] =
1129 CPUID_6_EAX_ARAT,
1130 .xlevel = 0x80000008,
1131 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1132 },
1133 {
1134 .name = "Haswell-noTSX",
1135 .level = 0xd,
1136 .vendor = CPUID_VENDOR_INTEL,
1137 .family = 6,
1138 .model = 60,
1139 .stepping = 1,
1140 .features[FEAT_1_EDX] =
1141 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1142 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1143 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1144 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1145 CPUID_DE | CPUID_FP87,
1146 .features[FEAT_1_ECX] =
1147 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1148 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1149 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1150 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1151 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1152 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1153 .features[FEAT_8000_0001_EDX] =
1154 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1155 CPUID_EXT2_SYSCALL,
1156 .features[FEAT_8000_0001_ECX] =
1157 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1158 .features[FEAT_7_0_EBX] =
1159 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1160 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1161 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1162 .features[FEAT_XSAVE] =
1163 CPUID_XSAVE_XSAVEOPT,
1164 .features[FEAT_6_EAX] =
1165 CPUID_6_EAX_ARAT,
1166 .xlevel = 0x80000008,
1167 .model_id = "Intel Core Processor (Haswell, no TSX)",
1168 }, {
1169 .name = "Haswell",
1170 .level = 0xd,
1171 .vendor = CPUID_VENDOR_INTEL,
1172 .family = 6,
1173 .model = 60,
1174 .stepping = 1,
1175 .features[FEAT_1_EDX] =
1176 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1177 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1180 CPUID_DE | CPUID_FP87,
1181 .features[FEAT_1_ECX] =
1182 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1183 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1184 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1185 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1186 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1187 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1188 .features[FEAT_8000_0001_EDX] =
1189 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1190 CPUID_EXT2_SYSCALL,
1191 .features[FEAT_8000_0001_ECX] =
1192 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1193 .features[FEAT_7_0_EBX] =
1194 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1195 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1196 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1197 CPUID_7_0_EBX_RTM,
1198 .features[FEAT_XSAVE] =
1199 CPUID_XSAVE_XSAVEOPT,
1200 .features[FEAT_6_EAX] =
1201 CPUID_6_EAX_ARAT,
1202 .xlevel = 0x80000008,
1203 .model_id = "Intel Core Processor (Haswell)",
1204 },
1205 {
1206 .name = "Broadwell-noTSX",
1207 .level = 0xd,
1208 .vendor = CPUID_VENDOR_INTEL,
1209 .family = 6,
1210 .model = 61,
1211 .stepping = 2,
1212 .features[FEAT_1_EDX] =
1213 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1214 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1215 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1216 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1217 CPUID_DE | CPUID_FP87,
1218 .features[FEAT_1_ECX] =
1219 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1220 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1221 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1222 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1223 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1224 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1225 .features[FEAT_8000_0001_EDX] =
1226 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1227 CPUID_EXT2_SYSCALL,
1228 .features[FEAT_8000_0001_ECX] =
1229 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1230 .features[FEAT_7_0_EBX] =
1231 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1232 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1233 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1234 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1235 CPUID_7_0_EBX_SMAP,
1236 .features[FEAT_XSAVE] =
1237 CPUID_XSAVE_XSAVEOPT,
1238 .features[FEAT_6_EAX] =
1239 CPUID_6_EAX_ARAT,
1240 .xlevel = 0x80000008,
1241 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1242 },
1243 {
1244 .name = "Broadwell",
1245 .level = 0xd,
1246 .vendor = CPUID_VENDOR_INTEL,
1247 .family = 6,
1248 .model = 61,
1249 .stepping = 2,
1250 .features[FEAT_1_EDX] =
1251 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1252 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1253 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1254 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1255 CPUID_DE | CPUID_FP87,
1256 .features[FEAT_1_ECX] =
1257 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1258 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1259 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1260 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1261 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1262 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1263 .features[FEAT_8000_0001_EDX] =
1264 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1265 CPUID_EXT2_SYSCALL,
1266 .features[FEAT_8000_0001_ECX] =
1267 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1268 .features[FEAT_7_0_EBX] =
1269 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1270 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1271 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1272 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1273 CPUID_7_0_EBX_SMAP,
1274 .features[FEAT_XSAVE] =
1275 CPUID_XSAVE_XSAVEOPT,
1276 .features[FEAT_6_EAX] =
1277 CPUID_6_EAX_ARAT,
1278 .xlevel = 0x80000008,
1279 .model_id = "Intel Core Processor (Broadwell)",
1280 },
1281 {
1282 .name = "Skylake-Client",
1283 .level = 0xd,
1284 .vendor = CPUID_VENDOR_INTEL,
1285 .family = 6,
1286 .model = 94,
1287 .stepping = 3,
1288 .features[FEAT_1_EDX] =
1289 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1290 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1291 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1292 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1293 CPUID_DE | CPUID_FP87,
1294 .features[FEAT_1_ECX] =
1295 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1296 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1297 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1298 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1299 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1300 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1301 .features[FEAT_8000_0001_EDX] =
1302 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1303 CPUID_EXT2_SYSCALL,
1304 .features[FEAT_8000_0001_ECX] =
1305 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1306 .features[FEAT_7_0_EBX] =
1307 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1308 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1309 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1310 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1311 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1312 /* Missing: XSAVES (not supported by some Linux versions,
1313 * including v4.1 to v4.6).
1314 * KVM doesn't yet expose any XSAVES state save component,
1315 * and the only one defined in Skylake (processor tracing)
1316 * probably will block migration anyway.
1317 */
1318 .features[FEAT_XSAVE] =
1319 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1320 CPUID_XSAVE_XGETBV1,
1321 .features[FEAT_6_EAX] =
1322 CPUID_6_EAX_ARAT,
1323 .xlevel = 0x80000008,
1324 .model_id = "Intel Core Processor (Skylake)",
1325 },
1326 {
1327 .name = "Opteron_G1",
1328 .level = 5,
1329 .vendor = CPUID_VENDOR_AMD,
1330 .family = 15,
1331 .model = 6,
1332 .stepping = 1,
1333 .features[FEAT_1_EDX] =
1334 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1335 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1336 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1337 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1338 CPUID_DE | CPUID_FP87,
1339 .features[FEAT_1_ECX] =
1340 CPUID_EXT_SSE3,
1341 .features[FEAT_8000_0001_EDX] =
1342 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1343 .xlevel = 0x80000008,
1344 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1345 },
1346 {
1347 .name = "Opteron_G2",
1348 .level = 5,
1349 .vendor = CPUID_VENDOR_AMD,
1350 .family = 15,
1351 .model = 6,
1352 .stepping = 1,
1353 .features[FEAT_1_EDX] =
1354 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1355 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1356 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1357 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1358 CPUID_DE | CPUID_FP87,
1359 .features[FEAT_1_ECX] =
1360 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1361 /* Missing: CPUID_EXT2_RDTSCP */
1362 .features[FEAT_8000_0001_EDX] =
1363 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1364 .features[FEAT_8000_0001_ECX] =
1365 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1366 .xlevel = 0x80000008,
1367 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1368 },
1369 {
1370 .name = "Opteron_G3",
1371 .level = 5,
1372 .vendor = CPUID_VENDOR_AMD,
1373 .family = 16,
1374 .model = 2,
1375 .stepping = 3,
1376 .features[FEAT_1_EDX] =
1377 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1378 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1379 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1380 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1381 CPUID_DE | CPUID_FP87,
1382 .features[FEAT_1_ECX] =
1383 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1384 CPUID_EXT_SSE3,
1385 /* Missing: CPUID_EXT2_RDTSCP */
1386 .features[FEAT_8000_0001_EDX] =
1387 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1388 .features[FEAT_8000_0001_ECX] =
1389 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1390 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1391 .xlevel = 0x80000008,
1392 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1393 },
1394 {
1395 .name = "Opteron_G4",
1396 .level = 0xd,
1397 .vendor = CPUID_VENDOR_AMD,
1398 .family = 21,
1399 .model = 1,
1400 .stepping = 2,
1401 .features[FEAT_1_EDX] =
1402 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1403 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1404 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1405 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1406 CPUID_DE | CPUID_FP87,
1407 .features[FEAT_1_ECX] =
1408 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1409 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1410 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1411 CPUID_EXT_SSE3,
1412 /* Missing: CPUID_EXT2_RDTSCP */
1413 .features[FEAT_8000_0001_EDX] =
1414 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1415 CPUID_EXT2_SYSCALL,
1416 .features[FEAT_8000_0001_ECX] =
1417 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1418 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1419 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1420 CPUID_EXT3_LAHF_LM,
1421 /* no xsaveopt! */
1422 .xlevel = 0x8000001A,
1423 .model_id = "AMD Opteron 62xx class CPU",
1424 },
1425 {
1426 .name = "Opteron_G5",
1427 .level = 0xd,
1428 .vendor = CPUID_VENDOR_AMD,
1429 .family = 21,
1430 .model = 2,
1431 .stepping = 0,
1432 .features[FEAT_1_EDX] =
1433 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1434 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1435 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1436 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1437 CPUID_DE | CPUID_FP87,
1438 .features[FEAT_1_ECX] =
1439 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1440 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1441 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1442 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1443 /* Missing: CPUID_EXT2_RDTSCP */
1444 .features[FEAT_8000_0001_EDX] =
1445 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1446 CPUID_EXT2_SYSCALL,
1447 .features[FEAT_8000_0001_ECX] =
1448 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1449 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1450 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1451 CPUID_EXT3_LAHF_LM,
1452 /* no xsaveopt! */
1453 .xlevel = 0x8000001A,
1454 .model_id = "AMD Opteron 63xx class CPU",
1455 },
1456 };
1457
/* A (property name, property value) pair; the tables of these below are
 * terminated by a { NULL, NULL } entry.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1461
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * NULL-terminated table; individual entries may be overridden at
 * runtime through x86_cpu_change_kvm_default().
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    /* features forced off by default under KVM: */
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
1478
/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    /* NOTE(review): VME is presumably switched off because the TCG
     * translator does not implement it — confirm before relying on it. */
    { "vme", "off" },
    { NULL, NULL },
};
1485
1486
1487 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1488 {
1489 PropValue *pv;
1490 for (pv = kvm_default_props; pv->prop; pv++) {
1491 if (!strcmp(pv->prop, prop)) {
1492 pv->value = value;
1493 break;
1494 }
1495 }
1496
1497 /* It is valid to call this function only for properties that
1498 * are already present in the kvm_default_props table.
1499 */
1500 assert(pv->prop);
1501 }
1502
1503 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1504 bool migratable_only);
1505
1506 static bool lmce_supported(void)
1507 {
1508 uint64_t mce_cap = 0;
1509
1510 #ifdef CONFIG_KVM
1511 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1512 return false;
1513 }
1514 #endif
1515
1516 return !!(mce_cap & MCG_LMCE_P);
1517 }
1518
/* Fill @str with the host's 48-byte model-id string, read from CPUID
 * leaves 0x80000002..0x80000004 (16 bytes per leaf, EAX/EBX/ECX/EDX).
 * The buffer must hold at least 48 bytes; no NUL terminator is added.
 * Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    int i;

    for (i = 0; i < 3; i++) {
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
        char *dst = str + i * 16;

        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(dst, &eax, 4);
        memcpy(dst + 4, &ebx, 4);
        memcpy(dst + 8, &ecx, 4);
        memcpy(dst + 12, &edx, 4);
    }
    return 0;
}
1533
/* qdev properties specific to the "max" CPU model */
static Property max_x86_cpu_properties[] = {
    /* defaults to true; see max_x86_cpu_initfn(): features can only be
     * filled in once this value is known. */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* pass the host's cache topology through to the guest (off by default) */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1539
1540 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
1541 {
1542 DeviceClass *dc = DEVICE_CLASS(oc);
1543 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1544
1545 xcc->ordering = 9;
1546
1547 xcc->model_description =
1548 "Enables all features supported by the accelerator in the current host";
1549
1550 dc->props = max_x86_cpu_properties;
1551 }
1552
1553 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
1554
/* Instance init for the "max" CPU model.
 *
 * Under KVM, the CPU identification (vendor/family/model/stepping and
 * model-id) is copied from the host via CPUID; under TCG a fixed
 * generic identification is used instead.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (kvm_enabled()) {
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        /* Leaf 0: the 12-char vendor string lives in EBX, EDX, ECX
         * (in that register order). */
        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        /* Leaf 1 EAX: decode family/model/stepping, folding in the
         * extended family/model fields.
         * NOTE(review): the extended family is added unconditionally,
         * while the CPUID convention only defines it for base family
         * 0xF — harmless on hosts where the field is 0, but worth
         * confirming. */
        host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
        host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
        host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
        host_cpudef.stepping = eax & 0x0F;

        /* Leaves 0x80000002..4: 48-byte brand/model-id string */
        cpu_x86_fill_model_id(host_cpudef.model_id);

        x86_cpu_load_def(cpu, &host_cpudef, &error_abort);

        /* Record the host's maximum basic/extended/Centaur CPUID levels
         * as lower bounds for the guest's levels. */
        env->cpuid_min_level =
            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_min_xlevel =
            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_min_xlevel2 =
            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: fixed generic identification, independent of the host */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1605
/* QOM type registration for the "max" CPU model */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
1612
1613 #ifdef CONFIG_KVM
1614
1615 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1616 {
1617 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1618
1619 xcc->kvm_required = true;
1620 xcc->ordering = 8;
1621
1622 xcc->model_description =
1623 "KVM processor with all supported host features "
1624 "(only available in KVM mode)";
1625 }
1626
/* QOM type registration for "host": a subclass of "max", so it shares
 * max_x86_cpu_initfn() and its properties. */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
1632
1633 #endif
1634
1635 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1636 {
1637 FeatureWordInfo *f = &feature_word_info[w];
1638 int i;
1639
1640 for (i = 0; i < 32; ++i) {
1641 if ((1UL << i) & mask) {
1642 const char *reg = get_register_name_32(f->cpuid_reg);
1643 assert(reg);
1644 fprintf(stderr, "warning: %s doesn't support requested feature: "
1645 "CPUID.%02XH:%s%s%s [bit %d]\n",
1646 kvm_enabled() ? "host" : "TCG",
1647 f->cpuid_eax, reg,
1648 f->feat_names[i] ? "." : "",
1649 f->feat_names[i] ? f->feat_names[i] : "", i);
1650 }
1651 }
1652 }
1653
1654 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1655 const char *name, void *opaque,
1656 Error **errp)
1657 {
1658 X86CPU *cpu = X86_CPU(obj);
1659 CPUX86State *env = &cpu->env;
1660 int64_t value;
1661
1662 value = (env->cpuid_version >> 8) & 0xf;
1663 if (value == 0xf) {
1664 value += (env->cpuid_version >> 20) & 0xff;
1665 }
1666 visit_type_int(v, name, &value, errp);
1667 }
1668
1669 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1670 const char *name, void *opaque,
1671 Error **errp)
1672 {
1673 X86CPU *cpu = X86_CPU(obj);
1674 CPUX86State *env = &cpu->env;
1675 const int64_t min = 0;
1676 const int64_t max = 0xff + 0xf;
1677 Error *local_err = NULL;
1678 int64_t value;
1679
1680 visit_type_int(v, name, &value, &local_err);
1681 if (local_err) {
1682 error_propagate(errp, local_err);
1683 return;
1684 }
1685 if (value < min || value > max) {
1686 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1687 name ? name : "null", value, min, max);
1688 return;
1689 }
1690
1691 env->cpuid_version &= ~0xff00f00;
1692 if (value > 0x0f) {
1693 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1694 } else {
1695 env->cpuid_version |= value << 8;
1696 }
1697 }
1698
1699 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1700 const char *name, void *opaque,
1701 Error **errp)
1702 {
1703 X86CPU *cpu = X86_CPU(obj);
1704 CPUX86State *env = &cpu->env;
1705 int64_t value;
1706
1707 value = (env->cpuid_version >> 4) & 0xf;
1708 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1709 visit_type_int(v, name, &value, errp);
1710 }
1711
1712 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1713 const char *name, void *opaque,
1714 Error **errp)
1715 {
1716 X86CPU *cpu = X86_CPU(obj);
1717 CPUX86State *env = &cpu->env;
1718 const int64_t min = 0;
1719 const int64_t max = 0xff;
1720 Error *local_err = NULL;
1721 int64_t value;
1722
1723 visit_type_int(v, name, &value, &local_err);
1724 if (local_err) {
1725 error_propagate(errp, local_err);
1726 return;
1727 }
1728 if (value < min || value > max) {
1729 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1730 name ? name : "null", value, min, max);
1731 return;
1732 }
1733
1734 env->cpuid_version &= ~0xf00f0;
1735 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1736 }
1737
1738 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1739 const char *name, void *opaque,
1740 Error **errp)
1741 {
1742 X86CPU *cpu = X86_CPU(obj);
1743 CPUX86State *env = &cpu->env;
1744 int64_t value;
1745
1746 value = env->cpuid_version & 0xf;
1747 visit_type_int(v, name, &value, errp);
1748 }
1749
1750 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1751 const char *name, void *opaque,
1752 Error **errp)
1753 {
1754 X86CPU *cpu = X86_CPU(obj);
1755 CPUX86State *env = &cpu->env;
1756 const int64_t min = 0;
1757 const int64_t max = 0xf;
1758 Error *local_err = NULL;
1759 int64_t value;
1760
1761 visit_type_int(v, name, &value, &local_err);
1762 if (local_err) {
1763 error_propagate(errp, local_err);
1764 return;
1765 }
1766 if (value < min || value > max) {
1767 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1768 name ? name : "null", value, min, max);
1769 return;
1770 }
1771
1772 env->cpuid_version &= ~0xf;
1773 env->cpuid_version |= value & 0xf;
1774 }
1775
1776 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1777 {
1778 X86CPU *cpu = X86_CPU(obj);
1779 CPUX86State *env = &cpu->env;
1780 char *value;
1781
1782 value = g_malloc(CPUID_VENDOR_SZ + 1);
1783 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1784 env->cpuid_vendor3);
1785 return value;
1786 }
1787
1788 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1789 Error **errp)
1790 {
1791 X86CPU *cpu = X86_CPU(obj);
1792 CPUX86State *env = &cpu->env;
1793 int i;
1794
1795 if (strlen(value) != CPUID_VENDOR_SZ) {
1796 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1797 return;
1798 }
1799
1800 env->cpuid_vendor1 = 0;
1801 env->cpuid_vendor2 = 0;
1802 env->cpuid_vendor3 = 0;
1803 for (i = 0; i < 4; i++) {
1804 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1805 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1806 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1807 }
1808 }
1809
1810 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1811 {
1812 X86CPU *cpu = X86_CPU(obj);
1813 CPUX86State *env = &cpu->env;
1814 char *value;
1815 int i;
1816
1817 value = g_malloc(48 + 1);
1818 for (i = 0; i < 48; i++) {
1819 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1820 }
1821 value[48] = '\0';
1822 return value;
1823 }
1824
1825 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1826 Error **errp)
1827 {
1828 X86CPU *cpu = X86_CPU(obj);
1829 CPUX86State *env = &cpu->env;
1830 int c, len, i;
1831
1832 if (model_id == NULL) {
1833 model_id = "";
1834 }
1835 len = strlen(model_id);
1836 memset(env->cpuid_model, 0, 48);
1837 for (i = 0; i < 48; i++) {
1838 if (i >= len) {
1839 c = '\0';
1840 } else {
1841 c = (uint8_t)model_id[i];
1842 }
1843 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1844 }
1845 }
1846
1847 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1848 void *opaque, Error **errp)
1849 {
1850 X86CPU *cpu = X86_CPU(obj);
1851 int64_t value;
1852
1853 value = cpu->env.tsc_khz * 1000;
1854 visit_type_int(v, name, &value, errp);
1855 }
1856
1857 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1858 void *opaque, Error **errp)
1859 {
1860 X86CPU *cpu = X86_CPU(obj);
1861 const int64_t min = 0;
1862 const int64_t max = INT64_MAX;
1863 Error *local_err = NULL;
1864 int64_t value;
1865
1866 visit_type_int(v, name, &value, &local_err);
1867 if (local_err) {
1868 error_propagate(errp, local_err);
1869 return;
1870 }
1871 if (value < min || value > max) {
1872 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1873 name ? name : "null", value, min, max);
1874 return;
1875 }
1876
1877 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1878 }
1879
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* opaque points to an array of FEATURE_WORDS feature-word values
     * (presumably env->features or the filtered-features array — the
     * registration site chooses; not visible here). */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    /* Both backing arrays are stack-allocated: the visitor consumes the
     * list before this function returns, so no heap allocation is
     * needed and nothing must be freed. */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
1908
1909 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1910 void *opaque, Error **errp)
1911 {
1912 X86CPU *cpu = X86_CPU(obj);
1913 int64_t value = cpu->hyperv_spinlock_attempts;
1914
1915 visit_type_int(v, name, &value, errp);
1916 }
1917
1918 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1919 void *opaque, Error **errp)
1920 {
1921 const int64_t min = 0xFFF;
1922 const int64_t max = UINT_MAX;
1923 X86CPU *cpu = X86_CPU(obj);
1924 Error *err = NULL;
1925 int64_t value;
1926
1927 visit_type_int(v, name, &value, &err);
1928 if (err) {
1929 error_propagate(errp, err);
1930 return;
1931 }
1932
1933 if (value < min || value > max) {
1934 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1935 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1936 object_get_typename(obj), name ? name : "null",
1937 value, min, max);
1938 return;
1939 }
1940 cpu->hyperv_spinlock_attempts = value;
1941 }
1942
/* Custom qdev property type backed by the spinlocks getter/setter
 * above, so the range check in x86_set_hv_spinlocks() is enforced. */
static PropertyInfo qdev_prop_spinlocks = {
    .name  = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1948
/* Convert all '_' in a feature string option name to '-', to make the
 * feature name conform to the QOM property naming rule, which uses '-'
 * instead of '_'.  Modifies @s in place.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = strchr(s, '_'); p != NULL; p = strchr(p + 1, '_')) {
        *p = '-';
    }
}
1958
1959 /* Return the feature property name for a feature flag bit */
1960 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
1961 {
1962 /* XSAVE components are automatically enabled by other features,
1963 * so return the original feature name instead
1964 */
1965 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
1966 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
1967
1968 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
1969 x86_ext_save_areas[comp].bits) {
1970 w = x86_ext_save_areas[comp].feature;
1971 bitnr = ctz32(x86_ext_save_areas[comp].bits);
1972 }
1973 }
1974
1975 assert(bitnr < 32);
1976 assert(w < FEATURE_WORDS);
1977 return feature_word_info[w].feat_names[bitnr];
1978 }
1979
/* Compatibility hack to maintain legacy +-feat semantics,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
1985 static GList *plus_features, *minus_features;
1986
1987 static gint compare_string(gconstpointer a, gconstpointer b)
1988 {
1989 return g_strcmp0(a, b);
1990 }
1991
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Each parsed feature is registered as a global property on @typename,
 * so it applies to every CPU created with that type.  Because global
 * properties can only be registered once, this function is a no-op
 * after its first invocation.
 *
 * NOTE: @features is modified in place by strtok() and the '=' split.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Only parse on the first call; globals must not be re-registered */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: "+feat"/"-feat" are collected and win
         * over "feat=on|off" appearing anywhere in the same string.
         */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "name=val"; a bare "name" means "name=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        if (g_list_find_custom(plus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"+%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"-%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" accepts metric suffixes (e.g. "2G")
         * and maps onto the "tsc-frequency" property.
         */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        /* Register the pair as a global property on @typename */
        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        error_report("warning: Compatibility of ambiguous CPU model "
                     "strings won't be kept on future QEMU versions");
    }
}
2082
2083 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2084 static int x86_cpu_filter_features(X86CPU *cpu);
2085
2086 /* Check for missing features that may prevent the CPU class from
2087 * running using the current machine and accelerator.
2088 */
2089 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2090 strList **missing_feats)
2091 {
2092 X86CPU *xc;
2093 FeatureWord w;
2094 Error *err = NULL;
2095 strList **next = missing_feats;
2096
2097 if (xcc->kvm_required && !kvm_enabled()) {
2098 strList *new = g_new0(strList, 1);
2099 new->value = g_strdup("kvm");;
2100 *missing_feats = new;
2101 return;
2102 }
2103
2104 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2105
2106 x86_cpu_expand_features(xc, &err);
2107 if (err) {
2108 /* Errors at x86_cpu_expand_features should never happen,
2109 * but in case it does, just report the model as not
2110 * runnable at all using the "type" property.
2111 */
2112 strList *new = g_new0(strList, 1);
2113 new->value = g_strdup("type");
2114 *next = new;
2115 next = &new->next;
2116 }
2117
2118 x86_cpu_filter_features(xc);
2119
2120 for (w = 0; w < FEATURE_WORDS; w++) {
2121 uint32_t filtered = xc->filtered_features[w];
2122 int i;
2123 for (i = 0; i < 32; i++) {
2124 if (filtered & (1UL << i)) {
2125 strList *new = g_new0(strList, 1);
2126 new->value = g_strdup(x86_cpu_feature_name(w, i));
2127 *next = new;
2128 next = &new->next;
2129 }
2130 }
2131 }
2132
2133 object_unref(OBJECT(xc));
2134 }
2135
2136 /* Print all cpuid feature names in featureset
2137 */
2138 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2139 {
2140 int bit;
2141 bool first = true;
2142
2143 for (bit = 0; bit < 32; bit++) {
2144 if (featureset[bit]) {
2145 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2146 first = false;
2147 }
2148 }
2149 }
2150
2151 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2152 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2153 {
2154 ObjectClass *class_a = (ObjectClass *)a;
2155 ObjectClass *class_b = (ObjectClass *)b;
2156 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2157 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2158 const char *name_a, *name_b;
2159
2160 if (cc_a->ordering != cc_b->ordering) {
2161 return cc_a->ordering - cc_b->ordering;
2162 } else {
2163 name_a = object_class_get_name(class_a);
2164 name_b = object_class_get_name(class_b);
2165 return strcmp(name_a, name_b);
2166 }
2167 }
2168
2169 static GSList *get_sorted_cpu_model_list(void)
2170 {
2171 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2172 list = g_slist_sort(list, x86_cpu_list_compare);
2173 return list;
2174 }
2175
2176 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2177 {
2178 ObjectClass *oc = data;
2179 X86CPUClass *cc = X86_CPU_CLASS(oc);
2180 CPUListState *s = user_data;
2181 char *name = x86_cpu_class_get_model_name(cc);
2182 const char *desc = cc->model_description;
2183 if (!desc && cc->cpu_def) {
2184 desc = cc->cpu_def->model_id;
2185 }
2186
2187 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2188 name, desc);
2189 g_free(name);
2190 }
2191
2192 /* list available CPU models and flags */
2193 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2194 {
2195 int i;
2196 CPUListState s = {
2197 .file = f,
2198 .cpu_fprintf = cpu_fprintf,
2199 };
2200 GSList *list;
2201
2202 (*cpu_fprintf)(f, "Available CPUs:\n");
2203 list = get_sorted_cpu_model_list();
2204 g_slist_foreach(list, x86_cpu_list_entry, &s);
2205 g_slist_free(list);
2206
2207 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2208 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2209 FeatureWordInfo *fw = &feature_word_info[i];
2210
2211 (*cpu_fprintf)(f, " ");
2212 listflags(f, cpu_fprintf, fw->feat_names);
2213 (*cpu_fprintf)(f, "\n");
2214 }
2215 }
2216
2217 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2218 {
2219 ObjectClass *oc = data;
2220 X86CPUClass *cc = X86_CPU_CLASS(oc);
2221 CpuDefinitionInfoList **cpu_list = user_data;
2222 CpuDefinitionInfoList *entry;
2223 CpuDefinitionInfo *info;
2224
2225 info = g_malloc0(sizeof(*info));
2226 info->name = x86_cpu_class_get_model_name(cc);
2227 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2228 info->has_unavailable_features = true;
2229 info->q_typename = g_strdup(object_class_get_name(oc));
2230 info->migration_safe = cc->migration_safe;
2231 info->has_migration_safe = true;
2232
2233 entry = g_malloc0(sizeof(*entry));
2234 entry->value = info;
2235 entry->next = *cpu_list;
2236 *cpu_list = entry;
2237 }
2238
2239 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2240 {
2241 CpuDefinitionInfoList *cpu_list = NULL;
2242 GSList *list = get_sorted_cpu_model_list();
2243 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2244 g_slist_free(list);
2245 return cpu_list;
2246 }
2247
2248 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2249 bool migratable_only)
2250 {
2251 FeatureWordInfo *wi = &feature_word_info[w];
2252 uint32_t r;
2253
2254 if (kvm_enabled()) {
2255 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2256 wi->cpuid_ecx,
2257 wi->cpuid_reg);
2258 } else if (tcg_enabled()) {
2259 r = wi->tcg_features;
2260 } else {
2261 return ~0;
2262 }
2263 if (migratable_only) {
2264 r &= x86_cpu_get_migratable_flags(w);
2265 }
2266 return r;
2267 }
2268
2269 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2270 {
2271 FeatureWord w;
2272
2273 for (w = 0; w < FEATURE_WORDS; w++) {
2274 report_unavailable_features(w, cpu->filtered_features[w]);
2275 }
2276 }
2277
2278 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2279 {
2280 PropValue *pv;
2281 for (pv = props; pv->prop; pv++) {
2282 if (!pv->value) {
2283 continue;
2284 }
2285 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2286 &error_abort);
2287 }
2288 }
2289
/* Load data from X86CPUDefinition
 *
 * Copies the builtin CPU model table entry @def into @cpu via QOM
 * properties and raw feature words.  Called from instance_init; errors
 * from the individual property setters accumulate in @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            /* Userspace irqchip: flip the KVM default for x2apic to off */
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise that we are running under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        /* Under KVM, default to the host CPU's vendor string */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2342
/* Create and realize an X86CPU from a "model[,+feat,...]" string.
 * Returns NULL on failure (cpu_generic_init reports the error).
 */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
2347
2348 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2349 {
2350 X86CPUDefinition *cpudef = data;
2351 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2352
2353 xcc->cpu_def = cpudef;
2354 xcc->migration_safe = true;
2355 }
2356
/* Register a QOM type for one builtin CPU model definition.
 * The definition is attached as class_data and picked up by
 * x86_cpu_cpudef_class_init().
 */
static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));

    type_register(&ti);
    g_free(typename);
}
2375
2376 #if !defined(CONFIG_USER_ONLY)
2377
/* Clear the CPUID_APIC bit from the guest-visible CPUID[1].EDX word */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2382
2383 #endif /* !CONFIG_USER_ONLY */
2384
/* Compute the guest-visible CPUID result for leaf @index, subleaf @count.
 *
 * Out-of-range indexes are first clamped to cpuid_level/cpuid_xlevel
 * (with special handling for the Centaur 0xC0000000 range), then the
 * big switch fills *eax..*edx for the resulting leaf.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Basic leaf: max level and vendor string (GenuineIntel order) */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version/feature information */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects CR4.OSXSAVE, so compute it dynamically */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR << 8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26 */
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE reflects CR4.PKE, so compute it dynamically */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            /* PMU details come straight from KVM's supported CPUID */
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            /* Subleaf 0: supported components and total area size */
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Subleaf >= 2: size/offset of each enabled component */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2782
/* CPUClass::reset()
 *
 * Put the CPU into its architectural power-on/RESET state: real mode,
 * CS:IP = F000:FFF0, FPU/SSE defaults, cleared MSR/MTRR state.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero everything up to (but not including) end_reset_fields */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + IP 0xfff0 = the reset vector at 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for INIT/SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2904
2905 #ifndef CONFIG_USER_ONLY
/* Return true if this CPU is the bootstrap processor, i.e. the BSP bit
 * is set in its APIC base MSR.
 */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
2910
/* Machine-reset callback: forward the reset to the CPU object.
 * TODO: remove me, when reset over QOM tree is implemented
 */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
2917 #endif
2918
/* Initialize machine-check (MCE) state.
 *
 * Banks are only set up for family >= 6 CPUs that advertise both
 * CPUID_MCE and CPUID_MCA; otherwise mcg_cap stays zero.
 */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    /* (cpuid_version >> 8) & 0xf is the CPUID family field */
    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            /* Set each bank's control MSR (banks are 4 MSRs apart) */
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
2935
2936 #ifndef CONFIG_USER_ONLY
2937 APICCommonClass *apic_get_class(void)
2938 {
2939 const char *apic_type = "apic";
2940
2941 if (kvm_apic_in_kernel()) {
2942 apic_type = "kvm-apic";
2943 } else if (xen_enabled()) {
2944 apic_type = "xen-apic";
2945 }
2946
2947 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
2948 }
2949
/* Create the local APIC device for @cpu and attach it as the "lapic"
 * child property.  The device is created but not yet realized.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    /* Parent the APIC under the CPU; drop our extra reference so the
     * child property holds the only one.
     */
    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
2967
/* Realize the CPU's local APIC (if one was created) and map the APIC
 * MMIO region.  The mapping is global and done only once, for the
 * first CPU realized.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
2990
/* machine-done notifier: if the machine exposes a /machine/smram
 * region, alias it into this CPU's address space (initially disabled).
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
3005 #else
/* No APIC device exists in user-mode emulation: nothing to realize */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
3009 #endif
3010
/* Query the host CPU for its physical address width.
 * Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;
    uint32_t host_phys_bits;

    /* Check whether the extended leaf 0x80000008 exists at all */
    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax >= 0x80000008) {
        host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
        /* Note: According to AMD doc 25481 rev 2.34 they have a field
         * at 23:16 that can specify a maximum physical address bits for
         * the guest that can override this value; but I've not seen
         * anything with that set.
         */
        host_phys_bits = eax & 0xff;
    } else {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}
3036
3037 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3038 {
3039 if (*min < value) {
3040 *min = value;
3041 }
3042 }
3043
3044 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3045 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3046 {
3047 CPUX86State *env = &cpu->env;
3048 FeatureWordInfo *fi = &feature_word_info[w];
3049 uint32_t eax = fi->cpuid_eax;
3050 uint32_t region = eax & 0xF0000000;
3051
3052 if (!env->features[w]) {
3053 return;
3054 }
3055
3056 switch (region) {
3057 case 0x00000000:
3058 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3059 break;
3060 case 0x80000000:
3061 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3062 break;
3063 case 0xC0000000:
3064 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3065 break;
3066 }
3067 }
3068
3069 /* Calculate XSAVE components based on the configured CPU feature flags */
3070 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3071 {
3072 CPUX86State *env = &cpu->env;
3073 int i;
3074 uint64_t mask;
3075
3076 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3077 return;
3078 }
3079
3080 mask = 0;
3081 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3082 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3083 if (env->features[esa->feature] & esa->bits) {
3084 mask |= (1ULL << i);
3085 }
3086 }
3087
3088 env->features[FEAT_XSAVE_COMP_LO] = mask;
3089 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3090 }
3091
3092 /***** Steps involved on loading and filtering CPUID data
3093 *
3094 * When initializing and realizing a CPU object, the steps
3095 * involved in setting up CPUID data are:
3096 *
3097 * 1) Loading CPU model definition (X86CPUDefinition). This is
3098 * implemented by x86_cpu_load_def() and should be completely
3099 * transparent, as it is done automatically by instance_init.
3100 * No code should need to look at X86CPUDefinition structs
3101 * outside instance_init.
3102 *
3103 * 2) CPU expansion. This is done by realize before CPUID
3104 * filtering, and will make sure host/accelerator data is
3105 * loaded for CPU models that depend on host capabilities
3106 * (e.g. "host"). Done by x86_cpu_expand_features().
3107 *
3108 * 3) CPUID filtering. This initializes extra data related to
3109 * CPUID, and checks if the host supports all capabilities
3110 * required by the CPU. Runnability of a CPU model is
3111 * determined at this step. Done by x86_cpu_filter_features().
3112 *
3113 * Some operations don't require all steps to be performed.
3114 * More precisely:
3115 *
3116 * - CPU instance creation (instance_init) will run only CPU
3117 * model loading. CPU expansion can't run at instance_init-time
 *   because host/accelerator data may not be available yet.
3119 * - CPU realization will perform both CPU model expansion and CPUID
3120 * filtering, and return an error in case one of them fails.
3121 * - query-cpu-definitions needs to run all 3 steps. It needs
3122 * to run CPUID filtering, as the 'unavailable-features'
3123 * field is set based on the filtering results.
3124 * - The query-cpu-model-expansion QMP command only needs to run
3125 * CPU model loading and CPU expansion. It should not filter
3126 * any CPUID data based on host capabilities.
3127 */
3128
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 *
 * Order matters here: max_features overwrites the feature words first,
 * then "+feat"/"-feat" command-line overrides are applied on top, and
 * only afterwards are the CPUID levels auto-adjusted.  Reports any
 * property-set failure through @errp.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: cpu->max_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        /* "-cpu max"/"-cpu host": start from everything the accelerator
         * can provide (optionally restricted to migratable features) */
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Legacy "+feat" overrides, applied as QOM property writes */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* Legacy "-feat" overrides */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are only meaningful under KVM and when
     * the user hasn't disabled their exposure */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set
     * (UINT32_MAX is the "unset" default of the level/xlevel properties) */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
3208
3209 /*
3210 * Finishes initialization of CPUID data, filters CPU feature
3211 * words based on host availability of each feature.
3212 *
3213 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
3214 */
3215 static int x86_cpu_filter_features(X86CPU *cpu)
3216 {
3217 CPUX86State *env = &cpu->env;
3218 FeatureWord w;
3219 int rv = 0;
3220
3221 for (w = 0; w < FEATURE_WORDS; w++) {
3222 uint32_t host_feat =
3223 x86_cpu_get_supported_feature_word(w, false);
3224 uint32_t requested_features = env->features[w];
3225 env->features[w] &= host_feat;
3226 cpu->filtered_features[w] = requested_features & ~env->features[w];
3227 if (cpu->filtered_features[w]) {
3228 rv = 1;
3229 }
3230 }
3231
3232 return rv;
3233 }
3234
/* Compare the configured CPUID vendor-id words against the Intel constants */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
/* Compare the configured CPUID vendor-id words against the AMD constants */
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* Device realize: validates configuration, expands and filters CPUID
 * features, sets up phys-bits, the APIC, per-CPU address spaces and
 * finally chains to the parent class realize.  Errors are reported
 * through @errp; note the function mixes two error styles — direct
 * error_setg(errp)+return for early validation, and local_err+goto out
 * for paths that share the common propagation at the end.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;  /* warn about non-Intel SMT only once per process */

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /* Step 2 of CPUID setup (see the comment block above
     * x86_cpu_expand_features): expand features from host/accel data */
    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Step 3: drop features the accelerator can't provide; fatal only
     * with enforce_cpuid, otherwise just (optionally) reported */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       kvm_enabled() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;  /* mismatch warning is printed only once */

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                                 " does not match phys-bits property (%u)",
                                 host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is created even without the CPUID APIC bit when SMP, so
     * inter-processor interrupts keep working */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to give
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't been populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3433
3434 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3435 {
3436 X86CPU *cpu = X86_CPU(dev);
3437 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3438 Error *local_err = NULL;
3439
3440 #ifndef CONFIG_USER_ONLY
3441 cpu_remove_sync(CPU(dev));
3442 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3443 #endif
3444
3445 if (cpu->apic_state) {
3446 object_unparent(OBJECT(cpu->apic_state));
3447 cpu->apic_state = NULL;
3448 }
3449
3450 xcc->parent_unrealize(dev, &local_err);
3451 if (local_err != NULL) {
3452 error_propagate(errp, local_err);
3453 return;
3454 }
3455 }
3456
/* Opaque state for the per-feature-bit boolean QOM properties:
 * a pointer into a feature word plus the bit mask the property covers. */
typedef struct BitProperty {
    uint32_t *ptr;   /* feature word the property reads/writes */
    uint32_t mask;   /* bit(s) within *ptr covered by this property */
} BitProperty;
3461
3462 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3463 void *opaque, Error **errp)
3464 {
3465 BitProperty *fp = opaque;
3466 bool value = (*fp->ptr & fp->mask) == fp->mask;
3467 visit_type_bool(v, name, &value, errp);
3468 }
3469
3470 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3471 void *opaque, Error **errp)
3472 {
3473 DeviceState *dev = DEVICE(obj);
3474 BitProperty *fp = opaque;
3475 Error *local_err = NULL;
3476 bool value;
3477
3478 if (dev->realized) {
3479 qdev_prop_set_after_realize(dev, name, errp);
3480 return;
3481 }
3482
3483 visit_type_bool(v, name, &value, &local_err);
3484 if (local_err) {
3485 error_propagate(errp, local_err);
3486 return;
3487 }
3488
3489 if (value) {
3490 *fp->ptr |= fp->mask;
3491 } else {
3492 *fp->ptr &= ~fp->mask;
3493 }
3494 }
3495
3496 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3497 void *opaque)
3498 {
3499 BitProperty *prop = opaque;
3500 g_free(prop);
3501 }
3502
3503 /* Register a boolean property to get/set a single bit in a uint32_t field.
3504 *
3505 * The same property name can be registered multiple times to make it affect
3506 * multiple bits in the same FeatureWord. In that case, the getter will return
3507 * true only if all bits are set.
3508 */
3509 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3510 const char *prop_name,
3511 uint32_t *field,
3512 int bitnr)
3513 {
3514 BitProperty *fp;
3515 ObjectProperty *op;
3516 uint32_t mask = (1UL << bitnr);
3517
3518 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3519 if (op) {
3520 fp = op->opaque;
3521 assert(fp->ptr == field);
3522 fp->mask |= mask;
3523 } else {
3524 fp = g_new0(BitProperty, 1);
3525 fp->ptr = field;
3526 fp->mask = mask;
3527 object_property_add(OBJECT(cpu), prop_name, "bool",
3528 x86_cpu_get_bit_prop,
3529 x86_cpu_set_bit_prop,
3530 x86_cpu_release_bit_prop, fp, &error_abort);
3531 }
3532 }
3533
3534 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3535 FeatureWord w,
3536 int bitnr)
3537 {
3538 FeatureWordInfo *fi = &feature_word_info[w];
3539 const char *name = fi->feat_names[bitnr];
3540
3541 if (!name) {
3542 return;
3543 }
3544
3545 /* Property names should use "-" instead of "_".
3546 * Old names containing underscores are registered as aliases
3547 * using object_property_add_alias()
3548 */
3549 assert(!strchr(name, '_'));
3550 /* aliases don't use "|" delimiters anymore, they are registered
3551 * manually using object_property_add_alias() */
3552 assert(!strchr(name, '|'));
3553 x86_cpu_register_bit_prop(cpu, name, &cpu->env.features[w], bitnr);
3554 }
3555
3556 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
3557 {
3558 X86CPU *cpu = X86_CPU(cs);
3559 CPUX86State *env = &cpu->env;
3560 GuestPanicInformation *panic_info = NULL;
3561
3562 if (env->features[FEAT_HYPERV_EDX] & HV_X64_GUEST_CRASH_MSR_AVAILABLE) {
3563 GuestPanicInformationHyperV *panic_info_hv =
3564 g_malloc0(sizeof(GuestPanicInformationHyperV));
3565 panic_info = g_malloc0(sizeof(GuestPanicInformation));
3566
3567 panic_info->type = GUEST_PANIC_INFORMATION_KIND_HYPER_V;
3568 panic_info->u.hyper_v.data = panic_info_hv;
3569
3570 assert(HV_X64_MSR_CRASH_PARAMS >= 5);
3571 panic_info_hv->arg1 = env->msr_hv_crash_params[0];
3572 panic_info_hv->arg2 = env->msr_hv_crash_params[1];
3573 panic_info_hv->arg3 = env->msr_hv_crash_params[2];
3574 panic_info_hv->arg4 = env->msr_hv_crash_params[3];
3575 panic_info_hv->arg5 = env->msr_hv_crash_params[4];
3576 }
3577
3578 return panic_info;
3579 }
3580 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
3581 const char *name, void *opaque,
3582 Error **errp)
3583 {
3584 CPUState *cs = CPU(obj);
3585 GuestPanicInformation *panic_info;
3586
3587 if (!cs->crash_occurred) {
3588 error_setg(errp, "No crash occured");
3589 return;
3590 }
3591
3592 panic_info = x86_cpu_get_crash_info(cs);
3593 if (panic_info == NULL) {
3594 error_setg(errp, "No crash information");
3595 return;
3596 }
3597
3598 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
3599 errp);
3600 qapi_free_GuestPanicInformation(panic_info);
3601 }
3602
/* QOM instance_init: registers all per-CPU properties (versioning,
 * vendor/model-id, feature words, per-feature-bit booleans and their
 * legacy-name aliases) and loads the class's CPU model definition.
 * Step 1 of the CPUID setup sequence described above
 * x86_cpu_expand_features().
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* CPUID version fields and identification strings */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only views of the enabled and host-filtered feature words */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Alternate feature-name spellings kept for compatibility */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Legacy underscore spellings aliased to the canonical dashed names */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    /* cpu_def is NULL e.g. for the "max" model; then nothing is loaded here */
    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
3685
3686 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3687 {
3688 X86CPU *cpu = X86_CPU(cs);
3689
3690 return cpu->apic_id;
3691 }
3692
3693 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3694 {
3695 X86CPU *cpu = X86_CPU(cs);
3696
3697 return cpu->env.cr[0] & CR0_PG_MASK;
3698 }
3699
3700 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3701 {
3702 X86CPU *cpu = X86_CPU(cs);
3703
3704 cpu->env.eip = value;
3705 }
3706
3707 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3708 {
3709 X86CPU *cpu = X86_CPU(cs);
3710
3711 cpu->env.eip = tb->pc - tb->cs_base;
3712 }
3713
3714 static bool x86_cpu_has_work(CPUState *cs)
3715 {
3716 X86CPU *cpu = X86_CPU(cs);
3717 CPUX86State *env = &cpu->env;
3718
3719 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3720 CPU_INTERRUPT_POLL)) &&
3721 (env->eflags & IF_MASK)) ||
3722 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3723 CPU_INTERRUPT_INIT |
3724 CPU_INTERRUPT_SIPI |
3725 CPU_INTERRUPT_MCE)) ||
3726 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3727 !(env->hflags & HF_SMM_MASK));
3728 }
3729
/* qdev properties of the X86CPU device */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* System emulation: topology ids start unassigned (-1) and the
     * APIC id must be set before realize (checked in x86_cpu_realizefn) */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments, all off by default */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "unset"; resolved in x86_cpu_expand_features() */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_END_OF_LIST()
};
3774
/* class_init of the abstract X86 CPU base type: saves the parent class
 * hooks, installs the x86-specific Device/CPUClass callbacks and the
 * property list shared by all x86 CPU models.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Keep the parent's realize/unrealize so ours can chain to them */
    xcc->parent_realize = dc->realize;
    xcc->parent_unrealize = dc->unrealize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    /* CPU_NB_REGS * 2 = general regs + xmm regs
     * 25 = eip, eflags, 6 seg regs, st[0-7], fctrl,...,fop, mxcsr.
     */
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    /* x86 CPUs are hot-pluggable via device_add */
    dc->cannot_instantiate_with_device_add_yet = false;
}
3827
/* Abstract base QOM type for all x86 CPU models; concrete models are
 * registered per-cpudef in x86_cpu_register_types() below. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3837
3838 static void x86_cpu_register_types(void)
3839 {
3840 int i;
3841
3842 type_register_static(&x86_cpu_type_info);
3843 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3844 x86_register_cpudef_type(&builtin_x86_defs[i]);
3845 }
3846 type_register_static(&max_x86_cpu_type_info);
3847 #ifdef CONFIG_KVM
3848 type_register_static(&host_x86_cpu_type_info);
3849 #endif
3850 }
3851
3852 type_init(x86_cpu_register_types)