]> git.proxmox.com Git - mirror_qemu.git/blob - target/i386/cpu.c
i386: Make "max" model not use any host CPUID info on TCG
[mirror_qemu.git] / target / i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d


/* CPUID Leaf 4 constants: */

/* EAX: */
/* Cache type field (bits 4:0) */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

/* Cache level field (bits 7:5) */
#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * Maps a plain way count to the 4-bit field encoding; unsupported
 * counts map to 0 (invalid).
 * NOTE: function-like macro evaluates 'a' repeatedly — only pass
 * side-effect-free expressions.
 */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
95
96
/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* Level 3 unified cache:
 * L3_* is the disabled/absent L3 configuration; L3_N_* is a 16MiB
 * 16-way L3 variant (presumably selected by newer CPU models — the
 * selection logic is outside this chunk).
 */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
#define L3_N_LINE_SIZE        64
#define L3_N_ASSOCIATIVITY    16
#define L3_N_SETS          16384
#define L3_N_PARTITIONS        1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG     1
#define L3_N_SIZE_KB_AMD   16384

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
169
170
171
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
174 {
175 int i;
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
180 }
181 dst[CPUID_VENDOR_SZ] = '\0';
182 }
183
/* Cumulative CPUID feature sets of successive generic CPU generations,
 * plus the TCG_* masks listing which bits the TCG emulator implements
 * per feature word.
 */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* CPUID[1].EDX bits implemented by TCG: */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
/* CPUID[1].ECX bits implemented by TCG: */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

/* CPUID[8000_0001].EDX bits implemented by TCG; the AMD-alias bits are
 * copied from TCG_FEATURES. */
#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
249
/* Metadata describing one 32-bit CPUID feature word: the names of its
 * bits, the CPUID leaf/register it comes from, and which of its bits
 * TCG implements and which are (un)migratable.
 */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];  /* one name per bit; NULL = unnamed bit */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
265
/* Per-feature-word metadata table, indexed by FeatureWord. */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    /* VIA/Centaur extended feature word */
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = {
            /* NOTE(review): "kvmclock" intentionally appears at both bit 0
             * and bit 3 — presumably the two KVM clocksource feature bits;
             * verify against linux/kvm_para.h before changing. */
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    /* Hyper-V enlightenment words: bits have no user-visible names here,
     * only reference comments. */
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    /* XSAVE component mask words (CPUID[EAX=0xD,ECX=0].EAX/EDX);
     * no per-bit names, only migratability information. */
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};
528
/* Human-readable name and QAPI enum value for a 32-bit register. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
535
/* Expands to a designated initializer mapping R_<reg> to its name and
 * QAPI enum constant; used only to build x86_reg_info_32 below. */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* Table of the 8 GPRs, indexed by R_* constant. */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
549
/* One XSAVE state component: the CPUID feature (word + bits) that
 * enables it, and its offset/size within the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;
554
/* XSAVE state components, indexed by XSTATE_*_BIT (component number). */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
599
600 static uint32_t xsave_area_size(uint64_t mask)
601 {
602 int i;
603 uint64_t ret = 0;
604
605 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
606 const ExtSaveArea *esa = &x86_ext_save_areas[i];
607 if ((mask >> i) & 1) {
608 ret = MAX(ret, esa->offset + esa->size);
609 }
610 }
611 return ret;
612 }
613
614 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
615 {
616 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
617 cpu->env.features[FEAT_XSAVE_COMP_LO];
618 }
619
620 const char *get_register_name_32(unsigned int reg)
621 {
622 if (reg >= CPU_NB_REGS32) {
623 return NULL;
624 }
625 return x86_reg_info_32[reg].name;
626 }
627
628 /*
629 * Returns the set of feature flags that are supported and migratable by
630 * QEMU, for a given FeatureWord.
631 */
632 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
633 {
634 FeatureWordInfo *wi = &feature_word_info[w];
635 uint32_t r = 0;
636 int i;
637
638 for (i = 0; i < 32; i++) {
639 uint32_t f = 1U << i;
640
641 /* If the feature name is known, it is implicitly considered migratable,
642 * unless it is explicitly set in unmigratable_flags */
643 if ((wi->migratable_flags & f) ||
644 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
645 r |= f;
646 }
647 }
648 return r;
649 }
650
/* Execute the CPUID instruction on the host CPU with the given
 * function/count inputs and return the output registers.  Any of
 * @eax/@ebx/@ecx/@edx may be NULL if the caller does not need that
 * register.  Calls abort() on non-x86 hosts.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* NOTE(review): no register output constraints here — presumably
     * because %ebx can be reserved (e.g. PIC base) on i386; results are
     * stored through the vec pointer instead, hence the "memory"
     * clobber, and pusha/popa preserve all GPRs around cpuid. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
684
685 /* CPU class name definitions: */
686
687 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
688 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
689
690 /* Return type name for a given CPU model name
691 * Caller is responsible for freeing the returned string.
692 */
693 static char *x86_cpu_type_name(const char *model_name)
694 {
695 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
696 }
697
698 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
699 {
700 ObjectClass *oc;
701 char *typename;
702
703 if (cpu_model == NULL) {
704 return NULL;
705 }
706
707 typename = x86_cpu_type_name(cpu_model);
708 oc = object_class_by_name(typename);
709 g_free(typename);
710 return oc;
711 }
712
713 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
714 {
715 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
716 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
717 return g_strndup(class_name,
718 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
719 }
720
/* Static definition of a built-in CPU model (see builtin_x86_defs). */
struct X86CPUDefinition {
    const char *name;        /* model name, e.g. "qemu64" */
    uint32_t level;          /* maximum basic CPUID leaf */
    uint32_t xlevel;         /* maximum extended CPUID leaf (0x8000xxxx) */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;   /* per-feature-word default flags */
    char model_id[48];           /* CPUID brand string */
};
733
734 static X86CPUDefinition builtin_x86_defs[] = {
735 {
736 .name = "qemu64",
737 .level = 0xd,
738 .vendor = CPUID_VENDOR_AMD,
739 .family = 6,
740 .model = 6,
741 .stepping = 3,
742 .features[FEAT_1_EDX] =
743 PPRO_FEATURES |
744 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
745 CPUID_PSE36,
746 .features[FEAT_1_ECX] =
747 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
748 .features[FEAT_8000_0001_EDX] =
749 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
750 .features[FEAT_8000_0001_ECX] =
751 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
752 .xlevel = 0x8000000A,
753 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
754 },
755 {
756 .name = "phenom",
757 .level = 5,
758 .vendor = CPUID_VENDOR_AMD,
759 .family = 16,
760 .model = 2,
761 .stepping = 3,
762 /* Missing: CPUID_HT */
763 .features[FEAT_1_EDX] =
764 PPRO_FEATURES |
765 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
766 CPUID_PSE36 | CPUID_VME,
767 .features[FEAT_1_ECX] =
768 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
769 CPUID_EXT_POPCNT,
770 .features[FEAT_8000_0001_EDX] =
771 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
772 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
773 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
774 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
775 CPUID_EXT3_CR8LEG,
776 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
777 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
778 .features[FEAT_8000_0001_ECX] =
779 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
780 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
781 /* Missing: CPUID_SVM_LBRV */
782 .features[FEAT_SVM] =
783 CPUID_SVM_NPT,
784 .xlevel = 0x8000001A,
785 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
786 },
787 {
788 .name = "core2duo",
789 .level = 10,
790 .vendor = CPUID_VENDOR_INTEL,
791 .family = 6,
792 .model = 15,
793 .stepping = 11,
794 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
795 .features[FEAT_1_EDX] =
796 PPRO_FEATURES |
797 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
798 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
799 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
800 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
801 .features[FEAT_1_ECX] =
802 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
803 CPUID_EXT_CX16,
804 .features[FEAT_8000_0001_EDX] =
805 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
806 .features[FEAT_8000_0001_ECX] =
807 CPUID_EXT3_LAHF_LM,
808 .xlevel = 0x80000008,
809 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
810 },
811 {
812 .name = "kvm64",
813 .level = 0xd,
814 .vendor = CPUID_VENDOR_INTEL,
815 .family = 15,
816 .model = 6,
817 .stepping = 1,
818 /* Missing: CPUID_HT */
819 .features[FEAT_1_EDX] =
820 PPRO_FEATURES | CPUID_VME |
821 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
822 CPUID_PSE36,
823 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
824 .features[FEAT_1_ECX] =
825 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
826 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
827 .features[FEAT_8000_0001_EDX] =
828 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
829 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
830 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
831 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
832 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
833 .features[FEAT_8000_0001_ECX] =
834 0,
835 .xlevel = 0x80000008,
836 .model_id = "Common KVM processor"
837 },
838 {
839 .name = "qemu32",
840 .level = 4,
841 .vendor = CPUID_VENDOR_INTEL,
842 .family = 6,
843 .model = 6,
844 .stepping = 3,
845 .features[FEAT_1_EDX] =
846 PPRO_FEATURES,
847 .features[FEAT_1_ECX] =
848 CPUID_EXT_SSE3,
849 .xlevel = 0x80000004,
850 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
851 },
852 {
853 .name = "kvm32",
854 .level = 5,
855 .vendor = CPUID_VENDOR_INTEL,
856 .family = 15,
857 .model = 6,
858 .stepping = 1,
859 .features[FEAT_1_EDX] =
860 PPRO_FEATURES | CPUID_VME |
861 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
862 .features[FEAT_1_ECX] =
863 CPUID_EXT_SSE3,
864 .features[FEAT_8000_0001_ECX] =
865 0,
866 .xlevel = 0x80000008,
867 .model_id = "Common 32-bit KVM processor"
868 },
869 {
870 .name = "coreduo",
871 .level = 10,
872 .vendor = CPUID_VENDOR_INTEL,
873 .family = 6,
874 .model = 14,
875 .stepping = 8,
876 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
877 .features[FEAT_1_EDX] =
878 PPRO_FEATURES | CPUID_VME |
879 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
880 CPUID_SS,
881 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
882 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
883 .features[FEAT_1_ECX] =
884 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
885 .features[FEAT_8000_0001_EDX] =
886 CPUID_EXT2_NX,
887 .xlevel = 0x80000008,
888 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
889 },
890 {
891 .name = "486",
892 .level = 1,
893 .vendor = CPUID_VENDOR_INTEL,
894 .family = 4,
895 .model = 8,
896 .stepping = 0,
897 .features[FEAT_1_EDX] =
898 I486_FEATURES,
899 .xlevel = 0,
900 },
901 {
902 .name = "pentium",
903 .level = 1,
904 .vendor = CPUID_VENDOR_INTEL,
905 .family = 5,
906 .model = 4,
907 .stepping = 3,
908 .features[FEAT_1_EDX] =
909 PENTIUM_FEATURES,
910 .xlevel = 0,
911 },
912 {
913 .name = "pentium2",
914 .level = 2,
915 .vendor = CPUID_VENDOR_INTEL,
916 .family = 6,
917 .model = 5,
918 .stepping = 2,
919 .features[FEAT_1_EDX] =
920 PENTIUM2_FEATURES,
921 .xlevel = 0,
922 },
923 {
924 .name = "pentium3",
925 .level = 3,
926 .vendor = CPUID_VENDOR_INTEL,
927 .family = 6,
928 .model = 7,
929 .stepping = 3,
930 .features[FEAT_1_EDX] =
931 PENTIUM3_FEATURES,
932 .xlevel = 0,
933 },
934 {
935 .name = "athlon",
936 .level = 2,
937 .vendor = CPUID_VENDOR_AMD,
938 .family = 6,
939 .model = 2,
940 .stepping = 3,
941 .features[FEAT_1_EDX] =
942 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
943 CPUID_MCA,
944 .features[FEAT_8000_0001_EDX] =
945 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
946 .xlevel = 0x80000008,
947 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
948 },
949 {
950 .name = "n270",
951 .level = 10,
952 .vendor = CPUID_VENDOR_INTEL,
953 .family = 6,
954 .model = 28,
955 .stepping = 2,
956 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
957 .features[FEAT_1_EDX] =
958 PPRO_FEATURES |
959 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
960 CPUID_ACPI | CPUID_SS,
961 /* Some CPUs got no CPUID_SEP */
962 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
963 * CPUID_EXT_XTPR */
964 .features[FEAT_1_ECX] =
965 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
966 CPUID_EXT_MOVBE,
967 .features[FEAT_8000_0001_EDX] =
968 CPUID_EXT2_NX,
969 .features[FEAT_8000_0001_ECX] =
970 CPUID_EXT3_LAHF_LM,
971 .xlevel = 0x80000008,
972 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
973 },
974 {
975 .name = "Conroe",
976 .level = 10,
977 .vendor = CPUID_VENDOR_INTEL,
978 .family = 6,
979 .model = 15,
980 .stepping = 3,
981 .features[FEAT_1_EDX] =
982 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
983 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
984 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
985 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
986 CPUID_DE | CPUID_FP87,
987 .features[FEAT_1_ECX] =
988 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
989 .features[FEAT_8000_0001_EDX] =
990 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
991 .features[FEAT_8000_0001_ECX] =
992 CPUID_EXT3_LAHF_LM,
993 .xlevel = 0x80000008,
994 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
995 },
996 {
997 .name = "Penryn",
998 .level = 10,
999 .vendor = CPUID_VENDOR_INTEL,
1000 .family = 6,
1001 .model = 23,
1002 .stepping = 3,
1003 .features[FEAT_1_EDX] =
1004 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1005 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1006 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1007 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1008 CPUID_DE | CPUID_FP87,
1009 .features[FEAT_1_ECX] =
1010 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1011 CPUID_EXT_SSE3,
1012 .features[FEAT_8000_0001_EDX] =
1013 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1014 .features[FEAT_8000_0001_ECX] =
1015 CPUID_EXT3_LAHF_LM,
1016 .xlevel = 0x80000008,
1017 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1018 },
1019 {
1020 .name = "Nehalem",
1021 .level = 11,
1022 .vendor = CPUID_VENDOR_INTEL,
1023 .family = 6,
1024 .model = 26,
1025 .stepping = 3,
1026 .features[FEAT_1_EDX] =
1027 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1028 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1029 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1030 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1031 CPUID_DE | CPUID_FP87,
1032 .features[FEAT_1_ECX] =
1033 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1034 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1035 .features[FEAT_8000_0001_EDX] =
1036 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1037 .features[FEAT_8000_0001_ECX] =
1038 CPUID_EXT3_LAHF_LM,
1039 .xlevel = 0x80000008,
1040 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1041 },
1042 {
1043 .name = "Westmere",
1044 .level = 11,
1045 .vendor = CPUID_VENDOR_INTEL,
1046 .family = 6,
1047 .model = 44,
1048 .stepping = 1,
1049 .features[FEAT_1_EDX] =
1050 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1051 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1052 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1053 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1054 CPUID_DE | CPUID_FP87,
1055 .features[FEAT_1_ECX] =
1056 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1057 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1058 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1059 .features[FEAT_8000_0001_EDX] =
1060 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1061 .features[FEAT_8000_0001_ECX] =
1062 CPUID_EXT3_LAHF_LM,
1063 .features[FEAT_6_EAX] =
1064 CPUID_6_EAX_ARAT,
1065 .xlevel = 0x80000008,
1066 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1067 },
1068 {
1069 .name = "SandyBridge",
1070 .level = 0xd,
1071 .vendor = CPUID_VENDOR_INTEL,
1072 .family = 6,
1073 .model = 42,
1074 .stepping = 1,
1075 .features[FEAT_1_EDX] =
1076 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1077 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1078 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1079 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1080 CPUID_DE | CPUID_FP87,
1081 .features[FEAT_1_ECX] =
1082 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1083 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1084 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1085 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1086 CPUID_EXT_SSE3,
1087 .features[FEAT_8000_0001_EDX] =
1088 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1089 CPUID_EXT2_SYSCALL,
1090 .features[FEAT_8000_0001_ECX] =
1091 CPUID_EXT3_LAHF_LM,
1092 .features[FEAT_XSAVE] =
1093 CPUID_XSAVE_XSAVEOPT,
1094 .features[FEAT_6_EAX] =
1095 CPUID_6_EAX_ARAT,
1096 .xlevel = 0x80000008,
1097 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1098 },
1099 {
1100 .name = "IvyBridge",
1101 .level = 0xd,
1102 .vendor = CPUID_VENDOR_INTEL,
1103 .family = 6,
1104 .model = 58,
1105 .stepping = 9,
1106 .features[FEAT_1_EDX] =
1107 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1108 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1109 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1110 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1111 CPUID_DE | CPUID_FP87,
1112 .features[FEAT_1_ECX] =
1113 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1114 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1115 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1116 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1117 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1118 .features[FEAT_7_0_EBX] =
1119 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1120 CPUID_7_0_EBX_ERMS,
1121 .features[FEAT_8000_0001_EDX] =
1122 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1123 CPUID_EXT2_SYSCALL,
1124 .features[FEAT_8000_0001_ECX] =
1125 CPUID_EXT3_LAHF_LM,
1126 .features[FEAT_XSAVE] =
1127 CPUID_XSAVE_XSAVEOPT,
1128 .features[FEAT_6_EAX] =
1129 CPUID_6_EAX_ARAT,
1130 .xlevel = 0x80000008,
1131 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1132 },
1133 {
1134 .name = "Haswell-noTSX",
1135 .level = 0xd,
1136 .vendor = CPUID_VENDOR_INTEL,
1137 .family = 6,
1138 .model = 60,
1139 .stepping = 1,
1140 .features[FEAT_1_EDX] =
1141 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1142 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1143 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1144 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1145 CPUID_DE | CPUID_FP87,
1146 .features[FEAT_1_ECX] =
1147 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1148 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1149 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1150 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1151 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1152 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1153 .features[FEAT_8000_0001_EDX] =
1154 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1155 CPUID_EXT2_SYSCALL,
1156 .features[FEAT_8000_0001_ECX] =
1157 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1158 .features[FEAT_7_0_EBX] =
1159 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1160 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1161 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1162 .features[FEAT_XSAVE] =
1163 CPUID_XSAVE_XSAVEOPT,
1164 .features[FEAT_6_EAX] =
1165 CPUID_6_EAX_ARAT,
1166 .xlevel = 0x80000008,
1167 .model_id = "Intel Core Processor (Haswell, no TSX)",
1168 }, {
1169 .name = "Haswell",
1170 .level = 0xd,
1171 .vendor = CPUID_VENDOR_INTEL,
1172 .family = 6,
1173 .model = 60,
1174 .stepping = 1,
1175 .features[FEAT_1_EDX] =
1176 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1177 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1180 CPUID_DE | CPUID_FP87,
1181 .features[FEAT_1_ECX] =
1182 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1183 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1184 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1185 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1186 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1187 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1188 .features[FEAT_8000_0001_EDX] =
1189 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1190 CPUID_EXT2_SYSCALL,
1191 .features[FEAT_8000_0001_ECX] =
1192 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1193 .features[FEAT_7_0_EBX] =
1194 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1195 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1196 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1197 CPUID_7_0_EBX_RTM,
1198 .features[FEAT_XSAVE] =
1199 CPUID_XSAVE_XSAVEOPT,
1200 .features[FEAT_6_EAX] =
1201 CPUID_6_EAX_ARAT,
1202 .xlevel = 0x80000008,
1203 .model_id = "Intel Core Processor (Haswell)",
1204 },
1205 {
1206 .name = "Broadwell-noTSX",
1207 .level = 0xd,
1208 .vendor = CPUID_VENDOR_INTEL,
1209 .family = 6,
1210 .model = 61,
1211 .stepping = 2,
1212 .features[FEAT_1_EDX] =
1213 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1214 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1215 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1216 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1217 CPUID_DE | CPUID_FP87,
1218 .features[FEAT_1_ECX] =
1219 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1220 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1221 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1222 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1223 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1224 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1225 .features[FEAT_8000_0001_EDX] =
1226 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1227 CPUID_EXT2_SYSCALL,
1228 .features[FEAT_8000_0001_ECX] =
1229 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1230 .features[FEAT_7_0_EBX] =
1231 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1232 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1233 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1234 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1235 CPUID_7_0_EBX_SMAP,
1236 .features[FEAT_XSAVE] =
1237 CPUID_XSAVE_XSAVEOPT,
1238 .features[FEAT_6_EAX] =
1239 CPUID_6_EAX_ARAT,
1240 .xlevel = 0x80000008,
1241 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1242 },
1243 {
1244 .name = "Broadwell",
1245 .level = 0xd,
1246 .vendor = CPUID_VENDOR_INTEL,
1247 .family = 6,
1248 .model = 61,
1249 .stepping = 2,
1250 .features[FEAT_1_EDX] =
1251 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1252 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1253 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1254 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1255 CPUID_DE | CPUID_FP87,
1256 .features[FEAT_1_ECX] =
1257 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1258 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1259 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1260 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1261 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1262 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1263 .features[FEAT_8000_0001_EDX] =
1264 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1265 CPUID_EXT2_SYSCALL,
1266 .features[FEAT_8000_0001_ECX] =
1267 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1268 .features[FEAT_7_0_EBX] =
1269 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1270 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1271 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1272 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1273 CPUID_7_0_EBX_SMAP,
1274 .features[FEAT_XSAVE] =
1275 CPUID_XSAVE_XSAVEOPT,
1276 .features[FEAT_6_EAX] =
1277 CPUID_6_EAX_ARAT,
1278 .xlevel = 0x80000008,
1279 .model_id = "Intel Core Processor (Broadwell)",
1280 },
1281 {
1282 .name = "Skylake-Client",
1283 .level = 0xd,
1284 .vendor = CPUID_VENDOR_INTEL,
1285 .family = 6,
1286 .model = 94,
1287 .stepping = 3,
1288 .features[FEAT_1_EDX] =
1289 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1290 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1291 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1292 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1293 CPUID_DE | CPUID_FP87,
1294 .features[FEAT_1_ECX] =
1295 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1296 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1297 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1298 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1299 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1300 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1301 .features[FEAT_8000_0001_EDX] =
1302 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1303 CPUID_EXT2_SYSCALL,
1304 .features[FEAT_8000_0001_ECX] =
1305 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1306 .features[FEAT_7_0_EBX] =
1307 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1308 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1309 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1310 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1311 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1312 /* Missing: XSAVES (not supported by some Linux versions,
1313 * including v4.1 to v4.6).
1314 * KVM doesn't yet expose any XSAVES state save component,
1315 * and the only one defined in Skylake (processor tracing)
1316 * probably will block migration anyway.
1317 */
1318 .features[FEAT_XSAVE] =
1319 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1320 CPUID_XSAVE_XGETBV1,
1321 .features[FEAT_6_EAX] =
1322 CPUID_6_EAX_ARAT,
1323 .xlevel = 0x80000008,
1324 .model_id = "Intel Core Processor (Skylake)",
1325 },
1326 {
1327 .name = "Opteron_G1",
1328 .level = 5,
1329 .vendor = CPUID_VENDOR_AMD,
1330 .family = 15,
1331 .model = 6,
1332 .stepping = 1,
1333 .features[FEAT_1_EDX] =
1334 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1335 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1336 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1337 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1338 CPUID_DE | CPUID_FP87,
1339 .features[FEAT_1_ECX] =
1340 CPUID_EXT_SSE3,
1341 .features[FEAT_8000_0001_EDX] =
1342 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1343 .xlevel = 0x80000008,
1344 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1345 },
1346 {
1347 .name = "Opteron_G2",
1348 .level = 5,
1349 .vendor = CPUID_VENDOR_AMD,
1350 .family = 15,
1351 .model = 6,
1352 .stepping = 1,
1353 .features[FEAT_1_EDX] =
1354 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1355 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1356 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1357 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1358 CPUID_DE | CPUID_FP87,
1359 .features[FEAT_1_ECX] =
1360 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1361 /* Missing: CPUID_EXT2_RDTSCP */
1362 .features[FEAT_8000_0001_EDX] =
1363 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1364 .features[FEAT_8000_0001_ECX] =
1365 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1366 .xlevel = 0x80000008,
1367 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1368 },
1369 {
1370 .name = "Opteron_G3",
1371 .level = 5,
1372 .vendor = CPUID_VENDOR_AMD,
1373 .family = 16,
1374 .model = 2,
1375 .stepping = 3,
1376 .features[FEAT_1_EDX] =
1377 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1378 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1379 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1380 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1381 CPUID_DE | CPUID_FP87,
1382 .features[FEAT_1_ECX] =
1383 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1384 CPUID_EXT_SSE3,
1385 /* Missing: CPUID_EXT2_RDTSCP */
1386 .features[FEAT_8000_0001_EDX] =
1387 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1388 .features[FEAT_8000_0001_ECX] =
1389 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1390 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1391 .xlevel = 0x80000008,
1392 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1393 },
1394 {
1395 .name = "Opteron_G4",
1396 .level = 0xd,
1397 .vendor = CPUID_VENDOR_AMD,
1398 .family = 21,
1399 .model = 1,
1400 .stepping = 2,
1401 .features[FEAT_1_EDX] =
1402 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1403 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1404 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1405 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1406 CPUID_DE | CPUID_FP87,
1407 .features[FEAT_1_ECX] =
1408 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1409 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1410 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1411 CPUID_EXT_SSE3,
1412 /* Missing: CPUID_EXT2_RDTSCP */
1413 .features[FEAT_8000_0001_EDX] =
1414 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1415 CPUID_EXT2_SYSCALL,
1416 .features[FEAT_8000_0001_ECX] =
1417 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1418 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1419 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1420 CPUID_EXT3_LAHF_LM,
1421 /* no xsaveopt! */
1422 .xlevel = 0x8000001A,
1423 .model_id = "AMD Opteron 62xx class CPU",
1424 },
1425 {
1426 .name = "Opteron_G5",
1427 .level = 0xd,
1428 .vendor = CPUID_VENDOR_AMD,
1429 .family = 21,
1430 .model = 2,
1431 .stepping = 0,
1432 .features[FEAT_1_EDX] =
1433 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1434 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1435 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1436 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1437 CPUID_DE | CPUID_FP87,
1438 .features[FEAT_1_ECX] =
1439 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1440 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1441 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1442 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1443 /* Missing: CPUID_EXT2_RDTSCP */
1444 .features[FEAT_8000_0001_EDX] =
1445 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1446 CPUID_EXT2_SYSCALL,
1447 .features[FEAT_8000_0001_ECX] =
1448 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1449 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1450 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1451 CPUID_EXT3_LAHF_LM,
1452 /* no xsaveopt! */
1453 .xlevel = 0x8000001A,
1454 .model_id = "AMD Opteron 63xx class CPU",
1455 },
1456 };
1457
/* A (property name, value) pair used to apply accelerator-specific
 * defaults to CPU models; arrays of these are NULL-terminated.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1461
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL }, /* end-of-list sentinel */
};
1478
/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" }, /* NOTE(review): presumably VME is not emulated by TCG -- confirm */
    { NULL, NULL }, /* end-of-list sentinel */
};
1485
1486
1487 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1488 {
1489 PropValue *pv;
1490 for (pv = kvm_default_props; pv->prop; pv++) {
1491 if (!strcmp(pv->prop, prop)) {
1492 pv->value = value;
1493 break;
1494 }
1495 }
1496
1497 /* It is valid to call this function only for properties that
1498 * are already present in the kvm_default_props table.
1499 */
1500 assert(pv->prop);
1501 }
1502
1503 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1504 bool migratable_only);
1505
/* Query whether the host KVM reports LMCE (local machine-check exception)
 * support in its MCE capability mask.
 *
 * Returns false when the ioctl fails; without CONFIG_KVM the mask stays 0,
 * so this always returns false.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}
1518
/* Fill @str (at least 48 bytes) with the host's CPU brand string, read
 * from CPUID leaves 0x80000002..0x80000004 (EAX/EBX/ECX/EDX, 16 bytes
 * per leaf).  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4];
    int i;

    for (i = 0; i < 3; i++) {
        /* host_cpuid() fills all four registers; they are laid out
         * contiguously in EAX/EBX/ECX/EDX order, so one memcpy suffices.
         */
        host_cpuid(0x80000002 + i, 0, &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + i * 16, regs, sizeof(regs));
    }
    return 0;
}
1533
1534 static X86CPUDefinition host_cpudef;
1535
static Property max_x86_cpu_properties[] = {
    /* Drop features that would block live migration (default: on) */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* Pass the host's cache topology through to the guest (default: off) */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1541
/* class_init for the "max" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    xcc->ordering = 9;

    /* Vendor string comes from host CPUID leaf 0 (EBX/EDX/ECX order).
     * NOTE(review): host CPUID is consulted here unconditionally, i.e.
     * even when running under TCG -- confirm this is intended.
     */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* Family/model/stepping from host CPUID leaf 1 EAX, folding in the
     * extended family (bits 27:20) and extended model (bits 19:16) fields.
     */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;
    xcc->model_description =
        "Enables all features supported by the accelerator in the current host";

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = max_x86_cpu_properties;
}
1574
/* instance_init for the "max" CPU model: pick up accelerator-provided
 * CPUID limits (KVM) or fixed TCG identification, and enable max_features
 * so all supported feature flags get turned on at realize time.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (kvm_enabled()) {
        /* Minimum CPUID levels come straight from what KVM supports */
        env->cpuid_min_level =
            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_min_xlevel =
            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_min_xlevel2 =
            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: fixed AMD-vendor family 6, model 6, stepping 3 identity
         * instead of KVM-derived values
         */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1610
/* QOM registration for the "max" CPU model */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
1617
1618 #ifdef CONFIG_KVM
1619
/* class_init for the "host" CPU model (only available with KVM) */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* "host" requires KVM; realize fails without it */
    xcc->kvm_required = true;
    xcc->ordering = 8;

    xcc->model_description =
        "KVM processor with all supported host features "
        "(only available in KVM mode)";
}
1631
/* QOM registration for the "host" CPU model; inherits instance_init and
 * feature handling from the "max" CPU type.
 */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
1637
1638 #endif
1639
1640 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1641 {
1642 FeatureWordInfo *f = &feature_word_info[w];
1643 int i;
1644
1645 for (i = 0; i < 32; ++i) {
1646 if ((1UL << i) & mask) {
1647 const char *reg = get_register_name_32(f->cpuid_reg);
1648 assert(reg);
1649 fprintf(stderr, "warning: %s doesn't support requested feature: "
1650 "CPUID.%02XH:%s%s%s [bit %d]\n",
1651 kvm_enabled() ? "host" : "TCG",
1652 f->cpuid_eax, reg,
1653 f->feat_names[i] ? "." : "",
1654 f->feat_names[i] ? f->feat_names[i] : "", i);
1655 }
1656 }
1657 }
1658
1659 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1660 const char *name, void *opaque,
1661 Error **errp)
1662 {
1663 X86CPU *cpu = X86_CPU(obj);
1664 CPUX86State *env = &cpu->env;
1665 int64_t value;
1666
1667 value = (env->cpuid_version >> 8) & 0xf;
1668 if (value == 0xf) {
1669 value += (env->cpuid_version >> 20) & 0xff;
1670 }
1671 visit_type_int(v, name, &value, errp);
1672 }
1673
1674 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1675 const char *name, void *opaque,
1676 Error **errp)
1677 {
1678 X86CPU *cpu = X86_CPU(obj);
1679 CPUX86State *env = &cpu->env;
1680 const int64_t min = 0;
1681 const int64_t max = 0xff + 0xf;
1682 Error *local_err = NULL;
1683 int64_t value;
1684
1685 visit_type_int(v, name, &value, &local_err);
1686 if (local_err) {
1687 error_propagate(errp, local_err);
1688 return;
1689 }
1690 if (value < min || value > max) {
1691 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1692 name ? name : "null", value, min, max);
1693 return;
1694 }
1695
1696 env->cpuid_version &= ~0xff00f00;
1697 if (value > 0x0f) {
1698 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1699 } else {
1700 env->cpuid_version |= value << 8;
1701 }
1702 }
1703
1704 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1705 const char *name, void *opaque,
1706 Error **errp)
1707 {
1708 X86CPU *cpu = X86_CPU(obj);
1709 CPUX86State *env = &cpu->env;
1710 int64_t value;
1711
1712 value = (env->cpuid_version >> 4) & 0xf;
1713 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1714 visit_type_int(v, name, &value, errp);
1715 }
1716
1717 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1718 const char *name, void *opaque,
1719 Error **errp)
1720 {
1721 X86CPU *cpu = X86_CPU(obj);
1722 CPUX86State *env = &cpu->env;
1723 const int64_t min = 0;
1724 const int64_t max = 0xff;
1725 Error *local_err = NULL;
1726 int64_t value;
1727
1728 visit_type_int(v, name, &value, &local_err);
1729 if (local_err) {
1730 error_propagate(errp, local_err);
1731 return;
1732 }
1733 if (value < min || value > max) {
1734 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1735 name ? name : "null", value, min, max);
1736 return;
1737 }
1738
1739 env->cpuid_version &= ~0xf00f0;
1740 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1741 }
1742
1743 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1744 const char *name, void *opaque,
1745 Error **errp)
1746 {
1747 X86CPU *cpu = X86_CPU(obj);
1748 CPUX86State *env = &cpu->env;
1749 int64_t value;
1750
1751 value = env->cpuid_version & 0xf;
1752 visit_type_int(v, name, &value, errp);
1753 }
1754
1755 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1756 const char *name, void *opaque,
1757 Error **errp)
1758 {
1759 X86CPU *cpu = X86_CPU(obj);
1760 CPUX86State *env = &cpu->env;
1761 const int64_t min = 0;
1762 const int64_t max = 0xf;
1763 Error *local_err = NULL;
1764 int64_t value;
1765
1766 visit_type_int(v, name, &value, &local_err);
1767 if (local_err) {
1768 error_propagate(errp, local_err);
1769 return;
1770 }
1771 if (value < min || value > max) {
1772 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1773 name ? name : "null", value, min, max);
1774 return;
1775 }
1776
1777 env->cpuid_version &= ~0xf;
1778 env->cpuid_version |= value & 0xf;
1779 }
1780
1781 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1782 {
1783 X86CPU *cpu = X86_CPU(obj);
1784 CPUX86State *env = &cpu->env;
1785 char *value;
1786
1787 value = g_malloc(CPUID_VENDOR_SZ + 1);
1788 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1789 env->cpuid_vendor3);
1790 return value;
1791 }
1792
1793 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1794 Error **errp)
1795 {
1796 X86CPU *cpu = X86_CPU(obj);
1797 CPUX86State *env = &cpu->env;
1798 int i;
1799
1800 if (strlen(value) != CPUID_VENDOR_SZ) {
1801 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1802 return;
1803 }
1804
1805 env->cpuid_vendor1 = 0;
1806 env->cpuid_vendor2 = 0;
1807 env->cpuid_vendor3 = 0;
1808 for (i = 0; i < 4; i++) {
1809 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1810 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1811 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1812 }
1813 }
1814
1815 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1816 {
1817 X86CPU *cpu = X86_CPU(obj);
1818 CPUX86State *env = &cpu->env;
1819 char *value;
1820 int i;
1821
1822 value = g_malloc(48 + 1);
1823 for (i = 0; i < 48; i++) {
1824 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1825 }
1826 value[48] = '\0';
1827 return value;
1828 }
1829
1830 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1831 Error **errp)
1832 {
1833 X86CPU *cpu = X86_CPU(obj);
1834 CPUX86State *env = &cpu->env;
1835 int c, len, i;
1836
1837 if (model_id == NULL) {
1838 model_id = "";
1839 }
1840 len = strlen(model_id);
1841 memset(env->cpuid_model, 0, 48);
1842 for (i = 0; i < 48; i++) {
1843 if (i >= len) {
1844 c = '\0';
1845 } else {
1846 c = (uint8_t)model_id[i];
1847 }
1848 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1849 }
1850 }
1851
1852 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1853 void *opaque, Error **errp)
1854 {
1855 X86CPU *cpu = X86_CPU(obj);
1856 int64_t value;
1857
1858 value = cpu->env.tsc_khz * 1000;
1859 visit_type_int(v, name, &value, errp);
1860 }
1861
1862 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1863 void *opaque, Error **errp)
1864 {
1865 X86CPU *cpu = X86_CPU(obj);
1866 const int64_t min = 0;
1867 const int64_t max = INT64_MAX;
1868 Error *local_err = NULL;
1869 int64_t value;
1870
1871 visit_type_int(v, name, &value, &local_err);
1872 if (local_err) {
1873 error_propagate(errp, local_err);
1874 return;
1875 }
1876 if (value < min || value > max) {
1877 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1878 name ? name : "null", value, min, max);
1879 return;
1880 }
1881
1882 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1883 }
1884
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* @opaque points at a per-FeatureWord array of 32-bit masks */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    /* Info structs and list nodes live on the stack; they only need to
     * survive until visit_type_X86CPUFeatureWordInfoList() returns.
     */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
1913
1914 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1915 void *opaque, Error **errp)
1916 {
1917 X86CPU *cpu = X86_CPU(obj);
1918 int64_t value = cpu->hyperv_spinlock_attempts;
1919
1920 visit_type_int(v, name, &value, errp);
1921 }
1922
1923 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1924 void *opaque, Error **errp)
1925 {
1926 const int64_t min = 0xFFF;
1927 const int64_t max = UINT_MAX;
1928 X86CPU *cpu = X86_CPU(obj);
1929 Error *err = NULL;
1930 int64_t value;
1931
1932 visit_type_int(v, name, &value, &err);
1933 if (err) {
1934 error_propagate(errp, err);
1935 return;
1936 }
1937
1938 if (value < min || value > max) {
1939 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1940 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1941 object_get_typename(obj), name ? name : "null",
1942 value, min, max);
1943 return;
1944 }
1945 cpu->hyperv_spinlock_attempts = value;
1946 }
1947
/* qdev property type for "hv-spinlocks": an int with range checking */
static PropertyInfo qdev_prop_spinlocks = {
    .name  = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1953
/* Replace every '_' in a feature string option name with '-', so the
 * name conforms to the QOM property naming rule ('-' separators).
 * Modifies @s in place.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = strchr(s, '_'); p != NULL; p = strchr(p + 1, '_')) {
        *p = '-';
    }
}
1963
1964 /* Return the feature property name for a feature flag bit */
1965 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
1966 {
1967 /* XSAVE components are automatically enabled by other features,
1968 * so return the original feature name instead
1969 */
1970 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
1971 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
1972
1973 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
1974 x86_ext_save_areas[comp].bits) {
1975 w = x86_ext_save_areas[comp].feature;
1976 bitnr = ctz32(x86_ext_save_areas[comp].bits);
1977 }
1978 }
1979
1980 assert(bitnr < 32);
1981 assert(w < FEATURE_WORDS);
1982 return feature_word_info[w].feat_names[bitnr];
1983 }
1984
1985 /* Compatibily hack to maintain legacy +-feat semantic,
1986 * where +-feat overwrites any feature set by
1987 * feat=on|feat even if the later is parsed after +-feat
1988 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
1989 */
1990 static GList *plus_features, *minus_features;
1991
1992 static gint compare_string(gconstpointer a, gconstpointer b)
1993 {
1994 return g_strcmp0(a, b);
1995 }
1996
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Each "feature=value" entry is registered as a global property on
 * @typename, so it applies to every CPU of that type.  Legacy "+feat"
 * and "-feat" entries are only collected here; their semantics are
 * applied elsewhere.  Only the first call has any effect.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;  /* ensure one-shot parsing */
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    /* Destructively tokenize the comma-separated list.  strtok()'s
     * static state is acceptable because this function runs at most
     * once (guard above).
     */
    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "name=value"; a bare "name" means "name=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature also appeared as +feat/-feat,
         * because the legacy syntax overrides this assignment.
         */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"+%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"-%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" takes a metric-suffixed value and is
         * converted to the integer "tsc-frequency" property.
         */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;  /* failure to apply is fatal */
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        error_report("warning: Compatibility of ambiguous CPU model "
                     "strings won't be kept on future QEMU versions");
    }
}
2087
2088 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2089 static int x86_cpu_filter_features(X86CPU *cpu);
2090
2091 /* Check for missing features that may prevent the CPU class from
2092 * running using the current machine and accelerator.
2093 */
2094 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2095 strList **missing_feats)
2096 {
2097 X86CPU *xc;
2098 FeatureWord w;
2099 Error *err = NULL;
2100 strList **next = missing_feats;
2101
2102 if (xcc->kvm_required && !kvm_enabled()) {
2103 strList *new = g_new0(strList, 1);
2104 new->value = g_strdup("kvm");;
2105 *missing_feats = new;
2106 return;
2107 }
2108
2109 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2110
2111 x86_cpu_expand_features(xc, &err);
2112 if (err) {
2113 /* Errors at x86_cpu_expand_features should never happen,
2114 * but in case it does, just report the model as not
2115 * runnable at all using the "type" property.
2116 */
2117 strList *new = g_new0(strList, 1);
2118 new->value = g_strdup("type");
2119 *next = new;
2120 next = &new->next;
2121 }
2122
2123 x86_cpu_filter_features(xc);
2124
2125 for (w = 0; w < FEATURE_WORDS; w++) {
2126 uint32_t filtered = xc->filtered_features[w];
2127 int i;
2128 for (i = 0; i < 32; i++) {
2129 if (filtered & (1UL << i)) {
2130 strList *new = g_new0(strList, 1);
2131 new->value = g_strdup(x86_cpu_feature_name(w, i));
2132 *next = new;
2133 next = &new->next;
2134 }
2135 }
2136 }
2137
2138 object_unref(OBJECT(xc));
2139 }
2140
2141 /* Print all cpuid feature names in featureset
2142 */
2143 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2144 {
2145 int bit;
2146 bool first = true;
2147
2148 for (bit = 0; bit < 32; bit++) {
2149 if (featureset[bit]) {
2150 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2151 first = false;
2152 }
2153 }
2154 }
2155
2156 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2157 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2158 {
2159 ObjectClass *class_a = (ObjectClass *)a;
2160 ObjectClass *class_b = (ObjectClass *)b;
2161 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2162 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2163 const char *name_a, *name_b;
2164
2165 if (cc_a->ordering != cc_b->ordering) {
2166 return cc_a->ordering - cc_b->ordering;
2167 } else {
2168 name_a = object_class_get_name(class_a);
2169 name_b = object_class_get_name(class_b);
2170 return strcmp(name_a, name_b);
2171 }
2172 }
2173
2174 static GSList *get_sorted_cpu_model_list(void)
2175 {
2176 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2177 list = g_slist_sort(list, x86_cpu_list_compare);
2178 return list;
2179 }
2180
2181 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2182 {
2183 ObjectClass *oc = data;
2184 X86CPUClass *cc = X86_CPU_CLASS(oc);
2185 CPUListState *s = user_data;
2186 char *name = x86_cpu_class_get_model_name(cc);
2187 const char *desc = cc->model_description;
2188 if (!desc) {
2189 desc = cc->cpu_def->model_id;
2190 }
2191
2192 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2193 name, desc);
2194 g_free(name);
2195 }
2196
2197 /* list available CPU models and flags */
2198 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2199 {
2200 int i;
2201 CPUListState s = {
2202 .file = f,
2203 .cpu_fprintf = cpu_fprintf,
2204 };
2205 GSList *list;
2206
2207 (*cpu_fprintf)(f, "Available CPUs:\n");
2208 list = get_sorted_cpu_model_list();
2209 g_slist_foreach(list, x86_cpu_list_entry, &s);
2210 g_slist_free(list);
2211
2212 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2213 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2214 FeatureWordInfo *fw = &feature_word_info[i];
2215
2216 (*cpu_fprintf)(f, " ");
2217 listflags(f, cpu_fprintf, fw->feat_names);
2218 (*cpu_fprintf)(f, "\n");
2219 }
2220 }
2221
2222 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2223 {
2224 ObjectClass *oc = data;
2225 X86CPUClass *cc = X86_CPU_CLASS(oc);
2226 CpuDefinitionInfoList **cpu_list = user_data;
2227 CpuDefinitionInfoList *entry;
2228 CpuDefinitionInfo *info;
2229
2230 info = g_malloc0(sizeof(*info));
2231 info->name = x86_cpu_class_get_model_name(cc);
2232 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2233 info->has_unavailable_features = true;
2234 info->q_typename = g_strdup(object_class_get_name(oc));
2235 info->migration_safe = cc->migration_safe;
2236 info->has_migration_safe = true;
2237
2238 entry = g_malloc0(sizeof(*entry));
2239 entry->value = info;
2240 entry->next = *cpu_list;
2241 *cpu_list = entry;
2242 }
2243
2244 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2245 {
2246 CpuDefinitionInfoList *cpu_list = NULL;
2247 GSList *list = get_sorted_cpu_model_list();
2248 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2249 g_slist_free(list);
2250 return cpu_list;
2251 }
2252
2253 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2254 bool migratable_only)
2255 {
2256 FeatureWordInfo *wi = &feature_word_info[w];
2257 uint32_t r;
2258
2259 if (kvm_enabled()) {
2260 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2261 wi->cpuid_ecx,
2262 wi->cpuid_reg);
2263 } else if (tcg_enabled()) {
2264 r = wi->tcg_features;
2265 } else {
2266 return ~0;
2267 }
2268 if (migratable_only) {
2269 r &= x86_cpu_get_migratable_flags(w);
2270 }
2271 return r;
2272 }
2273
2274 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2275 {
2276 FeatureWord w;
2277
2278 for (w = 0; w < FEATURE_WORDS; w++) {
2279 report_unavailable_features(w, cpu->filtered_features[w]);
2280 }
2281 }
2282
2283 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2284 {
2285 PropValue *pv;
2286 for (pv = props; pv->prop; pv++) {
2287 if (!pv->value) {
2288 continue;
2289 }
2290 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2291 &error_abort);
2292 }
2293 }
2294
/* Load data from X86CPUDefinition
 *
 * Copies the static CPU model definition into the CPU object's QOM
 * properties and feature-word array, then applies accelerator-specific
 * overrides.  Runs before user-supplied -cpu options, so those can
 * still override everything set here.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            /* userspace irqchip can't support x2apic */
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise that the guest runs under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        /* Under KVM, default to the host's vendor string */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2347
2348 X86CPU *cpu_x86_init(const char *cpu_model)
2349 {
2350 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2351 }
2352
/* Class init for model-specific CPU types: attach the static model
 * definition carried in class_data.  Models registered this way are
 * considered migration-safe.
 */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
    xcc->migration_safe = true;
}
2361
2362 static void x86_register_cpudef_type(X86CPUDefinition *def)
2363 {
2364 char *typename = x86_cpu_type_name(def->name);
2365 TypeInfo ti = {
2366 .name = typename,
2367 .parent = TYPE_X86_CPU,
2368 .class_init = x86_cpu_cpudef_class_init,
2369 .class_data = def,
2370 };
2371
2372 /* AMD aliases are handled at runtime based on CPUID vendor, so
2373 * they shouldn't be set on the CPU model table.
2374 */
2375 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
2376
2377 type_register(&ti);
2378 g_free(typename);
2379 }
2380
2381 #if !defined(CONFIG_USER_ONLY)
2382
/* Remove the APIC bit from the advertised CPUID[1].EDX features */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2387
2388 #endif /* !CONFIG_USER_ONLY */
2389
/* Emulate the CPUID instruction: fill *eax/*ebx/*ecx/*edx for leaf
 * @index and sub-leaf @count, based on the configured feature words,
 * cache model and CPU topology in @env.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Maximum basic leaf and vendor string */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, APIC ID, and feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects guest CR4, not a static feature bit */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE reflects guest CR4.PKE, not a static feature bit */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component size/offset sub-leaves */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        /* Maximum extended leaf and vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if  (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM leaf, only populated when SVM is advertised */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2787
/* CPUClass::reset()
 *
 * Bring the vCPU to its power-on/RESET state: real mode at the reset
 * vector, all feature-dependent control state cleared.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero everything up to end_reset_fields; later fields persist */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 reset value 0x60000010: CD | NW | ET set, PE clear */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + EIP 0xfff0 puts execution at the
     * architectural reset vector 0xfffffff0.
     */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init; tag all registers as empty */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);  /* x87 control word reset value */

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2909
2910 #ifndef CONFIG_USER_ONLY
2911 bool cpu_is_bsp(X86CPU *cpu)
2912 {
2913 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2914 }
2915
2916 /* TODO: remove me, when reset over QOM tree is implemented */
2917 static void x86_cpu_machine_reset_cb(void *opaque)
2918 {
2919 X86CPU *cpu = opaque;
2920 cpu_reset(CPU(cpu));
2921 }
2922 #endif
2923
2924 static void mce_init(X86CPU *cpu)
2925 {
2926 CPUX86State *cenv = &cpu->env;
2927 unsigned int bank;
2928
2929 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2930 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2931 (CPUID_MCE | CPUID_MCA)) {
2932 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2933 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2934 cenv->mcg_ctl = ~(uint64_t)0;
2935 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2936 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2937 }
2938 }
2939 }
2940
2941 #ifndef CONFIG_USER_ONLY
2942 APICCommonClass *apic_get_class(void)
2943 {
2944 const char *apic_type = "apic";
2945
2946 if (kvm_apic_in_kernel()) {
2947 apic_type = "kvm-apic";
2948 } else if (xen_enabled()) {
2949 apic_type = "xen-apic";
2950 }
2951
2952 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
2953 }
2954
/* Create (but do not realize) the local APIC device for @cpu */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    /* The child property keeps the APIC alive; drop the extra
     * reference held since object_new().
     */
    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
2972
/* Realize the CPU's APIC device (if one was created) and map its MMIO
 * region.  The region is shared, so it is mapped only once for all
 * CPUs (tracked by the static flag below).
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;  /* map the shared region only once */

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
2995
/* machine-done notifier: if the machine exposes /machine/smram, create
 * a per-CPU alias of it (initially disabled) inside the CPU's address
 * space root, overlapping normal memory with higher priority.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
3010 #else
/* User-mode emulation has no APIC device: nothing to realize */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
3014 #endif
3015
3016 /* Note: Only safe for use on x86(-64) hosts */
3017 static uint32_t x86_host_phys_bits(void)
3018 {
3019 uint32_t eax;
3020 uint32_t host_phys_bits;
3021
3022 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3023 if (eax >= 0x80000008) {
3024 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3025 /* Note: According to AMD doc 25481 rev 2.34 they have a field
3026 * at 23:16 that can specify a maximum physical address bits for
3027 * the guest that can override this value; but I've not seen
3028 * anything with that set.
3029 */
3030 host_phys_bits = eax & 0xff;
3031 } else {
3032 /* It's an odd 64 bit machine that doesn't have the leaf for
3033 * physical address bits; fall back to 36 that's most older
3034 * Intel.
3035 */
3036 host_phys_bits = 36;
3037 }
3038
3039 return host_phys_bits;
3040 }
3041
3042 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3043 {
3044 if (*min < value) {
3045 *min = value;
3046 }
3047 }
3048
3049 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3050 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3051 {
3052 CPUX86State *env = &cpu->env;
3053 FeatureWordInfo *fi = &feature_word_info[w];
3054 uint32_t eax = fi->cpuid_eax;
3055 uint32_t region = eax & 0xF0000000;
3056
3057 if (!env->features[w]) {
3058 return;
3059 }
3060
3061 switch (region) {
3062 case 0x00000000:
3063 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3064 break;
3065 case 0x80000000:
3066 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3067 break;
3068 case 0xC0000000:
3069 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3070 break;
3071 }
3072 }
3073
3074 /* Calculate XSAVE components based on the configured CPU feature flags */
3075 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3076 {
3077 CPUX86State *env = &cpu->env;
3078 int i;
3079 uint64_t mask;
3080
3081 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3082 return;
3083 }
3084
3085 mask = 0;
3086 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3087 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3088 if (env->features[esa->feature] & esa->bits) {
3089 mask |= (1ULL << i);
3090 }
3091 }
3092
3093 env->features[FEAT_XSAVE_COMP_LO] = mask;
3094 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3095 }
3096
3097 /***** Steps involved on loading and filtering CPUID data
3098 *
3099 * When initializing and realizing a CPU object, the steps
3100 * involved in setting up CPUID data are:
3101 *
3102 * 1) Loading CPU model definition (X86CPUDefinition). This is
3103 * implemented by x86_cpu_load_def() and should be completely
3104 * transparent, as it is done automatically by instance_init.
3105 * No code should need to look at X86CPUDefinition structs
3106 * outside instance_init.
3107 *
3108 * 2) CPU expansion. This is done by realize before CPUID
3109 * filtering, and will make sure host/accelerator data is
3110 * loaded for CPU models that depend on host capabilities
3111 * (e.g. "host"). Done by x86_cpu_expand_features().
3112 *
3113 * 3) CPUID filtering. This initializes extra data related to
3114 * CPUID, and checks if the host supports all capabilities
3115 * required by the CPU. Runnability of a CPU model is
3116 * determined at this step. Done by x86_cpu_filter_features().
3117 *
3118 * Some operations don't require all steps to be performed.
3119 * More precisely:
3120 *
3121 * - CPU instance creation (instance_init) will run only CPU
3122 * model loading. CPU expansion can't run at instance_init-time
3123 * because host/accelerator data may be not available yet.
3124 * - CPU realization will perform both CPU model expansion and CPUID
3125 * filtering, and return an error in case one of them fails.
3126 * - query-cpu-definitions needs to run all 3 steps. It needs
3127 * to run CPUID filtering, as the 'unavailable-features'
3128 * field is set based on the filtering results.
3129 * - The query-cpu-model-expansion QMP command only needs to run
3130 * CPU model loading and CPU expansion. It should not filter
3131 * any CPUID data based on host capabilities.
3132 */
3133
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate (step 2 of the
 * CPUID setup sequence described above). On error, *errp is set and
 * the CPU should not be realized.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: cpu->max_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        /* "max" model: start from everything the accelerator supports */
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Apply "+feat" requests as boolean property writes */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* Apply "-feat" requests; these run after the "+feat" pass */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are only meaningful under KVM and when
     * the user has not disabled them with kvm=off.
     */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set
     * (UINT32_MAX is the property default meaning "auto").
     */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
3213
3214 /*
3215 * Finishes initialization of CPUID data, filters CPU feature
3216 * words based on host availability of each feature.
3217 *
3218 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
3219 */
3220 static int x86_cpu_filter_features(X86CPU *cpu)
3221 {
3222 CPUX86State *env = &cpu->env;
3223 FeatureWord w;
3224 int rv = 0;
3225
3226 for (w = 0; w < FEATURE_WORDS; w++) {
3227 uint32_t host_feat =
3228 x86_cpu_get_supported_feature_word(w, false);
3229 uint32_t requested_features = env->features[w];
3230 env->features[w] &= host_feat;
3231 cpu->filtered_features[w] = requested_features & ~env->features[w];
3232 if (cpu->filtered_features[w]) {
3233 rv = 1;
3234 }
3235 }
3236
3237 return rv;
3238 }
3239
/* Vendor checks: compare the CPU's three cached vendor-id words
 * against the Intel/AMD signature constants.
 */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU: expands and filters CPUID data,
 * validates phys-bits, creates the APIC and per-CPU address spaces,
 * then chains to the parent realize. Errors are reported via *errp.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;  /* warn about AMD+SMT only once per process */

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    /* The board code is expected to have assigned an APIC ID by now */
    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /* Step 2 of CPUID setup: expand features (see comment block above) */
    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Step 3: filter against accelerator capabilities; complain (and,
     * with enforce, fail) when requested features were dropped.
     */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       kvm_enabled() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
                                              & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;  /* warn about a mismatch only once */

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                                 " does not match phys-bits property (%u)",
                                 host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            /* TCG has a single hard-coded physical address width */
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        /* PSE36 extends addressing to 36 bits even without long mode */
        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC device is needed when the CPU advertises one, or with SMP */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        /* Give each TCG vCPU its own address space so SMRAM can be
         * mapped per-CPU (see x86_cpu_machine_done).
         */
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3438
3439 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3440 {
3441 X86CPU *cpu = X86_CPU(dev);
3442 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3443 Error *local_err = NULL;
3444
3445 #ifndef CONFIG_USER_ONLY
3446 cpu_remove_sync(CPU(dev));
3447 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3448 #endif
3449
3450 if (cpu->apic_state) {
3451 object_unparent(OBJECT(cpu->apic_state));
3452 cpu->apic_state = NULL;
3453 }
3454
3455 xcc->parent_unrealize(dev, &local_err);
3456 if (local_err != NULL) {
3457 error_propagate(errp, local_err);
3458 return;
3459 }
3460 }
3461
/* Backing state for the synthetic boolean properties registered by
 * x86_cpu_register_bit_prop(): a pointer into a feature-word field plus
 * the mask of bit(s) the property controls.
 */
typedef struct BitProperty {
    uint32_t *ptr;   /* location of the feature word */
    uint32_t mask;   /* bit(s) within *ptr toggled by this property */
} BitProperty;
3466
3467 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3468 void *opaque, Error **errp)
3469 {
3470 BitProperty *fp = opaque;
3471 bool value = (*fp->ptr & fp->mask) == fp->mask;
3472 visit_type_bool(v, name, &value, errp);
3473 }
3474
3475 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3476 void *opaque, Error **errp)
3477 {
3478 DeviceState *dev = DEVICE(obj);
3479 BitProperty *fp = opaque;
3480 Error *local_err = NULL;
3481 bool value;
3482
3483 if (dev->realized) {
3484 qdev_prop_set_after_realize(dev, name, errp);
3485 return;
3486 }
3487
3488 visit_type_bool(v, name, &value, &local_err);
3489 if (local_err) {
3490 error_propagate(errp, local_err);
3491 return;
3492 }
3493
3494 if (value) {
3495 *fp->ptr |= fp->mask;
3496 } else {
3497 *fp->ptr &= ~fp->mask;
3498 }
3499 }
3500
3501 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3502 void *opaque)
3503 {
3504 BitProperty *prop = opaque;
3505 g_free(prop);
3506 }
3507
3508 /* Register a boolean property to get/set a single bit in a uint32_t field.
3509 *
3510 * The same property name can be registered multiple times to make it affect
3511 * multiple bits in the same FeatureWord. In that case, the getter will return
3512 * true only if all bits are set.
3513 */
3514 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3515 const char *prop_name,
3516 uint32_t *field,
3517 int bitnr)
3518 {
3519 BitProperty *fp;
3520 ObjectProperty *op;
3521 uint32_t mask = (1UL << bitnr);
3522
3523 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3524 if (op) {
3525 fp = op->opaque;
3526 assert(fp->ptr == field);
3527 fp->mask |= mask;
3528 } else {
3529 fp = g_new0(BitProperty, 1);
3530 fp->ptr = field;
3531 fp->mask = mask;
3532 object_property_add(OBJECT(cpu), prop_name, "bool",
3533 x86_cpu_get_bit_prop,
3534 x86_cpu_set_bit_prop,
3535 x86_cpu_release_bit_prop, fp, &error_abort);
3536 }
3537 }
3538
3539 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3540 FeatureWord w,
3541 int bitnr)
3542 {
3543 FeatureWordInfo *fi = &feature_word_info[w];
3544 const char *name = fi->feat_names[bitnr];
3545
3546 if (!name) {
3547 return;
3548 }
3549
3550 /* Property names should use "-" instead of "_".
3551 * Old names containing underscores are registered as aliases
3552 * using object_property_add_alias()
3553 */
3554 assert(!strchr(name, '_'));
3555 /* aliases don't use "|" delimiters anymore, they are registered
3556 * manually using object_property_add_alias() */
3557 assert(!strchr(name, '|'));
3558 x86_cpu_register_bit_prop(cpu, name, &cpu->env.features[w], bitnr);
3559 }
3560
3561 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
3562 {
3563 X86CPU *cpu = X86_CPU(cs);
3564 CPUX86State *env = &cpu->env;
3565 GuestPanicInformation *panic_info = NULL;
3566
3567 if (env->features[FEAT_HYPERV_EDX] & HV_X64_GUEST_CRASH_MSR_AVAILABLE) {
3568 GuestPanicInformationHyperV *panic_info_hv =
3569 g_malloc0(sizeof(GuestPanicInformationHyperV));
3570 panic_info = g_malloc0(sizeof(GuestPanicInformation));
3571
3572 panic_info->type = GUEST_PANIC_INFORMATION_KIND_HYPER_V;
3573 panic_info->u.hyper_v.data = panic_info_hv;
3574
3575 assert(HV_X64_MSR_CRASH_PARAMS >= 5);
3576 panic_info_hv->arg1 = env->msr_hv_crash_params[0];
3577 panic_info_hv->arg2 = env->msr_hv_crash_params[1];
3578 panic_info_hv->arg3 = env->msr_hv_crash_params[2];
3579 panic_info_hv->arg4 = env->msr_hv_crash_params[3];
3580 panic_info_hv->arg5 = env->msr_hv_crash_params[4];
3581 }
3582
3583 return panic_info;
3584 }
3585 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
3586 const char *name, void *opaque,
3587 Error **errp)
3588 {
3589 CPUState *cs = CPU(obj);
3590 GuestPanicInformation *panic_info;
3591
3592 if (!cs->crash_occurred) {
3593 error_setg(errp, "No crash occured");
3594 return;
3595 }
3596
3597 panic_info = x86_cpu_get_crash_info(cs);
3598 if (panic_info == NULL) {
3599 error_setg(errp, "No crash information");
3600 return;
3601 }
3602
3603 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
3604 errp);
3605 qapi_free_GuestPanicInformation(panic_info);
3606 }
3607
/* Instance init for X86CPU: registers the QOM properties (versions,
 * vendor, feature words, per-bit feature flags and their legacy-name
 * aliases) and loads the class's CPU model definition (step 1 of the
 * CPUID setup sequence described above).
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* CPUID version fields and identification strings */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only views of the enabled and host-filtered feature words */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Aliases for alternative feature spellings */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Aliases for legacy underscore-containing property names */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3688
3689 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3690 {
3691 X86CPU *cpu = X86_CPU(cs);
3692
3693 return cpu->apic_id;
3694 }
3695
3696 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3697 {
3698 X86CPU *cpu = X86_CPU(cs);
3699
3700 return cpu->env.cr[0] & CR0_PG_MASK;
3701 }
3702
3703 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3704 {
3705 X86CPU *cpu = X86_CPU(cs);
3706
3707 cpu->env.eip = value;
3708 }
3709
3710 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3711 {
3712 X86CPU *cpu = X86_CPU(cs);
3713
3714 cpu->env.eip = tb->pc - tb->cs_base;
3715 }
3716
3717 static bool x86_cpu_has_work(CPUState *cs)
3718 {
3719 X86CPU *cpu = X86_CPU(cs);
3720 CPUX86State *env = &cpu->env;
3721
3722 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3723 CPU_INTERRUPT_POLL)) &&
3724 (env->eflags & IF_MASK)) ||
3725 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3726 CPU_INTERRUPT_INIT |
3727 CPU_INTERRUPT_SIPI |
3728 CPU_INTERRUPT_MCE)) ||
3729 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3730 !(env->hflags & HF_SMM_MASK));
3731 }
3732
/* qdev properties of the X86CPU device */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* System emulation: topology ids start unassigned and must be set
     * by the board/machine code before realize.
     */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenment properties */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* Feature filtering behaviour (warn vs. fail on missing features) */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* CPUID level overrides; UINT32_MAX means "derive automatically" */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_END_OF_LIST()
};
3777
/* Class init for the abstract x86 CPU type: wires the device-class
 * realize/unrealize/reset hooks (saving the parents' versions first)
 * and fills in the CPUClass virtual method table.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Save parent hooks so our implementations can chain to them */
    xcc->parent_realize = dc->realize;
    xcc->parent_unrealize = dc->unrealize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    /* CPU_NB_REGS * 2 = general regs + xmm regs
     * 25 = eip, eflags, 6 seg regs, st[0-7], fctrl,...,fop, mxcsr.
     */
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    dc->cannot_instantiate_with_device_add_yet = false;
}
3830
/* Abstract base QOM type for all x86 CPU models; concrete models are
 * registered from builtin_x86_defs in x86_cpu_register_types().
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,          /* only named models are instantiable */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3840
3841 static void x86_cpu_register_types(void)
3842 {
3843 int i;
3844
3845 type_register_static(&x86_cpu_type_info);
3846 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3847 x86_register_cpudef_type(&builtin_x86_defs[i]);
3848 }
3849 type_register_static(&max_x86_cpu_type_info);
3850 #ifdef CONFIG_KVM
3851 type_register_static(&host_x86_cpu_type_info);
3852 #endif
3853 }
3854
3855 type_init(x86_cpu_register_types)