/* target/i386/cpu.c — i386 CPUID helpers (QEMU; includes la57 paging mode) */
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
53 /* Cache topology CPUID constants: */
54
55 /* CPUID Leaf 2 Descriptors */
56
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
61
62
63 /* CPUID Leaf 4 constants: */
64
65 /* EAX: */
66 #define CPUID_4_TYPE_DCACHE 1
67 #define CPUID_4_TYPE_ICACHE 2
68 #define CPUID_4_TYPE_UNIFIED 3
69
70 #define CPUID_4_LEVEL(l) ((l) << 5)
71
72 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
73 #define CPUID_4_FULLY_ASSOC (1 << 9)
74
75 /* EDX: */
76 #define CPUID_4_NO_INVD_SHARING (1 << 0)
77 #define CPUID_4_INCLUSIVE (1 << 1)
78 #define CPUID_4_COMPLEX_IDX (1 << 2)
79
80 #define ASSOC_FULL 0xFF
81
/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
/* NOTE(review): function-like macro — evaluates 'a' multiple times, so it
 * must only be used with side-effect-free arguments.  Values with no
 * defined encoding map to 0 (invalid). */
#define AMD_ENC_ASSOC(a) (a <= 1 ? a   : \
                 a == 2 ? 0x2 : \
                 a == 4 ? 0x4 : \
                 a == 8 ? 0x6 : \
                 a == 16 ? 0x8 : \
                 a == 32 ? 0xA : \
                 a == 48 ? 0xB : \
                 a == 64 ? 0xC : \
                 a == 96 ? 0xD : \
                 a == 128 ? 0xE : \
                 a == ASSOC_FULL ? 0xF : \
                 0 /* invalid value */)
95
96
97 /* Definitions of the hardcoded cache entries we expose: */
98
99 /* L1 data cache: */
100 #define L1D_LINE_SIZE 64
101 #define L1D_ASSOCIATIVITY 8
102 #define L1D_SETS 64
103 #define L1D_PARTITIONS 1
104 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
105 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
106 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
107 #define L1D_LINES_PER_TAG 1
108 #define L1D_SIZE_KB_AMD 64
109 #define L1D_ASSOCIATIVITY_AMD 2
110
111 /* L1 instruction cache: */
112 #define L1I_LINE_SIZE 64
113 #define L1I_ASSOCIATIVITY 8
114 #define L1I_SETS 64
115 #define L1I_PARTITIONS 1
116 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
117 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
118 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
119 #define L1I_LINES_PER_TAG 1
120 #define L1I_SIZE_KB_AMD 64
121 #define L1I_ASSOCIATIVITY_AMD 2
122
123 /* Level 2 unified cache: */
124 #define L2_LINE_SIZE 64
125 #define L2_ASSOCIATIVITY 16
126 #define L2_SETS 4096
127 #define L2_PARTITIONS 1
128 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
129 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
130 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
131 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
132 #define L2_LINES_PER_TAG 1
133 #define L2_SIZE_KB_AMD 512
134
135 /* Level 3 unified cache: */
136 #define L3_SIZE_KB 0 /* disabled */
137 #define L3_ASSOCIATIVITY 0 /* disabled */
138 #define L3_LINES_PER_TAG 0 /* disabled */
139 #define L3_LINE_SIZE 0 /* disabled */
140 #define L3_N_LINE_SIZE 64
141 #define L3_N_ASSOCIATIVITY 16
142 #define L3_N_SETS 16384
143 #define L3_N_PARTITIONS 1
144 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
145 #define L3_N_LINES_PER_TAG 1
146 #define L3_N_SIZE_KB_AMD 16384
147
148 /* TLB definitions: */
149
150 #define L1_DTLB_2M_ASSOC 1
151 #define L1_DTLB_2M_ENTRIES 255
152 #define L1_DTLB_4K_ASSOC 1
153 #define L1_DTLB_4K_ENTRIES 255
154
155 #define L1_ITLB_2M_ASSOC 1
156 #define L1_ITLB_2M_ENTRIES 255
157 #define L1_ITLB_4K_ASSOC 1
158 #define L1_ITLB_4K_ENTRIES 255
159
160 #define L2_DTLB_2M_ASSOC 0 /* disabled */
161 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
162 #define L2_DTLB_4K_ASSOC 4
163 #define L2_DTLB_4K_ENTRIES 512
164
165 #define L2_ITLB_2M_ASSOC 0 /* disabled */
166 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
167 #define L2_ITLB_4K_ASSOC 4
168 #define L2_ITLB_4K_ENTRIES 512
169
170
171
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
174 {
175 int i;
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
180 }
181 dst[CPUID_VENDOR_SZ] = '\0';
182 }
183
184 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
185 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
186 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
187 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
188 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
189 CPUID_PSE36 | CPUID_FXSR)
190 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
191 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
192 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
193 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
194 CPUID_PAE | CPUID_SEP | CPUID_APIC)
195
196 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
197 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
198 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
199 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
200 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
201 /* partly implemented:
202 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
203 /* missing:
204 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
205 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
206 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
207 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
208 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
209 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
210 /* missing:
211 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
212 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
213 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
214 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
215 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
216
217 #ifdef TARGET_X86_64
218 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
219 #else
220 #define TCG_EXT2_X86_64_FEATURES 0
221 #endif
222
223 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
224 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
225 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
226 TCG_EXT2_X86_64_FEATURES)
227 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
228 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
229 #define TCG_EXT4_FEATURES 0
230 #define TCG_SVM_FEATURES 0
231 #define TCG_KVM_FEATURES 0
232 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
233 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
234 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
235 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
236 CPUID_7_0_EBX_ERMS)
237 /* missing:
238 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
239 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
240 CPUID_7_0_EBX_RDSEED */
241 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
242 CPUID_7_0_ECX_LA57)
243 #define TCG_7_0_EDX_FEATURES 0
244 #define TCG_APM_FEATURES 0
245 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
246 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
247 /* missing:
248 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
249
250 typedef struct FeatureWordInfo {
251 /* feature flags names are taken from "Intel Processor Identification and
252 * the CPUID Instruction" and AMD's "CPUID Specification".
253 * In cases of disagreement between feature naming conventions,
254 * aliases may be added.
255 */
256 const char *feat_names[32];
257 uint32_t cpuid_eax; /* Input EAX for CPUID */
258 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
259 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
260 int cpuid_reg; /* output register (R_* constant) */
261 uint32_t tcg_features; /* Feature flags supported by TCG */
262 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
263 uint32_t migratable_flags; /* Feature flags known to be migratable */
264 } FeatureWordInfo;
265
/* Description of every feature word: the human-readable name of each bit
 * (array index == bit position; NULL == unnamed/not user-settable), the
 * CPUID leaf (and ECX sub-leaf where cpuid_needs_ecx is set) plus output
 * register the word is read from, the subset of bits TCG can emulate,
 * and explicit migratability overrides.
 */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    /* VIA/Centaur-specific feature leaf */
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = {
            /* NOTE(review): "kvmclock" intentionally appears at bits 0 and
             * 3 — KVM exposes two clocksource feature bits under one name.
             */
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    /* Hyper-V enlightenment words: all bits unnamed (managed via hv-*
     * CPU properties rather than feature-flag names). */
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    /* XSAVE component masks (CPUID[0xD,0].EAX/EDX): no bit names; the
     * migratable XSTATE components are whitelisted explicitly. */
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};
528
/* Mapping of one 32-bit x86 register to its QAPI enum representation */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
535
/* Table of register name/QAPI-enum pairs, indexed by the R_* constants */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
549
/* One XSAVE state component: the CPUID feature word/bits that make it
 * available, and its offset and size within the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;
554
/* XSAVE state components, indexed by their XSTATE_*_BIT component number */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
599
600 static uint32_t xsave_area_size(uint64_t mask)
601 {
602 int i;
603 uint64_t ret = 0;
604
605 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
606 const ExtSaveArea *esa = &x86_ext_save_areas[i];
607 if ((mask >> i) & 1) {
608 ret = MAX(ret, esa->offset + esa->size);
609 }
610 }
611 return ret;
612 }
613
614 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
615 {
616 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
617 cpu->env.features[FEAT_XSAVE_COMP_LO];
618 }
619
620 const char *get_register_name_32(unsigned int reg)
621 {
622 if (reg >= CPU_NB_REGS32) {
623 return NULL;
624 }
625 return x86_reg_info_32[reg].name;
626 }
627
628 /*
629 * Returns the set of feature flags that are supported and migratable by
630 * QEMU, for a given FeatureWord.
631 */
632 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
633 {
634 FeatureWordInfo *wi = &feature_word_info[w];
635 uint32_t r = 0;
636 int i;
637
638 for (i = 0; i < 32; i++) {
639 uint32_t f = 1U << i;
640
641 /* If the feature name is known, it is implicitly considered migratable,
642 * unless it is explicitly set in unmigratable_flags */
643 if ((wi->migratable_flags & f) ||
644 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
645 r |= f;
646 }
647 }
648 return r;
649 }
650
/* Execute the CPUID instruction on the *host* CPU with @function in EAX
 * and @count in ECX, storing the result registers through any non-NULL
 * output pointer.  Aborts when compiled for a non-x86 host. */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* pusha/popa preserve all GPRs around CPUID; results are written
     * through the vec pointer (in ESI) instead of register outputs —
     * NOTE(review): presumably to avoid clobbering EBX, which may be the
     * PIC base register on i386. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    /* Each output pointer is optional */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
684
685 /* CPU class name definitions: */
686
687 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
688 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
689
/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    /* e.g. "qemu64" -> "qemu64" X86_CPU_TYPE_SUFFIX; g_free() the result */
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
697
698 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
699 {
700 ObjectClass *oc;
701 char *typename;
702
703 if (cpu_model == NULL) {
704 return NULL;
705 }
706
707 typename = x86_cpu_type_name(cpu_model);
708 oc = object_class_by_name(typename);
709 g_free(typename);
710 return oc;
711 }
712
713 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
714 {
715 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
716 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
717 return g_strndup(class_name,
718 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
719 }
720
/* Static description of one built-in CPU model (see builtin_x86_defs) */
struct X86CPUDefinition {
    const char *name;
    uint32_t level;    /* value reported as the maximum basic CPUID leaf */
    uint32_t xlevel;   /* value reported as the maximum 0x8000xxxx leaf */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    char model_id[48]; /* CPUID model-id string, 48 bytes max */
};
733
734 static X86CPUDefinition builtin_x86_defs[] = {
735 {
736 .name = "qemu64",
737 .level = 0xd,
738 .vendor = CPUID_VENDOR_AMD,
739 .family = 6,
740 .model = 6,
741 .stepping = 3,
742 .features[FEAT_1_EDX] =
743 PPRO_FEATURES |
744 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
745 CPUID_PSE36,
746 .features[FEAT_1_ECX] =
747 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
748 .features[FEAT_8000_0001_EDX] =
749 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
750 .features[FEAT_8000_0001_ECX] =
751 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
752 .xlevel = 0x8000000A,
753 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
754 },
755 {
756 .name = "phenom",
757 .level = 5,
758 .vendor = CPUID_VENDOR_AMD,
759 .family = 16,
760 .model = 2,
761 .stepping = 3,
762 /* Missing: CPUID_HT */
763 .features[FEAT_1_EDX] =
764 PPRO_FEATURES |
765 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
766 CPUID_PSE36 | CPUID_VME,
767 .features[FEAT_1_ECX] =
768 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
769 CPUID_EXT_POPCNT,
770 .features[FEAT_8000_0001_EDX] =
771 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
772 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
773 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
774 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
775 CPUID_EXT3_CR8LEG,
776 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
777 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
778 .features[FEAT_8000_0001_ECX] =
779 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
780 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
781 /* Missing: CPUID_SVM_LBRV */
782 .features[FEAT_SVM] =
783 CPUID_SVM_NPT,
784 .xlevel = 0x8000001A,
785 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
786 },
787 {
788 .name = "core2duo",
789 .level = 10,
790 .vendor = CPUID_VENDOR_INTEL,
791 .family = 6,
792 .model = 15,
793 .stepping = 11,
794 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
795 .features[FEAT_1_EDX] =
796 PPRO_FEATURES |
797 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
798 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
799 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
800 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
801 .features[FEAT_1_ECX] =
802 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
803 CPUID_EXT_CX16,
804 .features[FEAT_8000_0001_EDX] =
805 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
806 .features[FEAT_8000_0001_ECX] =
807 CPUID_EXT3_LAHF_LM,
808 .xlevel = 0x80000008,
809 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
810 },
811 {
812 .name = "kvm64",
813 .level = 0xd,
814 .vendor = CPUID_VENDOR_INTEL,
815 .family = 15,
816 .model = 6,
817 .stepping = 1,
818 /* Missing: CPUID_HT */
819 .features[FEAT_1_EDX] =
820 PPRO_FEATURES | CPUID_VME |
821 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
822 CPUID_PSE36,
823 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
824 .features[FEAT_1_ECX] =
825 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
826 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
827 .features[FEAT_8000_0001_EDX] =
828 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
829 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
830 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
831 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
832 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
833 .features[FEAT_8000_0001_ECX] =
834 0,
835 .xlevel = 0x80000008,
836 .model_id = "Common KVM processor"
837 },
838 {
839 .name = "qemu32",
840 .level = 4,
841 .vendor = CPUID_VENDOR_INTEL,
842 .family = 6,
843 .model = 6,
844 .stepping = 3,
845 .features[FEAT_1_EDX] =
846 PPRO_FEATURES,
847 .features[FEAT_1_ECX] =
848 CPUID_EXT_SSE3,
849 .xlevel = 0x80000004,
850 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
851 },
852 {
853 .name = "kvm32",
854 .level = 5,
855 .vendor = CPUID_VENDOR_INTEL,
856 .family = 15,
857 .model = 6,
858 .stepping = 1,
859 .features[FEAT_1_EDX] =
860 PPRO_FEATURES | CPUID_VME |
861 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
862 .features[FEAT_1_ECX] =
863 CPUID_EXT_SSE3,
864 .features[FEAT_8000_0001_ECX] =
865 0,
866 .xlevel = 0x80000008,
867 .model_id = "Common 32-bit KVM processor"
868 },
869 {
870 .name = "coreduo",
871 .level = 10,
872 .vendor = CPUID_VENDOR_INTEL,
873 .family = 6,
874 .model = 14,
875 .stepping = 8,
876 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
877 .features[FEAT_1_EDX] =
878 PPRO_FEATURES | CPUID_VME |
879 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
880 CPUID_SS,
881 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
882 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
883 .features[FEAT_1_ECX] =
884 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
885 .features[FEAT_8000_0001_EDX] =
886 CPUID_EXT2_NX,
887 .xlevel = 0x80000008,
888 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
889 },
890 {
891 .name = "486",
892 .level = 1,
893 .vendor = CPUID_VENDOR_INTEL,
894 .family = 4,
895 .model = 8,
896 .stepping = 0,
897 .features[FEAT_1_EDX] =
898 I486_FEATURES,
899 .xlevel = 0,
900 },
901 {
902 .name = "pentium",
903 .level = 1,
904 .vendor = CPUID_VENDOR_INTEL,
905 .family = 5,
906 .model = 4,
907 .stepping = 3,
908 .features[FEAT_1_EDX] =
909 PENTIUM_FEATURES,
910 .xlevel = 0,
911 },
912 {
913 .name = "pentium2",
914 .level = 2,
915 .vendor = CPUID_VENDOR_INTEL,
916 .family = 6,
917 .model = 5,
918 .stepping = 2,
919 .features[FEAT_1_EDX] =
920 PENTIUM2_FEATURES,
921 .xlevel = 0,
922 },
923 {
924 .name = "pentium3",
925 .level = 3,
926 .vendor = CPUID_VENDOR_INTEL,
927 .family = 6,
928 .model = 7,
929 .stepping = 3,
930 .features[FEAT_1_EDX] =
931 PENTIUM3_FEATURES,
932 .xlevel = 0,
933 },
934 {
935 .name = "athlon",
936 .level = 2,
937 .vendor = CPUID_VENDOR_AMD,
938 .family = 6,
939 .model = 2,
940 .stepping = 3,
941 .features[FEAT_1_EDX] =
942 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
943 CPUID_MCA,
944 .features[FEAT_8000_0001_EDX] =
945 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
946 .xlevel = 0x80000008,
947 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
948 },
949 {
950 .name = "n270",
951 .level = 10,
952 .vendor = CPUID_VENDOR_INTEL,
953 .family = 6,
954 .model = 28,
955 .stepping = 2,
956 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
957 .features[FEAT_1_EDX] =
958 PPRO_FEATURES |
959 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
960 CPUID_ACPI | CPUID_SS,
961 /* Some CPUs got no CPUID_SEP */
962 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
963 * CPUID_EXT_XTPR */
964 .features[FEAT_1_ECX] =
965 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
966 CPUID_EXT_MOVBE,
967 .features[FEAT_8000_0001_EDX] =
968 CPUID_EXT2_NX,
969 .features[FEAT_8000_0001_ECX] =
970 CPUID_EXT3_LAHF_LM,
971 .xlevel = 0x80000008,
972 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
973 },
974 {
975 .name = "Conroe",
976 .level = 10,
977 .vendor = CPUID_VENDOR_INTEL,
978 .family = 6,
979 .model = 15,
980 .stepping = 3,
981 .features[FEAT_1_EDX] =
982 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
983 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
984 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
985 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
986 CPUID_DE | CPUID_FP87,
987 .features[FEAT_1_ECX] =
988 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
989 .features[FEAT_8000_0001_EDX] =
990 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
991 .features[FEAT_8000_0001_ECX] =
992 CPUID_EXT3_LAHF_LM,
993 .xlevel = 0x80000008,
994 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
995 },
996 {
997 .name = "Penryn",
998 .level = 10,
999 .vendor = CPUID_VENDOR_INTEL,
1000 .family = 6,
1001 .model = 23,
1002 .stepping = 3,
1003 .features[FEAT_1_EDX] =
1004 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1005 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1006 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1007 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1008 CPUID_DE | CPUID_FP87,
1009 .features[FEAT_1_ECX] =
1010 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1011 CPUID_EXT_SSE3,
1012 .features[FEAT_8000_0001_EDX] =
1013 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1014 .features[FEAT_8000_0001_ECX] =
1015 CPUID_EXT3_LAHF_LM,
1016 .xlevel = 0x80000008,
1017 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1018 },
1019 {
1020 .name = "Nehalem",
1021 .level = 11,
1022 .vendor = CPUID_VENDOR_INTEL,
1023 .family = 6,
1024 .model = 26,
1025 .stepping = 3,
1026 .features[FEAT_1_EDX] =
1027 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1028 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1029 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1030 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1031 CPUID_DE | CPUID_FP87,
1032 .features[FEAT_1_ECX] =
1033 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1034 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1035 .features[FEAT_8000_0001_EDX] =
1036 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1037 .features[FEAT_8000_0001_ECX] =
1038 CPUID_EXT3_LAHF_LM,
1039 .xlevel = 0x80000008,
1040 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1041 },
1042 {
1043 .name = "Westmere",
1044 .level = 11,
1045 .vendor = CPUID_VENDOR_INTEL,
1046 .family = 6,
1047 .model = 44,
1048 .stepping = 1,
1049 .features[FEAT_1_EDX] =
1050 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1051 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1052 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1053 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1054 CPUID_DE | CPUID_FP87,
1055 .features[FEAT_1_ECX] =
1056 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1057 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1058 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1059 .features[FEAT_8000_0001_EDX] =
1060 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1061 .features[FEAT_8000_0001_ECX] =
1062 CPUID_EXT3_LAHF_LM,
1063 .features[FEAT_6_EAX] =
1064 CPUID_6_EAX_ARAT,
1065 .xlevel = 0x80000008,
1066 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1067 },
1068 {
1069 .name = "SandyBridge",
1070 .level = 0xd,
1071 .vendor = CPUID_VENDOR_INTEL,
1072 .family = 6,
1073 .model = 42,
1074 .stepping = 1,
1075 .features[FEAT_1_EDX] =
1076 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1077 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1078 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1079 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1080 CPUID_DE | CPUID_FP87,
1081 .features[FEAT_1_ECX] =
1082 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1083 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1084 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1085 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1086 CPUID_EXT_SSE3,
1087 .features[FEAT_8000_0001_EDX] =
1088 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1089 CPUID_EXT2_SYSCALL,
1090 .features[FEAT_8000_0001_ECX] =
1091 CPUID_EXT3_LAHF_LM,
1092 .features[FEAT_XSAVE] =
1093 CPUID_XSAVE_XSAVEOPT,
1094 .features[FEAT_6_EAX] =
1095 CPUID_6_EAX_ARAT,
1096 .xlevel = 0x80000008,
1097 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1098 },
1099 {
1100 .name = "IvyBridge",
1101 .level = 0xd,
1102 .vendor = CPUID_VENDOR_INTEL,
1103 .family = 6,
1104 .model = 58,
1105 .stepping = 9,
1106 .features[FEAT_1_EDX] =
1107 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1108 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1109 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1110 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1111 CPUID_DE | CPUID_FP87,
1112 .features[FEAT_1_ECX] =
1113 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1114 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1115 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1116 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1117 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1118 .features[FEAT_7_0_EBX] =
1119 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1120 CPUID_7_0_EBX_ERMS,
1121 .features[FEAT_8000_0001_EDX] =
1122 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1123 CPUID_EXT2_SYSCALL,
1124 .features[FEAT_8000_0001_ECX] =
1125 CPUID_EXT3_LAHF_LM,
1126 .features[FEAT_XSAVE] =
1127 CPUID_XSAVE_XSAVEOPT,
1128 .features[FEAT_6_EAX] =
1129 CPUID_6_EAX_ARAT,
1130 .xlevel = 0x80000008,
1131 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1132 },
1133 {
1134 .name = "Haswell-noTSX",
1135 .level = 0xd,
1136 .vendor = CPUID_VENDOR_INTEL,
1137 .family = 6,
1138 .model = 60,
1139 .stepping = 1,
1140 .features[FEAT_1_EDX] =
1141 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1142 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1143 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1144 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1145 CPUID_DE | CPUID_FP87,
1146 .features[FEAT_1_ECX] =
1147 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1148 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1149 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1150 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1151 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1152 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1153 .features[FEAT_8000_0001_EDX] =
1154 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1155 CPUID_EXT2_SYSCALL,
1156 .features[FEAT_8000_0001_ECX] =
1157 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1158 .features[FEAT_7_0_EBX] =
1159 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1160 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1161 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1162 .features[FEAT_XSAVE] =
1163 CPUID_XSAVE_XSAVEOPT,
1164 .features[FEAT_6_EAX] =
1165 CPUID_6_EAX_ARAT,
1166 .xlevel = 0x80000008,
1167 .model_id = "Intel Core Processor (Haswell, no TSX)",
1168 }, {
1169 .name = "Haswell",
1170 .level = 0xd,
1171 .vendor = CPUID_VENDOR_INTEL,
1172 .family = 6,
1173 .model = 60,
1174 .stepping = 1,
1175 .features[FEAT_1_EDX] =
1176 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1177 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1180 CPUID_DE | CPUID_FP87,
1181 .features[FEAT_1_ECX] =
1182 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1183 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1184 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1185 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1186 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1187 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1188 .features[FEAT_8000_0001_EDX] =
1189 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1190 CPUID_EXT2_SYSCALL,
1191 .features[FEAT_8000_0001_ECX] =
1192 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1193 .features[FEAT_7_0_EBX] =
1194 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1195 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1196 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1197 CPUID_7_0_EBX_RTM,
1198 .features[FEAT_XSAVE] =
1199 CPUID_XSAVE_XSAVEOPT,
1200 .features[FEAT_6_EAX] =
1201 CPUID_6_EAX_ARAT,
1202 .xlevel = 0x80000008,
1203 .model_id = "Intel Core Processor (Haswell)",
1204 },
1205 {
1206 .name = "Broadwell-noTSX",
1207 .level = 0xd,
1208 .vendor = CPUID_VENDOR_INTEL,
1209 .family = 6,
1210 .model = 61,
1211 .stepping = 2,
1212 .features[FEAT_1_EDX] =
1213 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1214 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1215 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1216 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1217 CPUID_DE | CPUID_FP87,
1218 .features[FEAT_1_ECX] =
1219 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1220 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1221 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1222 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1223 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1224 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1225 .features[FEAT_8000_0001_EDX] =
1226 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1227 CPUID_EXT2_SYSCALL,
1228 .features[FEAT_8000_0001_ECX] =
1229 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1230 .features[FEAT_7_0_EBX] =
1231 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1232 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1233 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1234 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1235 CPUID_7_0_EBX_SMAP,
1236 .features[FEAT_XSAVE] =
1237 CPUID_XSAVE_XSAVEOPT,
1238 .features[FEAT_6_EAX] =
1239 CPUID_6_EAX_ARAT,
1240 .xlevel = 0x80000008,
1241 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1242 },
1243 {
1244 .name = "Broadwell",
1245 .level = 0xd,
1246 .vendor = CPUID_VENDOR_INTEL,
1247 .family = 6,
1248 .model = 61,
1249 .stepping = 2,
1250 .features[FEAT_1_EDX] =
1251 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1252 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1253 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1254 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1255 CPUID_DE | CPUID_FP87,
1256 .features[FEAT_1_ECX] =
1257 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1258 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1259 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1260 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1261 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1262 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1263 .features[FEAT_8000_0001_EDX] =
1264 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1265 CPUID_EXT2_SYSCALL,
1266 .features[FEAT_8000_0001_ECX] =
1267 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1268 .features[FEAT_7_0_EBX] =
1269 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1270 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1271 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1272 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1273 CPUID_7_0_EBX_SMAP,
1274 .features[FEAT_XSAVE] =
1275 CPUID_XSAVE_XSAVEOPT,
1276 .features[FEAT_6_EAX] =
1277 CPUID_6_EAX_ARAT,
1278 .xlevel = 0x80000008,
1279 .model_id = "Intel Core Processor (Broadwell)",
1280 },
1281 {
1282 .name = "Skylake-Client",
1283 .level = 0xd,
1284 .vendor = CPUID_VENDOR_INTEL,
1285 .family = 6,
1286 .model = 94,
1287 .stepping = 3,
1288 .features[FEAT_1_EDX] =
1289 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1290 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1291 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1292 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1293 CPUID_DE | CPUID_FP87,
1294 .features[FEAT_1_ECX] =
1295 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1296 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1297 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1298 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1299 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1300 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1301 .features[FEAT_8000_0001_EDX] =
1302 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1303 CPUID_EXT2_SYSCALL,
1304 .features[FEAT_8000_0001_ECX] =
1305 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1306 .features[FEAT_7_0_EBX] =
1307 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1308 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1309 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1310 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1311 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1312 /* Missing: XSAVES (not supported by some Linux versions,
1313 * including v4.1 to v4.6).
1314 * KVM doesn't yet expose any XSAVES state save component,
1315 * and the only one defined in Skylake (processor tracing)
1316 * probably will block migration anyway.
1317 */
1318 .features[FEAT_XSAVE] =
1319 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1320 CPUID_XSAVE_XGETBV1,
1321 .features[FEAT_6_EAX] =
1322 CPUID_6_EAX_ARAT,
1323 .xlevel = 0x80000008,
1324 .model_id = "Intel Core Processor (Skylake)",
1325 },
1326 {
1327 .name = "Opteron_G1",
1328 .level = 5,
1329 .vendor = CPUID_VENDOR_AMD,
1330 .family = 15,
1331 .model = 6,
1332 .stepping = 1,
1333 .features[FEAT_1_EDX] =
1334 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1335 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1336 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1337 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1338 CPUID_DE | CPUID_FP87,
1339 .features[FEAT_1_ECX] =
1340 CPUID_EXT_SSE3,
1341 .features[FEAT_8000_0001_EDX] =
1342 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1343 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1344 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1345 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1346 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1347 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1348 .xlevel = 0x80000008,
1349 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1350 },
1351 {
1352 .name = "Opteron_G2",
1353 .level = 5,
1354 .vendor = CPUID_VENDOR_AMD,
1355 .family = 15,
1356 .model = 6,
1357 .stepping = 1,
1358 .features[FEAT_1_EDX] =
1359 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1360 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1361 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1362 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1363 CPUID_DE | CPUID_FP87,
1364 .features[FEAT_1_ECX] =
1365 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1366 /* Missing: CPUID_EXT2_RDTSCP */
1367 .features[FEAT_8000_0001_EDX] =
1368 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1369 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1370 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1371 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1372 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1373 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1374 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1375 .features[FEAT_8000_0001_ECX] =
1376 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1377 .xlevel = 0x80000008,
1378 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1379 },
1380 {
1381 .name = "Opteron_G3",
1382 .level = 5,
1383 .vendor = CPUID_VENDOR_AMD,
1384 .family = 16,
1385 .model = 2,
1386 .stepping = 3,
1387 .features[FEAT_1_EDX] =
1388 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1389 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1390 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1391 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1392 CPUID_DE | CPUID_FP87,
1393 .features[FEAT_1_ECX] =
1394 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1395 CPUID_EXT_SSE3,
1396 /* Missing: CPUID_EXT2_RDTSCP */
1397 .features[FEAT_8000_0001_EDX] =
1398 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1399 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1400 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1401 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1402 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1403 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1404 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1405 .features[FEAT_8000_0001_ECX] =
1406 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1407 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1408 .xlevel = 0x80000008,
1409 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1410 },
1411 {
1412 .name = "Opteron_G4",
1413 .level = 0xd,
1414 .vendor = CPUID_VENDOR_AMD,
1415 .family = 21,
1416 .model = 1,
1417 .stepping = 2,
1418 .features[FEAT_1_EDX] =
1419 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1420 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1421 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1422 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1423 CPUID_DE | CPUID_FP87,
1424 .features[FEAT_1_ECX] =
1425 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1426 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1427 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1428 CPUID_EXT_SSE3,
1429 /* Missing: CPUID_EXT2_RDTSCP */
1430 .features[FEAT_8000_0001_EDX] =
1431 CPUID_EXT2_LM |
1432 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1433 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1434 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1435 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1436 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1437 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1438 .features[FEAT_8000_0001_ECX] =
1439 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1440 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1441 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1442 CPUID_EXT3_LAHF_LM,
1443 /* no xsaveopt! */
1444 .xlevel = 0x8000001A,
1445 .model_id = "AMD Opteron 62xx class CPU",
1446 },
1447 {
1448 .name = "Opteron_G5",
1449 .level = 0xd,
1450 .vendor = CPUID_VENDOR_AMD,
1451 .family = 21,
1452 .model = 2,
1453 .stepping = 0,
1454 .features[FEAT_1_EDX] =
1455 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1456 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1457 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1458 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1459 CPUID_DE | CPUID_FP87,
1460 .features[FEAT_1_ECX] =
1461 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1462 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1463 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1464 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1465 /* Missing: CPUID_EXT2_RDTSCP */
1466 .features[FEAT_8000_0001_EDX] =
1467 CPUID_EXT2_LM |
1468 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1469 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1470 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1471 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1472 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1473 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1474 .features[FEAT_8000_0001_ECX] =
1475 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1476 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1477 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1478 CPUID_EXT3_LAHF_LM,
1479 /* no xsaveopt! */
1480 .xlevel = 0x8000001A,
1481 .model_id = "AMD Opteron 63xx class CPU",
1482 },
1483 };
1484
/* Generic (property name, value) string pair, used below for the
 * tables of accelerator-specific CPU property overrides
 * (kvm_default_props, tcg_default_props).
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1488
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * Each entry is a QOM property name and the value it is forced to.
 * The table is NULL-terminated; x86_cpu_change_kvm_default() below
 * asserts that any property it is asked to change is present here.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
1505
/* TCG-specific defaults that override all CPU models when using TCG
 * (NULL-terminated, same format as kvm_default_props).
 * NOTE(review): "vme" is forced off here — presumably because the TCG
 * backend does not implement virtual-8086 mode extensions; confirm.
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
1512
1513
1514 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1515 {
1516 PropValue *pv;
1517 for (pv = kvm_default_props; pv->prop; pv++) {
1518 if (!strcmp(pv->prop, prop)) {
1519 pv->value = value;
1520 break;
1521 }
1522 }
1523
1524 /* It is valid to call this function only for properties that
1525 * are already present in the kvm_default_props table.
1526 */
1527 assert(pv->prop);
1528 }
1529
1530 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1531 bool migratable_only);
1532
1533 #ifdef CONFIG_KVM
1534
1535 static bool lmce_supported(void)
1536 {
1537 uint64_t mce_cap;
1538
1539 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1540 return false;
1541 }
1542
1543 return !!(mce_cap & MCG_LMCE_P);
1544 }
1545
/* Fill @str (at least 48 bytes) with the host CPU's brand string,
 * read from CPUID leaves 0x80000002..0x80000004.  Each leaf yields
 * 16 bytes, packed EAX, EBX, ECX, EDX in that order.
 * Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4];
    int leaf, r;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        for (r = 0; r < 4; r++) {
            memcpy(str + leaf * 16 + r * 4, &regs[r], 4);
        }
    }
    return 0;
}
1560
1561 static X86CPUDefinition host_cpudef;
1562
/* qdev properties specific to the "host" CPU model. */
static Property host_x86_cpu_properties[] = {
    /* Restrict features to those that can be safely live-migrated */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* Pass the host's cache topology through to the guest CPUID */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1568
/* class_init for the "host" CPU model
 *
 * Fills host_cpudef with the vendor/family/model/stepping and brand
 * string of the CPU QEMU is running on, via direct CPUID queries.
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    xcc->kvm_required = true;

    /* CPUID leaf 0: vendor string is returned in EBX, EDX, ECX order */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* CPUID leaf 1, EAX: version information.
     * family   = base family (bits 11:8) + extended family (bits 27:20)
     *            NOTE(review): the SDM adds the extended family only when
     *            base family == 0xF; here it is added unconditionally —
     *            harmless in practice since ext. family is 0 otherwise.
     * model    = base model (bits 7:4) | extended model (bits 19:16) << 4
     * stepping = bits 3:0
     */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    /* 48-byte brand string from CPUID 0x80000002..0x80000004 */
    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;
    xcc->model_description =
        "KVM processor with all supported host features "
        "(only available in KVM mode)";

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
1604
/* instance_init for the "host" CPU model.
 *
 * Queries KVM for the minimum supported CPUID levels and enables
 * host-dependent optional features (LMCE, PMU).  Unlike class_init,
 * this runs after KVM initialization, so it may issue KVM ioctls.
 */
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
    if (kvm_enabled()) {
        /* Highest basic, extended (0x8000_0000) and Centaur (0xC000_0000)
         * CPUID leaves supported by KVM on this host.
         */
        env->cpuid_min_level =
            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_min_xlevel =
            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_min_xlevel2 =
            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        /* Enable Local Machine Check Exception if the host supports it */
        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    }

    /* The "host" model always exposes the PMU */
    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1632
/* QOM type registration for the KVM-only "host" CPU model. */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1639
1640 #endif
1641
1642 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1643 {
1644 FeatureWordInfo *f = &feature_word_info[w];
1645 int i;
1646
1647 for (i = 0; i < 32; ++i) {
1648 if ((1UL << i) & mask) {
1649 const char *reg = get_register_name_32(f->cpuid_reg);
1650 assert(reg);
1651 fprintf(stderr, "warning: %s doesn't support requested feature: "
1652 "CPUID.%02XH:%s%s%s [bit %d]\n",
1653 kvm_enabled() ? "host" : "TCG",
1654 f->cpuid_eax, reg,
1655 f->feat_names[i] ? "." : "",
1656 f->feat_names[i] ? f->feat_names[i] : "", i);
1657 }
1658 }
1659 }
1660
1661 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1662 const char *name, void *opaque,
1663 Error **errp)
1664 {
1665 X86CPU *cpu = X86_CPU(obj);
1666 CPUX86State *env = &cpu->env;
1667 int64_t value;
1668
1669 value = (env->cpuid_version >> 8) & 0xf;
1670 if (value == 0xf) {
1671 value += (env->cpuid_version >> 20) & 0xff;
1672 }
1673 visit_type_int(v, name, &value, errp);
1674 }
1675
/* QOM setter for the "family" property.
 *
 * Encodes @value into the CPUID version dword: families up to 0xf go
 * into the base-family field (bits 11:8); larger values set the base
 * field to 0xf and store the remainder in the extended-family field
 * (bits 27:20).  Maximum is therefore 0xff + 0xf.
 * Reports an out-of-range or visitor error through @errp.
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear extended-family (27:20) and base-family (11:8) fields */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        /* Base field saturates at 0xf; remainder goes to ext. family */
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
1705
1706 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1707 const char *name, void *opaque,
1708 Error **errp)
1709 {
1710 X86CPU *cpu = X86_CPU(obj);
1711 CPUX86State *env = &cpu->env;
1712 int64_t value;
1713
1714 value = (env->cpuid_version >> 4) & 0xf;
1715 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1716 visit_type_int(v, name, &value, errp);
1717 }
1718
/* QOM setter for the "model" property.
 *
 * Splits the 8-bit @value across the CPUID version dword: low nibble
 * into bits 7:4, high nibble into the extended-model field
 * (bits 19:16).  Reports out-of-range or visitor errors via @errp.
 */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear extended-model (19:16) and base-model (7:4) fields */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
1744
1745 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1746 const char *name, void *opaque,
1747 Error **errp)
1748 {
1749 X86CPU *cpu = X86_CPU(obj);
1750 CPUX86State *env = &cpu->env;
1751 int64_t value;
1752
1753 value = env->cpuid_version & 0xf;
1754 visit_type_int(v, name, &value, errp);
1755 }
1756
/* QOM setter for the "stepping" property.
 *
 * Stores @value (0..0xf) into bits 3:0 of the CPUID version dword.
 * Reports out-of-range or visitor errors via @errp.
 */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Replace the 4-bit stepping field */
    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
1782
1783 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1784 {
1785 X86CPU *cpu = X86_CPU(obj);
1786 CPUX86State *env = &cpu->env;
1787 char *value;
1788
1789 value = g_malloc(CPUID_VENDOR_SZ + 1);
1790 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1791 env->cpuid_vendor3);
1792 return value;
1793 }
1794
1795 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1796 Error **errp)
1797 {
1798 X86CPU *cpu = X86_CPU(obj);
1799 CPUX86State *env = &cpu->env;
1800 int i;
1801
1802 if (strlen(value) != CPUID_VENDOR_SZ) {
1803 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1804 return;
1805 }
1806
1807 env->cpuid_vendor1 = 0;
1808 env->cpuid_vendor2 = 0;
1809 env->cpuid_vendor3 = 0;
1810 for (i = 0; i < 4; i++) {
1811 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1812 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1813 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1814 }
1815 }
1816
1817 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1818 {
1819 X86CPU *cpu = X86_CPU(obj);
1820 CPUX86State *env = &cpu->env;
1821 char *value;
1822 int i;
1823
1824 value = g_malloc(48 + 1);
1825 for (i = 0; i < 48; i++) {
1826 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1827 }
1828 value[48] = '\0';
1829 return value;
1830 }
1831
1832 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1833 Error **errp)
1834 {
1835 X86CPU *cpu = X86_CPU(obj);
1836 CPUX86State *env = &cpu->env;
1837 int c, len, i;
1838
1839 if (model_id == NULL) {
1840 model_id = "";
1841 }
1842 len = strlen(model_id);
1843 memset(env->cpuid_model, 0, 48);
1844 for (i = 0; i < 48; i++) {
1845 if (i >= len) {
1846 c = '\0';
1847 } else {
1848 c = (uint8_t)model_id[i];
1849 }
1850 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1851 }
1852 }
1853
1854 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1855 void *opaque, Error **errp)
1856 {
1857 X86CPU *cpu = X86_CPU(obj);
1858 int64_t value;
1859
1860 value = cpu->env.tsc_khz * 1000;
1861 visit_type_int(v, name, &value, errp);
1862 }
1863
/* QOM setter for the "tsc-frequency" property.
 *
 * Accepts a frequency in Hz and stores it in kHz into both tsc_khz
 * and user_tsc_khz (the latter records that the user set it
 * explicitly).  Negative values are rejected; since max is INT64_MAX
 * the upper-bound check can never fire and exists only for symmetry
 * with the other setters.
 */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Hz -> kHz (truncating) */
    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
1886
/* Generic getter for "feature-words" and "filtered-features" properties.
 *
 * @opaque points at the feature-word array (uint32_t[FEATURE_WORDS])
 * to report.  Builds an X86CPUFeatureWordInfoList describing every
 * feature word (CPUID leaf/register and the word's current value) and
 * hands it to the visitor.  The list nodes live on the stack; they are
 * only valid for the duration of the visit call.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        /* Describe how this word is queried: CPUID input EAX (and ECX
         * for leaves that need a sub-leaf) plus the output register. */
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
1915
1916 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1917 void *opaque, Error **errp)
1918 {
1919 X86CPU *cpu = X86_CPU(obj);
1920 int64_t value = cpu->hyperv_spinlock_attempts;
1921
1922 visit_type_int(v, name, &value, errp);
1923 }
1924
/* QOM setter for the "hv-spinlocks" property.
 *
 * Accepts values in [0xFFF, UINT_MAX].
 * NOTE(review): the 0xFFF minimum presumably comes from the Hyper-V
 * spec's minimum spinlock-retry count — confirm against the TLFS.
 */
static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}
1949
/* Property type for the hyperv spinlock-retries property: a plain int
 * with the range-checked getter/setter defined above. */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1955
/* Convert all '_' in a feature string option name to '-', in place, to make
 * the feature name conform to the QOM property naming rule, which uses '-'
 * instead of '_'.
 */
static inline void feat2prop(char *s)
{
    for (char *p = strchr(s, '_'); p; p = strchr(p, '_')) {
        *p = '-';
    }
}
1965
/* Return the feature property name for a feature flag bit.
 *
 * May return NULL when the bit has no name in feature_word_info
 * (callers such as listflags() check for NULL entries).
 */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* Index of the component in the combined 64-bit XSAVE bitmap */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            /* Redirect to the feature word/bit that owns this save area */
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
1986
/* Compatibility hack to maintain legacy +-feat semantic,
1988 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
1990 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
1991 */
1992 static GList *plus_features, *minus_features;
1993
1994 static gint compare_string(gconstpointer a, gconstpointer b)
1995 {
1996 return g_strcmp0(a, b);
1997 }
1998
/* Parse "+feature,-feature,feature=foo" CPU feature string.
 *
 * Features are not applied directly: each "feature=value" pair is
 * registered as a global property on 'typename' so it takes effect
 * when CPU objects of that type are created.  This runs at most once
 * per process (guarded by cpu_globals_initialized); later calls are
 * silently ignored.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value' string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    /* NOTE: strtok() modifies 'features' in place and is not reentrant */
    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            /* Remember "+feat" so a later "feat=..." can be flagged below */
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            /* Bare "feat" is shorthand for "feat=on" */
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        if (g_list_find_custom(plus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"+%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"-%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }

        /* Special case: "tsc-freq" takes a size-style suffixed value and
         * maps onto the "tsc-frequency" property. */
        if (!strcmp(name, "tsc-freq")) {
            int64_t tsc_freq;
            char *err;

            tsc_freq = qemu_strtosz_suffix_unit(val, &err,
                                           QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
            if (tsc_freq < 0 || *err) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        /* NOTE(review): 'typename' is stored without copying; assumes the
         * caller passes a string that outlives the global property list —
         * verify at call sites. */
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        error_report("warning: Compatibility of ambiguous CPU model "
                     "strings won't be kept on future QEMU versions");
    }
}
2090
2091 static void x86_cpu_load_features(X86CPU *cpu, Error **errp);
2092 static int x86_cpu_filter_features(X86CPU *cpu);
2093
2094 /* Check for missing features that may prevent the CPU class from
2095 * running using the current machine and accelerator.
2096 */
2097 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2098 strList **missing_feats)
2099 {
2100 X86CPU *xc;
2101 FeatureWord w;
2102 Error *err = NULL;
2103 strList **next = missing_feats;
2104
2105 if (xcc->kvm_required && !kvm_enabled()) {
2106 strList *new = g_new0(strList, 1);
2107 new->value = g_strdup("kvm");;
2108 *missing_feats = new;
2109 return;
2110 }
2111
2112 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2113
2114 x86_cpu_load_features(xc, &err);
2115 if (err) {
2116 /* Errors at x86_cpu_load_features should never happen,
2117 * but in case it does, just report the model as not
2118 * runnable at all using the "type" property.
2119 */
2120 strList *new = g_new0(strList, 1);
2121 new->value = g_strdup("type");
2122 *next = new;
2123 next = &new->next;
2124 }
2125
2126 x86_cpu_filter_features(xc);
2127
2128 for (w = 0; w < FEATURE_WORDS; w++) {
2129 uint32_t filtered = xc->filtered_features[w];
2130 int i;
2131 for (i = 0; i < 32; i++) {
2132 if (filtered & (1UL << i)) {
2133 strList *new = g_new0(strList, 1);
2134 new->value = g_strdup(x86_cpu_feature_name(w, i));
2135 *next = new;
2136 next = &new->next;
2137 }
2138 }
2139 }
2140
2141 object_unref(OBJECT(xc));
2142 }
2143
2144 /* Print all cpuid feature names in featureset
2145 */
2146 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2147 {
2148 int bit;
2149 bool first = true;
2150
2151 for (bit = 0; bit < 32; bit++) {
2152 if (featureset[bit]) {
2153 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2154 first = false;
2155 }
2156 }
2157 }
2158
2159 /* Sort alphabetically by type name, listing kvm_required models last. */
2160 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2161 {
2162 ObjectClass *class_a = (ObjectClass *)a;
2163 ObjectClass *class_b = (ObjectClass *)b;
2164 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2165 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2166 const char *name_a, *name_b;
2167
2168 if (cc_a->kvm_required != cc_b->kvm_required) {
2169 /* kvm_required items go last */
2170 return cc_a->kvm_required ? 1 : -1;
2171 } else {
2172 name_a = object_class_get_name(class_a);
2173 name_b = object_class_get_name(class_b);
2174 return strcmp(name_a, name_b);
2175 }
2176 }
2177
2178 static GSList *get_sorted_cpu_model_list(void)
2179 {
2180 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2181 list = g_slist_sort(list, x86_cpu_list_compare);
2182 return list;
2183 }
2184
2185 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2186 {
2187 ObjectClass *oc = data;
2188 X86CPUClass *cc = X86_CPU_CLASS(oc);
2189 CPUListState *s = user_data;
2190 char *name = x86_cpu_class_get_model_name(cc);
2191 const char *desc = cc->model_description;
2192 if (!desc) {
2193 desc = cc->cpu_def->model_id;
2194 }
2195
2196 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2197 name, desc);
2198 g_free(name);
2199 }
2200
2201 /* list available CPU models and flags */
2202 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2203 {
2204 int i;
2205 CPUListState s = {
2206 .file = f,
2207 .cpu_fprintf = cpu_fprintf,
2208 };
2209 GSList *list;
2210
2211 (*cpu_fprintf)(f, "Available CPUs:\n");
2212 list = get_sorted_cpu_model_list();
2213 g_slist_foreach(list, x86_cpu_list_entry, &s);
2214 g_slist_free(list);
2215
2216 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2217 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2218 FeatureWordInfo *fw = &feature_word_info[i];
2219
2220 (*cpu_fprintf)(f, " ");
2221 listflags(f, cpu_fprintf, fw->feat_names);
2222 (*cpu_fprintf)(f, "\n");
2223 }
2224 }
2225
2226 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2227 {
2228 ObjectClass *oc = data;
2229 X86CPUClass *cc = X86_CPU_CLASS(oc);
2230 CpuDefinitionInfoList **cpu_list = user_data;
2231 CpuDefinitionInfoList *entry;
2232 CpuDefinitionInfo *info;
2233
2234 info = g_malloc0(sizeof(*info));
2235 info->name = x86_cpu_class_get_model_name(cc);
2236 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2237 info->has_unavailable_features = true;
2238
2239 entry = g_malloc0(sizeof(*entry));
2240 entry->value = info;
2241 entry->next = *cpu_list;
2242 *cpu_list = entry;
2243 }
2244
2245 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2246 {
2247 CpuDefinitionInfoList *cpu_list = NULL;
2248 GSList *list = get_sorted_cpu_model_list();
2249 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2250 g_slist_free(list);
2251 return cpu_list;
2252 }
2253
/* Return the bits of feature word 'w' that the current accelerator can
 * provide: the KVM-supported CPUID bits under KVM, the word's
 * tcg_features under TCG, or all bits (~0) when no accelerator is in
 * use.  If migratable_only is set, non-migratable bits are masked out.
 */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
2274
/*
 * Filters CPU feature words based on host availability of each feature.
 *
 * Unsupported bits are removed from env->features[]; the exact set of
 * bits that was requested but dropped is recorded per word in
 * cpu->filtered_features[] for later reporting.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    return rv;
}
2299
/* Report (via report_unavailable_features()) every feature bit that was
 * dropped by x86_cpu_filter_features(), one call per feature word. */
static void x86_cpu_report_filtered_features(X86CPU *cpu)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        report_unavailable_features(w, cpu->filtered_features[w]);
    }
}
2308
2309 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2310 {
2311 PropValue *pv;
2312 for (pv = props; pv->prop; pv++) {
2313 if (!pv->value) {
2314 continue;
2315 }
2316 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2317 &error_abort);
2318 }
2319 }
2320
/* Load data from X86CPUDefinition into a CPU instance: minimum CPUID
 * levels, family/model/stepping, model-id, feature words, and the
 * accelerator-specific defaults and vendor string.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            /* Without an in-kernel irqchip, x2apic defaults to off */
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise that the guest runs under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        /* Under KVM, default to the host CPU's vendor string */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2373
/* Convenience helper: create and initialize an X86CPU from a CPU model
 * string via the generic CPU creation helper. */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
2378
/* class_init for the per-model CPU types registered by
 * x86_register_cpudef_type(): stash the model definition (passed as
 * class_data) in the class for later use. */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
}
2386
/* Register one QOM type (a TYPE_X86_CPU subclass) for a built-in CPU
 * model definition; the type name is derived via x86_cpu_type_name(). */
static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        /* class_data is handed back to class_init as 'data' */
        .class_data = def,
    };

    type_register(&ti);
    g_free(typename);
}
2400
2401 #if !defined(CONFIG_USER_ONLY)
2402
/* Clear the CPUID[1].EDX APIC feature bit for this CPU. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2407
2408 #endif /* !CONFIG_USER_ONLY */
2409
/* Emulate the CPUID instruction: fill *eax..*edx for leaf 'index' and
 * sub-leaf 'count' from the configured CPU state.
 *
 * Out-of-range leaves are first clamped: extended leaves fall back to
 * cpuid_xlevel (with special handling for the Centaur 0xC0000000 range
 * when cpuid_xlevel2 is set), basic leaves to cpuid_level.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Maximum basic leaf and vendor string */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, APIC ID and basic feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects guest CR4, not a configured feature bit */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR << 8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE reflects guest CR4.PKE, like OSXSAVE in leaf 1 */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
            *eax = *ebx = *ecx = *edx = 0;
            break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component sub-leaves: size/offset of each save area */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        /* Maximum extended leaf and vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        /* Extended feature flags */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* CPU model string: four registers (16 bytes) per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        /* Only EDX (FEAT_8000_0007_EDX) is implemented for this leaf */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor */
            *eax = cpu->phys_bits; /* configurable physical bits */
            if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
                *eax |= 0x00003900; /* 57 bits virtual */
            } else {
                *eax |= 0x00003000; /* 48 bits virtual */
            }
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM information, only when SVM is advertised */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        /* Maximum Centaur leaf */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2807
/* CPUClass::reset(): put the CPU into its architectural reset state —
 * real mode, CS:IP at the reset vector, FPU/SSE/XSAVE state cleared.
 * Only state up to CPUX86State.end_reset_fields is zeroed; fields after
 * that marker survive reset.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS base 0xffff0000 + EIP 0xfff0 below puts
     * execution at the reset vector 0xfffffff0. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2931
2932 #ifndef CONFIG_USER_ONLY
2933 bool cpu_is_bsp(X86CPU *cpu)
2934 {
2935 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2936 }
2937
/* TODO: remove me, when reset over QOM tree is implemented.
 * Machine-reset hook: simply reset this CPU. */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
2944 #endif
2945
/* Initialize Machine Check (MCE/MCA) state: set mcg_cap/mcg_ctl and fill
 * all bank control registers with ones, but only when the CPU has
 * family >= 6 (bits 11:8 of cpuid_version) and advertises both the MCE
 * and MCA feature bits. */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
2962
2963 #ifndef CONFIG_USER_ONLY
2964 APICCommonClass *apic_get_class(void)
2965 {
2966 const char *apic_type = "apic";
2967
2968 if (kvm_apic_in_kernel()) {
2969 apic_type = "kvm-apic";
2970 } else if (xen_enabled()) {
2971 apic_type = "xen-apic";
2972 }
2973
2974 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
2975 }
2976
/* Create (but don't realize) this CPU's local APIC device as a QOM child
 * named "lapic".  The APIC class is chosen to match the accelerator. */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    /* The "lapic" child property keeps the reference; drop ours */
    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
2994
/* Realize this CPU's local APIC (no-op if none was created) and, only
 * for the first CPU realized, map the APIC MMIO region into the system
 * address space at the default apicbase. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
3017
/* Machine-done notifier: if the machine exposes "/machine/smram", create
 * a 4GiB alias of it in this CPU's address space root. */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        /* Starts disabled; NOTE(review): presumably enabled when the CPU
         * enters SMM — that code is not in this file, confirm. */
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
3032 #else
/* User-mode emulation has no APIC device; nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
3036 #endif
3037
3038 /* Note: Only safe for use on x86(-64) hosts */
3039 static uint32_t x86_host_phys_bits(void)
3040 {
3041 uint32_t eax;
3042 uint32_t host_phys_bits;
3043
3044 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3045 if (eax >= 0x80000008) {
3046 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3047 /* Note: According to AMD doc 25481 rev 2.34 they have a field
3048 * at 23:16 that can specify a maximum physical address bits for
3049 * the guest that can override this value; but I've not seen
3050 * anything with that set.
3051 */
3052 host_phys_bits = eax & 0xff;
3053 } else {
3054 /* It's an odd 64 bit machine that doesn't have the leaf for
3055 * physical address bits; fall back to 36 that's most older
3056 * Intel.
3057 */
3058 host_phys_bits = 36;
3059 }
3060
3061 return host_phys_bits;
3062 }
3063
3064 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3065 {
3066 if (*min < value) {
3067 *min = value;
3068 }
3069 }
3070
/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid_eax;
    /* Top nibble of the CPUID leaf selects the range: basic (0x0000_xxxx),
     * extended (0x8000_xxxx) or Centaur (0xC000_xxxx).
     */
    uint32_t region = eax & 0xF0000000;

    /* If no bit of this feature word is enabled, the leaf need not be
     * reachable, so leave the minimum levels alone.
     */
    if (!env->features[w]) {
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }
}
3095
3096 /* Calculate XSAVE components based on the configured CPU feature flags */
3097 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3098 {
3099 CPUX86State *env = &cpu->env;
3100 int i;
3101 uint64_t mask;
3102
3103 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3104 return;
3105 }
3106
3107 mask = 0;
3108 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3109 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3110 if (env->features[esa->feature] & esa->bits) {
3111 mask |= (1ULL << i);
3112 }
3113 }
3114
3115 env->features[FEAT_XSAVE_COMP_LO] = mask;
3116 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3117 }
3118
/* Load CPUID data based on configured features.
 *
 * Applies, in order: the "host" feature set (if requested), explicit
 * "+feat"/"-feat" command-line overrides, KVM paravirt feature masking,
 * XSAVE component calculation, and automatic CPUID level adjustment.
 * Errors from property setters are propagated through @errp.
 */
static void x86_cpu_load_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: cpu->host_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->host_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Replace each word with what the accelerator supports. */
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Turn on features requested with "+feat". */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* Turn off features requested with "-feat". */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are only meaningful under KVM with
     * expose_kvm enabled.
     */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
3196
/* Vendor checks based on the three CPUID vendor-string registers. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU: load and filter CPUID features,
 * validate/derive phys_bits, create the APIC (softmmu), set up the
 * per-CPU address space (TCG), start the vCPU and reset it.
 *
 * NOTE(review): error handling mixes two conventions — most paths set
 * local_err and goto out, but the apic-id and phys-bits checks write
 * directly to errp and return.  Both are functional; just be consistent
 * when extending this function.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    /* Some CPU models (e.g. "host") only make sense with KVM. */
    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_load_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Report features the accelerator had to drop; with "enforce" this
     * becomes a hard error.
     */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       kvm_enabled() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                                 " does not match phys-bits property (%u)",
                                 host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            /* TCG hardcodes the physical address width. */
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed if the CPU advertises one, or for SMP. */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3395
/* DeviceClass::unrealize for X86CPU: stop the vCPU, drop the reset
 * handler, destroy the APIC, then chain to the parent unrealize.
 */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3418
/* Opaque state for a boolean CPU feature property: @ptr points at the
 * feature word and @mask selects the bit(s) the property controls.
 */
typedef struct BitProperty {
    uint32_t *ptr;
    uint32_t mask;
} BitProperty;
3423
3424 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3425 void *opaque, Error **errp)
3426 {
3427 BitProperty *fp = opaque;
3428 bool value = (*fp->ptr & fp->mask) == fp->mask;
3429 visit_type_bool(v, name, &value, errp);
3430 }
3431
3432 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3433 void *opaque, Error **errp)
3434 {
3435 DeviceState *dev = DEVICE(obj);
3436 BitProperty *fp = opaque;
3437 Error *local_err = NULL;
3438 bool value;
3439
3440 if (dev->realized) {
3441 qdev_prop_set_after_realize(dev, name, errp);
3442 return;
3443 }
3444
3445 visit_type_bool(v, name, &value, &local_err);
3446 if (local_err) {
3447 error_propagate(errp, local_err);
3448 return;
3449 }
3450
3451 if (value) {
3452 *fp->ptr |= fp->mask;
3453 } else {
3454 *fp->ptr &= ~fp->mask;
3455 }
3456 }
3457
3458 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3459 void *opaque)
3460 {
3461 BitProperty *prop = opaque;
3462 g_free(prop);
3463 }
3464
/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      uint32_t *field,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Already registered: widen the existing property's mask.  It
         * must refer to the same feature word.
         */
        fp = op->opaque;
        assert(fp->ptr == field);
        fp->mask |= mask;
    } else {
        /* First registration: allocate the BitProperty (freed by
         * x86_cpu_release_bit_prop) and add the QOM property.
         */
        fp = g_new0(BitProperty, 1);
        fp->ptr = field;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}
3495
3496 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3497 FeatureWord w,
3498 int bitnr)
3499 {
3500 FeatureWordInfo *fi = &feature_word_info[w];
3501 const char *name = fi->feat_names[bitnr];
3502
3503 if (!name) {
3504 return;
3505 }
3506
3507 /* Property names should use "-" instead of "_".
3508 * Old names containing underscores are registered as aliases
3509 * using object_property_add_alias()
3510 */
3511 assert(!strchr(name, '_'));
3512 /* aliases don't use "|" delimiters anymore, they are registered
3513 * manually using object_property_add_alias() */
3514 assert(!strchr(name, '|'));
3515 x86_cpu_register_bit_prop(cpu, name, &cpu->env.features[w], bitnr);
3516 }
3517
/* Instance init for X86CPU: register QOM properties (versioning, vendor,
 * model-id, tsc-frequency, feature-word introspection), one boolean
 * property per named feature bit, legacy-name aliases, and finally load
 * the class's CPU model definition defaults.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* CPUID version fields and identification strings. */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only introspection of the enabled and filtered feature words. */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Aliases for alternative feature spellings. */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Aliases for legacy underscore-style names. */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3595
3596 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3597 {
3598 X86CPU *cpu = X86_CPU(cs);
3599
3600 return cpu->apic_id;
3601 }
3602
3603 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3604 {
3605 X86CPU *cpu = X86_CPU(cs);
3606
3607 return cpu->env.cr[0] & CR0_PG_MASK;
3608 }
3609
3610 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3611 {
3612 X86CPU *cpu = X86_CPU(cs);
3613
3614 cpu->env.eip = value;
3615 }
3616
3617 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3618 {
3619 X86CPU *cpu = X86_CPU(cs);
3620
3621 cpu->env.eip = tb->pc - tb->cs_base;
3622 }
3623
3624 static bool x86_cpu_has_work(CPUState *cs)
3625 {
3626 X86CPU *cpu = X86_CPU(cs);
3627 CPUX86State *env = &cpu->env;
3628
3629 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3630 CPU_INTERRUPT_POLL)) &&
3631 (env->eflags & IF_MASK)) ||
3632 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3633 CPU_INTERRUPT_INIT |
3634 CPU_INTERRUPT_SIPI |
3635 CPU_INTERRUPT_MCE)) ||
3636 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3637 !(env->hflags & HF_SMM_MASK));
3638 }
3639
/* qdev properties of X86CPU.  Topology ids default to 0 for *-user and
 * to unassigned/-1 for system emulation.
 */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments. */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* Feature checking/enforcement and CPUID shaping. */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "derive from min-level" (see x86_cpu_load_features). */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_END_OF_LIST()
};
3683
/* Class init for the abstract x86 CPU type: hook realize/unrealize/reset
 * (saving the parent implementations for chaining) and fill in the
 * CPUClass method table.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    xcc->parent_realize = dc->realize;
    xcc->parent_unrealize = dc->unrealize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    /* CPU_NB_REGS * 2 = general regs + xmm regs
     * 25 = eip, eflags, 6 seg regs, st[0-7], fctrl,...,fop, mxcsr.
     */
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    /* X86CPU can be hotplugged via device_add. */
    dc->cannot_instantiate_with_device_add_yet = false;
}
3735
/* Abstract base type for all x86 CPU models; concrete models are
 * registered per-cpudef in x86_cpu_register_types().
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3745
3746 static void x86_cpu_register_types(void)
3747 {
3748 int i;
3749
3750 type_register_static(&x86_cpu_type_info);
3751 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3752 x86_register_cpudef_type(&builtin_x86_defs[i]);
3753 }
3754 #ifdef CONFIG_KVM
3755 type_register_static(&host_x86_cpu_type_info);
3756 #endif
3757 }
3758
3759 type_init(x86_cpu_register_types)