]> git.proxmox.com Git - mirror_qemu.git/blob - target/i386/cpu.c
Merge remote-tracking branch 'remotes/huth/tags/target-dirs-20161220' into staging
[mirror_qemu.git] / target / i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
53 /* Cache topology CPUID constants: */
54
55 /* CPUID Leaf 2 Descriptors */
56
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
61
62
63 /* CPUID Leaf 4 constants: */
64
65 /* EAX: */
66 #define CPUID_4_TYPE_DCACHE 1
67 #define CPUID_4_TYPE_ICACHE 2
68 #define CPUID_4_TYPE_UNIFIED 3
69
70 #define CPUID_4_LEVEL(l) ((l) << 5)
71
72 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
73 #define CPUID_4_FULLY_ASSOC (1 << 9)
74
75 /* EDX: */
76 #define CPUID_4_NO_INVD_SHARING (1 << 0)
77 #define CPUID_4_INCLUSIVE (1 << 1)
78 #define CPUID_4_COMPLEX_IDX (1 << 2)
79
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a ways-of-associativity count to the 4-bit field value.
 * Arguments are parenthesized so expressions (e.g. AMD_ENC_ASSOC(x + y))
 * expand correctly.
 */
#define AMD_ENC_ASSOC(a) ((a) <= 1 ? (a)   : \
                          (a) == 2 ? 0x2 : \
                          (a) == 4 ? 0x4 : \
                          (a) == 8 ? 0x6 : \
                          (a) == 16 ? 0x8 : \
                          (a) == 32 ? 0xA : \
                          (a) == 48 ? 0xB : \
                          (a) == 64 ? 0xC : \
                          (a) == 96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
95
96
97 /* Definitions of the hardcoded cache entries we expose: */
98
99 /* L1 data cache: */
100 #define L1D_LINE_SIZE 64
101 #define L1D_ASSOCIATIVITY 8
102 #define L1D_SETS 64
103 #define L1D_PARTITIONS 1
104 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
105 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
106 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
107 #define L1D_LINES_PER_TAG 1
108 #define L1D_SIZE_KB_AMD 64
109 #define L1D_ASSOCIATIVITY_AMD 2
110
111 /* L1 instruction cache: */
112 #define L1I_LINE_SIZE 64
113 #define L1I_ASSOCIATIVITY 8
114 #define L1I_SETS 64
115 #define L1I_PARTITIONS 1
116 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
117 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
118 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
119 #define L1I_LINES_PER_TAG 1
120 #define L1I_SIZE_KB_AMD 64
121 #define L1I_ASSOCIATIVITY_AMD 2
122
123 /* Level 2 unified cache: */
124 #define L2_LINE_SIZE 64
125 #define L2_ASSOCIATIVITY 16
126 #define L2_SETS 4096
127 #define L2_PARTITIONS 1
128 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
129 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
130 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
131 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
132 #define L2_LINES_PER_TAG 1
133 #define L2_SIZE_KB_AMD 512
134
135 /* Level 3 unified cache: */
136 #define L3_SIZE_KB 0 /* disabled */
137 #define L3_ASSOCIATIVITY 0 /* disabled */
138 #define L3_LINES_PER_TAG 0 /* disabled */
139 #define L3_LINE_SIZE 0 /* disabled */
140 #define L3_N_LINE_SIZE 64
141 #define L3_N_ASSOCIATIVITY 16
142 #define L3_N_SETS 16384
143 #define L3_N_PARTITIONS 1
144 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
145 #define L3_N_LINES_PER_TAG 1
146 #define L3_N_SIZE_KB_AMD 16384
147
148 /* TLB definitions: */
149
150 #define L1_DTLB_2M_ASSOC 1
151 #define L1_DTLB_2M_ENTRIES 255
152 #define L1_DTLB_4K_ASSOC 1
153 #define L1_DTLB_4K_ENTRIES 255
154
155 #define L1_ITLB_2M_ASSOC 1
156 #define L1_ITLB_2M_ENTRIES 255
157 #define L1_ITLB_4K_ASSOC 1
158 #define L1_ITLB_4K_ENTRIES 255
159
160 #define L2_DTLB_2M_ASSOC 0 /* disabled */
161 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
162 #define L2_DTLB_4K_ASSOC 4
163 #define L2_DTLB_4K_ENTRIES 512
164
165 #define L2_ITLB_2M_ASSOC 0 /* disabled */
166 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
167 #define L2_ITLB_4K_ASSOC 4
168 #define L2_ITLB_4K_ENTRIES 512
169
170
171
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
174 {
175 int i;
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
180 }
181 dst[CPUID_VENDOR_SZ] = '\0';
182 }
183
184 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
185 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
186 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
187 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
188 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
189 CPUID_PSE36 | CPUID_FXSR)
190 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
191 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
192 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
193 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
194 CPUID_PAE | CPUID_SEP | CPUID_APIC)
195
196 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
197 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
198 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
199 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
200 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
201 /* partly implemented:
202 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
203 /* missing:
204 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
205 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
206 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
207 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
208 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
209 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
210 /* missing:
211 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
212 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
213 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
214 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
215 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
216
217 #ifdef TARGET_X86_64
218 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
219 #else
220 #define TCG_EXT2_X86_64_FEATURES 0
221 #endif
222
223 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
224 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
225 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
226 TCG_EXT2_X86_64_FEATURES)
227 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
228 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
229 #define TCG_EXT4_FEATURES 0
230 #define TCG_SVM_FEATURES 0
231 #define TCG_KVM_FEATURES 0
232 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
233 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
234 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
235 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
236 CPUID_7_0_EBX_ERMS)
237 /* missing:
238 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
239 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
240 CPUID_7_0_EBX_RDSEED */
241 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
242 #define TCG_7_0_EDX_FEATURES 0
243 #define TCG_APM_FEATURES 0
244 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
245 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
246 /* missing:
247 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
248
/* Per-feature-word metadata: how to query the word with CPUID and which
 * bits QEMU can emulate or migrate. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32]; /* one name per bit; NULL = unnamed bit */
    uint32_t cpuid_eax; /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx; /* Input ECX value for CPUID */
    int cpuid_reg; /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
264
/* Metadata for every FeatureWord: CPUID query parameters, bit names, and
 * the subset of bits TCG can emulate. */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    /* VIA/Centaur extended features */
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = {
            /* "kvmclock" deliberately appears twice: bits 0 and 3 are both
             * exposed under the same name. */
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    /* NOTE(review): the three FEAT_HYPERV_* words below have every bit
     * unnamed (NULL), so presumably they cannot be toggled by name from the
     * command line — confirm against the feature-parsing code. */
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", NULL, "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    /* XSAVE state-component bitmap, low/high 32 bits
     * (CPUID[EAX=0xD,ECX=0].EAX and .EDX). */
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};
527
/* Mapping between an R_* register index, its printable name, and the
 * corresponding QAPI enum value. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value for the register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
534
/* Build one x86_reg_info_32[] entry from a register name token: uses the
 * R_* constant as the index and stringizes the name. */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* Per-register metadata, indexed by R_* constant. */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
548
/* Describes one XSAVE state component: the CPUID feature word/bits that
 * enable it, and its location inside the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;   /* feature word + bit mask enabling the component */
    uint32_t offset, size;    /* byte offset and size within the XSAVE area */
} ExtSaveArea;
553
/* XSAVE state components known to QEMU, indexed by XSTATE_*_BIT number. */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    /* AVX: upper halves of the YMM registers */
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    /* MPX bound registers and config/status register */
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    /* AVX-512: opmask registers, ZMM upper halves, upper ZMM registers */
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    /* Protection-key rights register */
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
598
599 static uint32_t xsave_area_size(uint64_t mask)
600 {
601 int i;
602 uint64_t ret = 0;
603
604 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
605 const ExtSaveArea *esa = &x86_ext_save_areas[i];
606 if ((mask >> i) & 1) {
607 ret = MAX(ret, esa->offset + esa->size);
608 }
609 }
610 return ret;
611 }
612
613 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
614 {
615 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
616 cpu->env.features[FEAT_XSAVE_COMP_LO];
617 }
618
619 const char *get_register_name_32(unsigned int reg)
620 {
621 if (reg >= CPU_NB_REGS32) {
622 return NULL;
623 }
624 return x86_reg_info_32[reg].name;
625 }
626
627 /*
628 * Returns the set of feature flags that are supported and migratable by
629 * QEMU, for a given FeatureWord.
630 */
631 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
632 {
633 FeatureWordInfo *wi = &feature_word_info[w];
634 uint32_t r = 0;
635 int i;
636
637 for (i = 0; i < 32; i++) {
638 uint32_t f = 1U << i;
639
640 /* If the feature name is known, it is implicitly considered migratable,
641 * unless it is explicitly set in unmigratable_flags */
642 if ((wi->migratable_flags & f) ||
643 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
644 r |= f;
645 }
646 }
647 return r;
648 }
649
650 void host_cpuid(uint32_t function, uint32_t count,
651 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
652 {
653 uint32_t vec[4];
654
655 #ifdef __x86_64__
656 asm volatile("cpuid"
657 : "=a"(vec[0]), "=b"(vec[1]),
658 "=c"(vec[2]), "=d"(vec[3])
659 : "0"(function), "c"(count) : "cc");
660 #elif defined(__i386__)
661 asm volatile("pusha \n\t"
662 "cpuid \n\t"
663 "mov %%eax, 0(%2) \n\t"
664 "mov %%ebx, 4(%2) \n\t"
665 "mov %%ecx, 8(%2) \n\t"
666 "mov %%edx, 12(%2) \n\t"
667 "popa"
668 : : "a"(function), "c"(count), "S"(vec)
669 : "memory", "cc");
670 #else
671 abort();
672 #endif
673
674 if (eax)
675 *eax = vec[0];
676 if (ebx)
677 *ebx = vec[1];
678 if (ecx)
679 *ecx = vec[2];
680 if (edx)
681 *edx = vec[3];
682 }
683
684 /* CPU class name definitions: */
685
686 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
687 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
688
689 /* Return type name for a given CPU model name
690 * Caller is responsible for freeing the returned string.
691 */
692 static char *x86_cpu_type_name(const char *model_name)
693 {
694 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
695 }
696
697 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
698 {
699 ObjectClass *oc;
700 char *typename;
701
702 if (cpu_model == NULL) {
703 return NULL;
704 }
705
706 typename = x86_cpu_type_name(cpu_model);
707 oc = object_class_by_name(typename);
708 g_free(typename);
709 return oc;
710 }
711
712 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
713 {
714 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
715 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
716 return g_strndup(class_name,
717 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
718 }
719
/* Static definition of one built-in CPU model. */
struct X86CPUDefinition {
    const char *name;       /* CPU model name */
    uint32_t level;         /* maximum basic CPUID leaf */
    uint32_t xlevel;        /* maximum extended CPUID leaf (0x8000xxxx) */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;             /* CPUID family/model/stepping values */
    int model;
    int stepping;
    FeatureWordArray features; /* initial feature bits, per feature word */
    char model_id[48];      /* model ID string (48 bytes, CPUID-style) */
};
732
733 static X86CPUDefinition builtin_x86_defs[] = {
734 {
735 .name = "qemu64",
736 .level = 0xd,
737 .vendor = CPUID_VENDOR_AMD,
738 .family = 6,
739 .model = 6,
740 .stepping = 3,
741 .features[FEAT_1_EDX] =
742 PPRO_FEATURES |
743 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
744 CPUID_PSE36,
745 .features[FEAT_1_ECX] =
746 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
747 .features[FEAT_8000_0001_EDX] =
748 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
749 .features[FEAT_8000_0001_ECX] =
750 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
751 .xlevel = 0x8000000A,
752 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
753 },
754 {
755 .name = "phenom",
756 .level = 5,
757 .vendor = CPUID_VENDOR_AMD,
758 .family = 16,
759 .model = 2,
760 .stepping = 3,
761 /* Missing: CPUID_HT */
762 .features[FEAT_1_EDX] =
763 PPRO_FEATURES |
764 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
765 CPUID_PSE36 | CPUID_VME,
766 .features[FEAT_1_ECX] =
767 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
768 CPUID_EXT_POPCNT,
769 .features[FEAT_8000_0001_EDX] =
770 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
771 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
772 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
773 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
774 CPUID_EXT3_CR8LEG,
775 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
776 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
777 .features[FEAT_8000_0001_ECX] =
778 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
779 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
780 /* Missing: CPUID_SVM_LBRV */
781 .features[FEAT_SVM] =
782 CPUID_SVM_NPT,
783 .xlevel = 0x8000001A,
784 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
785 },
786 {
787 .name = "core2duo",
788 .level = 10,
789 .vendor = CPUID_VENDOR_INTEL,
790 .family = 6,
791 .model = 15,
792 .stepping = 11,
793 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
794 .features[FEAT_1_EDX] =
795 PPRO_FEATURES |
796 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
797 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
798 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
799 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
800 .features[FEAT_1_ECX] =
801 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
802 CPUID_EXT_CX16,
803 .features[FEAT_8000_0001_EDX] =
804 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
805 .features[FEAT_8000_0001_ECX] =
806 CPUID_EXT3_LAHF_LM,
807 .xlevel = 0x80000008,
808 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
809 },
810 {
811 .name = "kvm64",
812 .level = 0xd,
813 .vendor = CPUID_VENDOR_INTEL,
814 .family = 15,
815 .model = 6,
816 .stepping = 1,
817 /* Missing: CPUID_HT */
818 .features[FEAT_1_EDX] =
819 PPRO_FEATURES | CPUID_VME |
820 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
821 CPUID_PSE36,
822 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
823 .features[FEAT_1_ECX] =
824 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
825 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
826 .features[FEAT_8000_0001_EDX] =
827 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
828 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
829 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
830 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
831 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
832 .features[FEAT_8000_0001_ECX] =
833 0,
834 .xlevel = 0x80000008,
835 .model_id = "Common KVM processor"
836 },
837 {
838 .name = "qemu32",
839 .level = 4,
840 .vendor = CPUID_VENDOR_INTEL,
841 .family = 6,
842 .model = 6,
843 .stepping = 3,
844 .features[FEAT_1_EDX] =
845 PPRO_FEATURES,
846 .features[FEAT_1_ECX] =
847 CPUID_EXT_SSE3,
848 .xlevel = 0x80000004,
849 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
850 },
851 {
852 .name = "kvm32",
853 .level = 5,
854 .vendor = CPUID_VENDOR_INTEL,
855 .family = 15,
856 .model = 6,
857 .stepping = 1,
858 .features[FEAT_1_EDX] =
859 PPRO_FEATURES | CPUID_VME |
860 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
861 .features[FEAT_1_ECX] =
862 CPUID_EXT_SSE3,
863 .features[FEAT_8000_0001_ECX] =
864 0,
865 .xlevel = 0x80000008,
866 .model_id = "Common 32-bit KVM processor"
867 },
868 {
869 .name = "coreduo",
870 .level = 10,
871 .vendor = CPUID_VENDOR_INTEL,
872 .family = 6,
873 .model = 14,
874 .stepping = 8,
875 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
876 .features[FEAT_1_EDX] =
877 PPRO_FEATURES | CPUID_VME |
878 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
879 CPUID_SS,
880 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
881 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
882 .features[FEAT_1_ECX] =
883 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
884 .features[FEAT_8000_0001_EDX] =
885 CPUID_EXT2_NX,
886 .xlevel = 0x80000008,
887 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
888 },
889 {
890 .name = "486",
891 .level = 1,
892 .vendor = CPUID_VENDOR_INTEL,
893 .family = 4,
894 .model = 8,
895 .stepping = 0,
896 .features[FEAT_1_EDX] =
897 I486_FEATURES,
898 .xlevel = 0,
899 },
900 {
901 .name = "pentium",
902 .level = 1,
903 .vendor = CPUID_VENDOR_INTEL,
904 .family = 5,
905 .model = 4,
906 .stepping = 3,
907 .features[FEAT_1_EDX] =
908 PENTIUM_FEATURES,
909 .xlevel = 0,
910 },
911 {
912 .name = "pentium2",
913 .level = 2,
914 .vendor = CPUID_VENDOR_INTEL,
915 .family = 6,
916 .model = 5,
917 .stepping = 2,
918 .features[FEAT_1_EDX] =
919 PENTIUM2_FEATURES,
920 .xlevel = 0,
921 },
922 {
923 .name = "pentium3",
924 .level = 3,
925 .vendor = CPUID_VENDOR_INTEL,
926 .family = 6,
927 .model = 7,
928 .stepping = 3,
929 .features[FEAT_1_EDX] =
930 PENTIUM3_FEATURES,
931 .xlevel = 0,
932 },
933 {
934 .name = "athlon",
935 .level = 2,
936 .vendor = CPUID_VENDOR_AMD,
937 .family = 6,
938 .model = 2,
939 .stepping = 3,
940 .features[FEAT_1_EDX] =
941 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
942 CPUID_MCA,
943 .features[FEAT_8000_0001_EDX] =
944 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
945 .xlevel = 0x80000008,
946 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
947 },
948 {
949 .name = "n270",
950 .level = 10,
951 .vendor = CPUID_VENDOR_INTEL,
952 .family = 6,
953 .model = 28,
954 .stepping = 2,
955 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
956 .features[FEAT_1_EDX] =
957 PPRO_FEATURES |
958 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
959 CPUID_ACPI | CPUID_SS,
960 /* Some CPUs got no CPUID_SEP */
961 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
962 * CPUID_EXT_XTPR */
963 .features[FEAT_1_ECX] =
964 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
965 CPUID_EXT_MOVBE,
966 .features[FEAT_8000_0001_EDX] =
967 CPUID_EXT2_NX,
968 .features[FEAT_8000_0001_ECX] =
969 CPUID_EXT3_LAHF_LM,
970 .xlevel = 0x80000008,
971 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
972 },
973 {
974 .name = "Conroe",
975 .level = 10,
976 .vendor = CPUID_VENDOR_INTEL,
977 .family = 6,
978 .model = 15,
979 .stepping = 3,
980 .features[FEAT_1_EDX] =
981 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
982 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
983 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
984 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
985 CPUID_DE | CPUID_FP87,
986 .features[FEAT_1_ECX] =
987 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
988 .features[FEAT_8000_0001_EDX] =
989 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
990 .features[FEAT_8000_0001_ECX] =
991 CPUID_EXT3_LAHF_LM,
992 .xlevel = 0x80000008,
993 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
994 },
995 {
996 .name = "Penryn",
997 .level = 10,
998 .vendor = CPUID_VENDOR_INTEL,
999 .family = 6,
1000 .model = 23,
1001 .stepping = 3,
1002 .features[FEAT_1_EDX] =
1003 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1004 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1005 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1006 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1007 CPUID_DE | CPUID_FP87,
1008 .features[FEAT_1_ECX] =
1009 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1010 CPUID_EXT_SSE3,
1011 .features[FEAT_8000_0001_EDX] =
1012 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1013 .features[FEAT_8000_0001_ECX] =
1014 CPUID_EXT3_LAHF_LM,
1015 .xlevel = 0x80000008,
1016 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1017 },
1018 {
1019 .name = "Nehalem",
1020 .level = 11,
1021 .vendor = CPUID_VENDOR_INTEL,
1022 .family = 6,
1023 .model = 26,
1024 .stepping = 3,
1025 .features[FEAT_1_EDX] =
1026 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1027 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1028 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1029 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1030 CPUID_DE | CPUID_FP87,
1031 .features[FEAT_1_ECX] =
1032 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1033 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1034 .features[FEAT_8000_0001_EDX] =
1035 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1036 .features[FEAT_8000_0001_ECX] =
1037 CPUID_EXT3_LAHF_LM,
1038 .xlevel = 0x80000008,
1039 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1040 },
1041 {
1042 .name = "Westmere",
1043 .level = 11,
1044 .vendor = CPUID_VENDOR_INTEL,
1045 .family = 6,
1046 .model = 44,
1047 .stepping = 1,
1048 .features[FEAT_1_EDX] =
1049 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1050 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1051 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1052 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1053 CPUID_DE | CPUID_FP87,
1054 .features[FEAT_1_ECX] =
1055 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1056 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1057 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1058 .features[FEAT_8000_0001_EDX] =
1059 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1060 .features[FEAT_8000_0001_ECX] =
1061 CPUID_EXT3_LAHF_LM,
1062 .features[FEAT_6_EAX] =
1063 CPUID_6_EAX_ARAT,
1064 .xlevel = 0x80000008,
1065 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1066 },
1067 {
1068 .name = "SandyBridge",
1069 .level = 0xd,
1070 .vendor = CPUID_VENDOR_INTEL,
1071 .family = 6,
1072 .model = 42,
1073 .stepping = 1,
1074 .features[FEAT_1_EDX] =
1075 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1076 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1077 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1078 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1079 CPUID_DE | CPUID_FP87,
1080 .features[FEAT_1_ECX] =
1081 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1082 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1083 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1084 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1085 CPUID_EXT_SSE3,
1086 .features[FEAT_8000_0001_EDX] =
1087 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1088 CPUID_EXT2_SYSCALL,
1089 .features[FEAT_8000_0001_ECX] =
1090 CPUID_EXT3_LAHF_LM,
1091 .features[FEAT_XSAVE] =
1092 CPUID_XSAVE_XSAVEOPT,
1093 .features[FEAT_6_EAX] =
1094 CPUID_6_EAX_ARAT,
1095 .xlevel = 0x80000008,
1096 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1097 },
1098 {
1099 .name = "IvyBridge",
1100 .level = 0xd,
1101 .vendor = CPUID_VENDOR_INTEL,
1102 .family = 6,
1103 .model = 58,
1104 .stepping = 9,
1105 .features[FEAT_1_EDX] =
1106 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1107 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1108 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1109 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1110 CPUID_DE | CPUID_FP87,
1111 .features[FEAT_1_ECX] =
1112 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1113 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1114 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1115 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1116 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1117 .features[FEAT_7_0_EBX] =
1118 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1119 CPUID_7_0_EBX_ERMS,
1120 .features[FEAT_8000_0001_EDX] =
1121 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1122 CPUID_EXT2_SYSCALL,
1123 .features[FEAT_8000_0001_ECX] =
1124 CPUID_EXT3_LAHF_LM,
1125 .features[FEAT_XSAVE] =
1126 CPUID_XSAVE_XSAVEOPT,
1127 .features[FEAT_6_EAX] =
1128 CPUID_6_EAX_ARAT,
1129 .xlevel = 0x80000008,
1130 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1131 },
1132 {
1133 .name = "Haswell-noTSX",
1134 .level = 0xd,
1135 .vendor = CPUID_VENDOR_INTEL,
1136 .family = 6,
1137 .model = 60,
1138 .stepping = 1,
1139 .features[FEAT_1_EDX] =
1140 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1141 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1142 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1143 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1144 CPUID_DE | CPUID_FP87,
1145 .features[FEAT_1_ECX] =
1146 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1147 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1148 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1149 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1150 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1151 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1152 .features[FEAT_8000_0001_EDX] =
1153 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1154 CPUID_EXT2_SYSCALL,
1155 .features[FEAT_8000_0001_ECX] =
1156 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1157 .features[FEAT_7_0_EBX] =
1158 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1159 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1160 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1161 .features[FEAT_XSAVE] =
1162 CPUID_XSAVE_XSAVEOPT,
1163 .features[FEAT_6_EAX] =
1164 CPUID_6_EAX_ARAT,
1165 .xlevel = 0x80000008,
1166 .model_id = "Intel Core Processor (Haswell, no TSX)",
1167 }, {
1168 .name = "Haswell",
1169 .level = 0xd,
1170 .vendor = CPUID_VENDOR_INTEL,
1171 .family = 6,
1172 .model = 60,
1173 .stepping = 1,
1174 .features[FEAT_1_EDX] =
1175 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1176 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1177 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1178 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1179 CPUID_DE | CPUID_FP87,
1180 .features[FEAT_1_ECX] =
1181 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1182 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1183 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1184 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1185 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1186 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1187 .features[FEAT_8000_0001_EDX] =
1188 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1189 CPUID_EXT2_SYSCALL,
1190 .features[FEAT_8000_0001_ECX] =
1191 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1192 .features[FEAT_7_0_EBX] =
1193 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1194 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1195 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1196 CPUID_7_0_EBX_RTM,
1197 .features[FEAT_XSAVE] =
1198 CPUID_XSAVE_XSAVEOPT,
1199 .features[FEAT_6_EAX] =
1200 CPUID_6_EAX_ARAT,
1201 .xlevel = 0x80000008,
1202 .model_id = "Intel Core Processor (Haswell)",
1203 },
1204 {
1205 .name = "Broadwell-noTSX",
1206 .level = 0xd,
1207 .vendor = CPUID_VENDOR_INTEL,
1208 .family = 6,
1209 .model = 61,
1210 .stepping = 2,
1211 .features[FEAT_1_EDX] =
1212 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1213 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1214 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1215 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1216 CPUID_DE | CPUID_FP87,
1217 .features[FEAT_1_ECX] =
1218 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1219 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1220 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1221 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1222 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1223 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1224 .features[FEAT_8000_0001_EDX] =
1225 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1226 CPUID_EXT2_SYSCALL,
1227 .features[FEAT_8000_0001_ECX] =
1228 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1229 .features[FEAT_7_0_EBX] =
1230 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1231 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1232 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1233 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1234 CPUID_7_0_EBX_SMAP,
1235 .features[FEAT_XSAVE] =
1236 CPUID_XSAVE_XSAVEOPT,
1237 .features[FEAT_6_EAX] =
1238 CPUID_6_EAX_ARAT,
1239 .xlevel = 0x80000008,
1240 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1241 },
1242 {
1243 .name = "Broadwell",
1244 .level = 0xd,
1245 .vendor = CPUID_VENDOR_INTEL,
1246 .family = 6,
1247 .model = 61,
1248 .stepping = 2,
1249 .features[FEAT_1_EDX] =
1250 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1251 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1252 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1253 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1254 CPUID_DE | CPUID_FP87,
1255 .features[FEAT_1_ECX] =
1256 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1257 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1258 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1259 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1260 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1261 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1262 .features[FEAT_8000_0001_EDX] =
1263 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1264 CPUID_EXT2_SYSCALL,
1265 .features[FEAT_8000_0001_ECX] =
1266 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1267 .features[FEAT_7_0_EBX] =
1268 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1269 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1270 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1271 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1272 CPUID_7_0_EBX_SMAP,
1273 .features[FEAT_XSAVE] =
1274 CPUID_XSAVE_XSAVEOPT,
1275 .features[FEAT_6_EAX] =
1276 CPUID_6_EAX_ARAT,
1277 .xlevel = 0x80000008,
1278 .model_id = "Intel Core Processor (Broadwell)",
1279 },
1280 {
1281 .name = "Skylake-Client",
1282 .level = 0xd,
1283 .vendor = CPUID_VENDOR_INTEL,
1284 .family = 6,
1285 .model = 94,
1286 .stepping = 3,
1287 .features[FEAT_1_EDX] =
1288 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1289 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1290 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1291 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1292 CPUID_DE | CPUID_FP87,
1293 .features[FEAT_1_ECX] =
1294 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1295 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1296 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1297 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1298 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1299 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1300 .features[FEAT_8000_0001_EDX] =
1301 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1302 CPUID_EXT2_SYSCALL,
1303 .features[FEAT_8000_0001_ECX] =
1304 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1305 .features[FEAT_7_0_EBX] =
1306 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1307 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1308 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1309 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1310 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1311 /* Missing: XSAVES (not supported by some Linux versions,
1312 * including v4.1 to v4.6).
1313 * KVM doesn't yet expose any XSAVES state save component,
1314 * and the only one defined in Skylake (processor tracing)
1315 * probably will block migration anyway.
1316 */
1317 .features[FEAT_XSAVE] =
1318 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1319 CPUID_XSAVE_XGETBV1,
1320 .features[FEAT_6_EAX] =
1321 CPUID_6_EAX_ARAT,
1322 .xlevel = 0x80000008,
1323 .model_id = "Intel Core Processor (Skylake)",
1324 },
1325 {
1326 .name = "Opteron_G1",
1327 .level = 5,
1328 .vendor = CPUID_VENDOR_AMD,
1329 .family = 15,
1330 .model = 6,
1331 .stepping = 1,
1332 .features[FEAT_1_EDX] =
1333 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1334 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1335 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1336 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1337 CPUID_DE | CPUID_FP87,
1338 .features[FEAT_1_ECX] =
1339 CPUID_EXT_SSE3,
1340 .features[FEAT_8000_0001_EDX] =
1341 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1342 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1343 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1344 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1345 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1346 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1347 .xlevel = 0x80000008,
1348 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1349 },
1350 {
1351 .name = "Opteron_G2",
1352 .level = 5,
1353 .vendor = CPUID_VENDOR_AMD,
1354 .family = 15,
1355 .model = 6,
1356 .stepping = 1,
1357 .features[FEAT_1_EDX] =
1358 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1359 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1360 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1361 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1362 CPUID_DE | CPUID_FP87,
1363 .features[FEAT_1_ECX] =
1364 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1365 /* Missing: CPUID_EXT2_RDTSCP */
1366 .features[FEAT_8000_0001_EDX] =
1367 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1368 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1369 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1370 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1371 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1372 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1373 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1374 .features[FEAT_8000_0001_ECX] =
1375 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1376 .xlevel = 0x80000008,
1377 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1378 },
1379 {
1380 .name = "Opteron_G3",
1381 .level = 5,
1382 .vendor = CPUID_VENDOR_AMD,
1383 .family = 16,
1384 .model = 2,
1385 .stepping = 3,
1386 .features[FEAT_1_EDX] =
1387 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1388 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1389 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1390 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1391 CPUID_DE | CPUID_FP87,
1392 .features[FEAT_1_ECX] =
1393 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1394 CPUID_EXT_SSE3,
1395 /* Missing: CPUID_EXT2_RDTSCP */
1396 .features[FEAT_8000_0001_EDX] =
1397 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1398 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1399 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1400 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1401 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1402 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1403 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1404 .features[FEAT_8000_0001_ECX] =
1405 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1406 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1407 .xlevel = 0x80000008,
1408 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1409 },
1410 {
1411 .name = "Opteron_G4",
1412 .level = 0xd,
1413 .vendor = CPUID_VENDOR_AMD,
1414 .family = 21,
1415 .model = 1,
1416 .stepping = 2,
1417 .features[FEAT_1_EDX] =
1418 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1419 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1420 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1421 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1422 CPUID_DE | CPUID_FP87,
1423 .features[FEAT_1_ECX] =
1424 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1425 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1426 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1427 CPUID_EXT_SSE3,
1428 /* Missing: CPUID_EXT2_RDTSCP */
1429 .features[FEAT_8000_0001_EDX] =
1430 CPUID_EXT2_LM |
1431 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1432 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1433 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1434 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1435 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1436 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1437 .features[FEAT_8000_0001_ECX] =
1438 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1439 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1440 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1441 CPUID_EXT3_LAHF_LM,
1442 /* no xsaveopt! */
1443 .xlevel = 0x8000001A,
1444 .model_id = "AMD Opteron 62xx class CPU",
1445 },
1446 {
1447 .name = "Opteron_G5",
1448 .level = 0xd,
1449 .vendor = CPUID_VENDOR_AMD,
1450 .family = 21,
1451 .model = 2,
1452 .stepping = 0,
1453 .features[FEAT_1_EDX] =
1454 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1455 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1456 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1457 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1458 CPUID_DE | CPUID_FP87,
1459 .features[FEAT_1_ECX] =
1460 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1461 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1462 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1463 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1464 /* Missing: CPUID_EXT2_RDTSCP */
1465 .features[FEAT_8000_0001_EDX] =
1466 CPUID_EXT2_LM |
1467 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1468 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1469 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1470 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1471 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1472 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1473 .features[FEAT_8000_0001_ECX] =
1474 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1475 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1476 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1477 CPUID_EXT3_LAHF_LM,
1478 /* no xsaveopt! */
1479 .xlevel = 0x8000001A,
1480 .model_id = "AMD Opteron 63xx class CPU",
1481 },
1482 };
1483
/* A (property name, value) pair used to apply default property overrides
 * to CPU models (see kvm_default_props / tcg_default_props below).
 * Lists of these are terminated by a { NULL, NULL } sentinel entry.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1487
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 * NULL-terminated; entries may be overridden at startup via
 * x86_cpu_change_kvm_default().
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    /* Features below are disabled by default under KVM: */
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
1504
/* TCG-specific defaults that override all CPU models when using TCG.
 * NULL-terminated, same format as kvm_default_props.
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
1511
1512
1513 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1514 {
1515 PropValue *pv;
1516 for (pv = kvm_default_props; pv->prop; pv++) {
1517 if (!strcmp(pv->prop, prop)) {
1518 pv->value = value;
1519 break;
1520 }
1521 }
1522
1523 /* It is valid to call this function only for properties that
1524 * are already present in the kvm_default_props table.
1525 */
1526 assert(pv->prop);
1527 }
1528
1529 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1530 bool migratable_only);
1531
1532 #ifdef CONFIG_KVM
1533
1534 static bool lmce_supported(void)
1535 {
1536 uint64_t mce_cap;
1537
1538 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1539 return false;
1540 }
1541
1542 return !!(mce_cap & MCG_LMCE_P);
1543 }
1544
/* Fill @str (at least 48 bytes) with the host CPU model name string,
 * assembled from CPUID leaves 0x80000002..0x80000004 (16 bytes each,
 * EAX/EBX/ECX/EDX in order). Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf, r;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        for (r = 0; r < 4; r++) {
            memcpy(str + leaf * 16 + r * 4, &regs[r], 4);
        }
    }
    return 0;
}
1559
1560 static X86CPUDefinition host_cpudef;
1561
/* QOM properties specific to the "host" CPU model */
static Property host_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1567
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    /* "host" only works under KVM; realize will fail without it */
    xcc->kvm_required = true;

    /* Leaf 0: vendor string is stored in EBX/EDX/ECX order */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* Leaf 1 EAX: decode family/model/stepping, folding in the
     * extended family (bits 27:20) and extended model (bits 19:16)
     * fields per the CPUID encoding.
     */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    /* Model name string from leaves 0x80000002..0x80000004 */
    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;
    xcc->model_description =
        "KVM processor with all supported host features "
        "(only available in KVM mode)";

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
1603
/* instance_init for the "host" CPU model: pull the minimum CPUID levels
 * from KVM and enable optional capabilities the host kernel supports.
 */
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
    if (kvm_enabled()) {
        /* Minimum CPUID levels come from what KVM reports it supports:
         * basic (0x0), extended (0x80000000), Centaur (0xC0000000).
         */
        env->cpuid_min_level =
            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_min_xlevel =
            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_min_xlevel2 =
            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        /* Turn on local MCE only when the host kernel advertises it */
        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    }

    /* The "host" model enables the PMU by default */
    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1631
/* QOM type registration for the "host" CPU model (KVM-only) */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1638
1639 #endif
1640
1641 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1642 {
1643 FeatureWordInfo *f = &feature_word_info[w];
1644 int i;
1645
1646 for (i = 0; i < 32; ++i) {
1647 if ((1UL << i) & mask) {
1648 const char *reg = get_register_name_32(f->cpuid_reg);
1649 assert(reg);
1650 fprintf(stderr, "warning: %s doesn't support requested feature: "
1651 "CPUID.%02XH:%s%s%s [bit %d]\n",
1652 kvm_enabled() ? "host" : "TCG",
1653 f->cpuid_eax, reg,
1654 f->feat_names[i] ? "." : "",
1655 f->feat_names[i] ? f->feat_names[i] : "", i);
1656 }
1657 }
1658 }
1659
1660 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1661 const char *name, void *opaque,
1662 Error **errp)
1663 {
1664 X86CPU *cpu = X86_CPU(obj);
1665 CPUX86State *env = &cpu->env;
1666 int64_t value;
1667
1668 value = (env->cpuid_version >> 8) & 0xf;
1669 if (value == 0xf) {
1670 value += (env->cpuid_version >> 20) & 0xff;
1671 }
1672 visit_type_int(v, name, &value, errp);
1673 }
1674
1675 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1676 const char *name, void *opaque,
1677 Error **errp)
1678 {
1679 X86CPU *cpu = X86_CPU(obj);
1680 CPUX86State *env = &cpu->env;
1681 const int64_t min = 0;
1682 const int64_t max = 0xff + 0xf;
1683 Error *local_err = NULL;
1684 int64_t value;
1685
1686 visit_type_int(v, name, &value, &local_err);
1687 if (local_err) {
1688 error_propagate(errp, local_err);
1689 return;
1690 }
1691 if (value < min || value > max) {
1692 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1693 name ? name : "null", value, min, max);
1694 return;
1695 }
1696
1697 env->cpuid_version &= ~0xff00f00;
1698 if (value > 0x0f) {
1699 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1700 } else {
1701 env->cpuid_version |= value << 8;
1702 }
1703 }
1704
1705 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1706 const char *name, void *opaque,
1707 Error **errp)
1708 {
1709 X86CPU *cpu = X86_CPU(obj);
1710 CPUX86State *env = &cpu->env;
1711 int64_t value;
1712
1713 value = (env->cpuid_version >> 4) & 0xf;
1714 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1715 visit_type_int(v, name, &value, errp);
1716 }
1717
1718 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1719 const char *name, void *opaque,
1720 Error **errp)
1721 {
1722 X86CPU *cpu = X86_CPU(obj);
1723 CPUX86State *env = &cpu->env;
1724 const int64_t min = 0;
1725 const int64_t max = 0xff;
1726 Error *local_err = NULL;
1727 int64_t value;
1728
1729 visit_type_int(v, name, &value, &local_err);
1730 if (local_err) {
1731 error_propagate(errp, local_err);
1732 return;
1733 }
1734 if (value < min || value > max) {
1735 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1736 name ? name : "null", value, min, max);
1737 return;
1738 }
1739
1740 env->cpuid_version &= ~0xf00f0;
1741 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1742 }
1743
1744 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1745 const char *name, void *opaque,
1746 Error **errp)
1747 {
1748 X86CPU *cpu = X86_CPU(obj);
1749 CPUX86State *env = &cpu->env;
1750 int64_t value;
1751
1752 value = env->cpuid_version & 0xf;
1753 visit_type_int(v, name, &value, errp);
1754 }
1755
1756 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1757 const char *name, void *opaque,
1758 Error **errp)
1759 {
1760 X86CPU *cpu = X86_CPU(obj);
1761 CPUX86State *env = &cpu->env;
1762 const int64_t min = 0;
1763 const int64_t max = 0xf;
1764 Error *local_err = NULL;
1765 int64_t value;
1766
1767 visit_type_int(v, name, &value, &local_err);
1768 if (local_err) {
1769 error_propagate(errp, local_err);
1770 return;
1771 }
1772 if (value < min || value > max) {
1773 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1774 name ? name : "null", value, min, max);
1775 return;
1776 }
1777
1778 env->cpuid_version &= ~0xf;
1779 env->cpuid_version |= value & 0xf;
1780 }
1781
1782 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1783 {
1784 X86CPU *cpu = X86_CPU(obj);
1785 CPUX86State *env = &cpu->env;
1786 char *value;
1787
1788 value = g_malloc(CPUID_VENDOR_SZ + 1);
1789 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1790 env->cpuid_vendor3);
1791 return value;
1792 }
1793
1794 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1795 Error **errp)
1796 {
1797 X86CPU *cpu = X86_CPU(obj);
1798 CPUX86State *env = &cpu->env;
1799 int i;
1800
1801 if (strlen(value) != CPUID_VENDOR_SZ) {
1802 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1803 return;
1804 }
1805
1806 env->cpuid_vendor1 = 0;
1807 env->cpuid_vendor2 = 0;
1808 env->cpuid_vendor3 = 0;
1809 for (i = 0; i < 4; i++) {
1810 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1811 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1812 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1813 }
1814 }
1815
1816 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1817 {
1818 X86CPU *cpu = X86_CPU(obj);
1819 CPUX86State *env = &cpu->env;
1820 char *value;
1821 int i;
1822
1823 value = g_malloc(48 + 1);
1824 for (i = 0; i < 48; i++) {
1825 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1826 }
1827 value[48] = '\0';
1828 return value;
1829 }
1830
1831 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1832 Error **errp)
1833 {
1834 X86CPU *cpu = X86_CPU(obj);
1835 CPUX86State *env = &cpu->env;
1836 int c, len, i;
1837
1838 if (model_id == NULL) {
1839 model_id = "";
1840 }
1841 len = strlen(model_id);
1842 memset(env->cpuid_model, 0, 48);
1843 for (i = 0; i < 48; i++) {
1844 if (i >= len) {
1845 c = '\0';
1846 } else {
1847 c = (uint8_t)model_id[i];
1848 }
1849 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1850 }
1851 }
1852
1853 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1854 void *opaque, Error **errp)
1855 {
1856 X86CPU *cpu = X86_CPU(obj);
1857 int64_t value;
1858
1859 value = cpu->env.tsc_khz * 1000;
1860 visit_type_int(v, name, &value, errp);
1861 }
1862
1863 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1864 void *opaque, Error **errp)
1865 {
1866 X86CPU *cpu = X86_CPU(obj);
1867 const int64_t min = 0;
1868 const int64_t max = INT64_MAX;
1869 Error *local_err = NULL;
1870 int64_t value;
1871
1872 visit_type_int(v, name, &value, &local_err);
1873 if (local_err) {
1874 error_propagate(errp, local_err);
1875 return;
1876 }
1877 if (value < min || value > max) {
1878 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1879 name ? name : "null", value, min, max);
1880 return;
1881 }
1882
1883 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1884 }
1885
1886 /* Generic getter for "feature-words" and "filtered-features" properties */
1887 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1888 const char *name, void *opaque,
1889 Error **errp)
1890 {
1891 uint32_t *array = (uint32_t *)opaque;
1892 FeatureWord w;
1893 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1894 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1895 X86CPUFeatureWordInfoList *list = NULL;
1896
1897 for (w = 0; w < FEATURE_WORDS; w++) {
1898 FeatureWordInfo *wi = &feature_word_info[w];
1899 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1900 qwi->cpuid_input_eax = wi->cpuid_eax;
1901 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1902 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1903 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1904 qwi->features = array[w];
1905
1906 /* List will be in reverse order, but order shouldn't matter */
1907 list_entries[w].next = list;
1908 list_entries[w].value = &word_infos[w];
1909 list = &list_entries[w];
1910 }
1911
1912 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
1913 }
1914
1915 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1916 void *opaque, Error **errp)
1917 {
1918 X86CPU *cpu = X86_CPU(obj);
1919 int64_t value = cpu->hyperv_spinlock_attempts;
1920
1921 visit_type_int(v, name, &value, errp);
1922 }
1923
1924 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1925 void *opaque, Error **errp)
1926 {
1927 const int64_t min = 0xFFF;
1928 const int64_t max = UINT_MAX;
1929 X86CPU *cpu = X86_CPU(obj);
1930 Error *err = NULL;
1931 int64_t value;
1932
1933 visit_type_int(v, name, &value, &err);
1934 if (err) {
1935 error_propagate(errp, err);
1936 return;
1937 }
1938
1939 if (value < min || value > max) {
1940 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1941 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1942 object_get_typename(obj), name ? name : "null",
1943 value, min, max);
1944 return;
1945 }
1946 cpu->hyperv_spinlock_attempts = value;
1947 }
1948
/* qdev property type backing "hv-spinlocks": an integer property whose
 * setter enforces the valid Hyper-V range (see x86_set_hv_spinlocks). */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1954
/* Convert all '_' in a feature string option name to '-', to make the
 * feature name conform to the QOM property naming rule, which uses '-'
 * instead of '_'.  The string is modified in place. */
static inline void feat2prop(char *s)
{
    char *p = s;

    while ((p = strchr(p, '_')) != NULL) {
        *p++ = '-';
    }
}
1964
/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* The HI word covers components 32..63 */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        /* Map the XSAVE component back onto the feature word/bit that
         * enables it; unknown components fall through and are looked up
         * in the XSAVE_COMP word itself. */
        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
1985
/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
/* Feature names seen as "+feat" / "-feat" on the -cpu option; consulted
 * by x86_cpu_parse_featurestr() to warn about ambiguous mixes with
 * "feat=on|off" syntax. */
static GList *plus_features, *minus_features;
1992
1993 static gint compare_string(gconstpointer a, gconstpointer b)
1994 {
1995 return g_strcmp0(a, b);
1996 }
1997
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Each parsed feature is registered as a global property on 'typename'
 * rather than applied directly, so it affects every CPU instantiated
 * later.  Errors are reported through 'errp'.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* The feature string is only turned into globals once; subsequent
     * calls are silently ignored. */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    /* NOTE(review): strtok() keeps static state, so this function is not
     * reentrant; it appears to rely on being called once during startup.
     * It also modifies 'features' in place. */
    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            /* remember for the ambiguity warning below */
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* Split "name=value"; a bare "name" means "name=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        if (g_list_find_custom(plus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"+%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            error_report("warning: Ambiguous CPU model string. "
                         "Don't mix both \"-%s\" and \"%s=%s\"",
                         name, name, val);
            ambiguous = true;
        }

        /* Special case: */
        if (!strcmp(name, "tsc-freq")) {
            int64_t tsc_freq;
            char *err;

            /* Accept suffixed sizes (e.g. "2G") with a 1000 multiplier */
            tsc_freq = qemu_strtosz_suffix_unit(val, &err,
                                           QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
            if (tsc_freq < 0 || *err) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        /* Register the pair as a global property; errors when the
         * property is later applied are fatal (&error_fatal). */
        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        error_report("warning: Compatibility of ambiguous CPU model "
                     "strings won't be kept on future QEMU versions");
    }
}
2089
/* Forward declarations: both functions are defined later in this file */
static void x86_cpu_load_features(X86CPU *cpu, Error **errp);
static int x86_cpu_filter_features(X86CPU *cpu);
2092
2093 /* Check for missing features that may prevent the CPU class from
2094 * running using the current machine and accelerator.
2095 */
2096 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2097 strList **missing_feats)
2098 {
2099 X86CPU *xc;
2100 FeatureWord w;
2101 Error *err = NULL;
2102 strList **next = missing_feats;
2103
2104 if (xcc->kvm_required && !kvm_enabled()) {
2105 strList *new = g_new0(strList, 1);
2106 new->value = g_strdup("kvm");;
2107 *missing_feats = new;
2108 return;
2109 }
2110
2111 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2112
2113 x86_cpu_load_features(xc, &err);
2114 if (err) {
2115 /* Errors at x86_cpu_load_features should never happen,
2116 * but in case it does, just report the model as not
2117 * runnable at all using the "type" property.
2118 */
2119 strList *new = g_new0(strList, 1);
2120 new->value = g_strdup("type");
2121 *next = new;
2122 next = &new->next;
2123 }
2124
2125 x86_cpu_filter_features(xc);
2126
2127 for (w = 0; w < FEATURE_WORDS; w++) {
2128 uint32_t filtered = xc->filtered_features[w];
2129 int i;
2130 for (i = 0; i < 32; i++) {
2131 if (filtered & (1UL << i)) {
2132 strList *new = g_new0(strList, 1);
2133 new->value = g_strdup(x86_cpu_feature_name(w, i));
2134 *next = new;
2135 next = &new->next;
2136 }
2137 }
2138 }
2139
2140 object_unref(OBJECT(xc));
2141 }
2142
2143 /* Print all cpuid feature names in featureset
2144 */
2145 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2146 {
2147 int bit;
2148 bool first = true;
2149
2150 for (bit = 0; bit < 32; bit++) {
2151 if (featureset[bit]) {
2152 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2153 first = false;
2154 }
2155 }
2156 }
2157
2158 /* Sort alphabetically by type name, listing kvm_required models last. */
2159 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2160 {
2161 ObjectClass *class_a = (ObjectClass *)a;
2162 ObjectClass *class_b = (ObjectClass *)b;
2163 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2164 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2165 const char *name_a, *name_b;
2166
2167 if (cc_a->kvm_required != cc_b->kvm_required) {
2168 /* kvm_required items go last */
2169 return cc_a->kvm_required ? 1 : -1;
2170 } else {
2171 name_a = object_class_get_name(class_a);
2172 name_b = object_class_get_name(class_b);
2173 return strcmp(name_a, name_b);
2174 }
2175 }
2176
2177 static GSList *get_sorted_cpu_model_list(void)
2178 {
2179 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2180 list = g_slist_sort(list, x86_cpu_list_compare);
2181 return list;
2182 }
2183
2184 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2185 {
2186 ObjectClass *oc = data;
2187 X86CPUClass *cc = X86_CPU_CLASS(oc);
2188 CPUListState *s = user_data;
2189 char *name = x86_cpu_class_get_model_name(cc);
2190 const char *desc = cc->model_description;
2191 if (!desc) {
2192 desc = cc->cpu_def->model_id;
2193 }
2194
2195 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2196 name, desc);
2197 g_free(name);
2198 }
2199
2200 /* list available CPU models and flags */
2201 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2202 {
2203 int i;
2204 CPUListState s = {
2205 .file = f,
2206 .cpu_fprintf = cpu_fprintf,
2207 };
2208 GSList *list;
2209
2210 (*cpu_fprintf)(f, "Available CPUs:\n");
2211 list = get_sorted_cpu_model_list();
2212 g_slist_foreach(list, x86_cpu_list_entry, &s);
2213 g_slist_free(list);
2214
2215 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2216 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2217 FeatureWordInfo *fw = &feature_word_info[i];
2218
2219 (*cpu_fprintf)(f, " ");
2220 listflags(f, cpu_fprintf, fw->feat_names);
2221 (*cpu_fprintf)(f, "\n");
2222 }
2223 }
2224
2225 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2226 {
2227 ObjectClass *oc = data;
2228 X86CPUClass *cc = X86_CPU_CLASS(oc);
2229 CpuDefinitionInfoList **cpu_list = user_data;
2230 CpuDefinitionInfoList *entry;
2231 CpuDefinitionInfo *info;
2232
2233 info = g_malloc0(sizeof(*info));
2234 info->name = x86_cpu_class_get_model_name(cc);
2235 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2236 info->has_unavailable_features = true;
2237
2238 entry = g_malloc0(sizeof(*entry));
2239 entry->value = info;
2240 entry->next = *cpu_list;
2241 *cpu_list = entry;
2242 }
2243
2244 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2245 {
2246 CpuDefinitionInfoList *cpu_list = NULL;
2247 GSList *list = get_sorted_cpu_model_list();
2248 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2249 g_slist_free(list);
2250 return cpu_list;
2251 }
2252
/* Return the subset of feature word 'w' that the current accelerator
 * can provide.  With KVM this queries the kernel; with TCG it is the
 * static tcg_features mask; with neither (e.g. qtest) every bit is
 * reported as supported.  When migratable_only is set, bits that would
 * block live migration are also cleared. */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        /* No accelerator: claim everything */
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
2273
2274 /*
2275 * Filters CPU feature words based on host availability of each feature.
2276 *
2277 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2278 */
2279 static int x86_cpu_filter_features(X86CPU *cpu)
2280 {
2281 CPUX86State *env = &cpu->env;
2282 FeatureWord w;
2283 int rv = 0;
2284
2285 for (w = 0; w < FEATURE_WORDS; w++) {
2286 uint32_t host_feat =
2287 x86_cpu_get_supported_feature_word(w, false);
2288 uint32_t requested_features = env->features[w];
2289 env->features[w] &= host_feat;
2290 cpu->filtered_features[w] = requested_features & ~env->features[w];
2291 if (cpu->filtered_features[w]) {
2292 rv = 1;
2293 }
2294 }
2295
2296 return rv;
2297 }
2298
/* Emit a warning for every feature bit that x86_cpu_filter_features()
 * had to drop because the host/accelerator cannot provide it. */
static void x86_cpu_report_filtered_features(X86CPU *cpu)
{
    FeatureWord w;

    for (w = 0; w < FEATURE_WORDS; w++) {
        report_unavailable_features(w, cpu->filtered_features[w]);
    }
}
2307
2308 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2309 {
2310 PropValue *pv;
2311 for (pv = props; pv->prop; pv++) {
2312 if (!pv->value) {
2313 continue;
2314 }
2315 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2316 &error_abort);
2317 }
2318 }
2319
/* Load data from X86CPUDefinition
 *
 * Copies a CPU model definition into the given CPU object via QOM
 * properties, then applies accelerator-specific overrides.  Errors
 * from the individual property sets accumulate into 'errp'.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        /* x2apic needs an in-kernel irqchip; disable it otherwise */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Guests always see themselves running under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        /* Default to the host CPU's vendor string under KVM */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2372
/* Create and realize an X86CPU from a "model[,features...]" string.
 * NOTE(review): presumably returns NULL on failure, per the generic
 * cpu_generic_init() contract — confirm against its definition. */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
2377
2378 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2379 {
2380 X86CPUDefinition *cpudef = data;
2381 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2382
2383 xcc->cpu_def = cpudef;
2384 }
2385
2386 static void x86_register_cpudef_type(X86CPUDefinition *def)
2387 {
2388 char *typename = x86_cpu_type_name(def->name);
2389 TypeInfo ti = {
2390 .name = typename,
2391 .parent = TYPE_X86_CPU,
2392 .class_init = x86_cpu_cpudef_class_init,
2393 .class_data = def,
2394 };
2395
2396 type_register(&ti);
2397 g_free(typename);
2398 }
2399
#if !defined(CONFIG_USER_ONLY)

/* Hide the APIC from the guest by clearing the CPUID.1:EDX APIC bit
 * (used by boards that do not provide a local APIC). */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}

#endif /* !CONFIG_USER_ONLY */
2408
/* Compute the guest-visible CPUID leaf for this vCPU.
 *
 * 'index' is the requested leaf (EAX input) and 'count' the sub-leaf
 * (ECX input); results are written to *eax..*edx.  Out-of-range leaves
 * are first clamped to cpuid_level/cpuid_xlevel/cpuid_xlevel2 per
 * Intel/Centaur conventions, then dispatched below. */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string is returned in EBX:EDX:ECX order */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors the guest's CR4.OSXSAVE at read time */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU gives out its own APIC IDs, never pass down bits 31..26 */
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                /* L3 is shared by the whole package */
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors the guest's CR4.PKE at read time */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            /* EAX:EDX = supported XCR0 bits; EBX/ECX = XSAVE area size */
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Size/offset of each enabled XSAVE component */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor, 48 bits virtual, configurable
             * physical bits.
             */
            *eax = 0x00003000 + cpu->phys_bits;
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2803
/* CPUClass::reset()
 *
 * Bring the vCPU to x86 power-on/RESET state: clears the volatile part
 * of CPUX86State, then programs the architectural reset values for
 * segments, control registers, FPU/SSE and MSRs.  The statement order
 * below follows the architectural dependencies (e.g. CR0 before
 * segment caches) and should not be rearranged casually. */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Only the fields up to end_reset_fields are cleared; everything
     * after that marker survives reset (configuration, CPUID data). */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS starts at the top-of-memory reset vector segment */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* APs start halted, waiting for INIT/SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2927
2928 #ifndef CONFIG_USER_ONLY
2929 bool cpu_is_bsp(X86CPU *cpu)
2930 {
2931 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2932 }
2933
/* TODO: remove me, when reset over QOM tree is implemented */
/* Machine-reset callback: forwards a system reset to this vCPU. */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
2940 #endif
2941
2942 static void mce_init(X86CPU *cpu)
2943 {
2944 CPUX86State *cenv = &cpu->env;
2945 unsigned int bank;
2946
2947 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2948 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2949 (CPUID_MCE | CPUID_MCA)) {
2950 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2951 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2952 cenv->mcg_ctl = ~(uint64_t)0;
2953 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2954 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2955 }
2956 }
2957 }
2958
2959 #ifndef CONFIG_USER_ONLY
2960 APICCommonClass *apic_get_class(void)
2961 {
2962 const char *apic_type = "apic";
2963
2964 if (kvm_apic_in_kernel()) {
2965 apic_type = "kvm-apic";
2966 } else if (xen_enabled()) {
2967 apic_type = "xen-apic";
2968 }
2969
2970 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
2971 }
2972
/* Create (but do not realize) this vCPU's local APIC device and attach
 * it as the CPU's "lapic" child property. */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* The child property now holds a reference; drop the one taken by
     * object_new() so the CPU owns the APIC's lifetime. */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
2990
/* Realize the vCPU's local APIC (if one was created) and map the APIC
 * MMIO window.  The mapping is shared by all CPUs, so it is installed
 * only once, by whichever CPU realizes first. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
3013
/* machine-done notifier: if the board created a "/machine/smram"
 * region, alias the first 4GiB of it into this CPU's address space as a
 * (initially disabled) high-priority overlay used while in SMM. */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        /* Disabled until the CPU actually enters SMM */
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
3028 #else
/* User-mode emulation has no APIC device: realizing it is a no-op. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
3032 #endif
3033
3034 /* Note: Only safe for use on x86(-64) hosts */
/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;

    /* Does the host implement the extended leaf reporting address sizes? */
    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax < 0x80000008) {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        return 36;
    }

    host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
    /* Note: According to AMD doc 25481 rev 2.34 they have a field
     * at 23:16 that can specify a maximum physical address bits for
     * the guest that can override this value; but I've not seen
     * anything with that set.
     */
    return eax & 0xff;
}
3059
3060 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3061 {
3062 if (*min < value) {
3063 *min = value;
3064 }
3065 }
3066
3067 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3068 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3069 {
3070 CPUX86State *env = &cpu->env;
3071 FeatureWordInfo *fi = &feature_word_info[w];
3072 uint32_t eax = fi->cpuid_eax;
3073 uint32_t region = eax & 0xF0000000;
3074
3075 if (!env->features[w]) {
3076 return;
3077 }
3078
3079 switch (region) {
3080 case 0x00000000:
3081 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3082 break;
3083 case 0x80000000:
3084 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3085 break;
3086 case 0xC0000000:
3087 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3088 break;
3089 }
3090 }
3091
3092 /* Calculate XSAVE components based on the configured CPU feature flags */
/* Calculate XSAVE components based on the configured CPU feature flags:
 * build the 64-bit component bitmap from x86_ext_save_areas and store it
 * in the FEAT_XSAVE_COMP_{LO,HI} feature words.  Does nothing when XSAVE
 * itself is disabled.
 */
static void x86_cpu_enable_xsave_components(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    int i;
    uint64_t mask;

    if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
        return;
    }

    mask = 0;
    for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        /* Enable component i iff its controlling feature bit is set. */
        if (env->features[esa->feature] & esa->bits) {
            mask |= (1ULL << i);
        }
    }

    env->features[FEAT_XSAVE_COMP_LO] = mask;
    env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
}
3114
3115 /* Load CPUID data based on configured features */
/* Load CPUID data based on configured features.
 *
 * Applies, in order: the host feature set (for -cpu host), the global
 * "+feat"/"-feat" lists, KVM paravirt feature masking, XSAVE component
 * calculation, and automatic cpuid_min_* level adjustment; finally
 * resolves any cpuid_*level left at UINT32_MAX (i.e. not set explicitly
 * by the user) to the computed minimum.
 */
static void x86_cpu_load_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: cpu->host_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->host_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Apply "+feat" requests... */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* ...then "-feat" requests, so '-' wins over '+'. */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are only meaningful under KVM with kvm=on. */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
3192
/* Vendor tests: compare all three 32-bit CPUID vendor-string words. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for X86CPU.
 *
 * Validates the configuration (KVM requirement, APIC ID, phys-bits),
 * loads and filters CPUID features, creates the per-CPU address space
 * and local APIC, starts the vCPU and finally chains to the parent
 * realize.  Ordering within this function is significant and is noted
 * inline where it matters.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_load_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Drop features the accelerator can't provide; with check/enforce the
     * removals are reported, and enforce turns them into a hard error.
     */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                                 " does not match phys-bits property (%u)",
                                 host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed if the CPU advertises one, or for SMP. */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3391
/* DeviceClass::unrealize for X86CPU: stop the vCPU, drop the reset
 * handler, destroy the APIC child and chain to the parent unrealize.
 */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    Error *local_err = NULL;

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    if (cpu->apic_state) {
        /* Unparenting drops the child-property reference taken at create. */
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }

    xcc->parent_unrealize(dev, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3414
/* Opaque state for a CPU feature-bit property: the feature word it lives
 * in and the bit(s) within that word it controls.
 */
typedef struct BitProperty {
    uint32_t *ptr;   /* feature word backing the property */
    uint32_t mask;   /* bit(s) this property gets/sets */
} BitProperty;
3419
3420 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3421 void *opaque, Error **errp)
3422 {
3423 BitProperty *fp = opaque;
3424 bool value = (*fp->ptr & fp->mask) == fp->mask;
3425 visit_type_bool(v, name, &value, errp);
3426 }
3427
3428 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3429 void *opaque, Error **errp)
3430 {
3431 DeviceState *dev = DEVICE(obj);
3432 BitProperty *fp = opaque;
3433 Error *local_err = NULL;
3434 bool value;
3435
3436 if (dev->realized) {
3437 qdev_prop_set_after_realize(dev, name, errp);
3438 return;
3439 }
3440
3441 visit_type_bool(v, name, &value, &local_err);
3442 if (local_err) {
3443 error_propagate(errp, local_err);
3444 return;
3445 }
3446
3447 if (value) {
3448 *fp->ptr |= fp->mask;
3449 } else {
3450 *fp->ptr &= ~fp->mask;
3451 }
3452 }
3453
/* Property release hook: free the BitProperty allocated at registration. */
static void x86_cpu_release_bit_prop(Object *obj, const char *name,
                                     void *opaque)
{
    BitProperty *prop = opaque;
    g_free(prop);
}
3460
/* Register a boolean property to get/set a single bit in a uint32_t field.
 *
 * The same property name can be registered multiple times to make it affect
 * multiple bits in the same FeatureWord. In that case, the getter will return
 * true only if all bits are set.
 */
static void x86_cpu_register_bit_prop(X86CPU *cpu,
                                      const char *prop_name,
                                      uint32_t *field,
                                      int bitnr)
{
    BitProperty *fp;
    ObjectProperty *op;
    uint32_t mask = (1UL << bitnr);

    op = object_property_find(OBJECT(cpu), prop_name, NULL);
    if (op) {
        /* Name already registered: widen the existing property's mask.
         * It must refer to the same feature word.
         */
        fp = op->opaque;
        assert(fp->ptr == field);
        fp->mask |= mask;
    } else {
        /* First registration: fp is freed by x86_cpu_release_bit_prop(). */
        fp = g_new0(BitProperty, 1);
        fp->ptr = field;
        fp->mask = mask;
        object_property_add(OBJECT(cpu), prop_name, "bool",
                            x86_cpu_get_bit_prop,
                            x86_cpu_set_bit_prop,
                            x86_cpu_release_bit_prop, fp, &error_abort);
    }
}
3491
/* Register the QOM property for bit @bitnr of feature word @w, using the
 * canonical name from feature_word_info.  Unnamed bits get no property.
 */
static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
                                               FeatureWord w,
                                               int bitnr)
{
    FeatureWordInfo *fi = &feature_word_info[w];
    const char *name = fi->feat_names[bitnr];

    if (!name) {
        return;
    }

    /* Property names should use "-" instead of "_".
     * Old names containing underscores are registered as aliases
     * using object_property_add_alias()
     */
    assert(!strchr(name, '_'));
    /* aliases don't use "|" delimiters anymore, they are registered
     * manually using object_property_add_alias() */
    assert(!strchr(name, '|'));
    x86_cpu_register_bit_prop(cpu, name, &cpu->env.features[w], bitnr);
}
3513
/* Instance init for X86CPU: register the CPUID-related QOM properties,
 * one boolean property per named feature bit, legacy-name aliases, and
 * finally load the class's CPU model definition into the instance.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Aliases for alternate spellings of feature names. */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Aliases for legacy underscore-style names. */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3591
3592 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3593 {
3594 X86CPU *cpu = X86_CPU(cs);
3595
3596 return cpu->apic_id;
3597 }
3598
3599 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3600 {
3601 X86CPU *cpu = X86_CPU(cs);
3602
3603 return cpu->env.cr[0] & CR0_PG_MASK;
3604 }
3605
3606 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3607 {
3608 X86CPU *cpu = X86_CPU(cs);
3609
3610 cpu->env.eip = value;
3611 }
3612
3613 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3614 {
3615 X86CPU *cpu = X86_CPU(cs);
3616
3617 cpu->env.eip = tb->pc - tb->cs_base;
3618 }
3619
/* CPUClass::has_work: decide whether the vCPU has a deliverable event.
 * True when:
 *  - a hard or APIC-poll interrupt is pending and EFLAGS.IF is set, or
 *  - an NMI/INIT/SIPI/MCE is pending (not maskable by IF), or
 *  - an SMI is pending and the CPU is not already in SMM.
 */
static bool x86_cpu_has_work(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
                                      CPU_INTERRUPT_POLL)) &&
            (env->eflags & IF_MASK)) ||
           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
                                     CPU_INTERRUPT_INIT |
                                     CPU_INTERRUPT_SIPI |
                                     CPU_INTERRUPT_MCE)) ||
           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK));
}
3635
/* qdev properties of every X86CPU instance.  Levels defaulting to
 * UINT32_MAX mean "not set by the user" and are resolved to the computed
 * minimum in x86_cpu_load_features().
 */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    /* Hyper-V enlightenments, all off by default: */
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_END_OF_LIST()
};
3679
/* Class init for the abstract TYPE_X86_CPU base type: hook the qdev
 * realize/unrealize chain, install the CPUClass callbacks, and set up
 * the GDB register description.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Save the parent handlers so realizefn/unrealizefn can chain to them. */
    xcc->parent_realize = dc->realize;
    xcc->parent_unrealize = dc->unrealize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    /* CPU_NB_REGS * 2 = general regs + xmm regs
     * 25 = eip, eflags, 6 seg regs, st[0-7], fctrl,...,fop, mxcsr.
     */
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    /* x86 CPUs can be hotplugged via device_add. */
    dc->cannot_instantiate_with_device_add_yet = false;
}
3731
/* Abstract base QOM type; concrete CPU models are registered as
 * subclasses by x86_register_cpudef_type().
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3741
/* Register the abstract base type, one subtype per built-in CPU model
 * definition, and (with KVM) the "host" CPU type.
 */
static void x86_cpu_register_types(void)
{
    int i;

    type_register_static(&x86_cpu_type_info);
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        x86_register_cpudef_type(&builtin_x86_defs[i]);
    }
#ifdef CONFIG_KVM
    type_register_static(&host_x86_cpu_type_info);
#endif
}

type_init(x86_cpu_register_types)