git.proxmox.com Git - mirror_qemu.git/blob - target-i386/cpu.c
target-i386: Add a marker to end of the region zeroed on reset
[mirror_qemu.git] / target-i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

/* One-byte descriptor values returned in CPUID[2]; each byte encodes one
 * cache/TLB property (Intel SDM, CPUID leaf 2 descriptor table). */
#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B 0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d


/* CPUID Leaf 4 constants: */

/* EAX: */
/* Cache type field (EAX bits 4:0) */
#define CPUID_4_TYPE_DCACHE 1
#define CPUID_4_TYPE_ICACHE 2
#define CPUID_4_TYPE_UNIFIED 3

/* Cache level is encoded in EAX bits 7:5 */
#define CPUID_4_LEVEL(l) ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE (1 << 1)
#define CPUID_4_COMPLEX_IDX (1 << 2)

/* AMD encoding for a fully-associative cache/TLB */
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
/* NOTE: macro argument 'a' is evaluated multiple times; pass constants only */
#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
                          a == 2 ? 0x2 : \
                          a == 4 ? 0x4 : \
                          a == 8 ? 0x6 : \
                          a == 16 ? 0x8 : \
                          a == 32 ? 0xA : \
                          a == 48 ? 0xB : \
                          a == 64 ? 0xC : \
                          a == 96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)


/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE 64
#define L1D_ASSOCIATIVITY 8
#define L1D_SETS 64
#define L1D_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG 1
#define L1D_SIZE_KB_AMD 64
#define L1D_ASSOCIATIVITY_AMD 2

/* L1 instruction cache: */
#define L1I_LINE_SIZE 64
#define L1I_ASSOCIATIVITY 8
#define L1I_SETS 64
#define L1I_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG 1
#define L1I_SIZE_KB_AMD 64
#define L1I_ASSOCIATIVITY_AMD 2

/* Level 2 unified cache: */
#define L2_LINE_SIZE 64
#define L2_ASSOCIATIVITY 16
#define L2_SETS 4096
#define L2_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG 1
#define L2_SIZE_KB_AMD 512

/* Level 3 unified cache: */
/* Plain L3_* values describe the default (no L3 cache) configuration;
 * L3_N_* values describe the 16MiB L3 — presumably selected elsewhere per
 * machine type/CPU model; confirm against callers. */
#define L3_SIZE_KB 0 /* disabled */
#define L3_ASSOCIATIVITY 0 /* disabled */
#define L3_LINES_PER_TAG 0 /* disabled */
#define L3_LINE_SIZE 0 /* disabled */
#define L3_N_LINE_SIZE 64
#define L3_N_ASSOCIATIVITY 16
#define L3_N_SETS 16384
#define L3_N_PARTITIONS 1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG 1
#define L3_N_SIZE_KB_AMD 16384

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC 1
#define L1_DTLB_2M_ENTRIES 255
#define L1_DTLB_4K_ASSOC 1
#define L1_DTLB_4K_ENTRIES 255

#define L1_ITLB_2M_ASSOC 1
#define L1_ITLB_2M_ENTRIES 255
#define L1_ITLB_4K_ASSOC 1
#define L1_ITLB_4K_ENTRIES 255

#define L2_DTLB_2M_ASSOC 0 /* disabled */
#define L2_DTLB_2M_ENTRIES 0 /* disabled */
#define L2_DTLB_4K_ASSOC 4
#define L2_DTLB_4K_ENTRIES 512

#define L2_ITLB_2M_ASSOC 0 /* disabled */
#define L2_ITLB_2M_ENTRIES 0 /* disabled */
#define L2_ITLB_4K_ASSOC 4
#define L2_ITLB_4K_ENTRIES 512
171
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
174 {
175 int i;
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
180 }
181 dst[CPUID_VENDOR_SZ] = '\0';
182 }
183
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * between feature naming conventions, aliases may be added.
 *
 * In all the arrays below the array index is the bit position within the
 * corresponding CPUID register; a NULL entry means the bit is not known
 * to (or not configurable by) QEMU.  "a|b" entries are accepted aliases.
 */
/* CPUID[1].EDX feature names */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID[1].ECX feature names */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
/* CPUID[8000_0001].ECX feature names */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[C000_0001].EDX feature names (presumably VIA PadLock — confirm) */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[KVM_CPUID_FEATURES].EAX feature names.  Note "kvmclock" appears
 * twice (bits 0 and 3) — presumably the old and new clocksource MSR
 * interfaces; confirm against linux/kvm_para.h. */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[4000_0003].EAX (Hyper-V partition privileges); all bits currently
 * unnamed, so none are user-configurable through feature strings. */
static const char *hyperv_priv_feature_name[] = {
    NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
    NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
    NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
    NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
    NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
    NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[4000_0003].EBX (Hyper-V partition flags); all bits unnamed */
static const char *hyperv_ident_feature_name[] = {
    NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
    NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
    NULL /* hv_post_messages */, NULL /* hv_signal_events */,
    NULL /* hv_create_port */, NULL /* hv_connect_port */,
    NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
    NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
    NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[4000_0003].EDX (Hyper-V misc. features); all bits unnamed */
static const char *hyperv_misc_feature_name[] = {
    NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
    NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
    NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
    NULL, NULL,
    NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[8000_000A].EDX (SVM) feature names */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[EAX=7,ECX=0].EBX feature names */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1",
    "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm",
    NULL, NULL, "mpx", NULL,
    "avx512f", "avx512dq", "rdseed", "adx",
    "smap", "avx512ifma", "pcommit", "clflushopt",
    "clwb", NULL, "avx512pf", "avx512er",
    "avx512cd", NULL, "avx512bw", "avx512vl",
};

/* CPUID[EAX=7,ECX=0].ECX feature names */
static const char *cpuid_7_0_ecx_feature_name[] = {
    NULL, "avx512vbmi", "umip", "pku",
    "ospke", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, "rdpid", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[8000_0007].EDX (Advanced Power Management) feature names */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[EAX=0xd,ECX=1].EAX (XSAVE extensions) feature names */
static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[6].EAX (thermal/power management) feature names */
static const char *cpuid_6_feature_name[] = {
    NULL, NULL, "arat", NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
362
/* Feature masks for the built-in CPU model generations, and the subsets of
 * each feature word that the TCG emulator can actually provide. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* CPUID[1].EDX bits TCG can emulate */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
/* CPUID[1].ECX bits TCG can emulate */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

/* CPUID[8000_0001].EDX bits TCG can emulate; AMD-aliased bits follow
 * TCG_FEATURES */
#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
426
/* Describes one 32-bit CPUID feature word: where it comes from in the
 * CPUID instruction, the bit names users may set, and QEMU's own limits. */
typedef struct FeatureWordInfo {
    const char **feat_names;    /* bit names, indexed by bit; NULL = unknown */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
436
/* Per-feature-word CPUID source information, indexed by FeatureWord.
 * Entries without .tcg_features default to 0 (no TCG support). */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = ext_feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    [FEAT_8000_0001_EDX] = {
        .feat_names = ext2_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = ext3_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = ext4_feature_name,
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = kvm_feature_name,
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_HYPERV_EAX] = {
        .feat_names = hyperv_priv_feature_name,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = hyperv_ident_feature_name,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = hyperv_misc_feature_name,
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = svm_feature_name,
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = cpuid_7_0_ebx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = cpuid_7_0_ecx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = cpuid_apm_edx_feature_name,
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = cpuid_xsave_feature_name,
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = cpuid_6_feature_name,
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
};
519
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Maps each R_* 32-bit register index to its name and QAPI enum value */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
540
/* Describes one XSAVE state component and the CPUID feature that needs it */
typedef struct ExtSaveArea {
    uint32_t feature, bits;   /* feature word and bit mask requiring it */
    uint32_t offset, size;    /* location and size within X86XSaveArea */
} ExtSaveArea;

/* XSAVE state components, indexed by XSTATE_*_BIT */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
576
577 const char *get_register_name_32(unsigned int reg)
578 {
579 if (reg >= CPU_NB_REGS32) {
580 return NULL;
581 }
582 return x86_reg_info_32[reg].name;
583 }
584
585 /*
586 * Returns the set of feature flags that are supported and migratable by
587 * QEMU, for a given FeatureWord.
588 */
589 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
590 {
591 FeatureWordInfo *wi = &feature_word_info[w];
592 uint32_t r = 0;
593 int i;
594
595 for (i = 0; i < 32; i++) {
596 uint32_t f = 1U << i;
597 /* If the feature name is unknown, it is not supported by QEMU yet */
598 if (!wi->feat_names[i]) {
599 continue;
600 }
601 /* Skip features known to QEMU, but explicitly marked as unmigratable */
602 if (wi->unmigratable_flags & f) {
603 continue;
604 }
605 r |= f;
606 }
607 return r;
608 }
609
/* Execute the CPUID instruction on the host and return its results.
 *
 * @function/@count: input EAX/ECX leaf and subleaf values.
 * @eax/@ebx/@ecx/@edx: output pointers; any of them may be NULL if the
 *     caller does not care about that register.
 *
 * Aborts at runtime when built for a non-x86 host.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    /* __asm__ (not the 'asm' keyword) so this also builds under strict
     * ISO C modes (-std=c11), where 'asm' is not a keyword. */
    __asm__ volatile("cpuid"
                     : "=a"(vec[0]), "=b"(vec[1]),
                       "=c"(vec[2]), "=d"(vec[3])
                     : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* pusha/popa preserve EBX, which may be the PIC register on i386 */
    __asm__ volatile("pusha \n\t"
                     "cpuid \n\t"
                     "mov %%eax, 0(%2) \n\t"
                     "mov %%ebx, 4(%2) \n\t"
                     "mov %%ecx, 8(%2) \n\t"
                     "mov %%edx, 12(%2) \n\t"
                     "popa"
                     : : "a"(function), "c"(count), "S"(vec)
                     : "memory", "cc");
#else
    abort();
#endif

    if (eax) {
        *eax = vec[0];
    }
    if (ebx) {
        *ebx = vec[1];
    }
    if (ecx) {
        *ecx = vec[2];
    }
    if (edx) {
        *edx = vec[3];
    }
}
643
/* True for any non-NUL character outside the printable range above space,
 * i.e. c <= ' ' or c > '~'; used as a separator test when parsing. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
645
/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    while (*s1 && *s2 && *s1 == *s2) {
        s1++;
        s2++;
        /* Reached the end of one (or both) of the bounded substrings? */
        if (s1 == e1) {
            return (s2 == e2) ? 0 : *s2;
        }
        if (s2 == e2) {
            return *s1;
        }
    }
    return *s1 - *s2;
}
666
/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right. Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *start = altstr;
    const char *p = altstr;

    for (;;) {
        if (*p != '\0' && *p != '|') {
            ++p;
            continue;
        }
        /* [start..p) is one alternative; an empty one matches an empty s */
        if ((start == p && !*s) || (start != p && !sstrcmp(s, e, start, p))) {
            return 0;
        }
        if (!*p) {
            return 1;
        }
        start = ++p;
    }
}
687
688 /* search featureset for flag *[s..e), if found set corresponding bit in
689 * *pval and return true, otherwise return false
690 */
691 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
692 const char **featureset)
693 {
694 uint32_t mask;
695 const char **ppc;
696 bool found = false;
697
698 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
699 if (*ppc && !altcmp(s, e, *ppc)) {
700 *pval |= mask;
701 found = true;
702 }
703 }
704 return found;
705 }
706
707 static void add_flagname_to_bitmaps(const char *flagname,
708 FeatureWordArray words,
709 Error **errp)
710 {
711 FeatureWord w;
712 for (w = 0; w < FEATURE_WORDS; w++) {
713 FeatureWordInfo *wi = &feature_word_info[w];
714 if (wi->feat_names &&
715 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
716 break;
717 }
718 }
719 if (w == FEATURE_WORDS) {
720 error_setg(errp, "CPU feature %s not found", flagname);
721 }
722 }
723
/* CPU class name definitions: */

/* QOM type-name suffix appended to every CPU model name; X86_CPU_TYPE_NAME
 * works by literal concatenation, so 'name' must be a string literal. */
#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
728
729 /* Return type name for a given CPU model name
730 * Caller is responsible for freeing the returned string.
731 */
732 static char *x86_cpu_type_name(const char *model_name)
733 {
734 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
735 }
736
737 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
738 {
739 ObjectClass *oc;
740 char *typename;
741
742 if (cpu_model == NULL) {
743 return NULL;
744 }
745
746 typename = x86_cpu_type_name(cpu_model);
747 oc = object_class_by_name(typename);
748 g_free(typename);
749 return oc;
750 }
751
752 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
753 {
754 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
755 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
756 return g_strndup(class_name,
757 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
758 }
759
/* Static description of one built-in CPU model (see builtin_x86_defs) */
struct X86CPUDefinition {
    const char *name;       /* CPU model name, e.g. "qemu64" */
    uint32_t level;         /* max basic CPUID leaf reported to the guest */
    uint32_t xlevel;        /* max extended (0x8000xxxx) CPUID leaf */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;   /* default feature bits per feature word */
    char model_id[48];           /* CPUID brand string */
};
772
773 static X86CPUDefinition builtin_x86_defs[] = {
774 {
775 .name = "qemu64",
776 .level = 0xd,
777 .vendor = CPUID_VENDOR_AMD,
778 .family = 6,
779 .model = 6,
780 .stepping = 3,
781 .features[FEAT_1_EDX] =
782 PPRO_FEATURES |
783 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
784 CPUID_PSE36,
785 .features[FEAT_1_ECX] =
786 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
787 .features[FEAT_8000_0001_EDX] =
788 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
789 .features[FEAT_8000_0001_ECX] =
790 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
791 .xlevel = 0x8000000A,
792 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
793 },
794 {
795 .name = "phenom",
796 .level = 5,
797 .vendor = CPUID_VENDOR_AMD,
798 .family = 16,
799 .model = 2,
800 .stepping = 3,
801 /* Missing: CPUID_HT */
802 .features[FEAT_1_EDX] =
803 PPRO_FEATURES |
804 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
805 CPUID_PSE36 | CPUID_VME,
806 .features[FEAT_1_ECX] =
807 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
808 CPUID_EXT_POPCNT,
809 .features[FEAT_8000_0001_EDX] =
810 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
811 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
812 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
813 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
814 CPUID_EXT3_CR8LEG,
815 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
816 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
817 .features[FEAT_8000_0001_ECX] =
818 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
819 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
820 /* Missing: CPUID_SVM_LBRV */
821 .features[FEAT_SVM] =
822 CPUID_SVM_NPT,
823 .xlevel = 0x8000001A,
824 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
825 },
826 {
827 .name = "core2duo",
828 .level = 10,
829 .vendor = CPUID_VENDOR_INTEL,
830 .family = 6,
831 .model = 15,
832 .stepping = 11,
833 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
834 .features[FEAT_1_EDX] =
835 PPRO_FEATURES |
836 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
837 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
838 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
839 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
840 .features[FEAT_1_ECX] =
841 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
842 CPUID_EXT_CX16,
843 .features[FEAT_8000_0001_EDX] =
844 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
845 .features[FEAT_8000_0001_ECX] =
846 CPUID_EXT3_LAHF_LM,
847 .xlevel = 0x80000008,
848 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
849 },
850 {
851 .name = "kvm64",
852 .level = 0xd,
853 .vendor = CPUID_VENDOR_INTEL,
854 .family = 15,
855 .model = 6,
856 .stepping = 1,
857 /* Missing: CPUID_HT */
858 .features[FEAT_1_EDX] =
859 PPRO_FEATURES | CPUID_VME |
860 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
861 CPUID_PSE36,
862 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
863 .features[FEAT_1_ECX] =
864 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
865 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
866 .features[FEAT_8000_0001_EDX] =
867 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
868 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
869 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
870 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
871 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
872 .features[FEAT_8000_0001_ECX] =
873 0,
874 .xlevel = 0x80000008,
875 .model_id = "Common KVM processor"
876 },
877 {
878 .name = "qemu32",
879 .level = 4,
880 .vendor = CPUID_VENDOR_INTEL,
881 .family = 6,
882 .model = 6,
883 .stepping = 3,
884 .features[FEAT_1_EDX] =
885 PPRO_FEATURES,
886 .features[FEAT_1_ECX] =
887 CPUID_EXT_SSE3,
888 .xlevel = 0x80000004,
889 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
890 },
891 {
892 .name = "kvm32",
893 .level = 5,
894 .vendor = CPUID_VENDOR_INTEL,
895 .family = 15,
896 .model = 6,
897 .stepping = 1,
898 .features[FEAT_1_EDX] =
899 PPRO_FEATURES | CPUID_VME |
900 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
901 .features[FEAT_1_ECX] =
902 CPUID_EXT_SSE3,
903 .features[FEAT_8000_0001_ECX] =
904 0,
905 .xlevel = 0x80000008,
906 .model_id = "Common 32-bit KVM processor"
907 },
908 {
909 .name = "coreduo",
910 .level = 10,
911 .vendor = CPUID_VENDOR_INTEL,
912 .family = 6,
913 .model = 14,
914 .stepping = 8,
915 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
916 .features[FEAT_1_EDX] =
917 PPRO_FEATURES | CPUID_VME |
918 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
919 CPUID_SS,
920 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
921 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
922 .features[FEAT_1_ECX] =
923 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
924 .features[FEAT_8000_0001_EDX] =
925 CPUID_EXT2_NX,
926 .xlevel = 0x80000008,
927 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
928 },
929 {
930 .name = "486",
931 .level = 1,
932 .vendor = CPUID_VENDOR_INTEL,
933 .family = 4,
934 .model = 8,
935 .stepping = 0,
936 .features[FEAT_1_EDX] =
937 I486_FEATURES,
938 .xlevel = 0,
939 },
940 {
941 .name = "pentium",
942 .level = 1,
943 .vendor = CPUID_VENDOR_INTEL,
944 .family = 5,
945 .model = 4,
946 .stepping = 3,
947 .features[FEAT_1_EDX] =
948 PENTIUM_FEATURES,
949 .xlevel = 0,
950 },
951 {
952 .name = "pentium2",
953 .level = 2,
954 .vendor = CPUID_VENDOR_INTEL,
955 .family = 6,
956 .model = 5,
957 .stepping = 2,
958 .features[FEAT_1_EDX] =
959 PENTIUM2_FEATURES,
960 .xlevel = 0,
961 },
962 {
963 .name = "pentium3",
964 .level = 3,
965 .vendor = CPUID_VENDOR_INTEL,
966 .family = 6,
967 .model = 7,
968 .stepping = 3,
969 .features[FEAT_1_EDX] =
970 PENTIUM3_FEATURES,
971 .xlevel = 0,
972 },
973 {
974 .name = "athlon",
975 .level = 2,
976 .vendor = CPUID_VENDOR_AMD,
977 .family = 6,
978 .model = 2,
979 .stepping = 3,
980 .features[FEAT_1_EDX] =
981 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
982 CPUID_MCA,
983 .features[FEAT_8000_0001_EDX] =
984 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
985 .xlevel = 0x80000008,
986 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
987 },
988 {
989 .name = "n270",
990 .level = 10,
991 .vendor = CPUID_VENDOR_INTEL,
992 .family = 6,
993 .model = 28,
994 .stepping = 2,
995 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
996 .features[FEAT_1_EDX] =
997 PPRO_FEATURES |
998 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
999 CPUID_ACPI | CPUID_SS,
1000 /* Some CPUs got no CPUID_SEP */
1001 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1002 * CPUID_EXT_XTPR */
1003 .features[FEAT_1_ECX] =
1004 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1005 CPUID_EXT_MOVBE,
1006 .features[FEAT_8000_0001_EDX] =
1007 CPUID_EXT2_NX,
1008 .features[FEAT_8000_0001_ECX] =
1009 CPUID_EXT3_LAHF_LM,
1010 .xlevel = 0x80000008,
1011 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1012 },
1013 {
1014 .name = "Conroe",
1015 .level = 10,
1016 .vendor = CPUID_VENDOR_INTEL,
1017 .family = 6,
1018 .model = 15,
1019 .stepping = 3,
1020 .features[FEAT_1_EDX] =
1021 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1022 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1023 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1024 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1025 CPUID_DE | CPUID_FP87,
1026 .features[FEAT_1_ECX] =
1027 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1028 .features[FEAT_8000_0001_EDX] =
1029 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1030 .features[FEAT_8000_0001_ECX] =
1031 CPUID_EXT3_LAHF_LM,
1032 .xlevel = 0x80000008,
1033 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1034 },
1035 {
1036 .name = "Penryn",
1037 .level = 10,
1038 .vendor = CPUID_VENDOR_INTEL,
1039 .family = 6,
1040 .model = 23,
1041 .stepping = 3,
1042 .features[FEAT_1_EDX] =
1043 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1044 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1045 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1046 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1047 CPUID_DE | CPUID_FP87,
1048 .features[FEAT_1_ECX] =
1049 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1050 CPUID_EXT_SSE3,
1051 .features[FEAT_8000_0001_EDX] =
1052 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1053 .features[FEAT_8000_0001_ECX] =
1054 CPUID_EXT3_LAHF_LM,
1055 .xlevel = 0x80000008,
1056 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1057 },
1058 {
1059 .name = "Nehalem",
1060 .level = 11,
1061 .vendor = CPUID_VENDOR_INTEL,
1062 .family = 6,
1063 .model = 26,
1064 .stepping = 3,
1065 .features[FEAT_1_EDX] =
1066 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1067 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1068 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1069 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1070 CPUID_DE | CPUID_FP87,
1071 .features[FEAT_1_ECX] =
1072 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1073 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1074 .features[FEAT_8000_0001_EDX] =
1075 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1076 .features[FEAT_8000_0001_ECX] =
1077 CPUID_EXT3_LAHF_LM,
1078 .xlevel = 0x80000008,
1079 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1080 },
1081 {
1082 .name = "Westmere",
1083 .level = 11,
1084 .vendor = CPUID_VENDOR_INTEL,
1085 .family = 6,
1086 .model = 44,
1087 .stepping = 1,
1088 .features[FEAT_1_EDX] =
1089 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1090 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1091 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1092 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1093 CPUID_DE | CPUID_FP87,
1094 .features[FEAT_1_ECX] =
1095 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1096 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1097 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1098 .features[FEAT_8000_0001_EDX] =
1099 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1100 .features[FEAT_8000_0001_ECX] =
1101 CPUID_EXT3_LAHF_LM,
1102 .features[FEAT_6_EAX] =
1103 CPUID_6_EAX_ARAT,
1104 .xlevel = 0x80000008,
1105 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1106 },
1107 {
1108 .name = "SandyBridge",
1109 .level = 0xd,
1110 .vendor = CPUID_VENDOR_INTEL,
1111 .family = 6,
1112 .model = 42,
1113 .stepping = 1,
1114 .features[FEAT_1_EDX] =
1115 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1116 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1117 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1118 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1119 CPUID_DE | CPUID_FP87,
1120 .features[FEAT_1_ECX] =
1121 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1122 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1123 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1124 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1125 CPUID_EXT_SSE3,
1126 .features[FEAT_8000_0001_EDX] =
1127 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1128 CPUID_EXT2_SYSCALL,
1129 .features[FEAT_8000_0001_ECX] =
1130 CPUID_EXT3_LAHF_LM,
1131 .features[FEAT_XSAVE] =
1132 CPUID_XSAVE_XSAVEOPT,
1133 .features[FEAT_6_EAX] =
1134 CPUID_6_EAX_ARAT,
1135 .xlevel = 0x80000008,
1136 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1137 },
1138 {
1139 .name = "IvyBridge",
1140 .level = 0xd,
1141 .vendor = CPUID_VENDOR_INTEL,
1142 .family = 6,
1143 .model = 58,
1144 .stepping = 9,
1145 .features[FEAT_1_EDX] =
1146 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1147 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1148 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1149 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1150 CPUID_DE | CPUID_FP87,
1151 .features[FEAT_1_ECX] =
1152 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1153 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1154 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1155 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1156 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1157 .features[FEAT_7_0_EBX] =
1158 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1159 CPUID_7_0_EBX_ERMS,
1160 .features[FEAT_8000_0001_EDX] =
1161 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1162 CPUID_EXT2_SYSCALL,
1163 .features[FEAT_8000_0001_ECX] =
1164 CPUID_EXT3_LAHF_LM,
1165 .features[FEAT_XSAVE] =
1166 CPUID_XSAVE_XSAVEOPT,
1167 .features[FEAT_6_EAX] =
1168 CPUID_6_EAX_ARAT,
1169 .xlevel = 0x80000008,
1170 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1171 },
1172 {
1173 .name = "Haswell-noTSX",
1174 .level = 0xd,
1175 .vendor = CPUID_VENDOR_INTEL,
1176 .family = 6,
1177 .model = 60,
1178 .stepping = 1,
1179 .features[FEAT_1_EDX] =
1180 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1181 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1182 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1183 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1184 CPUID_DE | CPUID_FP87,
1185 .features[FEAT_1_ECX] =
1186 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1187 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1188 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1189 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1190 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1191 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1192 .features[FEAT_8000_0001_EDX] =
1193 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1194 CPUID_EXT2_SYSCALL,
1195 .features[FEAT_8000_0001_ECX] =
1196 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1197 .features[FEAT_7_0_EBX] =
1198 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1199 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1200 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1201 .features[FEAT_XSAVE] =
1202 CPUID_XSAVE_XSAVEOPT,
1203 .features[FEAT_6_EAX] =
1204 CPUID_6_EAX_ARAT,
1205 .xlevel = 0x80000008,
1206 .model_id = "Intel Core Processor (Haswell, no TSX)",
1207 }, {
1208 .name = "Haswell",
1209 .level = 0xd,
1210 .vendor = CPUID_VENDOR_INTEL,
1211 .family = 6,
1212 .model = 60,
1213 .stepping = 1,
1214 .features[FEAT_1_EDX] =
1215 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1216 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1217 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1218 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1219 CPUID_DE | CPUID_FP87,
1220 .features[FEAT_1_ECX] =
1221 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1222 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1223 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1224 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1225 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1226 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1227 .features[FEAT_8000_0001_EDX] =
1228 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1229 CPUID_EXT2_SYSCALL,
1230 .features[FEAT_8000_0001_ECX] =
1231 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1232 .features[FEAT_7_0_EBX] =
1233 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1234 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1235 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1236 CPUID_7_0_EBX_RTM,
1237 .features[FEAT_XSAVE] =
1238 CPUID_XSAVE_XSAVEOPT,
1239 .features[FEAT_6_EAX] =
1240 CPUID_6_EAX_ARAT,
1241 .xlevel = 0x80000008,
1242 .model_id = "Intel Core Processor (Haswell)",
1243 },
1244 {
1245 .name = "Broadwell-noTSX",
1246 .level = 0xd,
1247 .vendor = CPUID_VENDOR_INTEL,
1248 .family = 6,
1249 .model = 61,
1250 .stepping = 2,
1251 .features[FEAT_1_EDX] =
1252 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1253 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1254 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1255 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1256 CPUID_DE | CPUID_FP87,
1257 .features[FEAT_1_ECX] =
1258 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1259 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1260 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1261 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1262 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1263 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1264 .features[FEAT_8000_0001_EDX] =
1265 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1266 CPUID_EXT2_SYSCALL,
1267 .features[FEAT_8000_0001_ECX] =
1268 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1269 .features[FEAT_7_0_EBX] =
1270 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1271 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1272 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1273 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1274 CPUID_7_0_EBX_SMAP,
1275 .features[FEAT_XSAVE] =
1276 CPUID_XSAVE_XSAVEOPT,
1277 .features[FEAT_6_EAX] =
1278 CPUID_6_EAX_ARAT,
1279 .xlevel = 0x80000008,
1280 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1281 },
1282 {
1283 .name = "Broadwell",
1284 .level = 0xd,
1285 .vendor = CPUID_VENDOR_INTEL,
1286 .family = 6,
1287 .model = 61,
1288 .stepping = 2,
1289 .features[FEAT_1_EDX] =
1290 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1291 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1292 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1293 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1294 CPUID_DE | CPUID_FP87,
1295 .features[FEAT_1_ECX] =
1296 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1297 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1298 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1299 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1300 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1301 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1302 .features[FEAT_8000_0001_EDX] =
1303 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1304 CPUID_EXT2_SYSCALL,
1305 .features[FEAT_8000_0001_ECX] =
1306 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1307 .features[FEAT_7_0_EBX] =
1308 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1309 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1310 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1311 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1312 CPUID_7_0_EBX_SMAP,
1313 .features[FEAT_XSAVE] =
1314 CPUID_XSAVE_XSAVEOPT,
1315 .features[FEAT_6_EAX] =
1316 CPUID_6_EAX_ARAT,
1317 .xlevel = 0x80000008,
1318 .model_id = "Intel Core Processor (Broadwell)",
1319 },
1320 {
1321 .name = "Skylake-Client",
1322 .level = 0xd,
1323 .vendor = CPUID_VENDOR_INTEL,
1324 .family = 6,
1325 .model = 94,
1326 .stepping = 3,
1327 .features[FEAT_1_EDX] =
1328 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1329 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1330 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1331 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1332 CPUID_DE | CPUID_FP87,
1333 .features[FEAT_1_ECX] =
1334 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1335 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1336 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1337 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1338 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1339 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1340 .features[FEAT_8000_0001_EDX] =
1341 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1342 CPUID_EXT2_SYSCALL,
1343 .features[FEAT_8000_0001_ECX] =
1344 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1345 .features[FEAT_7_0_EBX] =
1346 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1347 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1348 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1349 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1350 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1351 /* Missing: XSAVES (not supported by some Linux versions,
1352 * including v4.1 to v4.6).
1353 * KVM doesn't yet expose any XSAVES state save component,
1354 * and the only one defined in Skylake (processor tracing)
1355 * probably will block migration anyway.
1356 */
1357 .features[FEAT_XSAVE] =
1358 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1359 CPUID_XSAVE_XGETBV1,
1360 .features[FEAT_6_EAX] =
1361 CPUID_6_EAX_ARAT,
1362 .xlevel = 0x80000008,
1363 .model_id = "Intel Core Processor (Skylake)",
1364 },
1365 {
1366 .name = "Opteron_G1",
1367 .level = 5,
1368 .vendor = CPUID_VENDOR_AMD,
1369 .family = 15,
1370 .model = 6,
1371 .stepping = 1,
1372 .features[FEAT_1_EDX] =
1373 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1374 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1375 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1376 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1377 CPUID_DE | CPUID_FP87,
1378 .features[FEAT_1_ECX] =
1379 CPUID_EXT_SSE3,
1380 .features[FEAT_8000_0001_EDX] =
1381 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1382 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1383 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1384 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1385 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1386 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1387 .xlevel = 0x80000008,
1388 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1389 },
1390 {
1391 .name = "Opteron_G2",
1392 .level = 5,
1393 .vendor = CPUID_VENDOR_AMD,
1394 .family = 15,
1395 .model = 6,
1396 .stepping = 1,
1397 .features[FEAT_1_EDX] =
1398 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1399 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1400 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1401 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1402 CPUID_DE | CPUID_FP87,
1403 .features[FEAT_1_ECX] =
1404 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1405 /* Missing: CPUID_EXT2_RDTSCP */
1406 .features[FEAT_8000_0001_EDX] =
1407 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1408 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1409 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1410 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1411 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1412 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1413 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1414 .features[FEAT_8000_0001_ECX] =
1415 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1416 .xlevel = 0x80000008,
1417 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1418 },
1419 {
1420 .name = "Opteron_G3",
1421 .level = 5,
1422 .vendor = CPUID_VENDOR_AMD,
1423 .family = 15,
1424 .model = 6,
1425 .stepping = 1,
1426 .features[FEAT_1_EDX] =
1427 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1428 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1429 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1430 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1431 CPUID_DE | CPUID_FP87,
1432 .features[FEAT_1_ECX] =
1433 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1434 CPUID_EXT_SSE3,
1435 /* Missing: CPUID_EXT2_RDTSCP */
1436 .features[FEAT_8000_0001_EDX] =
1437 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1438 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1439 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1440 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1441 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1442 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1443 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1444 .features[FEAT_8000_0001_ECX] =
1445 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1446 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1447 .xlevel = 0x80000008,
1448 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1449 },
1450 {
1451 .name = "Opteron_G4",
1452 .level = 0xd,
1453 .vendor = CPUID_VENDOR_AMD,
1454 .family = 21,
1455 .model = 1,
1456 .stepping = 2,
1457 .features[FEAT_1_EDX] =
1458 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1459 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1460 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1461 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1462 CPUID_DE | CPUID_FP87,
1463 .features[FEAT_1_ECX] =
1464 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1465 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1466 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1467 CPUID_EXT_SSE3,
1468 /* Missing: CPUID_EXT2_RDTSCP */
1469 .features[FEAT_8000_0001_EDX] =
1470 CPUID_EXT2_LM |
1471 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1472 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1473 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1474 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1475 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1476 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1477 .features[FEAT_8000_0001_ECX] =
1478 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1479 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1480 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1481 CPUID_EXT3_LAHF_LM,
1482 /* no xsaveopt! */
1483 .xlevel = 0x8000001A,
1484 .model_id = "AMD Opteron 62xx class CPU",
1485 },
1486 {
1487 .name = "Opteron_G5",
1488 .level = 0xd,
1489 .vendor = CPUID_VENDOR_AMD,
1490 .family = 21,
1491 .model = 2,
1492 .stepping = 0,
1493 .features[FEAT_1_EDX] =
1494 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1495 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1496 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1497 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1498 CPUID_DE | CPUID_FP87,
1499 .features[FEAT_1_ECX] =
1500 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1501 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1502 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1503 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1504 /* Missing: CPUID_EXT2_RDTSCP */
1505 .features[FEAT_8000_0001_EDX] =
1506 CPUID_EXT2_LM |
1507 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1508 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1509 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1510 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1511 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1512 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1513 .features[FEAT_8000_0001_ECX] =
1514 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1515 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1516 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1517 CPUID_EXT3_LAHF_LM,
1518 /* no xsaveopt! */
1519 .xlevel = 0x8000001A,
1520 .model_id = "AMD Opteron 63xx class CPU",
1521 },
1522 };
1523
/* A (property name, value) pair, used for tables of default property
 * settings such as kvm_default_props below.  Tables are terminated by
 * an entry whose .prop is NULL.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1527
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * Entries can be overridden at runtime (e.g. by machine-type compat
 * code) through x86_cpu_change_kvm_default().
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },   /* end-of-table marker */
};
1544
1545 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1546 {
1547 PropValue *pv;
1548 for (pv = kvm_default_props; pv->prop; pv++) {
1549 if (!strcmp(pv->prop, prop)) {
1550 pv->value = value;
1551 break;
1552 }
1553 }
1554
1555 /* It is valid to call this function only for properties that
1556 * are already present in the kvm_default_props table.
1557 */
1558 assert(pv->prop);
1559 }
1560
1561 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1562 bool migratable_only);
1563
1564 #ifdef CONFIG_KVM
1565
1566 static bool lmce_supported(void)
1567 {
1568 uint64_t mce_cap;
1569
1570 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1571 return false;
1572 }
1573
1574 return !!(mce_cap & MCG_LMCE_P);
1575 }
1576
/* Fill 'str' (at least 48 bytes) with the host CPU's brand string.
 *
 * The 48-byte model-id string lives in CPUID leaves 0x80000002..0x80000004,
 * 16 bytes (EAX, EBX, ECX, EDX in that order) per leaf.  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4];
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* regs[] is contiguous, so one copy covers EAX..EDX. */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
1591
/* CPU model definition backing the "host" CPU model; filled in from the
 * host's CPUID data by host_x86_cpu_class_init()/host_x86_cpu_initfn(). */
static X86CPUDefinition host_cpudef;
1593
/* qdev properties specific to the "host" CPU model. */
static Property host_x86_cpu_properties[] = {
    /* Default to true, i.e. expose only features safe for migration. */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* When on, pass the host's cache information through to the guest. */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1599
1600 /* class_init for the "host" CPU model
1601 *
1602 * This function may be called before KVM is initialized.
1603 */
1604 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1605 {
1606 DeviceClass *dc = DEVICE_CLASS(oc);
1607 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1608 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1609
1610 xcc->kvm_required = true;
1611
1612 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1613 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1614
1615 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1616 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1617 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1618 host_cpudef.stepping = eax & 0x0F;
1619
1620 cpu_x86_fill_model_id(host_cpudef.model_id);
1621
1622 xcc->cpu_def = &host_cpudef;
1623
1624 /* level, xlevel, xlevel2, and the feature words are initialized on
1625 * instance_init, because they require KVM to be initialized.
1626 */
1627
1628 dc->props = host_x86_cpu_properties;
1629 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1630 dc->cannot_destroy_with_object_finalize_yet = true;
1631 }
1632
1633 static void host_x86_cpu_initfn(Object *obj)
1634 {
1635 X86CPU *cpu = X86_CPU(obj);
1636 CPUX86State *env = &cpu->env;
1637 KVMState *s = kvm_state;
1638
1639 /* We can't fill the features array here because we don't know yet if
1640 * "migratable" is true or false.
1641 */
1642 cpu->host_features = true;
1643
1644 /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1645 if (kvm_enabled()) {
1646 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1647 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1648 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1649
1650 if (lmce_supported()) {
1651 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1652 }
1653 }
1654
1655 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1656 }
1657
/* QOM type registration for the "host" CPU model (only built with KVM). */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1664
1665 #endif
1666
1667 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1668 {
1669 FeatureWordInfo *f = &feature_word_info[w];
1670 int i;
1671
1672 for (i = 0; i < 32; ++i) {
1673 if ((1UL << i) & mask) {
1674 const char *reg = get_register_name_32(f->cpuid_reg);
1675 assert(reg);
1676 fprintf(stderr, "warning: %s doesn't support requested feature: "
1677 "CPUID.%02XH:%s%s%s [bit %d]\n",
1678 kvm_enabled() ? "host" : "TCG",
1679 f->cpuid_eax, reg,
1680 f->feat_names[i] ? "." : "",
1681 f->feat_names[i] ? f->feat_names[i] : "", i);
1682 }
1683 }
1684 }
1685
1686 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1687 const char *name, void *opaque,
1688 Error **errp)
1689 {
1690 X86CPU *cpu = X86_CPU(obj);
1691 CPUX86State *env = &cpu->env;
1692 int64_t value;
1693
1694 value = (env->cpuid_version >> 8) & 0xf;
1695 if (value == 0xf) {
1696 value += (env->cpuid_version >> 20) & 0xff;
1697 }
1698 visit_type_int(v, name, &value, errp);
1699 }
1700
1701 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1702 const char *name, void *opaque,
1703 Error **errp)
1704 {
1705 X86CPU *cpu = X86_CPU(obj);
1706 CPUX86State *env = &cpu->env;
1707 const int64_t min = 0;
1708 const int64_t max = 0xff + 0xf;
1709 Error *local_err = NULL;
1710 int64_t value;
1711
1712 visit_type_int(v, name, &value, &local_err);
1713 if (local_err) {
1714 error_propagate(errp, local_err);
1715 return;
1716 }
1717 if (value < min || value > max) {
1718 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1719 name ? name : "null", value, min, max);
1720 return;
1721 }
1722
1723 env->cpuid_version &= ~0xff00f00;
1724 if (value > 0x0f) {
1725 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1726 } else {
1727 env->cpuid_version |= value << 8;
1728 }
1729 }
1730
1731 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1732 const char *name, void *opaque,
1733 Error **errp)
1734 {
1735 X86CPU *cpu = X86_CPU(obj);
1736 CPUX86State *env = &cpu->env;
1737 int64_t value;
1738
1739 value = (env->cpuid_version >> 4) & 0xf;
1740 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1741 visit_type_int(v, name, &value, errp);
1742 }
1743
1744 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1745 const char *name, void *opaque,
1746 Error **errp)
1747 {
1748 X86CPU *cpu = X86_CPU(obj);
1749 CPUX86State *env = &cpu->env;
1750 const int64_t min = 0;
1751 const int64_t max = 0xff;
1752 Error *local_err = NULL;
1753 int64_t value;
1754
1755 visit_type_int(v, name, &value, &local_err);
1756 if (local_err) {
1757 error_propagate(errp, local_err);
1758 return;
1759 }
1760 if (value < min || value > max) {
1761 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1762 name ? name : "null", value, min, max);
1763 return;
1764 }
1765
1766 env->cpuid_version &= ~0xf00f0;
1767 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1768 }
1769
1770 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1771 const char *name, void *opaque,
1772 Error **errp)
1773 {
1774 X86CPU *cpu = X86_CPU(obj);
1775 CPUX86State *env = &cpu->env;
1776 int64_t value;
1777
1778 value = env->cpuid_version & 0xf;
1779 visit_type_int(v, name, &value, errp);
1780 }
1781
1782 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1783 const char *name, void *opaque,
1784 Error **errp)
1785 {
1786 X86CPU *cpu = X86_CPU(obj);
1787 CPUX86State *env = &cpu->env;
1788 const int64_t min = 0;
1789 const int64_t max = 0xf;
1790 Error *local_err = NULL;
1791 int64_t value;
1792
1793 visit_type_int(v, name, &value, &local_err);
1794 if (local_err) {
1795 error_propagate(errp, local_err);
1796 return;
1797 }
1798 if (value < min || value > max) {
1799 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1800 name ? name : "null", value, min, max);
1801 return;
1802 }
1803
1804 env->cpuid_version &= ~0xf;
1805 env->cpuid_version |= value & 0xf;
1806 }
1807
1808 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1809 {
1810 X86CPU *cpu = X86_CPU(obj);
1811 CPUX86State *env = &cpu->env;
1812 char *value;
1813
1814 value = g_malloc(CPUID_VENDOR_SZ + 1);
1815 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1816 env->cpuid_vendor3);
1817 return value;
1818 }
1819
1820 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1821 Error **errp)
1822 {
1823 X86CPU *cpu = X86_CPU(obj);
1824 CPUX86State *env = &cpu->env;
1825 int i;
1826
1827 if (strlen(value) != CPUID_VENDOR_SZ) {
1828 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1829 return;
1830 }
1831
1832 env->cpuid_vendor1 = 0;
1833 env->cpuid_vendor2 = 0;
1834 env->cpuid_vendor3 = 0;
1835 for (i = 0; i < 4; i++) {
1836 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1837 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1838 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1839 }
1840 }
1841
1842 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1843 {
1844 X86CPU *cpu = X86_CPU(obj);
1845 CPUX86State *env = &cpu->env;
1846 char *value;
1847 int i;
1848
1849 value = g_malloc(48 + 1);
1850 for (i = 0; i < 48; i++) {
1851 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1852 }
1853 value[48] = '\0';
1854 return value;
1855 }
1856
1857 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1858 Error **errp)
1859 {
1860 X86CPU *cpu = X86_CPU(obj);
1861 CPUX86State *env = &cpu->env;
1862 int c, len, i;
1863
1864 if (model_id == NULL) {
1865 model_id = "";
1866 }
1867 len = strlen(model_id);
1868 memset(env->cpuid_model, 0, 48);
1869 for (i = 0; i < 48; i++) {
1870 if (i >= len) {
1871 c = '\0';
1872 } else {
1873 c = (uint8_t)model_id[i];
1874 }
1875 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1876 }
1877 }
1878
1879 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1880 void *opaque, Error **errp)
1881 {
1882 X86CPU *cpu = X86_CPU(obj);
1883 int64_t value;
1884
1885 value = cpu->env.tsc_khz * 1000;
1886 visit_type_int(v, name, &value, errp);
1887 }
1888
1889 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1890 void *opaque, Error **errp)
1891 {
1892 X86CPU *cpu = X86_CPU(obj);
1893 const int64_t min = 0;
1894 const int64_t max = INT64_MAX;
1895 Error *local_err = NULL;
1896 int64_t value;
1897
1898 visit_type_int(v, name, &value, &local_err);
1899 if (local_err) {
1900 error_propagate(errp, local_err);
1901 return;
1902 }
1903 if (value < min || value > max) {
1904 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1905 name ? name : "null", value, min, max);
1906 return;
1907 }
1908
1909 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1910 }
1911
/* Generic getter for "feature-words" and "filtered-features" properties.
 *
 * 'opaque' points at the per-word uint32_t feature array to expose.
 * Builds a QAPI list of X86CPUFeatureWordInfo entirely on the stack
 * (one node per feature word) and visits it; nothing is heap-allocated,
 * so the list must not outlive this call.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        /* Describe which CPUID leaf/register this word comes from. */
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
1940
1941 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1942 void *opaque, Error **errp)
1943 {
1944 X86CPU *cpu = X86_CPU(obj);
1945 int64_t value = cpu->hyperv_spinlock_attempts;
1946
1947 visit_type_int(v, name, &value, errp);
1948 }
1949
1950 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1951 void *opaque, Error **errp)
1952 {
1953 const int64_t min = 0xFFF;
1954 const int64_t max = UINT_MAX;
1955 X86CPU *cpu = X86_CPU(obj);
1956 Error *err = NULL;
1957 int64_t value;
1958
1959 visit_type_int(v, name, &value, &err);
1960 if (err) {
1961 error_propagate(errp, err);
1962 return;
1963 }
1964
1965 if (value < min || value > max) {
1966 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1967 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1968 object_get_typename(obj), name ? name : "null",
1969 value, min, max);
1970 return;
1971 }
1972 cpu->hyperv_spinlock_attempts = value;
1973 }
1974
/* qdev property type exposing the spinlock retry count as an "int"
 * property, backed by the getter/setter defined above.
 */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1980
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    for (char *p = s; *p != '\0'; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
1990
/* Compatibility hack to maintain the legacy +-feat semantics, where
 * +feat/-feat overwrites any feature set by feat=on|off even if the
 * latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static FeatureWordArray plus_features = { 0 };
static FeatureWordArray minus_features = { 0 };
1998
/* Parse "+feature,-feature,feature=foo" CPU feature string.
 *
 * Legacy "+feat"/"-feat" entries are accumulated into the
 * plus_features/minus_features bitmaps; "feat=val" (or bare "feat",
 * meaning "feat=on") entries are registered as global properties on
 * @typename.  A static guard ensures registration happens only once.
 *
 * NOTE(review): uses strtok(), so this is not reentrant/thread-safe —
 * presumably called once during machine init; verify against callers.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single "key=value" string being parsed */
    Error *local_err = NULL;
    static bool cpu_globals_initialized;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr && !local_err;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
            continue;
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
            continue;
        }

        /* Split "name=value"; a bare flag means "name=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Special case: "tsc-freq" takes a size-suffixed value (e.g. 2.5G)
         * and is converted to the "tsc-frequency" property, in Hz.
         */
        if (!strcmp(name, "tsc-freq")) {
            int64_t tsc_freq;
            char *err;

            tsc_freq = qemu_strtosz_suffix_unit(val, &err,
                                           QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
            if (tsc_freq < 0 || *err) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (local_err) {
        error_propagate(errp, local_err);
    }
}
2074
2075 /* Print all cpuid feature names in featureset
2076 */
2077 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2078 {
2079 int bit;
2080 bool first = true;
2081
2082 for (bit = 0; bit < 32; bit++) {
2083 if (featureset[bit]) {
2084 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2085 first = false;
2086 }
2087 }
2088 }
2089
2090 /* generate CPU information. */
2091 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2092 {
2093 X86CPUDefinition *def;
2094 char buf[256];
2095 int i;
2096
2097 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2098 def = &builtin_x86_defs[i];
2099 snprintf(buf, sizeof(buf), "%s", def->name);
2100 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2101 }
2102 #ifdef CONFIG_KVM
2103 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2104 "KVM processor with all supported host features "
2105 "(only available in KVM mode)");
2106 #endif
2107
2108 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2109 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2110 FeatureWordInfo *fw = &feature_word_info[i];
2111
2112 (*cpu_fprintf)(f, " ");
2113 listflags(f, cpu_fprintf, fw->feat_names);
2114 (*cpu_fprintf)(f, "\n");
2115 }
2116 }
2117
2118 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2119 {
2120 CpuDefinitionInfoList *cpu_list = NULL;
2121 X86CPUDefinition *def;
2122 int i;
2123
2124 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2125 CpuDefinitionInfoList *entry;
2126 CpuDefinitionInfo *info;
2127
2128 def = &builtin_x86_defs[i];
2129 info = g_malloc0(sizeof(*info));
2130 info->name = g_strdup(def->name);
2131
2132 entry = g_malloc0(sizeof(*entry));
2133 entry->value = info;
2134 entry->next = cpu_list;
2135 cpu_list = entry;
2136 }
2137
2138 return cpu_list;
2139 }
2140
2141 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2142 bool migratable_only)
2143 {
2144 FeatureWordInfo *wi = &feature_word_info[w];
2145 uint32_t r;
2146
2147 if (kvm_enabled()) {
2148 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2149 wi->cpuid_ecx,
2150 wi->cpuid_reg);
2151 } else if (tcg_enabled()) {
2152 r = wi->tcg_features;
2153 } else {
2154 return ~0;
2155 }
2156 if (migratable_only) {
2157 r &= x86_cpu_get_migratable_flags(w);
2158 }
2159 return r;
2160 }
2161
2162 /*
2163 * Filters CPU feature words based on host availability of each feature.
2164 *
2165 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2166 */
2167 static int x86_cpu_filter_features(X86CPU *cpu)
2168 {
2169 CPUX86State *env = &cpu->env;
2170 FeatureWord w;
2171 int rv = 0;
2172
2173 for (w = 0; w < FEATURE_WORDS; w++) {
2174 uint32_t host_feat =
2175 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2176 uint32_t requested_features = env->features[w];
2177 env->features[w] &= host_feat;
2178 cpu->filtered_features[w] = requested_features & ~env->features[w];
2179 if (cpu->filtered_features[w]) {
2180 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2181 report_unavailable_features(w, cpu->filtered_features[w]);
2182 }
2183 rv = 1;
2184 }
2185 }
2186
2187 return rv;
2188 }
2189
2190 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2191 {
2192 PropValue *pv;
2193 for (pv = props; pv->prop; pv++) {
2194 if (!pv->value) {
2195 continue;
2196 }
2197 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2198 &error_abort);
2199 }
2200 }
2201
/* Load data from X86CPUDefinition into an X86CPU instance.
 *
 * Copies the CPUID levels, version fields, model-id and feature words,
 * then applies special cases (KVM defaults, the hypervisor bit, and the
 * vendor string, which under KVM defaults to the host's vendor).
 * Errors from the individual property setters are reported via @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    object_property_set_int(OBJECT(cpu), def->level, "level", errp);
    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    /* Always advertise that we are running under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2250
/* Convenience wrapper around cpu_generic_init() for TYPE_X86_CPU,
 * creating a CPU from a "model[,features...]" string.
 */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
2255
/* Class init for per-model CPU subclasses: store the X86CPUDefinition
 * passed as class_data in the class struct.
 */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
}
2263
2264 static void x86_register_cpudef_type(X86CPUDefinition *def)
2265 {
2266 char *typename = x86_cpu_type_name(def->name);
2267 TypeInfo ti = {
2268 .name = typename,
2269 .parent = TYPE_X86_CPU,
2270 .class_init = x86_cpu_cpudef_class_init,
2271 .class_data = def,
2272 };
2273
2274 type_register(&ti);
2275 g_free(typename);
2276 }
2277
2278 #if !defined(CONFIG_USER_ONLY)
2279
/* Remove CPUID_APIC from the CPUID[1].EDX feature word. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2284
2285 #endif /* !CONFIG_USER_ONLY */
2286
/* Compute the guest-visible CPUID result for leaf @index, subleaf @count.
 *
 * @index is first clamped against the advertised cpuid_level /
 * cpuid_xlevel (with special handling for the Centaur/VIA 0xC0000000
 * range), then the per-leaf values are returned through @eax..@edx.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string is returned in EBX:EDX:ECX order */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects guest CR4, not a static feature bit */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE reflects guest CR4.PKE, like OSXSAVE in leaf 1 */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        /* @count selects the topology level: 0 = SMT, 1 = core */
        switch (count) {
        case 0:
            *eax = apicid_core_offset(smp_cores, smp_threads);
            *ebx = smp_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(smp_cores, smp_threads);
            *ebx = smp_cores * smp_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        KVMState *s = cs->kvm_state;
        uint64_t ena_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }
        /* ena_mask: 64-bit mask of XSAVE components the host allows */
        if (kvm_enabled()) {
            ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
            ena_mask <<= 32;
            ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
        } else {
            ena_mask = -1;
        }

        if (count == 0) {
            *ecx = 0x240;
            for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
                const ExtSaveArea *esa = &x86_ext_save_areas[i];
                if ((env->features[esa->feature] & esa->bits) == esa->bits
                    && ((ena_mask >> i) & 1) != 0) {
                    if (i < 32) {
                        *eax |= 1u << i;
                    } else {
                        *edx |= 1u << (i - 32);
                    }
                    *ecx = MAX(*ecx, esa->offset + esa->size);
                }
            }
            *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            const ExtSaveArea *esa = &x86_ext_save_areas[count];
            if ((env->features[esa->feature] & esa->bits) == esa->bits
                && ((ena_mask >> count) & 1) != 0) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor, 48 bits virtual, configurable
             * physical bits.
             */
            *eax = 0x00003000 + cpu->phys_bits;
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2704
/* CPUClass::reset()
 *
 * Bring the CPU to its architectural power-on/reset state: zero env up
 * to the end_reset_fields marker, then re-initialize segments, FPU/SSE,
 * debug registers, MTRRs, XCR0/CR4, and (system emulation only) the
 * BSP/AP halt state.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Fields after end_reset_fields (e.g. configuration) are preserved */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + EIP 0xfff0 = reset vector 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if ((env->features[esa->feature] & esa->bits) == esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* APs start halted, waiting for INIT/SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2828
2829 #ifndef CONFIG_USER_ONLY
/* Return true if @cpu is the bootstrap processor, as reported by the
 * BSP bit of its APIC base MSR.
 */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
2834
/* Machine-reset callback: reset the X86CPU passed as @opaque.
 * TODO: remove me, when reset over QOM tree is implemented
 */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
2841 #endif
2842
/* Initialize Machine Check state (MCG_CAP, MCG_CTL and the MCi_CTL
 * banks) if the model's family field is >= 6 and both MCE and MCA are
 * advertised in CPUID[1].EDX; otherwise leave everything zero.
 */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            /* Enable all error reporting in each bank's MCi_CTL */
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
2859
2860 #ifndef CONFIG_USER_ONLY
/* Create (but do not realize) the local APIC device for @cpu, selecting
 * the in-kernel KVM or Xen APIC variant when applicable.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = DEVICE(object_new(apic_type));

    /* Parent the APIC under the CPU; drop the extra ref from object_new */
    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
2884
/* Realize the APIC device created by x86_cpu_apic_create() and, once
 * per process (static flag), map the APIC MMIO region into system
 * memory.  No-op when the CPU has no APIC.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
2907
/* Machine-done notifier: if the machine exposes "/machine/smram", create
 * a 4GiB alias of it in the CPU's address-space root.  The alias starts
 * out disabled.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
2922 #else
/* CONFIG_USER_ONLY build: no APIC device exists, so this is a no-op. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
2926 #endif
2927
2928 /* Note: Only safe for use on x86(-64) hosts */
2929 static uint32_t x86_host_phys_bits(void)
2930 {
2931 uint32_t eax;
2932 uint32_t host_phys_bits;
2933
2934 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
2935 if (eax >= 0x80000008) {
2936 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
2937 /* Note: According to AMD doc 25481 rev 2.34 they have a field
2938 * at 23:16 that can specify a maximum physical address bits for
2939 * the guest that can override this value; but I've not seen
2940 * anything with that set.
2941 */
2942 host_phys_bits = eax & 0xff;
2943 } else {
2944 /* It's an odd 64 bit machine that doesn't have the leaf for
2945 * physical address bits; fall back to 36 that's most older
2946 * Intel.
2947 */
2948 host_phys_bits = 36;
2949 }
2950
2951 return host_phys_bits;
2952 }
2953
/* Vendor tests against the three CPUID[0] vendor-string registers */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* Realize an X86CPU device.
 *
 * Finalizes the feature words (host-features, +feat/-feat overrides,
 * filtering against host/TCG capabilities), validates or derives the
 * physical address width, creates the APIC and the per-CPU address space,
 * starts the vCPU and finally resets it.  On failure the error is reported
 * through @errp and the device stays unrealized.
 *
 * NOTE(review): some early checks set @errp and return directly while
 * later ones go through the "out" label; both end up only propagating the
 * error, but the direct returns skip no cleanup today — confirm before
 * adding teardown to the out path.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;
    FeatureWord w;

    /* "kvm_required" models (e.g. -cpu host) are unusable without KVM. */
    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /*TODO: cpu->host_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->host_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Apply the "+feat"/"-feat" command-line overrides. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        cpu->env.features[w] |= plus_features[w];
        cpu->env.features[w] &= ~minus_features[w];
    }

    /* CPUID[7].EBX features are invisible unless cpuid_level reaches 7. */
    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    /* Silently drop unsupported feature bits, unless "enforce" was set,
     * in which case any filtered bit is a hard error.
     */
    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                             " does not match phys-bits property (%u)",
                             host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                 cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                           " (but is %u)",
                           TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                           TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_init(cs, &error_abort);

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed when the guest sees the APIC CPUID bit or with SMP. */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3161
3162 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3163 {
3164 X86CPU *cpu = X86_CPU(dev);
3165
3166 #ifndef CONFIG_USER_ONLY
3167 cpu_remove_sync(CPU(dev));
3168 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3169 #endif
3170
3171 if (cpu->apic_state) {
3172 object_unparent(OBJECT(cpu->apic_state));
3173 cpu->apic_state = NULL;
3174 }
3175 }
3176
/* Backing state for a single-bit boolean QOM property: which uint32_t
 * field it lives in and which bit(s) of that field it covers. */
typedef struct BitProperty {
    uint32_t *ptr;  /* feature-word field this property reads/writes */
    uint32_t mask;  /* bit(s) covered; may span several bits when the same
                     * name is registered more than once */
} BitProperty;
3181
3182 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3183 void *opaque, Error **errp)
3184 {
3185 BitProperty *fp = opaque;
3186 bool value = (*fp->ptr & fp->mask) == fp->mask;
3187 visit_type_bool(v, name, &value, errp);
3188 }
3189
3190 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3191 void *opaque, Error **errp)
3192 {
3193 DeviceState *dev = DEVICE(obj);
3194 BitProperty *fp = opaque;
3195 Error *local_err = NULL;
3196 bool value;
3197
3198 if (dev->realized) {
3199 qdev_prop_set_after_realize(dev, name, errp);
3200 return;
3201 }
3202
3203 visit_type_bool(v, name, &value, &local_err);
3204 if (local_err) {
3205 error_propagate(errp, local_err);
3206 return;
3207 }
3208
3209 if (value) {
3210 *fp->ptr |= fp->mask;
3211 } else {
3212 *fp->ptr &= ~fp->mask;
3213 }
3214 }
3215
3216 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3217 void *opaque)
3218 {
3219 BitProperty *prop = opaque;
3220 g_free(prop);
3221 }
3222
3223 /* Register a boolean property to get/set a single bit in a uint32_t field.
3224 *
3225 * The same property name can be registered multiple times to make it affect
3226 * multiple bits in the same FeatureWord. In that case, the getter will return
3227 * true only if all bits are set.
3228 */
3229 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3230 const char *prop_name,
3231 uint32_t *field,
3232 int bitnr)
3233 {
3234 BitProperty *fp;
3235 ObjectProperty *op;
3236 uint32_t mask = (1UL << bitnr);
3237
3238 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3239 if (op) {
3240 fp = op->opaque;
3241 assert(fp->ptr == field);
3242 fp->mask |= mask;
3243 } else {
3244 fp = g_new0(BitProperty, 1);
3245 fp->ptr = field;
3246 fp->mask = mask;
3247 object_property_add(OBJECT(cpu), prop_name, "bool",
3248 x86_cpu_get_bit_prop,
3249 x86_cpu_set_bit_prop,
3250 x86_cpu_release_bit_prop, fp, &error_abort);
3251 }
3252 }
3253
3254 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3255 FeatureWord w,
3256 int bitnr)
3257 {
3258 Object *obj = OBJECT(cpu);
3259 int i;
3260 char **names;
3261 FeatureWordInfo *fi = &feature_word_info[w];
3262
3263 if (!fi->feat_names) {
3264 return;
3265 }
3266 if (!fi->feat_names[bitnr]) {
3267 return;
3268 }
3269
3270 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3271
3272 feat2prop(names[0]);
3273 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3274
3275 for (i = 1; names[i]; i++) {
3276 feat2prop(names[i]);
3277 object_property_add_alias(obj, names[i], obj, names[0],
3278 &error_abort);
3279 }
3280
3281 g_strfreev(names);
3282 }
3283
/* Instance init: wire cs->env_ptr, register the QOM properties every
 * X86CPU exposes (CPUID version fields, vendor, model-id, tsc frequency,
 * feature-word introspection and the per-feature-bit booleans), then load
 * the class's CPU model definition into this instance.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only introspection of the configured and host-filtered
     * feature words; both reuse the same getter with different state. */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3331
3332 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3333 {
3334 X86CPU *cpu = X86_CPU(cs);
3335
3336 return cpu->apic_id;
3337 }
3338
3339 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3340 {
3341 X86CPU *cpu = X86_CPU(cs);
3342
3343 return cpu->env.cr[0] & CR0_PG_MASK;
3344 }
3345
3346 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3347 {
3348 X86CPU *cpu = X86_CPU(cs);
3349
3350 cpu->env.eip = value;
3351 }
3352
3353 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3354 {
3355 X86CPU *cpu = X86_CPU(cs);
3356
3357 cpu->env.eip = tb->pc - tb->cs_base;
3358 }
3359
3360 static bool x86_cpu_has_work(CPUState *cs)
3361 {
3362 X86CPU *cpu = X86_CPU(cs);
3363 CPUX86State *env = &cpu->env;
3364
3365 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3366 CPU_INTERRUPT_POLL)) &&
3367 (env->eflags & IF_MASK)) ||
3368 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3369 CPU_INTERRUPT_INIT |
3370 CPU_INTERRUPT_SIPI |
3371 CPU_INTERRUPT_MCE)) ||
3372 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3373 !(env->hflags & HF_SMM_MASK));
3374 }
3375
/* qdev properties shared by every X86CPU model.  Property names here are
 * user-visible command-line ABI ("-cpu ...,prop=val"); do not rename. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* System emulation: topology ids start unassigned and must be set by
     * the board/realize code (see the UNASSIGNED_APIC_ID check). */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments, all opt-in. */
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    /* phys-bits = 0 means "not set"; resolved in x86_cpu_realizefn(). */
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_END_OF_LIST()
};
3415
/* Class init for the abstract x86 CPU type: save the parent realize/reset
 * handlers so they can be chained, then install the x86 implementations of
 * the DeviceClass and CPUClass hooks.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Keep the parent handlers; x86_cpu_realizefn()/x86_cpu_reset() call
     * them after the x86-specific work. */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* Full-system only: physical memory inspection, ELF dump notes and
     * migration state. */
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    dc->cannot_instantiate_with_device_add_yet = false;
    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
3468
/* Abstract base QOM type for all x86 CPU models; concrete model types are
 * registered on top of it by x86_cpu_register_types(). */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,  /* only named models are instantiable */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3478
3479 static void x86_cpu_register_types(void)
3480 {
3481 int i;
3482
3483 type_register_static(&x86_cpu_type_info);
3484 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3485 x86_register_cpudef_type(&builtin_x86_defs[i]);
3486 }
3487 #ifdef CONFIG_KVM
3488 type_register_static(&host_x86_cpu_type_info);
3489 #endif
3490 }
3491
3492 type_init(x86_cpu_register_types)