1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
53 /* Cache topology CPUID constants: */
54
55 /* CPUID Leaf 2 Descriptors */
56
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60
61
62 /* CPUID Leaf 4 constants: */
63
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
68
69 #define CPUID_4_LEVEL(l) ((l) << 5)
70
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
73
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
78
79 #define ASSOC_FULL 0xFF
80
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
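/* Illustrative sketch, not part of the original file: a few spot checks of
 * the AMD_ENC_ASSOC() encoding above.  QEMU_BUILD_BUG_ON() is assumed to be
 * visible here via "qemu/osdep.h" (it lives in qemu/compiler.h).
 */
QEMU_BUILD_BUG_ON(AMD_ENC_ASSOC(1) != 1);             /* direct mapped */
QEMU_BUILD_BUG_ON(AMD_ENC_ASSOC(8) != 0x6);           /* 8-way */
QEMU_BUILD_BUG_ON(AMD_ENC_ASSOC(ASSOC_FULL) != 0xF);  /* fully associative */
QEMU_BUILD_BUG_ON(AMD_ENC_ASSOC(3) != 0);             /* unsupported -> invalid */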
94
95
96 /* Definitions of the hardcoded cache entries we expose: */
97
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
109
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
121
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
133
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
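/* Illustrative sketch, not part of the original file: the "Size =" comments
 * above can be verified at compile time (QEMU_BUILD_BUG_ON() is assumed to
 * be available via "qemu/osdep.h"):
 *   L1D/L1I: 64 B * 8 ways * 64 sets * 1 partition = 32 KiB
 *   L2:      64 B * 16 ways * 4096 sets * 1 partition = 4 MiB
 */
QEMU_BUILD_BUG_ON(L1D_LINE_SIZE * L1D_ASSOCIATIVITY * L1D_SETS *
                  L1D_PARTITIONS != 32 * 1024);
QEMU_BUILD_BUG_ON(L2_LINE_SIZE * L2_ASSOCIATIVITY * L2_SETS *
                  L2_PARTITIONS != 4 * 1024 * 1024);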
139
140 /* TLB definitions: */
141
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
146
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
151
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
156
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
161
162
163
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
166 {
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
172 }
173 dst[CPUID_VENDOR_SZ] = '\0';
174 }
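/* Usage sketch, not part of the original file: the vendor words are packed
 * little-endian ASCII, so the standard "GenuineIntel" CPUID.0 register
 * values decode as expected.  example_vendor_words2str() is a hypothetical
 * helper, not a QEMU API.
 */
static inline void example_vendor_words2str(void)
{
    char vendor[CPUID_VENDOR_SZ + 1];

    x86_cpu_vendor_words2str(vendor, 0x756e6547 /* EBX: "Genu" */,
                             0x49656e69 /* EDX: "ineI" */,
                             0x6c65746e /* ECX: "ntel" */);
    /* vendor now holds "GenuineIntel" */
}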
175
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
179 */
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
189 };
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
199 };
200 /* Feature names that are already defined in feature_name[] but are also set in
201  * CPUID[8000_0001].EDX on AMD CPUs don't have entries in
202  * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203  * if and only if the CPU vendor is AMD.
204 */
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
214 };
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
224 };
225
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
235 };
236
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
246 };
247
248 static const char *hyperv_priv_feature_name[] = {
249 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
250 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
251 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
252 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
253 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
254 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257 NULL, NULL, NULL, NULL,
258 NULL, NULL, NULL, NULL,
259 NULL, NULL, NULL, NULL,
260 };
261
262 static const char *hyperv_ident_feature_name[] = {
263 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
264 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
265 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
266 NULL /* hv_create_port */, NULL /* hv_connect_port */,
267 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
268 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
269 NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 };
275
276 static const char *hyperv_misc_feature_name[] = {
277 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
278 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
279 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
280 NULL, NULL,
281 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286 NULL, NULL, NULL, NULL,
287 };
288
289 static const char *svm_feature_name[] = {
290 "npt", "lbrv", "svm_lock", "nrip_save",
291 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
292 NULL, NULL, "pause_filter", NULL,
293 "pfthreshold", NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
297 NULL, NULL, NULL, NULL,
298 };
299
300 static const char *cpuid_7_0_ebx_feature_name[] = {
301 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
302 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
303 "avx512f", NULL, "rdseed", "adx", "smap", NULL, "pcommit", "clflushopt",
304 "clwb", NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
305 };
306
307 static const char *cpuid_7_0_ecx_feature_name[] = {
308 NULL, NULL, NULL, "pku",
309 "ospke", NULL, NULL, NULL,
310 NULL, NULL, NULL, NULL,
311 NULL, NULL, NULL, NULL,
312 NULL, NULL, NULL, NULL,
313 NULL, NULL, NULL, NULL,
314 NULL, NULL, NULL, NULL,
315 NULL, NULL, NULL, NULL,
316 };
317
318 static const char *cpuid_apm_edx_feature_name[] = {
319 NULL, NULL, NULL, NULL,
320 NULL, NULL, NULL, NULL,
321 "invtsc", NULL, NULL, NULL,
322 NULL, NULL, NULL, NULL,
323 NULL, NULL, NULL, NULL,
324 NULL, NULL, NULL, NULL,
325 NULL, NULL, NULL, NULL,
326 NULL, NULL, NULL, NULL,
327 };
328
329 static const char *cpuid_xsave_feature_name[] = {
330 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
331 NULL, NULL, NULL, NULL,
332 NULL, NULL, NULL, NULL,
333 NULL, NULL, NULL, NULL,
334 NULL, NULL, NULL, NULL,
335 NULL, NULL, NULL, NULL,
336 NULL, NULL, NULL, NULL,
337 NULL, NULL, NULL, NULL,
338 };
339
340 static const char *cpuid_6_feature_name[] = {
341 NULL, NULL, "arat", NULL,
342 NULL, NULL, NULL, NULL,
343 NULL, NULL, NULL, NULL,
344 NULL, NULL, NULL, NULL,
345 NULL, NULL, NULL, NULL,
346 NULL, NULL, NULL, NULL,
347 NULL, NULL, NULL, NULL,
348 NULL, NULL, NULL, NULL,
349 };
350
351 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
352 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
353 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
354 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
355 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
356 CPUID_PSE36 | CPUID_FXSR)
357 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
358 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
359 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
360 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
361 CPUID_PAE | CPUID_SEP | CPUID_APIC)
362
363 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
364 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
365 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
366 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
367 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
368 /* partly implemented:
369 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
370 /* missing:
371 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
372 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
373 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
374 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
375 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
376 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
377 /* missing:
378 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
379 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
380 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
381 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
382 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
383
384 #ifdef TARGET_X86_64
385 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
386 #else
387 #define TCG_EXT2_X86_64_FEATURES 0
388 #endif
389
390 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
391 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
392 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
393 TCG_EXT2_X86_64_FEATURES)
394 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
395 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
396 #define TCG_EXT4_FEATURES 0
397 #define TCG_SVM_FEATURES 0
398 #define TCG_KVM_FEATURES 0
399 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
400 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
401 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
402 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
403 CPUID_7_0_EBX_ERMS)
404 /* missing:
405 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
406 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
407 CPUID_7_0_EBX_RDSEED */
408 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
409 #define TCG_APM_FEATURES 0
410 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
411 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
412 /* missing:
413 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
414
415 typedef struct FeatureWordInfo {
416 const char **feat_names;
417 uint32_t cpuid_eax; /* Input EAX for CPUID */
418 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
419 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
420 int cpuid_reg; /* output register (R_* constant) */
421 uint32_t tcg_features; /* Feature flags supported by TCG */
422 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
423 } FeatureWordInfo;
424
425 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
426 [FEAT_1_EDX] = {
427 .feat_names = feature_name,
428 .cpuid_eax = 1, .cpuid_reg = R_EDX,
429 .tcg_features = TCG_FEATURES,
430 },
431 [FEAT_1_ECX] = {
432 .feat_names = ext_feature_name,
433 .cpuid_eax = 1, .cpuid_reg = R_ECX,
434 .tcg_features = TCG_EXT_FEATURES,
435 },
436 [FEAT_8000_0001_EDX] = {
437 .feat_names = ext2_feature_name,
438 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
439 .tcg_features = TCG_EXT2_FEATURES,
440 },
441 [FEAT_8000_0001_ECX] = {
442 .feat_names = ext3_feature_name,
443 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
444 .tcg_features = TCG_EXT3_FEATURES,
445 },
446 [FEAT_C000_0001_EDX] = {
447 .feat_names = ext4_feature_name,
448 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
449 .tcg_features = TCG_EXT4_FEATURES,
450 },
451 [FEAT_KVM] = {
452 .feat_names = kvm_feature_name,
453 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
454 .tcg_features = TCG_KVM_FEATURES,
455 },
456 [FEAT_HYPERV_EAX] = {
457 .feat_names = hyperv_priv_feature_name,
458 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
459 },
460 [FEAT_HYPERV_EBX] = {
461 .feat_names = hyperv_ident_feature_name,
462 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
463 },
464 [FEAT_HYPERV_EDX] = {
465 .feat_names = hyperv_misc_feature_name,
466 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
467 },
468 [FEAT_SVM] = {
469 .feat_names = svm_feature_name,
470 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
471 .tcg_features = TCG_SVM_FEATURES,
472 },
473 [FEAT_7_0_EBX] = {
474 .feat_names = cpuid_7_0_ebx_feature_name,
475 .cpuid_eax = 7,
476 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
477 .cpuid_reg = R_EBX,
478 .tcg_features = TCG_7_0_EBX_FEATURES,
479 },
480 [FEAT_7_0_ECX] = {
481 .feat_names = cpuid_7_0_ecx_feature_name,
482 .cpuid_eax = 7,
483 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
484 .cpuid_reg = R_ECX,
485 .tcg_features = TCG_7_0_ECX_FEATURES,
486 },
487 [FEAT_8000_0007_EDX] = {
488 .feat_names = cpuid_apm_edx_feature_name,
489 .cpuid_eax = 0x80000007,
490 .cpuid_reg = R_EDX,
491 .tcg_features = TCG_APM_FEATURES,
492 .unmigratable_flags = CPUID_APM_INVTSC,
493 },
494 [FEAT_XSAVE] = {
495 .feat_names = cpuid_xsave_feature_name,
496 .cpuid_eax = 0xd,
497 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
498 .cpuid_reg = R_EAX,
499 .tcg_features = TCG_XSAVE_FEATURES,
500 },
501 [FEAT_6_EAX] = {
502 .feat_names = cpuid_6_feature_name,
503 .cpuid_eax = 6, .cpuid_reg = R_EAX,
504 .tcg_features = TCG_6_EAX_FEATURES,
505 },
506 };
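/* Illustrative sketch, not part of the original file: how a FeatureWordInfo
 * entry can drive a raw CPUID query on the host for its feature word.  The
 * real lookup additionally filters through KVM/TCG capabilities; the
 * function name is hypothetical.  host_cpuid() is declared in cpu.h and
 * defined later in this file.
 */
static inline uint32_t example_host_feature_word(FeatureWord w)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t regs[4] = { 0, 0, 0, 0 };

    host_cpuid(wi->cpuid_eax, wi->cpuid_needs_ecx ? wi->cpuid_ecx : 0,
               &regs[R_EAX], &regs[R_EBX], &regs[R_ECX], &regs[R_EDX]);
    return regs[wi->cpuid_reg];
}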
507
508 typedef struct X86RegisterInfo32 {
509 /* Name of register */
510 const char *name;
511 /* QAPI enum value register */
512 X86CPURegister32 qapi_enum;
513 } X86RegisterInfo32;
514
515 #define REGISTER(reg) \
516 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
517 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
518 REGISTER(EAX),
519 REGISTER(ECX),
520 REGISTER(EDX),
521 REGISTER(EBX),
522 REGISTER(ESP),
523 REGISTER(EBP),
524 REGISTER(ESI),
525 REGISTER(EDI),
526 };
527 #undef REGISTER
528
529 const ExtSaveArea x86_ext_save_areas[] = {
530 [XSTATE_YMM_BIT] =
531 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
532 .offset = offsetof(X86XSaveArea, avx_state),
533 .size = sizeof(XSaveAVX) },
534 [XSTATE_BNDREGS_BIT] =
535 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
536 .offset = offsetof(X86XSaveArea, bndreg_state),
537 .size = sizeof(XSaveBNDREG) },
538 [XSTATE_BNDCSR_BIT] =
539 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
540 .offset = offsetof(X86XSaveArea, bndcsr_state),
541 .size = sizeof(XSaveBNDCSR) },
542 [XSTATE_OPMASK_BIT] =
543 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
544 .offset = offsetof(X86XSaveArea, opmask_state),
545 .size = sizeof(XSaveOpmask) },
546 [XSTATE_ZMM_Hi256_BIT] =
547 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
548 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
549 .size = sizeof(XSaveZMM_Hi256) },
550 [XSTATE_Hi16_ZMM_BIT] =
551 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
552 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
553 .size = sizeof(XSaveHi16_ZMM) },
554 [XSTATE_PKRU_BIT] =
555 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
556 .offset = offsetof(X86XSaveArea, pkru_state),
557 .size = sizeof(XSavePKRU) },
558 };
559
560 const char *get_register_name_32(unsigned int reg)
561 {
562 if (reg >= CPU_NB_REGS32) {
563 return NULL;
564 }
565 return x86_reg_info_32[reg].name;
566 }
567
568 /*
569 * Returns the set of feature flags that are supported and migratable by
570 * QEMU, for a given FeatureWord.
571 */
572 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
573 {
574 FeatureWordInfo *wi = &feature_word_info[w];
575 uint32_t r = 0;
576 int i;
577
578 for (i = 0; i < 32; i++) {
579 uint32_t f = 1U << i;
580 /* If the feature name is unknown, it is not supported by QEMU yet */
581 if (!wi->feat_names[i]) {
582 continue;
583 }
584 /* Skip features known to QEMU, but explicitly marked as unmigratable */
585 if (wi->unmigratable_flags & f) {
586 continue;
587 }
588 r |= f;
589 }
590 return r;
591 }
592
593 void host_cpuid(uint32_t function, uint32_t count,
594 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
595 {
596 uint32_t vec[4];
597
598 #ifdef __x86_64__
599 asm volatile("cpuid"
600 : "=a"(vec[0]), "=b"(vec[1]),
601 "=c"(vec[2]), "=d"(vec[3])
602 : "0"(function), "c"(count) : "cc");
603 #elif defined(__i386__)
604 asm volatile("pusha \n\t"
605 "cpuid \n\t"
606 "mov %%eax, 0(%2) \n\t"
607 "mov %%ebx, 4(%2) \n\t"
608 "mov %%ecx, 8(%2) \n\t"
609 "mov %%edx, 12(%2) \n\t"
610 "popa"
611 : : "a"(function), "c"(count), "S"(vec)
612 : "memory", "cc");
613 #else
614 abort();
615 #endif
616
617 if (eax)
618 *eax = vec[0];
619 if (ebx)
620 *ebx = vec[1];
621 if (ecx)
622 *ecx = vec[2];
623 if (edx)
624 *edx = vec[3];
625 }
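/* Usage sketch, not part of the original file: read the host's highest
 * standard CPUID leaf.  The helper name is hypothetical.
 */
static inline uint32_t example_host_max_std_leaf(void)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0, 0, &eax, &ebx, &ecx, &edx);
    return eax; /* EBX/EDX/ECX hold the vendor string words */
}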
626
627 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
628
629 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
630 * a substring. ex if !NULL points to the first char after a substring,
631 * otherwise the string is assumed to be terminated by a nul.
632 * Return lexical ordering of *s1:*s2.
633 */
634 static int sstrcmp(const char *s1, const char *e1,
635 const char *s2, const char *e2)
636 {
637 for (;;) {
638 if (!*s1 || !*s2 || *s1 != *s2)
639 return (*s1 - *s2);
640 ++s1, ++s2;
641 if (s1 == e1 && s2 == e2)
642 return (0);
643 else if (s1 == e1)
644 return (*s2);
645 else if (s2 == e2)
646 return (*s1);
647 }
648 }
649
650 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
651 * '|' delimited (possibly empty) strings in which case search for a match
652 * within the alternatives proceeds left to right. Return 0 for success,
653 * non-zero otherwise.
654 */
655 static int altcmp(const char *s, const char *e, const char *altstr)
656 {
657 const char *p, *q;
658
659 for (q = p = altstr; ; ) {
660 while (*p && *p != '|')
661 ++p;
662 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
663 return (0);
664 if (!*p)
665 return (1);
666 else
667 q = ++p;
668 }
669 }
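/* Illustrative examples, not part of the original file (e = NULL means the
 * flag name is nul-terminated, as in lookup_feature() below):
 *   altcmp("sse4.1", NULL, "sse4.1|sse4_1") == 0   (first alternative)
 *   altcmp("sse4_1", NULL, "sse4.1|sse4_1") == 0   (second alternative)
 *   altcmp("sse5",   NULL, "sse4.1|sse4_1") != 0   (no match)
 */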
670
671 /* search featureset for flag *[s..e), if found set corresponding bit in
672 * *pval and return true, otherwise return false
673 */
674 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
675 const char **featureset)
676 {
677 uint32_t mask;
678 const char **ppc;
679 bool found = false;
680
681 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
682 if (*ppc && !altcmp(s, e, *ppc)) {
683 *pval |= mask;
684 found = true;
685 }
686 }
687 return found;
688 }
689
690 static void add_flagname_to_bitmaps(const char *flagname,
691 FeatureWordArray words,
692 Error **errp)
693 {
694 FeatureWord w;
695 for (w = 0; w < FEATURE_WORDS; w++) {
696 FeatureWordInfo *wi = &feature_word_info[w];
697 if (wi->feat_names &&
698 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
699 break;
700 }
701 }
702 if (w == FEATURE_WORDS) {
703 error_setg(errp, "CPU feature %s not found", flagname);
704 }
705 }
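/* Usage sketch, not part of the original file: resolving a "-cpu ...,+flag"
 * style name into its feature word and bit.  The helper is hypothetical, and
 * &error_abort (from qapi/error.h) is used only to keep the example short.
 */
static inline void example_add_flagname(void)
{
    FeatureWordArray plus_features = { 0 };

    /* "sse4_1" matches the "sse4.1|sse4_1" alias in ext_feature_name[],
     * so this sets bit 19 (CPUID_EXT_SSE41) in plus_features[FEAT_1_ECX].
     */
    add_flagname_to_bitmaps("sse4_1", plus_features, &error_abort);
}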
706
707 /* CPU class name definitions: */
708
709 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
710 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
711
712 /* Return type name for a given CPU model name
713 * Caller is responsible for freeing the returned string.
714 */
715 static char *x86_cpu_type_name(const char *model_name)
716 {
717 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
718 }
719
720 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
721 {
722 ObjectClass *oc;
723 char *typename;
724
725 if (cpu_model == NULL) {
726 return NULL;
727 }
728
729 typename = x86_cpu_type_name(cpu_model);
730 oc = object_class_by_name(typename);
731 g_free(typename);
732 return oc;
733 }
734
735 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
736 {
737 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
738 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
739 return g_strndup(class_name,
740 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
741 }
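/* Example, not part of the original file: for the 64-bit target,
 * X86_CPU_TYPE_NAME("qemu64") is "qemu64-x86_64-cpu", and
 * x86_cpu_class_get_model_name() on that class returns "qemu64" again.
 */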
742
743 struct X86CPUDefinition {
744 const char *name;
745 uint32_t level;
746 uint32_t xlevel;
747 uint32_t xlevel2;
748 /* vendor is zero-terminated, 12 character ASCII string */
749 char vendor[CPUID_VENDOR_SZ + 1];
750 int family;
751 int model;
752 int stepping;
753 FeatureWordArray features;
754 char model_id[48];
755 };
756
757 static X86CPUDefinition builtin_x86_defs[] = {
758 {
759 .name = "qemu64",
760 .level = 0xd,
761 .vendor = CPUID_VENDOR_AMD,
762 .family = 6,
763 .model = 6,
764 .stepping = 3,
765 .features[FEAT_1_EDX] =
766 PPRO_FEATURES |
767 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
768 CPUID_PSE36,
769 .features[FEAT_1_ECX] =
770 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
771 .features[FEAT_8000_0001_EDX] =
772 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
773 .features[FEAT_8000_0001_ECX] =
774 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
775 .xlevel = 0x8000000A,
776 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
777 },
778 {
779 .name = "phenom",
780 .level = 5,
781 .vendor = CPUID_VENDOR_AMD,
782 .family = 16,
783 .model = 2,
784 .stepping = 3,
785 /* Missing: CPUID_HT */
786 .features[FEAT_1_EDX] =
787 PPRO_FEATURES |
788 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
789 CPUID_PSE36 | CPUID_VME,
790 .features[FEAT_1_ECX] =
791 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
792 CPUID_EXT_POPCNT,
793 .features[FEAT_8000_0001_EDX] =
794 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
795 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
796 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
797 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
798 CPUID_EXT3_CR8LEG,
799 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
800 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
801 .features[FEAT_8000_0001_ECX] =
802 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
803 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
804 /* Missing: CPUID_SVM_LBRV */
805 .features[FEAT_SVM] =
806 CPUID_SVM_NPT,
807 .xlevel = 0x8000001A,
808 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
809 },
810 {
811 .name = "core2duo",
812 .level = 10,
813 .vendor = CPUID_VENDOR_INTEL,
814 .family = 6,
815 .model = 15,
816 .stepping = 11,
817 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
818 .features[FEAT_1_EDX] =
819 PPRO_FEATURES |
820 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
821 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
822 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
823 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
824 .features[FEAT_1_ECX] =
825 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
826 CPUID_EXT_CX16,
827 .features[FEAT_8000_0001_EDX] =
828 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
829 .features[FEAT_8000_0001_ECX] =
830 CPUID_EXT3_LAHF_LM,
831 .xlevel = 0x80000008,
832 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
833 },
834 {
835 .name = "kvm64",
836 .level = 0xd,
837 .vendor = CPUID_VENDOR_INTEL,
838 .family = 15,
839 .model = 6,
840 .stepping = 1,
841 /* Missing: CPUID_HT */
842 .features[FEAT_1_EDX] =
843 PPRO_FEATURES | CPUID_VME |
844 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
845 CPUID_PSE36,
846 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
847 .features[FEAT_1_ECX] =
848 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
849 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
850 .features[FEAT_8000_0001_EDX] =
851 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
852 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
853 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
854 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
855 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
856 .features[FEAT_8000_0001_ECX] =
857 0,
858 .xlevel = 0x80000008,
859 .model_id = "Common KVM processor"
860 },
861 {
862 .name = "qemu32",
863 .level = 4,
864 .vendor = CPUID_VENDOR_INTEL,
865 .family = 6,
866 .model = 6,
867 .stepping = 3,
868 .features[FEAT_1_EDX] =
869 PPRO_FEATURES,
870 .features[FEAT_1_ECX] =
871 CPUID_EXT_SSE3,
872 .xlevel = 0x80000004,
873 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
874 },
875 {
876 .name = "kvm32",
877 .level = 5,
878 .vendor = CPUID_VENDOR_INTEL,
879 .family = 15,
880 .model = 6,
881 .stepping = 1,
882 .features[FEAT_1_EDX] =
883 PPRO_FEATURES | CPUID_VME |
884 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
885 .features[FEAT_1_ECX] =
886 CPUID_EXT_SSE3,
887 .features[FEAT_8000_0001_ECX] =
888 0,
889 .xlevel = 0x80000008,
890 .model_id = "Common 32-bit KVM processor"
891 },
892 {
893 .name = "coreduo",
894 .level = 10,
895 .vendor = CPUID_VENDOR_INTEL,
896 .family = 6,
897 .model = 14,
898 .stepping = 8,
899 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
900 .features[FEAT_1_EDX] =
901 PPRO_FEATURES | CPUID_VME |
902 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
903 CPUID_SS,
904 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
905 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
906 .features[FEAT_1_ECX] =
907 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
908 .features[FEAT_8000_0001_EDX] =
909 CPUID_EXT2_NX,
910 .xlevel = 0x80000008,
911 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
912 },
913 {
914 .name = "486",
915 .level = 1,
916 .vendor = CPUID_VENDOR_INTEL,
917 .family = 4,
918 .model = 8,
919 .stepping = 0,
920 .features[FEAT_1_EDX] =
921 I486_FEATURES,
922 .xlevel = 0,
923 },
924 {
925 .name = "pentium",
926 .level = 1,
927 .vendor = CPUID_VENDOR_INTEL,
928 .family = 5,
929 .model = 4,
930 .stepping = 3,
931 .features[FEAT_1_EDX] =
932 PENTIUM_FEATURES,
933 .xlevel = 0,
934 },
935 {
936 .name = "pentium2",
937 .level = 2,
938 .vendor = CPUID_VENDOR_INTEL,
939 .family = 6,
940 .model = 5,
941 .stepping = 2,
942 .features[FEAT_1_EDX] =
943 PENTIUM2_FEATURES,
944 .xlevel = 0,
945 },
946 {
947 .name = "pentium3",
948 .level = 3,
949 .vendor = CPUID_VENDOR_INTEL,
950 .family = 6,
951 .model = 7,
952 .stepping = 3,
953 .features[FEAT_1_EDX] =
954 PENTIUM3_FEATURES,
955 .xlevel = 0,
956 },
957 {
958 .name = "athlon",
959 .level = 2,
960 .vendor = CPUID_VENDOR_AMD,
961 .family = 6,
962 .model = 2,
963 .stepping = 3,
964 .features[FEAT_1_EDX] =
965 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
966 CPUID_MCA,
967 .features[FEAT_8000_0001_EDX] =
968 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
969 .xlevel = 0x80000008,
970 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
971 },
972 {
973 .name = "n270",
974 .level = 10,
975 .vendor = CPUID_VENDOR_INTEL,
976 .family = 6,
977 .model = 28,
978 .stepping = 2,
979 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
980 .features[FEAT_1_EDX] =
981 PPRO_FEATURES |
982 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
983 CPUID_ACPI | CPUID_SS,
984         /* Some CPUs have no CPUID_SEP */
985 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
986 * CPUID_EXT_XTPR */
987 .features[FEAT_1_ECX] =
988 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
989 CPUID_EXT_MOVBE,
990 .features[FEAT_8000_0001_EDX] =
991 CPUID_EXT2_NX,
992 .features[FEAT_8000_0001_ECX] =
993 CPUID_EXT3_LAHF_LM,
994 .xlevel = 0x80000008,
995 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
996 },
997 {
998 .name = "Conroe",
999 .level = 10,
1000 .vendor = CPUID_VENDOR_INTEL,
1001 .family = 6,
1002 .model = 15,
1003 .stepping = 3,
1004 .features[FEAT_1_EDX] =
1005 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1006 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1007 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1008 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1009 CPUID_DE | CPUID_FP87,
1010 .features[FEAT_1_ECX] =
1011 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1012 .features[FEAT_8000_0001_EDX] =
1013 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1014 .features[FEAT_8000_0001_ECX] =
1015 CPUID_EXT3_LAHF_LM,
1016 .xlevel = 0x80000008,
1017 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1018 },
1019 {
1020 .name = "Penryn",
1021 .level = 10,
1022 .vendor = CPUID_VENDOR_INTEL,
1023 .family = 6,
1024 .model = 23,
1025 .stepping = 3,
1026 .features[FEAT_1_EDX] =
1027 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1028 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1029 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1030 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1031 CPUID_DE | CPUID_FP87,
1032 .features[FEAT_1_ECX] =
1033 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1034 CPUID_EXT_SSE3,
1035 .features[FEAT_8000_0001_EDX] =
1036 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1037 .features[FEAT_8000_0001_ECX] =
1038 CPUID_EXT3_LAHF_LM,
1039 .xlevel = 0x80000008,
1040 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1041 },
1042 {
1043 .name = "Nehalem",
1044 .level = 11,
1045 .vendor = CPUID_VENDOR_INTEL,
1046 .family = 6,
1047 .model = 26,
1048 .stepping = 3,
1049 .features[FEAT_1_EDX] =
1050 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1051 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1052 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1053 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1054 CPUID_DE | CPUID_FP87,
1055 .features[FEAT_1_ECX] =
1056 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1057 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1058 .features[FEAT_8000_0001_EDX] =
1059 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1060 .features[FEAT_8000_0001_ECX] =
1061 CPUID_EXT3_LAHF_LM,
1062 .xlevel = 0x80000008,
1063 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1064 },
1065 {
1066 .name = "Westmere",
1067 .level = 11,
1068 .vendor = CPUID_VENDOR_INTEL,
1069 .family = 6,
1070 .model = 44,
1071 .stepping = 1,
1072 .features[FEAT_1_EDX] =
1073 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1074 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1075 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1076 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1077 CPUID_DE | CPUID_FP87,
1078 .features[FEAT_1_ECX] =
1079 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1080 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1081 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1082 .features[FEAT_8000_0001_EDX] =
1083 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1084 .features[FEAT_8000_0001_ECX] =
1085 CPUID_EXT3_LAHF_LM,
1086 .features[FEAT_6_EAX] =
1087 CPUID_6_EAX_ARAT,
1088 .xlevel = 0x80000008,
1089 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1090 },
1091 {
1092 .name = "SandyBridge",
1093 .level = 0xd,
1094 .vendor = CPUID_VENDOR_INTEL,
1095 .family = 6,
1096 .model = 42,
1097 .stepping = 1,
1098 .features[FEAT_1_EDX] =
1099 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1100 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1101 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1102 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1103 CPUID_DE | CPUID_FP87,
1104 .features[FEAT_1_ECX] =
1105 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1106 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1107 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1108 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1109 CPUID_EXT_SSE3,
1110 .features[FEAT_8000_0001_EDX] =
1111 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1112 CPUID_EXT2_SYSCALL,
1113 .features[FEAT_8000_0001_ECX] =
1114 CPUID_EXT3_LAHF_LM,
1115 .features[FEAT_XSAVE] =
1116 CPUID_XSAVE_XSAVEOPT,
1117 .features[FEAT_6_EAX] =
1118 CPUID_6_EAX_ARAT,
1119 .xlevel = 0x80000008,
1120 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1121 },
1122 {
1123 .name = "IvyBridge",
1124 .level = 0xd,
1125 .vendor = CPUID_VENDOR_INTEL,
1126 .family = 6,
1127 .model = 58,
1128 .stepping = 9,
1129 .features[FEAT_1_EDX] =
1130 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1131 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1132 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1133 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1134 CPUID_DE | CPUID_FP87,
1135 .features[FEAT_1_ECX] =
1136 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1137 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1138 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1139 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1140 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1141 .features[FEAT_7_0_EBX] =
1142 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1143 CPUID_7_0_EBX_ERMS,
1144 .features[FEAT_8000_0001_EDX] =
1145 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1146 CPUID_EXT2_SYSCALL,
1147 .features[FEAT_8000_0001_ECX] =
1148 CPUID_EXT3_LAHF_LM,
1149 .features[FEAT_XSAVE] =
1150 CPUID_XSAVE_XSAVEOPT,
1151 .features[FEAT_6_EAX] =
1152 CPUID_6_EAX_ARAT,
1153 .xlevel = 0x80000008,
1154 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1155 },
1156 {
1157 .name = "Haswell-noTSX",
1158 .level = 0xd,
1159 .vendor = CPUID_VENDOR_INTEL,
1160 .family = 6,
1161 .model = 60,
1162 .stepping = 1,
1163 .features[FEAT_1_EDX] =
1164 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1165 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1166 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1167 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1168 CPUID_DE | CPUID_FP87,
1169 .features[FEAT_1_ECX] =
1170 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1171 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1172 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1173 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1174 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1175 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1176 .features[FEAT_8000_0001_EDX] =
1177 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1178 CPUID_EXT2_SYSCALL,
1179 .features[FEAT_8000_0001_ECX] =
1180 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1181 .features[FEAT_7_0_EBX] =
1182 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1183 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1184 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1185 .features[FEAT_XSAVE] =
1186 CPUID_XSAVE_XSAVEOPT,
1187 .features[FEAT_6_EAX] =
1188 CPUID_6_EAX_ARAT,
1189 .xlevel = 0x80000008,
1190 .model_id = "Intel Core Processor (Haswell, no TSX)",
1191 }, {
1192 .name = "Haswell",
1193 .level = 0xd,
1194 .vendor = CPUID_VENDOR_INTEL,
1195 .family = 6,
1196 .model = 60,
1197 .stepping = 1,
1198 .features[FEAT_1_EDX] =
1199 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1200 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1201 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1202 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1203 CPUID_DE | CPUID_FP87,
1204 .features[FEAT_1_ECX] =
1205 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1206 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1207 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1208 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1209 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1210 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1211 .features[FEAT_8000_0001_EDX] =
1212 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1213 CPUID_EXT2_SYSCALL,
1214 .features[FEAT_8000_0001_ECX] =
1215 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1216 .features[FEAT_7_0_EBX] =
1217 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1218 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1219 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1220 CPUID_7_0_EBX_RTM,
1221 .features[FEAT_XSAVE] =
1222 CPUID_XSAVE_XSAVEOPT,
1223 .features[FEAT_6_EAX] =
1224 CPUID_6_EAX_ARAT,
1225 .xlevel = 0x80000008,
1226 .model_id = "Intel Core Processor (Haswell)",
1227 },
1228 {
1229 .name = "Broadwell-noTSX",
1230 .level = 0xd,
1231 .vendor = CPUID_VENDOR_INTEL,
1232 .family = 6,
1233 .model = 61,
1234 .stepping = 2,
1235 .features[FEAT_1_EDX] =
1236 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1237 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1238 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1239 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1240 CPUID_DE | CPUID_FP87,
1241 .features[FEAT_1_ECX] =
1242 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1243 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1244 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1245 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1246 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1247 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1248 .features[FEAT_8000_0001_EDX] =
1249 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1250 CPUID_EXT2_SYSCALL,
1251 .features[FEAT_8000_0001_ECX] =
1252 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1253 .features[FEAT_7_0_EBX] =
1254 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1255 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1256 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1257 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1258 CPUID_7_0_EBX_SMAP,
1259 .features[FEAT_XSAVE] =
1260 CPUID_XSAVE_XSAVEOPT,
1261 .features[FEAT_6_EAX] =
1262 CPUID_6_EAX_ARAT,
1263 .xlevel = 0x80000008,
1264 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1265 },
1266 {
1267 .name = "Broadwell",
1268 .level = 0xd,
1269 .vendor = CPUID_VENDOR_INTEL,
1270 .family = 6,
1271 .model = 61,
1272 .stepping = 2,
1273 .features[FEAT_1_EDX] =
1274 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1275 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1276 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1277 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1278 CPUID_DE | CPUID_FP87,
1279 .features[FEAT_1_ECX] =
1280 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1281 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1282 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1283 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1284 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1285 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1286 .features[FEAT_8000_0001_EDX] =
1287 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1288 CPUID_EXT2_SYSCALL,
1289 .features[FEAT_8000_0001_ECX] =
1290 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1291 .features[FEAT_7_0_EBX] =
1292 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1293 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1294 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1295 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1296 CPUID_7_0_EBX_SMAP,
1297 .features[FEAT_XSAVE] =
1298 CPUID_XSAVE_XSAVEOPT,
1299 .features[FEAT_6_EAX] =
1300 CPUID_6_EAX_ARAT,
1301 .xlevel = 0x80000008,
1302 .model_id = "Intel Core Processor (Broadwell)",
1303 },
1304 {
1305 .name = "Skylake-Client",
1306 .level = 0xd,
1307 .vendor = CPUID_VENDOR_INTEL,
1308 .family = 6,
1309 .model = 94,
1310 .stepping = 3,
1311 .features[FEAT_1_EDX] =
1312 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1313 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1314 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1315 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1316 CPUID_DE | CPUID_FP87,
1317 .features[FEAT_1_ECX] =
1318 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1319 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1320 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1321 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1322 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1323 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1324 .features[FEAT_8000_0001_EDX] =
1325 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1326 CPUID_EXT2_SYSCALL,
1327 .features[FEAT_8000_0001_ECX] =
1328 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1329 .features[FEAT_7_0_EBX] =
1330 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1331 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1332 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1333 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1334 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1335 /* Missing: XSAVES (not supported by some Linux versions,
1336 * including v4.1 to v4.6).
1337 * KVM doesn't yet expose any XSAVES state save component,
1338 * and the only one defined in Skylake (processor tracing)
1339 * probably will block migration anyway.
1340 */
1341 .features[FEAT_XSAVE] =
1342 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1343 CPUID_XSAVE_XGETBV1,
1344 .features[FEAT_6_EAX] =
1345 CPUID_6_EAX_ARAT,
1346 .xlevel = 0x80000008,
1347 .model_id = "Intel Core Processor (Skylake)",
1348 },
1349 {
1350 .name = "Opteron_G1",
1351 .level = 5,
1352 .vendor = CPUID_VENDOR_AMD,
1353 .family = 15,
1354 .model = 6,
1355 .stepping = 1,
1356 .features[FEAT_1_EDX] =
1357 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1358 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1359 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1360 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1361 CPUID_DE | CPUID_FP87,
1362 .features[FEAT_1_ECX] =
1363 CPUID_EXT_SSE3,
1364 .features[FEAT_8000_0001_EDX] =
1365 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1366 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1367 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1368 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1369 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1370 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1371 .xlevel = 0x80000008,
1372 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1373 },
1374 {
1375 .name = "Opteron_G2",
1376 .level = 5,
1377 .vendor = CPUID_VENDOR_AMD,
1378 .family = 15,
1379 .model = 6,
1380 .stepping = 1,
1381 .features[FEAT_1_EDX] =
1382 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1383 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1384 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1385 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1386 CPUID_DE | CPUID_FP87,
1387 .features[FEAT_1_ECX] =
1388 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1389 /* Missing: CPUID_EXT2_RDTSCP */
1390 .features[FEAT_8000_0001_EDX] =
1391 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1392 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1393 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1394 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1395 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1396 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1397 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1398 .features[FEAT_8000_0001_ECX] =
1399 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1400 .xlevel = 0x80000008,
1401 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1402 },
1403 {
1404 .name = "Opteron_G3",
1405 .level = 5,
1406 .vendor = CPUID_VENDOR_AMD,
1407 .family = 15,
1408 .model = 6,
1409 .stepping = 1,
1410 .features[FEAT_1_EDX] =
1411 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1412 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1413 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1414 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1415 CPUID_DE | CPUID_FP87,
1416 .features[FEAT_1_ECX] =
1417 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1418 CPUID_EXT_SSE3,
1419 /* Missing: CPUID_EXT2_RDTSCP */
1420 .features[FEAT_8000_0001_EDX] =
1421 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1422 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1423 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1424 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1425 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1426 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1427 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1428 .features[FEAT_8000_0001_ECX] =
1429 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1430 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1431 .xlevel = 0x80000008,
1432 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1433 },
1434 {
1435 .name = "Opteron_G4",
1436 .level = 0xd,
1437 .vendor = CPUID_VENDOR_AMD,
1438 .family = 21,
1439 .model = 1,
1440 .stepping = 2,
1441 .features[FEAT_1_EDX] =
1442 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1443 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1444 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1445 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1446 CPUID_DE | CPUID_FP87,
1447 .features[FEAT_1_ECX] =
1448 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1449 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1450 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1451 CPUID_EXT_SSE3,
1452 /* Missing: CPUID_EXT2_RDTSCP */
1453 .features[FEAT_8000_0001_EDX] =
1454 CPUID_EXT2_LM |
1455 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1456 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1457 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1458 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1459 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1460 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1461 .features[FEAT_8000_0001_ECX] =
1462 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1463 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1464 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1465 CPUID_EXT3_LAHF_LM,
1466 /* no xsaveopt! */
1467 .xlevel = 0x8000001A,
1468 .model_id = "AMD Opteron 62xx class CPU",
1469 },
1470 {
1471 .name = "Opteron_G5",
1472 .level = 0xd,
1473 .vendor = CPUID_VENDOR_AMD,
1474 .family = 21,
1475 .model = 2,
1476 .stepping = 0,
1477 .features[FEAT_1_EDX] =
1478 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1479 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1480 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1481 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1482 CPUID_DE | CPUID_FP87,
1483 .features[FEAT_1_ECX] =
1484 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1485 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1486 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1487 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1488 /* Missing: CPUID_EXT2_RDTSCP */
1489 .features[FEAT_8000_0001_EDX] =
1490 CPUID_EXT2_LM |
1491 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1492 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1493 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1494 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1495 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1496 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1497 .features[FEAT_8000_0001_ECX] =
1498 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1499 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1500 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1501 CPUID_EXT3_LAHF_LM,
1502 /* no xsaveopt! */
1503 .xlevel = 0x8000001A,
1504 .model_id = "AMD Opteron 63xx class CPU",
1505 },
1506 };
1507
1508 typedef struct PropValue {
1509 const char *prop, *value;
1510 } PropValue;
1511
1512 /* KVM-specific features that are automatically added/removed
1513 * from all CPU models when KVM is enabled.
1514 */
1515 static PropValue kvm_default_props[] = {
1516 { "kvmclock", "on" },
1517 { "kvm-nopiodelay", "on" },
1518 { "kvm-asyncpf", "on" },
1519 { "kvm-steal-time", "on" },
1520 { "kvm-pv-eoi", "on" },
1521 { "kvmclock-stable-bit", "on" },
1522 { "x2apic", "on" },
1523 { "acpi", "off" },
1524 { "monitor", "off" },
1525 { "svm", "off" },
1526 { NULL, NULL },
1527 };
1528
1529 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1530 {
1531 PropValue *pv;
1532 for (pv = kvm_default_props; pv->prop; pv++) {
1533 if (!strcmp(pv->prop, prop)) {
1534 pv->value = value;
1535 break;
1536 }
1537 }
1538
1539 /* It is valid to call this function only for properties that
1540 * are already present in the kvm_default_props table.
1541 */
1542 assert(pv->prop);
1543 }
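/* Usage sketch (hypothetical), not part of the original file: compat or
 * machine code may flip one of the defaults above before CPUs are created,
 * e.g.:
 *     x86_cpu_change_kvm_default("kvm-pv-eoi", "off");
 * Calling it with a property that is not listed in kvm_default_props trips
 * the assert() above.
 */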
1544
1545 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1546 bool migratable_only);
1547
1548 #ifdef CONFIG_KVM
1549
1550 static int cpu_x86_fill_model_id(char *str)
1551 {
1552 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1553 int i;
1554
1555 for (i = 0; i < 3; i++) {
1556 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1557 memcpy(str + i * 16 + 0, &eax, 4);
1558 memcpy(str + i * 16 + 4, &ebx, 4);
1559 memcpy(str + i * 16 + 8, &ecx, 4);
1560 memcpy(str + i * 16 + 12, &edx, 4);
1561 }
1562 return 0;
1563 }
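/* Usage sketch, not part of the original file: the caller must supply at
 * least the 48 bytes produced by CPUID leaves 0x80000002..0x80000004.  The
 * helper name is hypothetical.
 */
static inline void example_fill_model_id(void)
{
    char model_id[48 + 1] = { 0 };

    cpu_x86_fill_model_id(model_id);
    /* model_id now holds the host's brand string, e.g. an "Intel(R) ..."
     * or "AMD ..." marketing name. */
}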
1564
1565 static X86CPUDefinition host_cpudef;
1566
1567 static Property host_x86_cpu_properties[] = {
1568 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1569 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1570 DEFINE_PROP_END_OF_LIST()
1571 };
1572
1573 /* class_init for the "host" CPU model
1574 *
1575 * This function may be called before KVM is initialized.
1576 */
1577 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1578 {
1579 DeviceClass *dc = DEVICE_CLASS(oc);
1580 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1581 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1582
1583 xcc->kvm_required = true;
1584
1585 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1586 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1587
1588 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1589 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1590 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1591 host_cpudef.stepping = eax & 0x0F;
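    /* Worked example, not part of the original file: on a Haswell host,
     * CPUID.1 EAX = 0x000306C3, so family = 0x6 + 0x00 = 6,
     * model = 0xC | 0x30 = 0x3C (60) and stepping = 3, matching the
     * family/model of the "Haswell" entry in builtin_x86_defs above.
     */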
1592
1593 cpu_x86_fill_model_id(host_cpudef.model_id);
1594
1595 xcc->cpu_def = &host_cpudef;
1596
1597 /* level, xlevel, xlevel2, and the feature words are initialized on
1598 * instance_init, because they require KVM to be initialized.
1599 */
1600
1601 dc->props = host_x86_cpu_properties;
1602 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1603 dc->cannot_destroy_with_object_finalize_yet = true;
1604 }
1605
1606 static void host_x86_cpu_initfn(Object *obj)
1607 {
1608 X86CPU *cpu = X86_CPU(obj);
1609 CPUX86State *env = &cpu->env;
1610 KVMState *s = kvm_state;
1611
1612 /* We can't fill the features array here because we don't know yet if
1613 * "migratable" is true or false.
1614 */
1615 cpu->host_features = true;
1616
1617 /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1618 if (kvm_enabled()) {
1619 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1620 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1621 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1622 }
1623
1624 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1625 }
1626
1627 static const TypeInfo host_x86_cpu_type_info = {
1628 .name = X86_CPU_TYPE_NAME("host"),
1629 .parent = TYPE_X86_CPU,
1630 .instance_init = host_x86_cpu_initfn,
1631 .class_init = host_x86_cpu_class_init,
1632 };
1633
1634 #endif
1635
1636 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1637 {
1638 FeatureWordInfo *f = &feature_word_info[w];
1639 int i;
1640
1641 for (i = 0; i < 32; ++i) {
1642 if ((1UL << i) & mask) {
1643 const char *reg = get_register_name_32(f->cpuid_reg);
1644 assert(reg);
1645 fprintf(stderr, "warning: %s doesn't support requested feature: "
1646 "CPUID.%02XH:%s%s%s [bit %d]\n",
1647 kvm_enabled() ? "host" : "TCG",
1648 f->cpuid_eax, reg,
1649 f->feat_names[i] ? "." : "",
1650 f->feat_names[i] ? f->feat_names[i] : "", i);
1651 }
1652 }
1653 }
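/* Example output, not part of the original file, derived from the format
 * string above (not captured from a real run):
 *   warning: host doesn't support requested feature: CPUID.07H:EBX.hle [bit 4]
 */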
1654
1655 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1656 const char *name, void *opaque,
1657 Error **errp)
1658 {
1659 X86CPU *cpu = X86_CPU(obj);
1660 CPUX86State *env = &cpu->env;
1661 int64_t value;
1662
1663 value = (env->cpuid_version >> 8) & 0xf;
1664 if (value == 0xf) {
1665 value += (env->cpuid_version >> 20) & 0xff;
1666 }
1667 visit_type_int(v, name, &value, errp);
1668 }
1669
1670 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1671 const char *name, void *opaque,
1672 Error **errp)
1673 {
1674 X86CPU *cpu = X86_CPU(obj);
1675 CPUX86State *env = &cpu->env;
1676 const int64_t min = 0;
1677 const int64_t max = 0xff + 0xf;
1678 Error *local_err = NULL;
1679 int64_t value;
1680
1681 visit_type_int(v, name, &value, &local_err);
1682 if (local_err) {
1683 error_propagate(errp, local_err);
1684 return;
1685 }
1686 if (value < min || value > max) {
1687 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1688 name ? name : "null", value, min, max);
1689 return;
1690 }
1691
1692 env->cpuid_version &= ~0xff00f00;
1693 if (value > 0x0f) {
1694 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1695 } else {
1696 env->cpuid_version |= value << 8;
1697 }
1698 }
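/*
 * Worked example of the family encoding handled above: setting "family" to
 * 6 stores 6 in bits 11:8 of cpuid_version; setting it to 21 (0x15) stores
 * the base family 0xF in bits 11:8 and the extended family 0x6 (21 - 15)
 * in bits 27:20, so the getter recovers 0xF + 0x6 = 21.
 */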
1699
1700 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1701 const char *name, void *opaque,
1702 Error **errp)
1703 {
1704 X86CPU *cpu = X86_CPU(obj);
1705 CPUX86State *env = &cpu->env;
1706 int64_t value;
1707
1708 value = (env->cpuid_version >> 4) & 0xf;
1709 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1710 visit_type_int(v, name, &value, errp);
1711 }
1712
1713 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1714 const char *name, void *opaque,
1715 Error **errp)
1716 {
1717 X86CPU *cpu = X86_CPU(obj);
1718 CPUX86State *env = &cpu->env;
1719 const int64_t min = 0;
1720 const int64_t max = 0xff;
1721 Error *local_err = NULL;
1722 int64_t value;
1723
1724 visit_type_int(v, name, &value, &local_err);
1725 if (local_err) {
1726 error_propagate(errp, local_err);
1727 return;
1728 }
1729 if (value < min || value > max) {
1730 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1731 name ? name : "null", value, min, max);
1732 return;
1733 }
1734
1735 env->cpuid_version &= ~0xf00f0;
1736 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1737 }
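/*
 * Worked example of the model encoding handled above: setting "model" to
 * 58 (0x3A) stores the low nibble 0xA in bits 7:4 and the high nibble 0x3
 * in the extended-model field (bits 19:16), i.e. cpuid_version |= 0x300A0;
 * the getter reassembles (0x3 << 4) | 0xA = 0x3A.
 */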
1738
1739 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1740 const char *name, void *opaque,
1741 Error **errp)
1742 {
1743 X86CPU *cpu = X86_CPU(obj);
1744 CPUX86State *env = &cpu->env;
1745 int64_t value;
1746
1747 value = env->cpuid_version & 0xf;
1748 visit_type_int(v, name, &value, errp);
1749 }
1750
1751 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1752 const char *name, void *opaque,
1753 Error **errp)
1754 {
1755 X86CPU *cpu = X86_CPU(obj);
1756 CPUX86State *env = &cpu->env;
1757 const int64_t min = 0;
1758 const int64_t max = 0xf;
1759 Error *local_err = NULL;
1760 int64_t value;
1761
1762 visit_type_int(v, name, &value, &local_err);
1763 if (local_err) {
1764 error_propagate(errp, local_err);
1765 return;
1766 }
1767 if (value < min || value > max) {
1768 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1769 name ? name : "null", value, min, max);
1770 return;
1771 }
1772
1773 env->cpuid_version &= ~0xf;
1774 env->cpuid_version |= value & 0xf;
1775 }
1776
1777 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1778 {
1779 X86CPU *cpu = X86_CPU(obj);
1780 CPUX86State *env = &cpu->env;
1781 char *value;
1782
1783 value = g_malloc(CPUID_VENDOR_SZ + 1);
1784 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1785 env->cpuid_vendor3);
1786 return value;
1787 }
1788
1789 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1790 Error **errp)
1791 {
1792 X86CPU *cpu = X86_CPU(obj);
1793 CPUX86State *env = &cpu->env;
1794 int i;
1795
1796 if (strlen(value) != CPUID_VENDOR_SZ) {
1797 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1798 return;
1799 }
1800
1801 env->cpuid_vendor1 = 0;
1802 env->cpuid_vendor2 = 0;
1803 env->cpuid_vendor3 = 0;
1804 for (i = 0; i < 4; i++) {
1805 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1806 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1807 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1808 }
1809 }
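/*
 * Example of the packing above: the 12-character vendor string
 * "GenuineIntel" is split into three little-endian 32-bit words:
 *   cpuid_vendor1 ("Genu") = 0x756e6547   (returned in EBX)
 *   cpuid_vendor2 ("ineI") = 0x49656e69   (returned in EDX)
 *   cpuid_vendor3 ("ntel") = 0x6c65746e   (returned in ECX)
 * matching the register layout used by CPUID leaf 0 in cpu_x86_cpuid().
 */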
1810
1811 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1812 {
1813 X86CPU *cpu = X86_CPU(obj);
1814 CPUX86State *env = &cpu->env;
1815 char *value;
1816 int i;
1817
1818 value = g_malloc(48 + 1);
1819 for (i = 0; i < 48; i++) {
1820 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1821 }
1822 value[48] = '\0';
1823 return value;
1824 }
1825
1826 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1827 Error **errp)
1828 {
1829 X86CPU *cpu = X86_CPU(obj);
1830 CPUX86State *env = &cpu->env;
1831 int c, len, i;
1832
1833 if (model_id == NULL) {
1834 model_id = "";
1835 }
1836 len = strlen(model_id);
1837 memset(env->cpuid_model, 0, 48);
1838 for (i = 0; i < 48; i++) {
1839 if (i >= len) {
1840 c = '\0';
1841 } else {
1842 c = (uint8_t)model_id[i];
1843 }
1844 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1845 }
1846 }
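/*
 * Example: the 48-byte model-id buffer is packed four characters per 32-bit
 * word (little-endian), so cpuid_model[0] holds bytes 0..3 of the string and
 * the twelve words are handed back, four at a time, by CPUID leaves
 * 0x80000002..0x80000004 in cpu_x86_cpuid() below.
 */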
1847
1848 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1849 void *opaque, Error **errp)
1850 {
1851 X86CPU *cpu = X86_CPU(obj);
1852 int64_t value;
1853
1854 value = cpu->env.tsc_khz * 1000;
1855 visit_type_int(v, name, &value, errp);
1856 }
1857
1858 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1859 void *opaque, Error **errp)
1860 {
1861 X86CPU *cpu = X86_CPU(obj);
1862 const int64_t min = 0;
1863 const int64_t max = INT64_MAX;
1864 Error *local_err = NULL;
1865 int64_t value;
1866
1867 visit_type_int(v, name, &value, &local_err);
1868 if (local_err) {
1869 error_propagate(errp, local_err);
1870 return;
1871 }
1872 if (value < min || value > max) {
1873 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1874 name ? name : "null", value, min, max);
1875 return;
1876 }
1877
1878 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1879 }
1880
1881 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, const char *name,
1882 void *opaque, Error **errp)
1883 {
1884 X86CPU *cpu = X86_CPU(obj);
1885 int64_t value = cpu->apic_id;
1886
1887 visit_type_int(v, name, &value, errp);
1888 }
1889
1890 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, const char *name,
1891 void *opaque, Error **errp)
1892 {
1893 X86CPU *cpu = X86_CPU(obj);
1894 DeviceState *dev = DEVICE(obj);
1895 const int64_t min = 0;
1896 const int64_t max = UINT32_MAX;
1897 Error *error = NULL;
1898 int64_t value;
1899
1900 if (dev->realized) {
1901 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1902 "it was realized", name, object_get_typename(obj));
1903 return;
1904 }
1905
1906 visit_type_int(v, name, &value, &error);
1907 if (error) {
1908 error_propagate(errp, error);
1909 return;
1910 }
1911 if (value < min || value > max) {
1912 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1913 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1914 object_get_typename(obj), name, value, min, max);
1915 return;
1916 }
1917
1918 if ((value != cpu->apic_id) && cpu_exists(value)) {
1919 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1920 return;
1921 }
1922 cpu->apic_id = value;
1923 }
1924
1925 /* Generic getter for "feature-words" and "filtered-features" properties */
1926 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1927 const char *name, void *opaque,
1928 Error **errp)
1929 {
1930 uint32_t *array = (uint32_t *)opaque;
1931 FeatureWord w;
1932 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1933 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1934 X86CPUFeatureWordInfoList *list = NULL;
1935
1936 for (w = 0; w < FEATURE_WORDS; w++) {
1937 FeatureWordInfo *wi = &feature_word_info[w];
1938 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1939 qwi->cpuid_input_eax = wi->cpuid_eax;
1940 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1941 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1942 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1943 qwi->features = array[w];
1944
1945 /* List will be in reverse order, but order shouldn't matter */
1946 list_entries[w].next = list;
1947 list_entries[w].value = &word_infos[w];
1948 list = &list_entries[w];
1949 }
1950
1951 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
1952 }
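/*
 * Usage sketch (the QOM path below is only an example and depends on how
 * the CPU was created): the "feature-words" and "filtered-features"
 * properties backed by this getter can be read over QMP with
 *
 *   { "execute": "qom-get",
 *     "arguments": { "path": "/machine/unattached/device[0]",
 *                    "property": "feature-words" } }
 *
 * which returns one X86CPUFeatureWordInfo entry per feature word.
 */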
1953
1954 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1955 void *opaque, Error **errp)
1956 {
1957 X86CPU *cpu = X86_CPU(obj);
1958 int64_t value = cpu->hyperv_spinlock_attempts;
1959
1960 visit_type_int(v, name, &value, errp);
1961 }
1962
1963 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1964 void *opaque, Error **errp)
1965 {
1966 const int64_t min = 0xFFF;
1967 const int64_t max = UINT_MAX;
1968 X86CPU *cpu = X86_CPU(obj);
1969 Error *err = NULL;
1970 int64_t value;
1971
1972 visit_type_int(v, name, &value, &err);
1973 if (err) {
1974 error_propagate(errp, err);
1975 return;
1976 }
1977
1978 if (value < min || value > max) {
1979 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1980 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1981 object_get_typename(obj), name ? name : "null",
1982 value, min, max);
1983 return;
1984 }
1985 cpu->hyperv_spinlock_attempts = value;
1986 }
1987
1988 static PropertyInfo qdev_prop_spinlocks = {
1989 .name = "int",
1990 .get = x86_get_hv_spinlocks,
1991 .set = x86_set_hv_spinlocks,
1992 };
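/*
 * Usage sketch: something like "-cpu kvm64,hv-spinlocks=0x1fff" sets the
 * spinlock retry count advertised to a Hyper-V-aware guest; values below
 * the 0xFFF minimum enforced above are rejected.
 */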
1993
1994 /* Convert all '_' in a feature string option name to '-', so that the
1995 * feature name conforms to the QOM property naming rule, which uses '-' instead of '_'.
1996 */
1997 static inline void feat2prop(char *s)
1998 {
1999 while ((s = strchr(s, '_'))) {
2000 *s = '-';
2001 }
2002 }
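/* For example, feat2prop() turns "sse4_1" into "sse4-1" and "lahf_lm" into
 * "lahf-lm".
 */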
2003
2004 /* Compatibility hack to maintain the legacy +-feat semantics,
2005 * where +feat/-feat overwrites any feature set by
2006 * feat=on|off even if the latter is parsed after +-feat
2007 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2008 */
2009 static FeatureWordArray plus_features = { 0 };
2010 static FeatureWordArray minus_features = { 0 };
2011
2012 /* Parse "+feature,-feature,feature=foo" CPU feature string
2013 */
2014 static void x86_cpu_parse_featurestr(const char *typename, char *features,
2015 Error **errp)
2016 {
2017 char *featurestr; /* Single 'key=value" string being parsed */
2018 Error *local_err = NULL;
2019 static bool cpu_globals_initialized;
2020
2021 if (cpu_globals_initialized) {
2022 return;
2023 }
2024 cpu_globals_initialized = true;
2025
2026 if (!features) {
2027 return;
2028 }
2029
2030 for (featurestr = strtok(features, ",");
2031 featurestr && !local_err;
2032 featurestr = strtok(NULL, ",")) {
2033 const char *name;
2034 const char *val = NULL;
2035 char *eq = NULL;
2036 char num[32];
2037 GlobalProperty *prop;
2038
2039 /* Compatibility syntax: */
2040 if (featurestr[0] == '+') {
2041 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
2042 continue;
2043 } else if (featurestr[0] == '-') {
2044 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
2045 continue;
2046 }
2047
2048 eq = strchr(featurestr, '=');
2049 if (eq) {
2050 *eq++ = 0;
2051 val = eq;
2052 } else {
2053 val = "on";
2054 }
2055
2056 feat2prop(featurestr);
2057 name = featurestr;
2058
2059 /* Special case: */
2060 if (!strcmp(name, "tsc-freq")) {
2061 int64_t tsc_freq;
2062 char *err;
2063
2064 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
2065 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
2066 if (tsc_freq < 0 || *err) {
2067 error_setg(errp, "bad numerical value %s", val);
2068 return;
2069 }
2070 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2071 val = num;
2072 name = "tsc-frequency";
2073 }
2074
2075 prop = g_new0(typeof(*prop), 1);
2076 prop->driver = typename;
2077 prop->property = g_strdup(name);
2078 prop->value = g_strdup(val);
2079 prop->errp = &error_fatal;
2080 qdev_prop_register_global(prop);
2081 }
2082
2083 if (local_err) {
2084 error_propagate(errp, local_err);
2085 }
2086 }
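/*
 * Example of the parsing above, for a hypothetical command line
 *
 *   -cpu Haswell,+avx2,-x2apic,pmu=on,tsc-freq=2.5G
 *
 * "+avx2" and "-x2apic" land in plus_features/minus_features (legacy
 * syntax), "pmu=on" is registered as a global property on the CPU type, and
 * "tsc-freq=2.5G" is rewritten to "tsc-frequency=2500000000" before being
 * registered the same way.
 */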
2087
2088 /* Print all cpuid feature names in featureset
2089 */
2090 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2091 {
2092 int bit;
2093 bool first = true;
2094
2095 for (bit = 0; bit < 32; bit++) {
2096 if (featureset[bit]) {
2097 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2098 first = false;
2099 }
2100 }
2101 }
2102
2103 /* Print the supported CPU model definitions and recognized CPUID flags. */
2104 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2105 {
2106 X86CPUDefinition *def;
2107 char buf[256];
2108 int i;
2109
2110 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2111 def = &builtin_x86_defs[i];
2112 snprintf(buf, sizeof(buf), "%s", def->name);
2113 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2114 }
2115 #ifdef CONFIG_KVM
2116 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2117 "KVM processor with all supported host features "
2118 "(only available in KVM mode)");
2119 #endif
2120
2121 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2122 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2123 FeatureWordInfo *fw = &feature_word_info[i];
2124
2125 (*cpu_fprintf)(f, " ");
2126 listflags(f, cpu_fprintf, fw->feat_names);
2127 (*cpu_fprintf)(f, "\n");
2128 }
2129 }
2130
2131 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2132 {
2133 CpuDefinitionInfoList *cpu_list = NULL;
2134 X86CPUDefinition *def;
2135 int i;
2136
2137 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2138 CpuDefinitionInfoList *entry;
2139 CpuDefinitionInfo *info;
2140
2141 def = &builtin_x86_defs[i];
2142 info = g_malloc0(sizeof(*info));
2143 info->name = g_strdup(def->name);
2144
2145 entry = g_malloc0(sizeof(*entry));
2146 entry->value = info;
2147 entry->next = cpu_list;
2148 cpu_list = entry;
2149 }
2150
2151 return cpu_list;
2152 }
2153
2154 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2155 bool migratable_only)
2156 {
2157 FeatureWordInfo *wi = &feature_word_info[w];
2158 uint32_t r;
2159
2160 if (kvm_enabled()) {
2161 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2162 wi->cpuid_ecx,
2163 wi->cpuid_reg);
2164 } else if (tcg_enabled()) {
2165 r = wi->tcg_features;
2166 } else {
2167 return ~0;
2168 }
2169 if (migratable_only) {
2170 r &= x86_cpu_get_migratable_flags(w);
2171 }
2172 return r;
2173 }
2174
2175 /*
2176 * Filters CPU feature words based on host availability of each feature.
2177 *
2178 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2179 */
2180 static int x86_cpu_filter_features(X86CPU *cpu)
2181 {
2182 CPUX86State *env = &cpu->env;
2183 FeatureWord w;
2184 int rv = 0;
2185
2186 for (w = 0; w < FEATURE_WORDS; w++) {
2187 uint32_t host_feat =
2188 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2189 uint32_t requested_features = env->features[w];
2190 env->features[w] &= host_feat;
2191 cpu->filtered_features[w] = requested_features & ~env->features[w];
2192 if (cpu->filtered_features[w]) {
2193 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2194 report_unavailable_features(w, cpu->filtered_features[w]);
2195 }
2196 rv = 1;
2197 }
2198 }
2199
2200 return rv;
2201 }
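/*
 * Worked example of the filtering above, with hypothetical masks: if
 * env->features[w] requests 0x0000000c but the host only supports
 * 0x00000004, env->features[w] is reduced to 0x00000004 and
 * filtered_features[w] records the missing 0x00000008, which
 * report_unavailable_features() then prints when "check" or "enforce" is
 * set.
 */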
2202
2203 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2204 {
2205 PropValue *pv;
2206 for (pv = props; pv->prop; pv++) {
2207 if (!pv->value) {
2208 continue;
2209 }
2210 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2211 &error_abort);
2212 }
2213 }
2214
2215 /* Load data from X86CPUDefinition
2216 */
2217 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2218 {
2219 CPUX86State *env = &cpu->env;
2220 const char *vendor;
2221 char host_vendor[CPUID_VENDOR_SZ + 1];
2222 FeatureWord w;
2223
2224 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2225 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2226 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2227 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2228 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2229 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2230 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2231 for (w = 0; w < FEATURE_WORDS; w++) {
2232 env->features[w] = def->features[w];
2233 }
2234
2235 /* Special cases not set in the X86CPUDefinition structs: */
2236 if (kvm_enabled()) {
2237 if (!kvm_irqchip_in_kernel()) {
2238 x86_cpu_change_kvm_default("x2apic", "off");
2239 }
2240
2241 x86_cpu_apply_props(cpu, kvm_default_props);
2242 }
2243
2244 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2245
2246 /* sysenter isn't supported in compatibility mode on AMD,
2247 * syscall isn't supported in compatibility mode on Intel.
2248 * Normally we advertise the actual CPU vendor, but you can
2249 * override this using the 'vendor' property if you want to use
2250 * KVM's sysenter/syscall emulation in compatibility mode and
2251 * when doing cross-vendor migration.
2252 */
2253 vendor = def->vendor;
2254 if (kvm_enabled()) {
2255 uint32_t ebx = 0, ecx = 0, edx = 0;
2256 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2257 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2258 vendor = host_vendor;
2259 }
2260
2261 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2262
2263 }
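/*
 * Illustration of the vendor override described above: a hypothetical
 * "-cpu qemu64,vendor=AuthenticAMD" replaces whatever vendor string the
 * model (or, under KVM, the host) would otherwise advertise, provided the
 * value is exactly CPUID_VENDOR_SZ (12) characters long.
 */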
2264
2265 X86CPU *cpu_x86_init(const char *cpu_model)
2266 {
2267 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2268 }
2269
2270 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2271 {
2272 X86CPUDefinition *cpudef = data;
2273 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2274
2275 xcc->cpu_def = cpudef;
2276 }
2277
2278 static void x86_register_cpudef_type(X86CPUDefinition *def)
2279 {
2280 char *typename = x86_cpu_type_name(def->name);
2281 TypeInfo ti = {
2282 .name = typename,
2283 .parent = TYPE_X86_CPU,
2284 .class_init = x86_cpu_cpudef_class_init,
2285 .class_data = def,
2286 };
2287
2288 type_register(&ti);
2289 g_free(typename);
2290 }
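/* For example, the "Haswell" entry in builtin_x86_defs is registered here
 * as the QOM type X86_CPU_TYPE_NAME("Haswell"), with the definition stashed
 * in X86CPUClass::cpu_def by x86_cpu_cpudef_class_init().
 */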
2291
2292 #if !defined(CONFIG_USER_ONLY)
2293
2294 void cpu_clear_apic_feature(CPUX86State *env)
2295 {
2296 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2297 }
2298
2299 #endif /* !CONFIG_USER_ONLY */
2300
2301 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2302 uint32_t *eax, uint32_t *ebx,
2303 uint32_t *ecx, uint32_t *edx)
2304 {
2305 X86CPU *cpu = x86_env_get_cpu(env);
2306 CPUState *cs = CPU(cpu);
2307
2308 /* Limit out-of-range index values to the highest supported level */
2309 if (index & 0x80000000) {
2310 if (index > env->cpuid_xlevel) {
2311 if (env->cpuid_xlevel2 > 0) {
2312 /* Handle the Centaur's CPUID instruction. */
2313 if (index > env->cpuid_xlevel2) {
2314 index = env->cpuid_xlevel2;
2315 } else if (index < 0xC0000000) {
2316 index = env->cpuid_xlevel;
2317 }
2318 } else {
2319 /* Intel documentation states that invalid EAX input will
2320 * return the same information as EAX=cpuid_level
2321 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2322 */
2323 index = env->cpuid_level;
2324 }
2325 }
2326 } else {
2327 if (index > env->cpuid_level)
2328 index = env->cpuid_level;
2329 }
2330
2331 switch(index) {
2332 case 0:
2333 *eax = env->cpuid_level;
2334 *ebx = env->cpuid_vendor1;
2335 *edx = env->cpuid_vendor2;
2336 *ecx = env->cpuid_vendor3;
2337 break;
2338 case 1:
2339 *eax = env->cpuid_version;
2340 *ebx = (cpu->apic_id << 24) |
2341 8 << 8; /* CLFLUSH line size in 8-byte quadwords (8 => 64 bytes); Linux wants it. */
2342 *ecx = env->features[FEAT_1_ECX];
2343 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2344 *ecx |= CPUID_EXT_OSXSAVE;
2345 }
2346 *edx = env->features[FEAT_1_EDX];
2347 if (cs->nr_cores * cs->nr_threads > 1) {
2348 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2349 *edx |= CPUID_HT;
2350 }
2351 break;
2352 case 2:
2353 /* cache info: needed for Pentium Pro compatibility */
2354 if (cpu->cache_info_passthrough) {
2355 host_cpuid(index, 0, eax, ebx, ecx, edx);
2356 break;
2357 }
2358 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2359 *ebx = 0;
2360 *ecx = 0;
2361 *edx = (L1D_DESCRIPTOR << 16) | \
2362 (L1I_DESCRIPTOR << 8) | \
2363 (L2_DESCRIPTOR);
2364 break;
2365 case 4:
2366 /* cache info: needed for Core compatibility */
2367 if (cpu->cache_info_passthrough) {
2368 host_cpuid(index, count, eax, ebx, ecx, edx);
2369 *eax &= ~0xFC000000;
2370 } else {
2371 *eax = 0;
2372 switch (count) {
2373 case 0: /* L1 dcache info */
2374 *eax |= CPUID_4_TYPE_DCACHE | \
2375 CPUID_4_LEVEL(1) | \
2376 CPUID_4_SELF_INIT_LEVEL;
2377 *ebx = (L1D_LINE_SIZE - 1) | \
2378 ((L1D_PARTITIONS - 1) << 12) | \
2379 ((L1D_ASSOCIATIVITY - 1) << 22);
2380 *ecx = L1D_SETS - 1;
2381 *edx = CPUID_4_NO_INVD_SHARING;
2382 break;
2383 case 1: /* L1 icache info */
2384 *eax |= CPUID_4_TYPE_ICACHE | \
2385 CPUID_4_LEVEL(1) | \
2386 CPUID_4_SELF_INIT_LEVEL;
2387 *ebx = (L1I_LINE_SIZE - 1) | \
2388 ((L1I_PARTITIONS - 1) << 12) | \
2389 ((L1I_ASSOCIATIVITY - 1) << 22);
2390 *ecx = L1I_SETS - 1;
2391 *edx = CPUID_4_NO_INVD_SHARING;
2392 break;
2393 case 2: /* L2 cache info */
2394 *eax |= CPUID_4_TYPE_UNIFIED | \
2395 CPUID_4_LEVEL(2) | \
2396 CPUID_4_SELF_INIT_LEVEL;
2397 if (cs->nr_threads > 1) {
2398 *eax |= (cs->nr_threads - 1) << 14;
2399 }
2400 *ebx = (L2_LINE_SIZE - 1) | \
2401 ((L2_PARTITIONS - 1) << 12) | \
2402 ((L2_ASSOCIATIVITY - 1) << 22);
2403 *ecx = L2_SETS - 1;
2404 *edx = CPUID_4_NO_INVD_SHARING;
2405 break;
2406 default: /* end of info */
2407 *eax = 0;
2408 *ebx = 0;
2409 *ecx = 0;
2410 *edx = 0;
2411 break;
2412 }
2413 }
2414
2415 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2416 if ((*eax & 31) && cs->nr_cores > 1) {
2417 *eax |= (cs->nr_cores - 1) << 26;
2418 }
2419 break;
2420 case 5:
2421 /* mwait info: needed for Core compatibility */
2422 *eax = 0; /* Smallest monitor-line size in bytes */
2423 *ebx = 0; /* Largest monitor-line size in bytes */
2424 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2425 *edx = 0;
2426 break;
2427 case 6:
2428 /* Thermal and Power Leaf */
2429 *eax = env->features[FEAT_6_EAX];
2430 *ebx = 0;
2431 *ecx = 0;
2432 *edx = 0;
2433 break;
2434 case 7:
2435 /* Structured Extended Feature Flags Enumeration Leaf */
2436 if (count == 0) {
2437 *eax = 0; /* Maximum ECX value for sub-leaves */
2438 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2439 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2440 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2441 *ecx |= CPUID_7_0_ECX_OSPKE;
2442 }
2443 *edx = 0; /* Reserved */
2444 } else {
2445 *eax = 0;
2446 *ebx = 0;
2447 *ecx = 0;
2448 *edx = 0;
2449 }
2450 break;
2451 case 9:
2452 /* Direct Cache Access Information Leaf */
2453 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2454 *ebx = 0;
2455 *ecx = 0;
2456 *edx = 0;
2457 break;
2458 case 0xA:
2459 /* Architectural Performance Monitoring Leaf */
2460 if (kvm_enabled() && cpu->enable_pmu) {
2461 KVMState *s = cs->kvm_state;
2462
2463 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2464 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2465 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2466 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2467 } else {
2468 *eax = 0;
2469 *ebx = 0;
2470 *ecx = 0;
2471 *edx = 0;
2472 }
2473 break;
2474 case 0xB:
2475 /* Extended Topology Enumeration Leaf */
2476 if (!cpu->enable_cpuid_0xb) {
2477 *eax = *ebx = *ecx = *edx = 0;
2478 break;
2479 }
2480
2481 *ecx = count & 0xff;
2482 *edx = cpu->apic_id;
2483
2484 switch (count) {
2485 case 0:
2486 *eax = apicid_core_offset(smp_cores, smp_threads);
2487 *ebx = smp_threads;
2488 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2489 break;
2490 case 1:
2491 *eax = apicid_pkg_offset(smp_cores, smp_threads);
2492 *ebx = smp_cores * smp_threads;
2493 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2494 break;
2495 default:
2496 *eax = 0;
2497 *ebx = 0;
2498 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2499 }
2500
2501 assert(!(*eax & ~0x1f));
2502 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2503 break;
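/* Example for the leaf above, assuming "-smp 8,cores=4,threads=2":
 * sub-leaf 0 reports EAX=1 (bits to shift out the SMT ID) and EBX=2 threads
 * per core; sub-leaf 1 reports EAX=3 (bits to shift out core+SMT IDs) and
 * EBX=8 logical processors per package; EDX always carries the full APIC ID.
 */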
2504 case 0xD: {
2505 KVMState *s = cs->kvm_state;
2506 uint64_t ena_mask;
2507 int i;
2508
2509 /* Processor Extended State */
2510 *eax = 0;
2511 *ebx = 0;
2512 *ecx = 0;
2513 *edx = 0;
2514 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2515 break;
2516 }
2517 if (kvm_enabled()) {
2518 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2519 ena_mask <<= 32;
2520 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2521 } else {
2522 ena_mask = -1;
2523 }
2524
2525 if (count == 0) {
2526 *ecx = 0x240;
2527 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2528 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2529 if ((env->features[esa->feature] & esa->bits) == esa->bits
2530 && ((ena_mask >> i) & 1) != 0) {
2531 if (i < 32) {
2532 *eax |= 1u << i;
2533 } else {
2534 *edx |= 1u << (i - 32);
2535 }
2536 *ecx = MAX(*ecx, esa->offset + esa->size);
2537 }
2538 }
2539 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2540 *ebx = *ecx;
2541 } else if (count == 1) {
2542 *eax = env->features[FEAT_XSAVE];
2543 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2544 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2545 if ((env->features[esa->feature] & esa->bits) == esa->bits
2546 && ((ena_mask >> count) & 1) != 0) {
2547 *eax = esa->size;
2548 *ebx = esa->offset;
2549 }
2550 }
2551 break;
2552 }
2553 case 0x80000000:
2554 *eax = env->cpuid_xlevel;
2555 *ebx = env->cpuid_vendor1;
2556 *edx = env->cpuid_vendor2;
2557 *ecx = env->cpuid_vendor3;
2558 break;
2559 case 0x80000001:
2560 *eax = env->cpuid_version;
2561 *ebx = 0;
2562 *ecx = env->features[FEAT_8000_0001_ECX];
2563 *edx = env->features[FEAT_8000_0001_EDX];
2564
2565 /* The Linux kernel checks for the CMPLegacy bit and
2566 * discards multiple thread information if it is set.
2567 * So don't set it here for Intel to make Linux guests happy.
2568 */
2569 if (cs->nr_cores * cs->nr_threads > 1) {
2570 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2571 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2572 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2573 *ecx |= 1 << 1; /* CmpLegacy bit */
2574 }
2575 }
2576 break;
2577 case 0x80000002:
2578 case 0x80000003:
2579 case 0x80000004:
2580 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2581 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2582 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2583 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2584 break;
2585 case 0x80000005:
2586 /* cache info (L1 cache) */
2587 if (cpu->cache_info_passthrough) {
2588 host_cpuid(index, 0, eax, ebx, ecx, edx);
2589 break;
2590 }
2591 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2592 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2593 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2594 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2595 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2596 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2597 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2598 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2599 break;
2600 case 0x80000006:
2601 /* cache info (L2 cache) */
2602 if (cpu->cache_info_passthrough) {
2603 host_cpuid(index, 0, eax, ebx, ecx, edx);
2604 break;
2605 }
2606 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2607 (L2_DTLB_2M_ENTRIES << 16) | \
2608 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2609 (L2_ITLB_2M_ENTRIES);
2610 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2611 (L2_DTLB_4K_ENTRIES << 16) | \
2612 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2613 (L2_ITLB_4K_ENTRIES);
2614 *ecx = (L2_SIZE_KB_AMD << 16) | \
2615 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2616 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2617 *edx = ((L3_SIZE_KB/512) << 18) | \
2618 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2619 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2620 break;
2621 case 0x80000007:
2622 *eax = 0;
2623 *ebx = 0;
2624 *ecx = 0;
2625 *edx = env->features[FEAT_8000_0007_EDX];
2626 break;
2627 case 0x80000008:
2628 /* virtual & phys address size in low 2 bytes. */
2629 /* XXX: This value must match the one used in the MMU code. */
2630 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2631 /* 64 bit processor */
2632 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2633 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2634 } else {
2635 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2636 *eax = 0x00000024; /* 36 bits physical */
2637 } else {
2638 *eax = 0x00000020; /* 32 bits physical */
2639 }
2640 }
2641 *ebx = 0;
2642 *ecx = 0;
2643 *edx = 0;
2644 if (cs->nr_cores * cs->nr_threads > 1) {
2645 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2646 }
2647 break;
2648 case 0x8000000A:
2649 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2650 *eax = 0x00000001; /* SVM Revision */
2651 *ebx = 0x00000010; /* nr of ASIDs */
2652 *ecx = 0;
2653 *edx = env->features[FEAT_SVM]; /* optional features */
2654 } else {
2655 *eax = 0;
2656 *ebx = 0;
2657 *ecx = 0;
2658 *edx = 0;
2659 }
2660 break;
2661 case 0xC0000000:
2662 *eax = env->cpuid_xlevel2;
2663 *ebx = 0;
2664 *ecx = 0;
2665 *edx = 0;
2666 break;
2667 case 0xC0000001:
2668 /* Support for VIA CPU's CPUID instruction */
2669 *eax = env->cpuid_version;
2670 *ebx = 0;
2671 *ecx = 0;
2672 *edx = env->features[FEAT_C000_0001_EDX];
2673 break;
2674 case 0xC0000002:
2675 case 0xC0000003:
2676 case 0xC0000004:
2677 /* Reserved for the future, and now filled with zero */
2678 *eax = 0;
2679 *ebx = 0;
2680 *ecx = 0;
2681 *edx = 0;
2682 break;
2683 default:
2684 /* reserved values: zero */
2685 *eax = 0;
2686 *ebx = 0;
2687 *ecx = 0;
2688 *edx = 0;
2689 break;
2690 }
2691 }
2692
2693 /* CPUClass::reset() */
2694 static void x86_cpu_reset(CPUState *s)
2695 {
2696 X86CPU *cpu = X86_CPU(s);
2697 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2698 CPUX86State *env = &cpu->env;
2699 target_ulong cr4;
2700 uint64_t xcr0;
2701 int i;
2702
2703 xcc->parent_reset(s);
2704
2705 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2706
2707 tlb_flush(s, 1);
2708
2709 env->old_exception = -1;
2710
2711 /* init to reset state */
2712
2713 #ifdef CONFIG_SOFTMMU
2714 env->hflags |= HF_SOFTMMU_MASK;
2715 #endif
2716 env->hflags2 |= HF2_GIF_MASK;
2717
2718 cpu_x86_update_cr0(env, 0x60000010);
2719 env->a20_mask = ~0x0;
2720 env->smbase = 0x30000;
2721
2722 env->idt.limit = 0xffff;
2723 env->gdt.limit = 0xffff;
2724 env->ldt.limit = 0xffff;
2725 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2726 env->tr.limit = 0xffff;
2727 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2728
2729 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2730 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2731 DESC_R_MASK | DESC_A_MASK);
2732 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2733 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2734 DESC_A_MASK);
2735 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2736 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2737 DESC_A_MASK);
2738 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2739 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2740 DESC_A_MASK);
2741 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2742 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2743 DESC_A_MASK);
2744 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2745 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2746 DESC_A_MASK);
2747
2748 env->eip = 0xfff0;
2749 env->regs[R_EDX] = env->cpuid_version;
2750
2751 env->eflags = 0x2;
2752
2753 /* FPU init */
2754 for (i = 0; i < 8; i++) {
2755 env->fptags[i] = 1;
2756 }
2757 cpu_set_fpuc(env, 0x37f);
2758
2759 env->mxcsr = 0x1f80;
2760 /* All units are in INIT state. */
2761 env->xstate_bv = 0;
2762
2763 env->pat = 0x0007040600070406ULL;
2764 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2765
2766 memset(env->dr, 0, sizeof(env->dr));
2767 env->dr[6] = DR6_FIXED_1;
2768 env->dr[7] = DR7_FIXED_1;
2769 cpu_breakpoint_remove_all(s, BP_CPU);
2770 cpu_watchpoint_remove_all(s, BP_CPU);
2771
2772 cr4 = 0;
2773 xcr0 = XSTATE_FP_MASK;
2774
2775 #ifdef CONFIG_USER_ONLY
2776 /* Enable all the features for user-mode. */
2777 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2778 xcr0 |= XSTATE_SSE_MASK;
2779 }
2780 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2781 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2782 if ((env->features[esa->feature] & esa->bits) == esa->bits) {
2783 xcr0 |= 1ull << i;
2784 }
2785 }
2786
2787 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2788 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2789 }
2790 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2791 cr4 |= CR4_FSGSBASE_MASK;
2792 }
2793 #endif
2794
2795 env->xcr0 = xcr0;
2796 cpu_x86_update_cr4(env, cr4);
2797
2798 /*
2799 * SDM 11.11.5 requires:
2800 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2801 * - IA32_MTRR_PHYSMASKn.V = 0
2802 * All other bits are undefined. For simplification, zero it all.
2803 */
2804 env->mtrr_deftype = 0;
2805 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2806 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2807
2808 #if !defined(CONFIG_USER_ONLY)
2809 /* We hard-wire the BSP to the first CPU. */
2810 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2811
2812 s->halted = !cpu_is_bsp(cpu);
2813
2814 if (kvm_enabled()) {
2815 kvm_arch_reset_vcpu(cpu);
2816 }
2817 #endif
2818 }
2819
2820 #ifndef CONFIG_USER_ONLY
2821 bool cpu_is_bsp(X86CPU *cpu)
2822 {
2823 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2824 }
2825
2826 /* TODO: remove me when reset over the QOM tree is implemented */
2827 static void x86_cpu_machine_reset_cb(void *opaque)
2828 {
2829 X86CPU *cpu = opaque;
2830 cpu_reset(CPU(cpu));
2831 }
2832 #endif
2833
2834 static void mce_init(X86CPU *cpu)
2835 {
2836 CPUX86State *cenv = &cpu->env;
2837 unsigned int bank;
2838
2839 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2840 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2841 (CPUID_MCE | CPUID_MCA)) {
2842 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2843 cenv->mcg_ctl = ~(uint64_t)0;
2844 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2845 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2846 }
2847 }
2848 }
2849
2850 #ifndef CONFIG_USER_ONLY
2851 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2852 {
2853 APICCommonState *apic;
2854 const char *apic_type = "apic";
2855
2856 if (kvm_apic_in_kernel()) {
2857 apic_type = "kvm-apic";
2858 } else if (xen_enabled()) {
2859 apic_type = "xen-apic";
2860 }
2861
2862 cpu->apic_state = DEVICE(object_new(apic_type));
2863
2864 object_property_add_child(OBJECT(cpu), "apic",
2865 OBJECT(cpu->apic_state), NULL);
2866 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2867 /* TODO: convert to link<> */
2868 apic = APIC_COMMON(cpu->apic_state);
2869 apic->cpu = cpu;
2870 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2871 }
2872
2873 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2874 {
2875 APICCommonState *apic;
2876 static bool apic_mmio_map_once;
2877
2878 if (cpu->apic_state == NULL) {
2879 return;
2880 }
2881 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2882 errp);
2883
2884 /* Map APIC MMIO area */
2885 apic = APIC_COMMON(cpu->apic_state);
2886 if (!apic_mmio_map_once) {
2887 memory_region_add_subregion_overlap(get_system_memory(),
2888 apic->apicbase &
2889 MSR_IA32_APICBASE_BASE,
2890 &apic->io_memory,
2891 0x1000);
2892 apic_mmio_map_once = true;
2893 }
2894 }
2895
2896 static void x86_cpu_machine_done(Notifier *n, void *unused)
2897 {
2898 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2899 MemoryRegion *smram =
2900 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2901
2902 if (smram) {
2903 cpu->smram = g_new(MemoryRegion, 1);
2904 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2905 smram, 0, 1ull << 32);
2906 memory_region_set_enabled(cpu->smram, false);
2907 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2908 }
2909 }
2910 #else
2911 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2912 {
2913 }
2914 #endif
2915
2916
2917 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2918 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2919 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2920 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2921 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2922 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2923 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2924 {
2925 CPUState *cs = CPU(dev);
2926 X86CPU *cpu = X86_CPU(dev);
2927 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2928 CPUX86State *env = &cpu->env;
2929 Error *local_err = NULL;
2930 static bool ht_warned;
2931 FeatureWord w;
2932
2933 if (xcc->kvm_required && !kvm_enabled()) {
2934 char *name = x86_cpu_class_get_model_name(xcc);
2935 error_setg(&local_err, "CPU model '%s' requires KVM", name);
2936 g_free(name);
2937 goto out;
2938 }
2939
2940 if (cpu->apic_id < 0) {
2941 error_setg(errp, "apic-id property was not initialized properly");
2942 return;
2943 }
2944
2945 /*TODO: cpu->host_features incorrectly overwrites features
2946 * set using "feat=on|off". Once we fix this, we can convert
2947 * plus_features & minus_features to global properties
2948 * inside x86_cpu_parse_featurestr() too.
2949 */
2950 if (cpu->host_features) {
2951 for (w = 0; w < FEATURE_WORDS; w++) {
2952 env->features[w] =
2953 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2954 }
2955 }
2956
2957 for (w = 0; w < FEATURE_WORDS; w++) {
2958 cpu->env.features[w] |= plus_features[w];
2959 cpu->env.features[w] &= ~minus_features[w];
2960 }
2961
2962 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2963 env->cpuid_level = 7;
2964 }
2965
2966 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2967 error_setg(&local_err,
2968 kvm_enabled() ?
2969 "Host doesn't support requested features" :
2970 "TCG doesn't support requested features");
2971 goto out;
2972 }
2973
2974 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2975 * CPUID[1].EDX.
2976 */
2977 if (IS_AMD_CPU(env)) {
2978 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2979 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2980 & CPUID_EXT2_AMD_ALIASES);
2981 }
2982
2983
2984 cpu_exec_init(cs, &error_abort);
2985
2986 if (tcg_enabled()) {
2987 tcg_x86_init();
2988 }
2989
2990 #ifndef CONFIG_USER_ONLY
2991 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2992
2993 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2994 x86_cpu_apic_create(cpu, &local_err);
2995 if (local_err != NULL) {
2996 goto out;
2997 }
2998 }
2999 #endif
3000
3001 mce_init(cpu);
3002
3003 #ifndef CONFIG_USER_ONLY
3004 if (tcg_enabled()) {
3005 AddressSpace *newas = g_new(AddressSpace, 1);
3006
3007 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3008 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3009
3010 /* Outer container... */
3011 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3012 memory_region_set_enabled(cpu->cpu_as_root, true);
3013
3014 /* ... with two regions inside: normal system memory with low
3015 * priority, and...
3016 */
3017 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3018 get_system_memory(), 0, ~0ull);
3019 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3020 memory_region_set_enabled(cpu->cpu_as_mem, true);
3021 address_space_init(newas, cpu->cpu_as_root, "CPU");
3022 cs->num_ases = 1;
3023 cpu_address_space_init(cs, newas, 0);
3024
3025 /* ... SMRAM with higher priority, linked from /machine/smram. */
3026 cpu->machine_done.notify = x86_cpu_machine_done;
3027 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3028 }
3029 #endif
3030
3031 qemu_init_vcpu(cs);
3032
3033 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3034 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3035 * based on inputs (sockets,cores,threads), it is still better to give
3036 * users a warning.
3037 *
3038 * NOTE: the following code has to run after qemu_init_vcpu(). Otherwise
3039 * cs->nr_threads hasn't been populated yet and the check is incorrect.
3040 */
3041 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3042 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3043 " -smp options properly.");
3044 ht_warned = true;
3045 }
3046
3047 x86_cpu_apic_realize(cpu, &local_err);
3048 if (local_err != NULL) {
3049 goto out;
3050 }
3051 cpu_reset(cs);
3052
3053 xcc->parent_realize(dev, &local_err);
3054
3055 out:
3056 if (local_err != NULL) {
3057 error_propagate(errp, local_err);
3058 return;
3059 }
3060 }
3061
3062 typedef struct BitProperty {
3063 uint32_t *ptr;
3064 uint32_t mask;
3065 } BitProperty;
3066
3067 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3068 void *opaque, Error **errp)
3069 {
3070 BitProperty *fp = opaque;
3071 bool value = (*fp->ptr & fp->mask) == fp->mask;
3072 visit_type_bool(v, name, &value, errp);
3073 }
3074
3075 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3076 void *opaque, Error **errp)
3077 {
3078 DeviceState *dev = DEVICE(obj);
3079 BitProperty *fp = opaque;
3080 Error *local_err = NULL;
3081 bool value;
3082
3083 if (dev->realized) {
3084 qdev_prop_set_after_realize(dev, name, errp);
3085 return;
3086 }
3087
3088 visit_type_bool(v, name, &value, &local_err);
3089 if (local_err) {
3090 error_propagate(errp, local_err);
3091 return;
3092 }
3093
3094 if (value) {
3095 *fp->ptr |= fp->mask;
3096 } else {
3097 *fp->ptr &= ~fp->mask;
3098 }
3099 }
3100
3101 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3102 void *opaque)
3103 {
3104 BitProperty *prop = opaque;
3105 g_free(prop);
3106 }
3107
3108 /* Register a boolean property to get/set a single bit in a uint32_t field.
3109 *
3110 * The same property name can be registered multiple times to make it affect
3111 * multiple bits in the same FeatureWord. In that case, the getter will return
3112 * true only if all bits are set.
3113 */
3114 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3115 const char *prop_name,
3116 uint32_t *field,
3117 int bitnr)
3118 {
3119 BitProperty *fp;
3120 ObjectProperty *op;
3121 uint32_t mask = (1UL << bitnr);
3122
3123 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3124 if (op) {
3125 fp = op->opaque;
3126 assert(fp->ptr == field);
3127 fp->mask |= mask;
3128 } else {
3129 fp = g_new0(BitProperty, 1);
3130 fp->ptr = field;
3131 fp->mask = mask;
3132 object_property_add(OBJECT(cpu), prop_name, "bool",
3133 x86_cpu_get_bit_prop,
3134 x86_cpu_set_bit_prop,
3135 x86_cpu_release_bit_prop, fp, &error_abort);
3136 }
3137 }
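/* Example of the registration above: FEAT_7_0_EBX bit 5 ("avx2") becomes a
 * boolean QOM property, so "-cpu ...,avx2=on" ends up flipping exactly that
 * bit in env->features[FEAT_7_0_EBX] through x86_cpu_set_bit_prop().
 */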
3138
3139 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3140 FeatureWord w,
3141 int bitnr)
3142 {
3143 Object *obj = OBJECT(cpu);
3144 int i;
3145 char **names;
3146 FeatureWordInfo *fi = &feature_word_info[w];
3147
3148 if (!fi->feat_names) {
3149 return;
3150 }
3151 if (!fi->feat_names[bitnr]) {
3152 return;
3153 }
3154
3155 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3156
3157 feat2prop(names[0]);
3158 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3159
3160 for (i = 1; names[i]; i++) {
3161 feat2prop(names[i]);
3162 object_property_add_alias(obj, names[i], obj, names[0],
3163 &error_abort);
3164 }
3165
3166 g_strfreev(names);
3167 }
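/* For instance, a feat_names entry such as "nx|xd" registers the bit
 * property under "nx" and adds "xd" as an alias pointing at it.
 */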
3168
3169 static void x86_cpu_initfn(Object *obj)
3170 {
3171 CPUState *cs = CPU(obj);
3172 X86CPU *cpu = X86_CPU(obj);
3173 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3174 CPUX86State *env = &cpu->env;
3175 FeatureWord w;
3176
3177 cs->env_ptr = env;
3178
3179 object_property_add(obj, "family", "int",
3180 x86_cpuid_version_get_family,
3181 x86_cpuid_version_set_family, NULL, NULL, NULL);
3182 object_property_add(obj, "model", "int",
3183 x86_cpuid_version_get_model,
3184 x86_cpuid_version_set_model, NULL, NULL, NULL);
3185 object_property_add(obj, "stepping", "int",
3186 x86_cpuid_version_get_stepping,
3187 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3188 object_property_add_str(obj, "vendor",
3189 x86_cpuid_get_vendor,
3190 x86_cpuid_set_vendor, NULL);
3191 object_property_add_str(obj, "model-id",
3192 x86_cpuid_get_model_id,
3193 x86_cpuid_set_model_id, NULL);
3194 object_property_add(obj, "tsc-frequency", "int",
3195 x86_cpuid_get_tsc_freq,
3196 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3197 object_property_add(obj, "apic-id", "int",
3198 x86_cpuid_get_apic_id,
3199 x86_cpuid_set_apic_id, NULL, NULL, NULL);
3200 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3201 x86_cpu_get_feature_words,
3202 NULL, NULL, (void *)env->features, NULL);
3203 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3204 x86_cpu_get_feature_words,
3205 NULL, NULL, (void *)cpu->filtered_features, NULL);
3206
3207 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3208
3209 #ifndef CONFIG_USER_ONLY
3210 /* Any code creating new X86CPU objects has to set apic-id explicitly */
3211 cpu->apic_id = -1;
3212 #endif
3213
3214 for (w = 0; w < FEATURE_WORDS; w++) {
3215 int bitnr;
3216
3217 for (bitnr = 0; bitnr < 32; bitnr++) {
3218 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3219 }
3220 }
3221
3222 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3223 }
3224
3225 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3226 {
3227 X86CPU *cpu = X86_CPU(cs);
3228
3229 return cpu->apic_id;
3230 }
3231
3232 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3233 {
3234 X86CPU *cpu = X86_CPU(cs);
3235
3236 return cpu->env.cr[0] & CR0_PG_MASK;
3237 }
3238
3239 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3240 {
3241 X86CPU *cpu = X86_CPU(cs);
3242
3243 cpu->env.eip = value;
3244 }
3245
3246 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3247 {
3248 X86CPU *cpu = X86_CPU(cs);
3249
3250 cpu->env.eip = tb->pc - tb->cs_base;
3251 }
3252
3253 static bool x86_cpu_has_work(CPUState *cs)
3254 {
3255 X86CPU *cpu = X86_CPU(cs);
3256 CPUX86State *env = &cpu->env;
3257
3258 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3259 CPU_INTERRUPT_POLL)) &&
3260 (env->eflags & IF_MASK)) ||
3261 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3262 CPU_INTERRUPT_INIT |
3263 CPU_INTERRUPT_SIPI |
3264 CPU_INTERRUPT_MCE)) ||
3265 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3266 !(env->hflags & HF_SMM_MASK));
3267 }
3268
3269 static Property x86_cpu_properties[] = {
3270 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3271 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3272 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3273 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3274 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3275 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3276 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3277 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3278 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3279 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3280 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3281 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3282 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3283 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3284 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3285 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3286 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3287 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3288 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
3289 DEFINE_PROP_END_OF_LIST()
3290 };
3291
3292 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3293 {
3294 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3295 CPUClass *cc = CPU_CLASS(oc);
3296 DeviceClass *dc = DEVICE_CLASS(oc);
3297
3298 xcc->parent_realize = dc->realize;
3299 dc->realize = x86_cpu_realizefn;
3300 dc->props = x86_cpu_properties;
3301
3302 xcc->parent_reset = cc->reset;
3303 cc->reset = x86_cpu_reset;
3304 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3305
3306 cc->class_by_name = x86_cpu_class_by_name;
3307 cc->parse_features = x86_cpu_parse_featurestr;
3308 cc->has_work = x86_cpu_has_work;
3309 cc->do_interrupt = x86_cpu_do_interrupt;
3310 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3311 cc->dump_state = x86_cpu_dump_state;
3312 cc->set_pc = x86_cpu_set_pc;
3313 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3314 cc->gdb_read_register = x86_cpu_gdb_read_register;
3315 cc->gdb_write_register = x86_cpu_gdb_write_register;
3316 cc->get_arch_id = x86_cpu_get_arch_id;
3317 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3318 #ifdef CONFIG_USER_ONLY
3319 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3320 #else
3321 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3322 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3323 cc->write_elf64_note = x86_cpu_write_elf64_note;
3324 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3325 cc->write_elf32_note = x86_cpu_write_elf32_note;
3326 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3327 cc->vmsd = &vmstate_x86_cpu;
3328 #endif
3329 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3330 #ifndef CONFIG_USER_ONLY
3331 cc->debug_excp_handler = breakpoint_handler;
3332 #endif
3333 cc->cpu_exec_enter = x86_cpu_exec_enter;
3334 cc->cpu_exec_exit = x86_cpu_exec_exit;
3335
3336 /*
3337 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3338 * object in cpus -> dangling pointer after final object_unref().
3339 */
3340 dc->cannot_destroy_with_object_finalize_yet = true;
3341 }
3342
3343 static const TypeInfo x86_cpu_type_info = {
3344 .name = TYPE_X86_CPU,
3345 .parent = TYPE_CPU,
3346 .instance_size = sizeof(X86CPU),
3347 .instance_init = x86_cpu_initfn,
3348 .abstract = true,
3349 .class_size = sizeof(X86CPUClass),
3350 .class_init = x86_cpu_common_class_init,
3351 };
3352
3353 static void x86_cpu_register_types(void)
3354 {
3355 int i;
3356
3357 type_register_static(&x86_cpu_type_info);
3358 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3359 x86_register_cpudef_type(&builtin_x86_defs[i]);
3360 }
3361 #ifdef CONFIG_KVM
3362 type_register_static(&host_x86_cpu_type_info);
3363 #endif
3364 }
3365
3366 type_init(x86_cpu_register_types)