1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
53 /* Cache topology CPUID constants: */
54
55 /* CPUID Leaf 2 Descriptors */
56
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60
61
62 /* CPUID Leaf 4 constants: */
63
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
68
69 #define CPUID_4_LEVEL(l) ((l) << 5)
70
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
73
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
78
79 #define ASSOC_FULL 0xFF
80
81 /* AMD associativity encoding used in CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
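/*
 * Illustrative expansion of AMD_ENC_ASSOC (the values follow directly from
 * the macro above; the encoded nibbles are what gets reported in CPUID leaf
 * 0x80000006):
 *
 *   AMD_ENC_ASSOC(1)          -> 0x1  (direct mapped)
 *   AMD_ENC_ASSOC(16)         -> 0x8  (16-way)
 *   AMD_ENC_ASSOC(ASSOC_FULL) -> 0xF  (fully associative)
 *   AMD_ENC_ASSOC(3)          -> 0x0  (not an encodable associativity)
 */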
94
95
96 /* Definitions of the hardcoded cache entries we expose: */
97
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
109
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /* FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
121
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /* FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /* FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
133
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
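/*
 * Sanity check of the sizes implied by the CPUID leaf 4 constants above
 * (worked arithmetic only, not new definitions):
 *
 *   L1D/L1I: 64 B line * 8 ways * 64 sets * 1 partition = 32 KiB
 *   L2:      64 B line * 16 ways * 4096 sets * 1 partition = 4 MiB
 *
 * The 4 MiB leaf 4 geometry does not match the 2 MB leaf 2 descriptor or the
 * 512 KB AMD leaf 0x80000006 value; those are the inconsistencies the FIXME
 * comments above point out.
 */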
139
140 /* TLB definitions: */
141
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
146
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
151
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
156
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
161
162
163
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
166 {
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
172 }
173 dst[CPUID_VENDOR_SZ] = '\0';
174 }
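/*
 * Example (illustrative): for the vendor string "GenuineIntel", CPUID leaf 0
 * returns EBX=0x756e6547 ("Genu"), EDX=0x49656e69 ("ineI") and
 * ECX=0x6c65746e ("ntel").  Passed as (vendor1, vendor2, vendor3), the
 * function emits each word's bytes least-significant first, so dst ends up
 * holding "GenuineIntel" followed by a terminating NUL.
 */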
175
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
179 */
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
189 };
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
199 };
200 /* Feature names that are already defined in feature_name[] but are also set in
201  * CPUID[8000_0001].EDX on AMD CPUs don't have their names in
202  * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203  * if and only if the CPU vendor is AMD.
204  */
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
214 };
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
224 };
225
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
235 };
236
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
246 };
247
248 static const char *hyperv_priv_feature_name[] = {
249 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
250 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
251 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
252 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
253 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
254 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257 NULL, NULL, NULL, NULL,
258 NULL, NULL, NULL, NULL,
259 NULL, NULL, NULL, NULL,
260 };
261
262 static const char *hyperv_ident_feature_name[] = {
263 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
264 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
265 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
266 NULL /* hv_create_port */, NULL /* hv_connect_port */,
267 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
268 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
269 NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 };
275
276 static const char *hyperv_misc_feature_name[] = {
277 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
278 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
279 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
280 NULL, NULL,
281 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286 NULL, NULL, NULL, NULL,
287 };
288
289 static const char *svm_feature_name[] = {
290 "npt", "lbrv", "svm_lock", "nrip_save",
291 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
292 NULL, NULL, "pause_filter", NULL,
293 "pfthreshold", NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295 NULL, NULL, NULL, NULL,
296 NULL, NULL, NULL, NULL,
297 NULL, NULL, NULL, NULL,
298 };
299
300 static const char *cpuid_7_0_ebx_feature_name[] = {
301 "fsgsbase", "tsc_adjust", NULL, "bmi1",
302 "hle", "avx2", NULL, "smep",
303 "bmi2", "erms", "invpcid", "rtm",
304 NULL, NULL, "mpx", NULL,
305 "avx512f", "avx512dq", "rdseed", "adx",
306 "smap", "avx512ifma", "pcommit", "clflushopt",
307 "clwb", NULL, "avx512pf", "avx512er",
308 "avx512cd", NULL, "avx512bw", "avx512vl",
309 };
310
311 static const char *cpuid_7_0_ecx_feature_name[] = {
312 NULL, "avx512vbmi", "umip", "pku",
313 "ospke", NULL, NULL, NULL,
314 NULL, NULL, NULL, NULL,
315 NULL, NULL, NULL, NULL,
316 NULL, NULL, NULL, NULL,
317 NULL, NULL, "rdpid", NULL,
318 NULL, NULL, NULL, NULL,
319 NULL, NULL, NULL, NULL,
320 };
321
322 static const char *cpuid_apm_edx_feature_name[] = {
323 NULL, NULL, NULL, NULL,
324 NULL, NULL, NULL, NULL,
325 "invtsc", NULL, NULL, NULL,
326 NULL, NULL, NULL, NULL,
327 NULL, NULL, NULL, NULL,
328 NULL, NULL, NULL, NULL,
329 NULL, NULL, NULL, NULL,
330 NULL, NULL, NULL, NULL,
331 };
332
333 static const char *cpuid_xsave_feature_name[] = {
334 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
335 NULL, NULL, NULL, NULL,
336 NULL, NULL, NULL, NULL,
337 NULL, NULL, NULL, NULL,
338 NULL, NULL, NULL, NULL,
339 NULL, NULL, NULL, NULL,
340 NULL, NULL, NULL, NULL,
341 NULL, NULL, NULL, NULL,
342 };
343
344 static const char *cpuid_6_feature_name[] = {
345 NULL, NULL, "arat", NULL,
346 NULL, NULL, NULL, NULL,
347 NULL, NULL, NULL, NULL,
348 NULL, NULL, NULL, NULL,
349 NULL, NULL, NULL, NULL,
350 NULL, NULL, NULL, NULL,
351 NULL, NULL, NULL, NULL,
352 NULL, NULL, NULL, NULL,
353 };
354
355 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
356 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
357 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
358 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
359 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
360 CPUID_PSE36 | CPUID_FXSR)
361 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
362 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
363 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
364 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
365 CPUID_PAE | CPUID_SEP | CPUID_APIC)
366
367 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
368 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
369 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
370 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
371 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
372 /* partly implemented:
373 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
374 /* missing:
375 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
376 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
377 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
378 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
379 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
380 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
381 /* missing:
382 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
383 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
384 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
385 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
386 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
387
388 #ifdef TARGET_X86_64
389 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
390 #else
391 #define TCG_EXT2_X86_64_FEATURES 0
392 #endif
393
394 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
395 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
396 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
397 TCG_EXT2_X86_64_FEATURES)
398 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
399 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
400 #define TCG_EXT4_FEATURES 0
401 #define TCG_SVM_FEATURES 0
402 #define TCG_KVM_FEATURES 0
403 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
404 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
405 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
406 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
407 CPUID_7_0_EBX_ERMS)
408 /* missing:
409 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
410 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
411 CPUID_7_0_EBX_RDSEED */
412 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
413 #define TCG_APM_FEATURES 0
414 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
415 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
416 /* missing:
417 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
418
419 typedef struct FeatureWordInfo {
420 const char **feat_names;
421 uint32_t cpuid_eax; /* Input EAX for CPUID */
422 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
423 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
424 int cpuid_reg; /* output register (R_* constant) */
425 uint32_t tcg_features; /* Feature flags supported by TCG */
426 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
427 } FeatureWordInfo;
428
429 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
430 [FEAT_1_EDX] = {
431 .feat_names = feature_name,
432 .cpuid_eax = 1, .cpuid_reg = R_EDX,
433 .tcg_features = TCG_FEATURES,
434 },
435 [FEAT_1_ECX] = {
436 .feat_names = ext_feature_name,
437 .cpuid_eax = 1, .cpuid_reg = R_ECX,
438 .tcg_features = TCG_EXT_FEATURES,
439 },
440 [FEAT_8000_0001_EDX] = {
441 .feat_names = ext2_feature_name,
442 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
443 .tcg_features = TCG_EXT2_FEATURES,
444 },
445 [FEAT_8000_0001_ECX] = {
446 .feat_names = ext3_feature_name,
447 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
448 .tcg_features = TCG_EXT3_FEATURES,
449 },
450 [FEAT_C000_0001_EDX] = {
451 .feat_names = ext4_feature_name,
452 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
453 .tcg_features = TCG_EXT4_FEATURES,
454 },
455 [FEAT_KVM] = {
456 .feat_names = kvm_feature_name,
457 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
458 .tcg_features = TCG_KVM_FEATURES,
459 },
460 [FEAT_HYPERV_EAX] = {
461 .feat_names = hyperv_priv_feature_name,
462 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
463 },
464 [FEAT_HYPERV_EBX] = {
465 .feat_names = hyperv_ident_feature_name,
466 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
467 },
468 [FEAT_HYPERV_EDX] = {
469 .feat_names = hyperv_misc_feature_name,
470 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
471 },
472 [FEAT_SVM] = {
473 .feat_names = svm_feature_name,
474 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
475 .tcg_features = TCG_SVM_FEATURES,
476 },
477 [FEAT_7_0_EBX] = {
478 .feat_names = cpuid_7_0_ebx_feature_name,
479 .cpuid_eax = 7,
480 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
481 .cpuid_reg = R_EBX,
482 .tcg_features = TCG_7_0_EBX_FEATURES,
483 },
484 [FEAT_7_0_ECX] = {
485 .feat_names = cpuid_7_0_ecx_feature_name,
486 .cpuid_eax = 7,
487 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
488 .cpuid_reg = R_ECX,
489 .tcg_features = TCG_7_0_ECX_FEATURES,
490 },
491 [FEAT_8000_0007_EDX] = {
492 .feat_names = cpuid_apm_edx_feature_name,
493 .cpuid_eax = 0x80000007,
494 .cpuid_reg = R_EDX,
495 .tcg_features = TCG_APM_FEATURES,
496 .unmigratable_flags = CPUID_APM_INVTSC,
497 },
498 [FEAT_XSAVE] = {
499 .feat_names = cpuid_xsave_feature_name,
500 .cpuid_eax = 0xd,
501 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
502 .cpuid_reg = R_EAX,
503 .tcg_features = TCG_XSAVE_FEATURES,
504 },
505 [FEAT_6_EAX] = {
506 .feat_names = cpuid_6_feature_name,
507 .cpuid_eax = 6, .cpuid_reg = R_EAX,
508 .tcg_features = TCG_6_EAX_FEATURES,
509 },
510 };
511
512 typedef struct X86RegisterInfo32 {
513 /* Name of register */
514 const char *name;
515 /* QAPI enum value for the register */
516 X86CPURegister32 qapi_enum;
517 } X86RegisterInfo32;
518
519 #define REGISTER(reg) \
520 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
521 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
522 REGISTER(EAX),
523 REGISTER(ECX),
524 REGISTER(EDX),
525 REGISTER(EBX),
526 REGISTER(ESP),
527 REGISTER(EBP),
528 REGISTER(ESI),
529 REGISTER(EDI),
530 };
531 #undef REGISTER
532
533 const ExtSaveArea x86_ext_save_areas[] = {
534 [XSTATE_YMM_BIT] =
535 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
536 .offset = offsetof(X86XSaveArea, avx_state),
537 .size = sizeof(XSaveAVX) },
538 [XSTATE_BNDREGS_BIT] =
539 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
540 .offset = offsetof(X86XSaveArea, bndreg_state),
541 .size = sizeof(XSaveBNDREG) },
542 [XSTATE_BNDCSR_BIT] =
543 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
544 .offset = offsetof(X86XSaveArea, bndcsr_state),
545 .size = sizeof(XSaveBNDCSR) },
546 [XSTATE_OPMASK_BIT] =
547 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
548 .offset = offsetof(X86XSaveArea, opmask_state),
549 .size = sizeof(XSaveOpmask) },
550 [XSTATE_ZMM_Hi256_BIT] =
551 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
552 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
553 .size = sizeof(XSaveZMM_Hi256) },
554 [XSTATE_Hi16_ZMM_BIT] =
555 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
556 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
557 .size = sizeof(XSaveHi16_ZMM) },
558 [XSTATE_PKRU_BIT] =
559 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
560 .offset = offsetof(X86XSaveArea, pkru_state),
561 .size = sizeof(XSavePKRU) },
562 };
563
564 const char *get_register_name_32(unsigned int reg)
565 {
566 if (reg >= CPU_NB_REGS32) {
567 return NULL;
568 }
569 return x86_reg_info_32[reg].name;
570 }
571
572 /*
573 * Returns the set of feature flags that are supported and migratable by
574 * QEMU, for a given FeatureWord.
575 */
576 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
577 {
578 FeatureWordInfo *wi = &feature_word_info[w];
579 uint32_t r = 0;
580 int i;
581
582 for (i = 0; i < 32; i++) {
583 uint32_t f = 1U << i;
584 /* If the feature name is unknown, it is not supported by QEMU yet */
585 if (!wi->feat_names[i]) {
586 continue;
587 }
588 /* Skip features known to QEMU, but explicitly marked as unmigratable */
589 if (wi->unmigratable_flags & f) {
590 continue;
591 }
592 r |= f;
593 }
594 return r;
595 }
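/*
 * Example (derived from the table above): for FEAT_8000_0007_EDX only the
 * "invtsc" bit has a name, but it is also listed in unmigratable_flags
 * (CPUID_APM_INVTSC), so x86_cpu_get_migratable_flags(FEAT_8000_0007_EDX)
 * returns 0; invtsc is known to QEMU yet excluded from the migratable set.
 */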
596
597 void host_cpuid(uint32_t function, uint32_t count,
598 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
599 {
600 uint32_t vec[4];
601
602 #ifdef __x86_64__
603 asm volatile("cpuid"
604 : "=a"(vec[0]), "=b"(vec[1]),
605 "=c"(vec[2]), "=d"(vec[3])
606 : "0"(function), "c"(count) : "cc");
607 #elif defined(__i386__)
608 asm volatile("pusha \n\t"
609 "cpuid \n\t"
610 "mov %%eax, 0(%2) \n\t"
611 "mov %%ebx, 4(%2) \n\t"
612 "mov %%ecx, 8(%2) \n\t"
613 "mov %%edx, 12(%2) \n\t"
614 "popa"
615 : : "a"(function), "c"(count), "S"(vec)
616 : "memory", "cc");
617 #else
618 abort();
619 #endif
620
621 if (eax)
622 *eax = vec[0];
623 if (ebx)
624 *ebx = vec[1];
625 if (ecx)
626 *ecx = vec[2];
627 if (edx)
628 *edx = vec[3];
629 }
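/*
 * Usage sketch (illustrative): any output pointer may be NULL when the
 * caller does not need that register, and 'count' selects the sub-leaf for
 * leaves that use ECX as an index, e.g.:
 *
 *   uint32_t ebx;
 *   host_cpuid(7, 0, NULL, &ebx, NULL, NULL);   returns CPUID.(EAX=7,ECX=0):EBX
 */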
630
631 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
632
633 /* General substring compare of *[s1..e1) and *[s2..e2). sx is the start of
634  * a substring; ex, if not NULL, points to the first char after the substring,
635  * otherwise the string is assumed to be sized by a terminating nul.
636  * Return the lexical ordering of *s1:*s2.
637  */
638 static int sstrcmp(const char *s1, const char *e1,
639 const char *s2, const char *e2)
640 {
641 for (;;) {
642 if (!*s1 || !*s2 || *s1 != *s2)
643 return (*s1 - *s2);
644 ++s1, ++s2;
645 if (s1 == e1 && s2 == e2)
646 return (0);
647 else if (s1 == e1)
648 return (*s2);
649 else if (s2 == e2)
650 return (*s1);
651 }
652 }
653
654 /* Compare *[s..e) to *altstr. *altstr may be a simple string or multiple
655  * '|'-delimited (possibly empty) strings, in which case the search for a match
656  * proceeds left to right through the alternatives. Return 0 for success,
657  * non-zero otherwise.
658  */
659 static int altcmp(const char *s, const char *e, const char *altstr)
660 {
661 const char *p, *q;
662
663 for (q = p = altstr; ; ) {
664 while (*p && *p != '|')
665 ++p;
666 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
667 return (0);
668 if (!*p)
669 return (1);
670 else
671 q = ++p;
672 }
673 }
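/*
 * Example (illustrative): with the alias string used in ext_feature_name[],
 *
 *   altcmp(s, e, "sse4.2|sse4_2")
 *
 * returns 0 when *[s..e) is either "sse4.2" or "sse4_2", so both spellings
 * select the same feature bit.
 */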
674
675 /* Search featureset for the flag *[s..e); if found, set the corresponding bit
676  * in *pval and return true, otherwise return false.
677  */
678 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
679 const char **featureset)
680 {
681 uint32_t mask;
682 const char **ppc;
683 bool found = false;
684
685 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
686 if (*ppc && !altcmp(s, e, *ppc)) {
687 *pval |= mask;
688 found = true;
689 }
690 }
691 return found;
692 }
693
694 static void add_flagname_to_bitmaps(const char *flagname,
695 FeatureWordArray words,
696 Error **errp)
697 {
698 FeatureWord w;
699 for (w = 0; w < FEATURE_WORDS; w++) {
700 FeatureWordInfo *wi = &feature_word_info[w];
701 if (wi->feat_names &&
702 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
703 break;
704 }
705 }
706 if (w == FEATURE_WORDS) {
707 error_setg(errp, "CPU feature %s not found", flagname);
708 }
709 }
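/*
 * Usage sketch (illustrative, not a call site in this file):
 *
 *   FeatureWordArray plus_features = { 0 };
 *   Error *err = NULL;
 *
 *   add_flagname_to_bitmaps("avx2", plus_features, &err);
 *
 * On success this sets the avx2 bit in plus_features[FEAT_7_0_EBX]; an
 * unknown flag name leaves the array untouched and sets *err instead.
 */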
710
711 /* CPU class name definitions: */
712
713 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
714 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
715
716 /* Return the type name for a given CPU model name.
717  * The caller is responsible for freeing the returned string.
718  */
719 static char *x86_cpu_type_name(const char *model_name)
720 {
721 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
722 }
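/*
 * Example (illustrative): X86_CPU_TYPE_SUFFIX is "-" TYPE_X86_CPU, so on an
 * x86_64 target x86_cpu_type_name("Haswell") is expected to return
 * "Haswell-x86_64-cpu" (and "Haswell-i386-cpu" on a 32-bit target); the
 * caller must g_free() the result.
 */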
723
724 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
725 {
726 ObjectClass *oc;
727 char *typename;
728
729 if (cpu_model == NULL) {
730 return NULL;
731 }
732
733 typename = x86_cpu_type_name(cpu_model);
734 oc = object_class_by_name(typename);
735 g_free(typename);
736 return oc;
737 }
738
739 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
740 {
741 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
742 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
743 return g_strndup(class_name,
744 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
745 }
746
747 struct X86CPUDefinition {
748 const char *name;
749 uint32_t level;
750 uint32_t xlevel;
751 uint32_t xlevel2;
752 /* vendor is a zero-terminated, 12-character ASCII string */
753 char vendor[CPUID_VENDOR_SZ + 1];
754 int family;
755 int model;
756 int stepping;
757 FeatureWordArray features;
758 char model_id[48];
759 };
760
761 static X86CPUDefinition builtin_x86_defs[] = {
762 {
763 .name = "qemu64",
764 .level = 0xd,
765 .vendor = CPUID_VENDOR_AMD,
766 .family = 6,
767 .model = 6,
768 .stepping = 3,
769 .features[FEAT_1_EDX] =
770 PPRO_FEATURES |
771 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
772 CPUID_PSE36,
773 .features[FEAT_1_ECX] =
774 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
775 .features[FEAT_8000_0001_EDX] =
776 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
777 .features[FEAT_8000_0001_ECX] =
778 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
779 .xlevel = 0x8000000A,
780 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
781 },
782 {
783 .name = "phenom",
784 .level = 5,
785 .vendor = CPUID_VENDOR_AMD,
786 .family = 16,
787 .model = 2,
788 .stepping = 3,
789 /* Missing: CPUID_HT */
790 .features[FEAT_1_EDX] =
791 PPRO_FEATURES |
792 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
793 CPUID_PSE36 | CPUID_VME,
794 .features[FEAT_1_ECX] =
795 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
796 CPUID_EXT_POPCNT,
797 .features[FEAT_8000_0001_EDX] =
798 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
799 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
800 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
801 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
802 CPUID_EXT3_CR8LEG,
803 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
804 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
805 .features[FEAT_8000_0001_ECX] =
806 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
807 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
808 /* Missing: CPUID_SVM_LBRV */
809 .features[FEAT_SVM] =
810 CPUID_SVM_NPT,
811 .xlevel = 0x8000001A,
812 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
813 },
814 {
815 .name = "core2duo",
816 .level = 10,
817 .vendor = CPUID_VENDOR_INTEL,
818 .family = 6,
819 .model = 15,
820 .stepping = 11,
821 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
822 .features[FEAT_1_EDX] =
823 PPRO_FEATURES |
824 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
825 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
826 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
827 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
828 .features[FEAT_1_ECX] =
829 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
830 CPUID_EXT_CX16,
831 .features[FEAT_8000_0001_EDX] =
832 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
833 .features[FEAT_8000_0001_ECX] =
834 CPUID_EXT3_LAHF_LM,
835 .xlevel = 0x80000008,
836 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
837 },
838 {
839 .name = "kvm64",
840 .level = 0xd,
841 .vendor = CPUID_VENDOR_INTEL,
842 .family = 15,
843 .model = 6,
844 .stepping = 1,
845 /* Missing: CPUID_HT */
846 .features[FEAT_1_EDX] =
847 PPRO_FEATURES | CPUID_VME |
848 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
849 CPUID_PSE36,
850 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
851 .features[FEAT_1_ECX] =
852 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
853 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
854 .features[FEAT_8000_0001_EDX] =
855 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
856 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
857 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
858 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
859 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
860 .features[FEAT_8000_0001_ECX] =
861 0,
862 .xlevel = 0x80000008,
863 .model_id = "Common KVM processor"
864 },
865 {
866 .name = "qemu32",
867 .level = 4,
868 .vendor = CPUID_VENDOR_INTEL,
869 .family = 6,
870 .model = 6,
871 .stepping = 3,
872 .features[FEAT_1_EDX] =
873 PPRO_FEATURES,
874 .features[FEAT_1_ECX] =
875 CPUID_EXT_SSE3,
876 .xlevel = 0x80000004,
877 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
878 },
879 {
880 .name = "kvm32",
881 .level = 5,
882 .vendor = CPUID_VENDOR_INTEL,
883 .family = 15,
884 .model = 6,
885 .stepping = 1,
886 .features[FEAT_1_EDX] =
887 PPRO_FEATURES | CPUID_VME |
888 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
889 .features[FEAT_1_ECX] =
890 CPUID_EXT_SSE3,
891 .features[FEAT_8000_0001_ECX] =
892 0,
893 .xlevel = 0x80000008,
894 .model_id = "Common 32-bit KVM processor"
895 },
896 {
897 .name = "coreduo",
898 .level = 10,
899 .vendor = CPUID_VENDOR_INTEL,
900 .family = 6,
901 .model = 14,
902 .stepping = 8,
903 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
904 .features[FEAT_1_EDX] =
905 PPRO_FEATURES | CPUID_VME |
906 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
907 CPUID_SS,
908 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
909 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
910 .features[FEAT_1_ECX] =
911 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
912 .features[FEAT_8000_0001_EDX] =
913 CPUID_EXT2_NX,
914 .xlevel = 0x80000008,
915 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
916 },
917 {
918 .name = "486",
919 .level = 1,
920 .vendor = CPUID_VENDOR_INTEL,
921 .family = 4,
922 .model = 8,
923 .stepping = 0,
924 .features[FEAT_1_EDX] =
925 I486_FEATURES,
926 .xlevel = 0,
927 },
928 {
929 .name = "pentium",
930 .level = 1,
931 .vendor = CPUID_VENDOR_INTEL,
932 .family = 5,
933 .model = 4,
934 .stepping = 3,
935 .features[FEAT_1_EDX] =
936 PENTIUM_FEATURES,
937 .xlevel = 0,
938 },
939 {
940 .name = "pentium2",
941 .level = 2,
942 .vendor = CPUID_VENDOR_INTEL,
943 .family = 6,
944 .model = 5,
945 .stepping = 2,
946 .features[FEAT_1_EDX] =
947 PENTIUM2_FEATURES,
948 .xlevel = 0,
949 },
950 {
951 .name = "pentium3",
952 .level = 3,
953 .vendor = CPUID_VENDOR_INTEL,
954 .family = 6,
955 .model = 7,
956 .stepping = 3,
957 .features[FEAT_1_EDX] =
958 PENTIUM3_FEATURES,
959 .xlevel = 0,
960 },
961 {
962 .name = "athlon",
963 .level = 2,
964 .vendor = CPUID_VENDOR_AMD,
965 .family = 6,
966 .model = 2,
967 .stepping = 3,
968 .features[FEAT_1_EDX] =
969 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
970 CPUID_MCA,
971 .features[FEAT_8000_0001_EDX] =
972 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
973 .xlevel = 0x80000008,
974 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
975 },
976 {
977 .name = "n270",
978 .level = 10,
979 .vendor = CPUID_VENDOR_INTEL,
980 .family = 6,
981 .model = 28,
982 .stepping = 2,
983 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
984 .features[FEAT_1_EDX] =
985 PPRO_FEATURES |
986 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
987 CPUID_ACPI | CPUID_SS,
988 /* Some CPUs lack CPUID_SEP */
989 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
990 * CPUID_EXT_XTPR */
991 .features[FEAT_1_ECX] =
992 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
993 CPUID_EXT_MOVBE,
994 .features[FEAT_8000_0001_EDX] =
995 CPUID_EXT2_NX,
996 .features[FEAT_8000_0001_ECX] =
997 CPUID_EXT3_LAHF_LM,
998 .xlevel = 0x80000008,
999 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1000 },
1001 {
1002 .name = "Conroe",
1003 .level = 10,
1004 .vendor = CPUID_VENDOR_INTEL,
1005 .family = 6,
1006 .model = 15,
1007 .stepping = 3,
1008 .features[FEAT_1_EDX] =
1009 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1010 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1011 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1012 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1013 CPUID_DE | CPUID_FP87,
1014 .features[FEAT_1_ECX] =
1015 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1016 .features[FEAT_8000_0001_EDX] =
1017 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1018 .features[FEAT_8000_0001_ECX] =
1019 CPUID_EXT3_LAHF_LM,
1020 .xlevel = 0x80000008,
1021 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1022 },
1023 {
1024 .name = "Penryn",
1025 .level = 10,
1026 .vendor = CPUID_VENDOR_INTEL,
1027 .family = 6,
1028 .model = 23,
1029 .stepping = 3,
1030 .features[FEAT_1_EDX] =
1031 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1032 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1033 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1034 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1035 CPUID_DE | CPUID_FP87,
1036 .features[FEAT_1_ECX] =
1037 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1038 CPUID_EXT_SSE3,
1039 .features[FEAT_8000_0001_EDX] =
1040 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1041 .features[FEAT_8000_0001_ECX] =
1042 CPUID_EXT3_LAHF_LM,
1043 .xlevel = 0x80000008,
1044 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1045 },
1046 {
1047 .name = "Nehalem",
1048 .level = 11,
1049 .vendor = CPUID_VENDOR_INTEL,
1050 .family = 6,
1051 .model = 26,
1052 .stepping = 3,
1053 .features[FEAT_1_EDX] =
1054 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1055 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1056 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1057 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1058 CPUID_DE | CPUID_FP87,
1059 .features[FEAT_1_ECX] =
1060 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1061 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1062 .features[FEAT_8000_0001_EDX] =
1063 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1064 .features[FEAT_8000_0001_ECX] =
1065 CPUID_EXT3_LAHF_LM,
1066 .xlevel = 0x80000008,
1067 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1068 },
1069 {
1070 .name = "Westmere",
1071 .level = 11,
1072 .vendor = CPUID_VENDOR_INTEL,
1073 .family = 6,
1074 .model = 44,
1075 .stepping = 1,
1076 .features[FEAT_1_EDX] =
1077 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1078 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1079 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1080 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1081 CPUID_DE | CPUID_FP87,
1082 .features[FEAT_1_ECX] =
1083 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1084 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1085 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1086 .features[FEAT_8000_0001_EDX] =
1087 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1088 .features[FEAT_8000_0001_ECX] =
1089 CPUID_EXT3_LAHF_LM,
1090 .features[FEAT_6_EAX] =
1091 CPUID_6_EAX_ARAT,
1092 .xlevel = 0x80000008,
1093 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1094 },
1095 {
1096 .name = "SandyBridge",
1097 .level = 0xd,
1098 .vendor = CPUID_VENDOR_INTEL,
1099 .family = 6,
1100 .model = 42,
1101 .stepping = 1,
1102 .features[FEAT_1_EDX] =
1103 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1104 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1105 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1106 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1107 CPUID_DE | CPUID_FP87,
1108 .features[FEAT_1_ECX] =
1109 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1110 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1111 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1112 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1113 CPUID_EXT_SSE3,
1114 .features[FEAT_8000_0001_EDX] =
1115 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1116 CPUID_EXT2_SYSCALL,
1117 .features[FEAT_8000_0001_ECX] =
1118 CPUID_EXT3_LAHF_LM,
1119 .features[FEAT_XSAVE] =
1120 CPUID_XSAVE_XSAVEOPT,
1121 .features[FEAT_6_EAX] =
1122 CPUID_6_EAX_ARAT,
1123 .xlevel = 0x80000008,
1124 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1125 },
1126 {
1127 .name = "IvyBridge",
1128 .level = 0xd,
1129 .vendor = CPUID_VENDOR_INTEL,
1130 .family = 6,
1131 .model = 58,
1132 .stepping = 9,
1133 .features[FEAT_1_EDX] =
1134 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1135 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1136 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1137 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1138 CPUID_DE | CPUID_FP87,
1139 .features[FEAT_1_ECX] =
1140 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1141 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1142 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1143 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1144 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1145 .features[FEAT_7_0_EBX] =
1146 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1147 CPUID_7_0_EBX_ERMS,
1148 .features[FEAT_8000_0001_EDX] =
1149 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1150 CPUID_EXT2_SYSCALL,
1151 .features[FEAT_8000_0001_ECX] =
1152 CPUID_EXT3_LAHF_LM,
1153 .features[FEAT_XSAVE] =
1154 CPUID_XSAVE_XSAVEOPT,
1155 .features[FEAT_6_EAX] =
1156 CPUID_6_EAX_ARAT,
1157 .xlevel = 0x80000008,
1158 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1159 },
1160 {
1161 .name = "Haswell-noTSX",
1162 .level = 0xd,
1163 .vendor = CPUID_VENDOR_INTEL,
1164 .family = 6,
1165 .model = 60,
1166 .stepping = 1,
1167 .features[FEAT_1_EDX] =
1168 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1169 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1170 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1171 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1172 CPUID_DE | CPUID_FP87,
1173 .features[FEAT_1_ECX] =
1174 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1175 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1176 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1177 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1178 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1179 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1180 .features[FEAT_8000_0001_EDX] =
1181 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1182 CPUID_EXT2_SYSCALL,
1183 .features[FEAT_8000_0001_ECX] =
1184 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1185 .features[FEAT_7_0_EBX] =
1186 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1187 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1188 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1189 .features[FEAT_XSAVE] =
1190 CPUID_XSAVE_XSAVEOPT,
1191 .features[FEAT_6_EAX] =
1192 CPUID_6_EAX_ARAT,
1193 .xlevel = 0x80000008,
1194 .model_id = "Intel Core Processor (Haswell, no TSX)",
1195 }, {
1196 .name = "Haswell",
1197 .level = 0xd,
1198 .vendor = CPUID_VENDOR_INTEL,
1199 .family = 6,
1200 .model = 60,
1201 .stepping = 1,
1202 .features[FEAT_1_EDX] =
1203 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1204 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1205 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1206 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1207 CPUID_DE | CPUID_FP87,
1208 .features[FEAT_1_ECX] =
1209 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1210 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1211 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1212 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1213 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1214 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1215 .features[FEAT_8000_0001_EDX] =
1216 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1217 CPUID_EXT2_SYSCALL,
1218 .features[FEAT_8000_0001_ECX] =
1219 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1220 .features[FEAT_7_0_EBX] =
1221 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1222 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1223 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1224 CPUID_7_0_EBX_RTM,
1225 .features[FEAT_XSAVE] =
1226 CPUID_XSAVE_XSAVEOPT,
1227 .features[FEAT_6_EAX] =
1228 CPUID_6_EAX_ARAT,
1229 .xlevel = 0x80000008,
1230 .model_id = "Intel Core Processor (Haswell)",
1231 },
1232 {
1233 .name = "Broadwell-noTSX",
1234 .level = 0xd,
1235 .vendor = CPUID_VENDOR_INTEL,
1236 .family = 6,
1237 .model = 61,
1238 .stepping = 2,
1239 .features[FEAT_1_EDX] =
1240 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1241 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1242 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1243 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1244 CPUID_DE | CPUID_FP87,
1245 .features[FEAT_1_ECX] =
1246 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1247 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1248 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1249 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1250 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1251 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1252 .features[FEAT_8000_0001_EDX] =
1253 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1254 CPUID_EXT2_SYSCALL,
1255 .features[FEAT_8000_0001_ECX] =
1256 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1257 .features[FEAT_7_0_EBX] =
1258 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1259 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1260 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1261 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1262 CPUID_7_0_EBX_SMAP,
1263 .features[FEAT_XSAVE] =
1264 CPUID_XSAVE_XSAVEOPT,
1265 .features[FEAT_6_EAX] =
1266 CPUID_6_EAX_ARAT,
1267 .xlevel = 0x80000008,
1268 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1269 },
1270 {
1271 .name = "Broadwell",
1272 .level = 0xd,
1273 .vendor = CPUID_VENDOR_INTEL,
1274 .family = 6,
1275 .model = 61,
1276 .stepping = 2,
1277 .features[FEAT_1_EDX] =
1278 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1279 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1280 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1281 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1282 CPUID_DE | CPUID_FP87,
1283 .features[FEAT_1_ECX] =
1284 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1285 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1286 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1287 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1288 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1289 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1290 .features[FEAT_8000_0001_EDX] =
1291 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1292 CPUID_EXT2_SYSCALL,
1293 .features[FEAT_8000_0001_ECX] =
1294 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1295 .features[FEAT_7_0_EBX] =
1296 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1297 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1298 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1299 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1300 CPUID_7_0_EBX_SMAP,
1301 .features[FEAT_XSAVE] =
1302 CPUID_XSAVE_XSAVEOPT,
1303 .features[FEAT_6_EAX] =
1304 CPUID_6_EAX_ARAT,
1305 .xlevel = 0x80000008,
1306 .model_id = "Intel Core Processor (Broadwell)",
1307 },
1308 {
1309 .name = "Skylake-Client",
1310 .level = 0xd,
1311 .vendor = CPUID_VENDOR_INTEL,
1312 .family = 6,
1313 .model = 94,
1314 .stepping = 3,
1315 .features[FEAT_1_EDX] =
1316 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1317 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1318 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1319 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1320 CPUID_DE | CPUID_FP87,
1321 .features[FEAT_1_ECX] =
1322 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1323 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1324 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1325 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1326 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1327 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1328 .features[FEAT_8000_0001_EDX] =
1329 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1330 CPUID_EXT2_SYSCALL,
1331 .features[FEAT_8000_0001_ECX] =
1332 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1333 .features[FEAT_7_0_EBX] =
1334 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1335 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1336 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1337 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1338 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1339 /* Missing: XSAVES (not supported by some Linux versions,
1340 * including v4.1 to v4.6).
1341 * KVM doesn't yet expose any XSAVES state save component,
1342 * and the only one defined in Skylake (processor tracing)
1343 * probably will block migration anyway.
1344 */
1345 .features[FEAT_XSAVE] =
1346 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1347 CPUID_XSAVE_XGETBV1,
1348 .features[FEAT_6_EAX] =
1349 CPUID_6_EAX_ARAT,
1350 .xlevel = 0x80000008,
1351 .model_id = "Intel Core Processor (Skylake)",
1352 },
1353 {
1354 .name = "Opteron_G1",
1355 .level = 5,
1356 .vendor = CPUID_VENDOR_AMD,
1357 .family = 15,
1358 .model = 6,
1359 .stepping = 1,
1360 .features[FEAT_1_EDX] =
1361 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1362 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1363 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1364 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1365 CPUID_DE | CPUID_FP87,
1366 .features[FEAT_1_ECX] =
1367 CPUID_EXT_SSE3,
1368 .features[FEAT_8000_0001_EDX] =
1369 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1370 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1371 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1372 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1373 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1374 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1375 .xlevel = 0x80000008,
1376 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1377 },
1378 {
1379 .name = "Opteron_G2",
1380 .level = 5,
1381 .vendor = CPUID_VENDOR_AMD,
1382 .family = 15,
1383 .model = 6,
1384 .stepping = 1,
1385 .features[FEAT_1_EDX] =
1386 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1387 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1388 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1389 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1390 CPUID_DE | CPUID_FP87,
1391 .features[FEAT_1_ECX] =
1392 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1393 /* Missing: CPUID_EXT2_RDTSCP */
1394 .features[FEAT_8000_0001_EDX] =
1395 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1396 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1397 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1398 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1399 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1400 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1401 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1402 .features[FEAT_8000_0001_ECX] =
1403 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1404 .xlevel = 0x80000008,
1405 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1406 },
1407 {
1408 .name = "Opteron_G3",
1409 .level = 5,
1410 .vendor = CPUID_VENDOR_AMD,
1411 .family = 15,
1412 .model = 6,
1413 .stepping = 1,
1414 .features[FEAT_1_EDX] =
1415 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1416 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1417 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1418 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1419 CPUID_DE | CPUID_FP87,
1420 .features[FEAT_1_ECX] =
1421 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1422 CPUID_EXT_SSE3,
1423 /* Missing: CPUID_EXT2_RDTSCP */
1424 .features[FEAT_8000_0001_EDX] =
1425 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1426 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1427 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1428 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1429 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1430 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1431 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1432 .features[FEAT_8000_0001_ECX] =
1433 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1434 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1435 .xlevel = 0x80000008,
1436 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1437 },
1438 {
1439 .name = "Opteron_G4",
1440 .level = 0xd,
1441 .vendor = CPUID_VENDOR_AMD,
1442 .family = 21,
1443 .model = 1,
1444 .stepping = 2,
1445 .features[FEAT_1_EDX] =
1446 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1447 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1448 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1449 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1450 CPUID_DE | CPUID_FP87,
1451 .features[FEAT_1_ECX] =
1452 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1453 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1454 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1455 CPUID_EXT_SSE3,
1456 /* Missing: CPUID_EXT2_RDTSCP */
1457 .features[FEAT_8000_0001_EDX] =
1458 CPUID_EXT2_LM |
1459 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1460 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1461 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1462 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1463 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1464 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1465 .features[FEAT_8000_0001_ECX] =
1466 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1467 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1468 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1469 CPUID_EXT3_LAHF_LM,
1470 /* no xsaveopt! */
1471 .xlevel = 0x8000001A,
1472 .model_id = "AMD Opteron 62xx class CPU",
1473 },
1474 {
1475 .name = "Opteron_G5",
1476 .level = 0xd,
1477 .vendor = CPUID_VENDOR_AMD,
1478 .family = 21,
1479 .model = 2,
1480 .stepping = 0,
1481 .features[FEAT_1_EDX] =
1482 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1483 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1484 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1485 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1486 CPUID_DE | CPUID_FP87,
1487 .features[FEAT_1_ECX] =
1488 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1489 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1490 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1491 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1492 /* Missing: CPUID_EXT2_RDTSCP */
1493 .features[FEAT_8000_0001_EDX] =
1494 CPUID_EXT2_LM |
1495 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1496 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1497 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1498 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1499 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1500 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1501 .features[FEAT_8000_0001_ECX] =
1502 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1503 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1504 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1505 CPUID_EXT3_LAHF_LM,
1506 /* no xsaveopt! */
1507 .xlevel = 0x8000001A,
1508 .model_id = "AMD Opteron 63xx class CPU",
1509 },
1510 };
1511
1512 typedef struct PropValue {
1513 const char *prop, *value;
1514 } PropValue;
1515
1516 /* KVM-specific features that are automatically added/removed
1517 * from all CPU models when KVM is enabled.
1518 */
1519 static PropValue kvm_default_props[] = {
1520 { "kvmclock", "on" },
1521 { "kvm-nopiodelay", "on" },
1522 { "kvm-asyncpf", "on" },
1523 { "kvm-steal-time", "on" },
1524 { "kvm-pv-eoi", "on" },
1525 { "kvmclock-stable-bit", "on" },
1526 { "x2apic", "on" },
1527 { "acpi", "off" },
1528 { "monitor", "off" },
1529 { "svm", "off" },
1530 { NULL, NULL },
1531 };
1532
1533 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1534 {
1535 PropValue *pv;
1536 for (pv = kvm_default_props; pv->prop; pv++) {
1537 if (!strcmp(pv->prop, prop)) {
1538 pv->value = value;
1539 break;
1540 }
1541 }
1542
1543 /* It is valid to call this function only for properties that
1544 * are already present in the kvm_default_props table.
1545 */
1546 assert(pv->prop);
1547 }
1548
1549 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1550 bool migratable_only);
1551
1552 #ifdef CONFIG_KVM
1553
1554 static bool lmce_supported(void)
1555 {
1556 uint64_t mce_cap;
1557
1558 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1559 return false;
1560 }
1561
1562 return !!(mce_cap & MCG_LMCE_P);
1563 }
1564
1565 static int cpu_x86_fill_model_id(char *str)
1566 {
1567 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1568 int i;
1569
1570 for (i = 0; i < 3; i++) {
1571 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1572 memcpy(str + i * 16 + 0, &eax, 4);
1573 memcpy(str + i * 16 + 4, &ebx, 4);
1574 memcpy(str + i * 16 + 8, &ecx, 4);
1575 memcpy(str + i * 16 + 12, &edx, 4);
1576 }
1577 return 0;
1578 }
1579
1580 static X86CPUDefinition host_cpudef;
1581
1582 static Property host_x86_cpu_properties[] = {
1583 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1584 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1585 DEFINE_PROP_END_OF_LIST()
1586 };
1587
1588 /* class_init for the "host" CPU model
1589 *
1590 * This function may be called before KVM is initialized.
1591 */
1592 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1593 {
1594 DeviceClass *dc = DEVICE_CLASS(oc);
1595 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1596 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1597
1598 xcc->kvm_required = true;
1599
1600 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1601 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1602
1603 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1604 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1605 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1606 host_cpudef.stepping = eax & 0x0F;
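    /*
     * Worked example (illustrative): with EAX = 0x000306C3 the fields above
     * decode to family 6, model 0x3C (60) and stepping 3, i.e. the same
     * family/model as the "Haswell" entry in builtin_x86_defs.
     */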
1607
1608 cpu_x86_fill_model_id(host_cpudef.model_id);
1609
1610 xcc->cpu_def = &host_cpudef;
1611
1612 /* level, xlevel, xlevel2, and the feature words are initialized in
1613  * instance_init, because they require KVM to be initialized.
1614  */
1615
1616 dc->props = host_x86_cpu_properties;
1617 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1618 dc->cannot_destroy_with_object_finalize_yet = true;
1619 }
1620
1621 static void host_x86_cpu_initfn(Object *obj)
1622 {
1623 X86CPU *cpu = X86_CPU(obj);
1624 CPUX86State *env = &cpu->env;
1625 KVMState *s = kvm_state;
1626
1627 /* We can't fill the features array here because we don't know yet if
1628 * "migratable" is true or false.
1629 */
1630 cpu->host_features = true;
1631
1632 /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1633 if (kvm_enabled()) {
1634 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1635 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1636 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1637
1638 if (lmce_supported()) {
1639 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1640 }
1641 }
1642
1643 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1644 }
1645
1646 static const TypeInfo host_x86_cpu_type_info = {
1647 .name = X86_CPU_TYPE_NAME("host"),
1648 .parent = TYPE_X86_CPU,
1649 .instance_init = host_x86_cpu_initfn,
1650 .class_init = host_x86_cpu_class_init,
1651 };
1652
1653 #endif
1654
1655 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1656 {
1657 FeatureWordInfo *f = &feature_word_info[w];
1658 int i;
1659
1660 for (i = 0; i < 32; ++i) {
1661 if ((1UL << i) & mask) {
1662 const char *reg = get_register_name_32(f->cpuid_reg);
1663 assert(reg);
1664 fprintf(stderr, "warning: %s doesn't support requested feature: "
1665 "CPUID.%02XH:%s%s%s [bit %d]\n",
1666 kvm_enabled() ? "host" : "TCG",
1667 f->cpuid_eax, reg,
1668 f->feat_names[i] ? "." : "",
1669 f->feat_names[i] ? f->feat_names[i] : "", i);
1670 }
1671 }
1672 }
1673
1674 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1675 const char *name, void *opaque,
1676 Error **errp)
1677 {
1678 X86CPU *cpu = X86_CPU(obj);
1679 CPUX86State *env = &cpu->env;
1680 int64_t value;
1681
1682 value = (env->cpuid_version >> 8) & 0xf;
1683 if (value == 0xf) {
1684 value += (env->cpuid_version >> 20) & 0xff;
1685 }
1686 visit_type_int(v, name, &value, errp);
1687 }
1688
1689 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1690 const char *name, void *opaque,
1691 Error **errp)
1692 {
1693 X86CPU *cpu = X86_CPU(obj);
1694 CPUX86State *env = &cpu->env;
1695 const int64_t min = 0;
1696 const int64_t max = 0xff + 0xf;
1697 Error *local_err = NULL;
1698 int64_t value;
1699
1700 visit_type_int(v, name, &value, &local_err);
1701 if (local_err) {
1702 error_propagate(errp, local_err);
1703 return;
1704 }
1705 if (value < min || value > max) {
1706 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1707 name ? name : "null", value, min, max);
1708 return;
1709 }
1710
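/* The family value is split between bits 11:8 (base) and 27:20 (extended).
 * For example, setting family=21 (0x15) stores 0xF in bits 11:8 and 0x6 in
 * bits 27:20, since the effective family is base + extended whenever the
 * base field is 0xF.
 */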
1711 env->cpuid_version &= ~0xff00f00;
1712 if (value > 0x0f) {
1713 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1714 } else {
1715 env->cpuid_version |= value << 8;
1716 }
1717 }
1718
1719 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1720 const char *name, void *opaque,
1721 Error **errp)
1722 {
1723 X86CPU *cpu = X86_CPU(obj);
1724 CPUX86State *env = &cpu->env;
1725 int64_t value;
1726
1727 value = (env->cpuid_version >> 4) & 0xf;
1728 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1729 visit_type_int(v, name, &value, errp);
1730 }
1731
1732 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1733 const char *name, void *opaque,
1734 Error **errp)
1735 {
1736 X86CPU *cpu = X86_CPU(obj);
1737 CPUX86State *env = &cpu->env;
1738 const int64_t min = 0;
1739 const int64_t max = 0xff;
1740 Error *local_err = NULL;
1741 int64_t value;
1742
1743 visit_type_int(v, name, &value, &local_err);
1744 if (local_err) {
1745 error_propagate(errp, local_err);
1746 return;
1747 }
1748 if (value < min || value > max) {
1749 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1750 name ? name : "null", value, min, max);
1751 return;
1752 }
1753
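/* The model value is split between bits 7:4 (base) and 19:16 (extended
 * model): e.g. model=0x3C stores 0xC in bits 7:4 and 0x3 in bits 19:16.
 */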
1754 env->cpuid_version &= ~0xf00f0;
1755 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1756 }
1757
1758 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1759 const char *name, void *opaque,
1760 Error **errp)
1761 {
1762 X86CPU *cpu = X86_CPU(obj);
1763 CPUX86State *env = &cpu->env;
1764 int64_t value;
1765
1766 value = env->cpuid_version & 0xf;
1767 visit_type_int(v, name, &value, errp);
1768 }
1769
1770 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1771 const char *name, void *opaque,
1772 Error **errp)
1773 {
1774 X86CPU *cpu = X86_CPU(obj);
1775 CPUX86State *env = &cpu->env;
1776 const int64_t min = 0;
1777 const int64_t max = 0xf;
1778 Error *local_err = NULL;
1779 int64_t value;
1780
1781 visit_type_int(v, name, &value, &local_err);
1782 if (local_err) {
1783 error_propagate(errp, local_err);
1784 return;
1785 }
1786 if (value < min || value > max) {
1787 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1788 name ? name : "null", value, min, max);
1789 return;
1790 }
1791
1792 env->cpuid_version &= ~0xf;
1793 env->cpuid_version |= value & 0xf;
1794 }
1795
1796 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1797 {
1798 X86CPU *cpu = X86_CPU(obj);
1799 CPUX86State *env = &cpu->env;
1800 char *value;
1801
1802 value = g_malloc(CPUID_VENDOR_SZ + 1);
1803 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1804 env->cpuid_vendor3);
1805 return value;
1806 }
1807
1808 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1809 Error **errp)
1810 {
1811 X86CPU *cpu = X86_CPU(obj);
1812 CPUX86State *env = &cpu->env;
1813 int i;
1814
1815 if (strlen(value) != CPUID_VENDOR_SZ) {
1816 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1817 return;
1818 }
1819
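/* The 12-character vendor string is packed little-endian into the three
 * CPUID.0 registers, 4 bytes each: e.g. "GenuineIntel" yields
 * vendor1(EBX)="Genu", vendor2(EDX)="ineI", vendor3(ECX)="ntel".
 */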
1820 env->cpuid_vendor1 = 0;
1821 env->cpuid_vendor2 = 0;
1822 env->cpuid_vendor3 = 0;
1823 for (i = 0; i < 4; i++) {
1824 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1825 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1826 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1827 }
1828 }
1829
1830 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1831 {
1832 X86CPU *cpu = X86_CPU(obj);
1833 CPUX86State *env = &cpu->env;
1834 char *value;
1835 int i;
1836
1837 value = g_malloc(48 + 1);
1838 for (i = 0; i < 48; i++) {
1839 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1840 }
1841 value[48] = '\0';
1842 return value;
1843 }
1844
1845 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1846 Error **errp)
1847 {
1848 X86CPU *cpu = X86_CPU(obj);
1849 CPUX86State *env = &cpu->env;
1850 int c, len, i;
1851
1852 if (model_id == NULL) {
1853 model_id = "";
1854 }
1855 len = strlen(model_id);
1856 memset(env->cpuid_model, 0, 48);
1857 for (i = 0; i < 48; i++) {
1858 if (i >= len) {
1859 c = '\0';
1860 } else {
1861 c = (uint8_t)model_id[i];
1862 }
1863 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1864 }
1865 }
1866
1867 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1868 void *opaque, Error **errp)
1869 {
1870 X86CPU *cpu = X86_CPU(obj);
1871 int64_t value;
1872
1873 value = cpu->env.tsc_khz * 1000;
1874 visit_type_int(v, name, &value, errp);
1875 }
1876
1877 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1878 void *opaque, Error **errp)
1879 {
1880 X86CPU *cpu = X86_CPU(obj);
1881 const int64_t min = 0;
1882 const int64_t max = INT64_MAX;
1883 Error *local_err = NULL;
1884 int64_t value;
1885
1886 visit_type_int(v, name, &value, &local_err);
1887 if (local_err) {
1888 error_propagate(errp, local_err);
1889 return;
1890 }
1891 if (value < min || value > max) {
1892 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1893 name ? name : "null", value, min, max);
1894 return;
1895 }
1896
1897 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1898 }
1899
1900 /* Generic getter for "feature-words" and "filtered-features" properties */
1901 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1902 const char *name, void *opaque,
1903 Error **errp)
1904 {
1905 uint32_t *array = (uint32_t *)opaque;
1906 FeatureWord w;
1907 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1908 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1909 X86CPUFeatureWordInfoList *list = NULL;
1910
1911 for (w = 0; w < FEATURE_WORDS; w++) {
1912 FeatureWordInfo *wi = &feature_word_info[w];
1913 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1914 qwi->cpuid_input_eax = wi->cpuid_eax;
1915 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1916 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1917 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1918 qwi->features = array[w];
1919
1920 /* List will be in reverse order, but order shouldn't matter */
1921 list_entries[w].next = list;
1922 list_entries[w].value = &word_infos[w];
1923 list = &list_entries[w];
1924 }
1925
1926 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
1927 }
1928
1929 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1930 void *opaque, Error **errp)
1931 {
1932 X86CPU *cpu = X86_CPU(obj);
1933 int64_t value = cpu->hyperv_spinlock_attempts;
1934
1935 visit_type_int(v, name, &value, errp);
1936 }
1937
1938 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1939 void *opaque, Error **errp)
1940 {
1941 const int64_t min = 0xFFF;
1942 const int64_t max = UINT_MAX;
1943 X86CPU *cpu = X86_CPU(obj);
1944 Error *err = NULL;
1945 int64_t value;
1946
1947 visit_type_int(v, name, &value, &err);
1948 if (err) {
1949 error_propagate(errp, err);
1950 return;
1951 }
1952
1953 if (value < min || value > max) {
1954 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1955 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1956 object_get_typename(obj), name ? name : "null",
1957 value, min, max);
1958 return;
1959 }
1960 cpu->hyperv_spinlock_attempts = value;
1961 }
1962
1963 static PropertyInfo qdev_prop_spinlocks = {
1964 .name = "int",
1965 .get = x86_get_hv_spinlocks,
1966 .set = x86_set_hv_spinlocks,
1967 };
1968
1969 /* Convert all '_' in a feature string option name to '-', to make the
1970 * feature name conform to the QOM property naming rule ('-' instead of '_').
1971 */
1972 static inline void feat2prop(char *s)
1973 {
1974 while ((s = strchr(s, '_'))) {
1975 *s = '-';
1976 }
1977 }
1978
1979 /* Compatibility hack to maintain the legacy +feat/-feat semantics,
1980 * where +feat/-feat overrides any feature set by
1981 * feat=on|off even if the latter is parsed after +feat/-feat
1982 * (i.e. "-x2apic,x2apic=on" will result in x2apic being disabled)
1983 */
1984 static FeatureWordArray plus_features = { 0 };
1985 static FeatureWordArray minus_features = { 0 };
1986
1987 /* Parse "+feature,-feature,feature=foo" CPU feature string
1988 */
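/* For example (an illustrative command line, not taken from this file),
 * "-cpu Haswell,+avx2,-rdrand,tsc-freq=2500000000" adds avx2 to
 * plus_features, rdrand to minus_features, and registers a global
 * "tsc-frequency" property of 2500000000 for the Haswell CPU type.
 */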
1989 static void x86_cpu_parse_featurestr(const char *typename, char *features,
1990 Error **errp)
1991 {
1992 char *featurestr; /* Single "key=value" string being parsed */
1993 Error *local_err = NULL;
1994 static bool cpu_globals_initialized;
1995
1996 if (cpu_globals_initialized) {
1997 return;
1998 }
1999 cpu_globals_initialized = true;
2000
2001 if (!features) {
2002 return;
2003 }
2004
2005 for (featurestr = strtok(features, ",");
2006 featurestr && !local_err;
2007 featurestr = strtok(NULL, ",")) {
2008 const char *name;
2009 const char *val = NULL;
2010 char *eq = NULL;
2011 char num[32];
2012 GlobalProperty *prop;
2013
2014 /* Compatibility syntax: */
2015 if (featurestr[0] == '+') {
2016 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
2017 continue;
2018 } else if (featurestr[0] == '-') {
2019 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
2020 continue;
2021 }
2022
2023 eq = strchr(featurestr, '=');
2024 if (eq) {
2025 *eq++ = 0;
2026 val = eq;
2027 } else {
2028 val = "on";
2029 }
2030
2031 feat2prop(featurestr);
2032 name = featurestr;
2033
2034 /* Special case: */
2035 if (!strcmp(name, "tsc-freq")) {
2036 int64_t tsc_freq;
2037 char *err;
2038
2039 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
2040 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
2041 if (tsc_freq < 0 || *err) {
2042 error_setg(errp, "bad numerical value %s", val);
2043 return;
2044 }
2045 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2046 val = num;
2047 name = "tsc-frequency";
2048 }
2049
2050 prop = g_new0(typeof(*prop), 1);
2051 prop->driver = typename;
2052 prop->property = g_strdup(name);
2053 prop->value = g_strdup(val);
2054 prop->errp = &error_fatal;
2055 qdev_prop_register_global(prop);
2056 }
2057
2058 if (local_err) {
2059 error_propagate(errp, local_err);
2060 }
2061 }
2062
2063 /* Print all cpuid feature names in featureset
2064 */
2065 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2066 {
2067 int bit;
2068 bool first = true;
2069
2070 for (bit = 0; bit < 32; bit++) {
2071 if (featureset[bit]) {
2072 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2073 first = false;
2074 }
2075 }
2076 }
2077
2078 /* Print the list of CPU models and recognized CPUID flags. */
2079 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2080 {
2081 X86CPUDefinition *def;
2082 char buf[256];
2083 int i;
2084
2085 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2086 def = &builtin_x86_defs[i];
2087 snprintf(buf, sizeof(buf), "%s", def->name);
2088 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2089 }
2090 #ifdef CONFIG_KVM
2091 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2092 "KVM processor with all supported host features "
2093 "(only available in KVM mode)");
2094 #endif
2095
2096 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2097 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2098 FeatureWordInfo *fw = &feature_word_info[i];
2099
2100 (*cpu_fprintf)(f, " ");
2101 listflags(f, cpu_fprintf, fw->feat_names);
2102 (*cpu_fprintf)(f, "\n");
2103 }
2104 }
2105
2106 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2107 {
2108 CpuDefinitionInfoList *cpu_list = NULL;
2109 X86CPUDefinition *def;
2110 int i;
2111
2112 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2113 CpuDefinitionInfoList *entry;
2114 CpuDefinitionInfo *info;
2115
2116 def = &builtin_x86_defs[i];
2117 info = g_malloc0(sizeof(*info));
2118 info->name = g_strdup(def->name);
2119
2120 entry = g_malloc0(sizeof(*entry));
2121 entry->value = info;
2122 entry->next = cpu_list;
2123 cpu_list = entry;
2124 }
2125
2126 return cpu_list;
2127 }
2128
2129 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2130 bool migratable_only)
2131 {
2132 FeatureWordInfo *wi = &feature_word_info[w];
2133 uint32_t r;
2134
2135 if (kvm_enabled()) {
2136 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2137 wi->cpuid_ecx,
2138 wi->cpuid_reg);
2139 } else if (tcg_enabled()) {
2140 r = wi->tcg_features;
2141 } else {
2142 return ~0;
2143 }
2144 if (migratable_only) {
2145 r &= x86_cpu_get_migratable_flags(w);
2146 }
2147 return r;
2148 }
2149
2150 /*
2151 * Filters CPU feature words based on host availability of each feature.
2152 *
2153 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2154 */
2155 static int x86_cpu_filter_features(X86CPU *cpu)
2156 {
2157 CPUX86State *env = &cpu->env;
2158 FeatureWord w;
2159 int rv = 0;
2160
2161 for (w = 0; w < FEATURE_WORDS; w++) {
2162 uint32_t host_feat =
2163 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2164 uint32_t requested_features = env->features[w];
2165 env->features[w] &= host_feat;
2166 cpu->filtered_features[w] = requested_features & ~env->features[w];
2167 if (cpu->filtered_features[w]) {
2168 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2169 report_unavailable_features(w, cpu->filtered_features[w]);
2170 }
2171 rv = 1;
2172 }
2173 }
2174
2175 return rv;
2176 }
2177
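/* Apply an array of PropValue property overrides (terminated by a NULL prop
 * name) to the CPU object; entries with a NULL value are skipped.
 */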
2178 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2179 {
2180 PropValue *pv;
2181 for (pv = props; pv->prop; pv++) {
2182 if (!pv->value) {
2183 continue;
2184 }
2185 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2186 &error_abort);
2187 }
2188 }
2189
2190 /* Load data from X86CPUDefinition
2191 */
2192 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2193 {
2194 CPUX86State *env = &cpu->env;
2195 const char *vendor;
2196 char host_vendor[CPUID_VENDOR_SZ + 1];
2197 FeatureWord w;
2198
2199 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2200 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2201 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2202 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2203 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2204 object_property_set_int(OBJECT(cpu), def->xlevel2, "xlevel2", errp);
2205 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2206 for (w = 0; w < FEATURE_WORDS; w++) {
2207 env->features[w] = def->features[w];
2208 }
2209
2210 /* Special cases not set in the X86CPUDefinition structs: */
2211 if (kvm_enabled()) {
2212 if (!kvm_irqchip_in_kernel()) {
2213 x86_cpu_change_kvm_default("x2apic", "off");
2214 }
2215
2216 x86_cpu_apply_props(cpu, kvm_default_props);
2217 }
2218
2219 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2220
2221 /* sysenter isn't supported in compatibility mode on AMD,
2222 * syscall isn't supported in compatibility mode on Intel.
2223 * Normally we advertise the actual CPU vendor, but you can
2224 * override this using the 'vendor' property if you want to use
2225 * KVM's sysenter/syscall emulation in compatibility mode and
2226 * when doing cross-vendor migration.
2227 */
2228 vendor = def->vendor;
2229 if (kvm_enabled()) {
2230 uint32_t ebx = 0, ecx = 0, edx = 0;
2231 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2232 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2233 vendor = host_vendor;
2234 }
2235
2236 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2237
2238 }
2239
2240 X86CPU *cpu_x86_init(const char *cpu_model)
2241 {
2242 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2243 }
2244
2245 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2246 {
2247 X86CPUDefinition *cpudef = data;
2248 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2249
2250 xcc->cpu_def = cpudef;
2251 }
2252
2253 static void x86_register_cpudef_type(X86CPUDefinition *def)
2254 {
2255 char *typename = x86_cpu_type_name(def->name);
2256 TypeInfo ti = {
2257 .name = typename,
2258 .parent = TYPE_X86_CPU,
2259 .class_init = x86_cpu_cpudef_class_init,
2260 .class_data = def,
2261 };
2262
2263 type_register(&ti);
2264 g_free(typename);
2265 }
2266
2267 #if !defined(CONFIG_USER_ONLY)
2268
2269 void cpu_clear_apic_feature(CPUX86State *env)
2270 {
2271 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2272 }
2273
2274 #endif /* !CONFIG_USER_ONLY */
2275
2276 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2277 uint32_t *eax, uint32_t *ebx,
2278 uint32_t *ecx, uint32_t *edx)
2279 {
2280 X86CPU *cpu = x86_env_get_cpu(env);
2281 CPUState *cs = CPU(cpu);
2282
2283 /* Clamp the requested index to the maximum supported CPUID leaf */
2284 if (index & 0x80000000) {
2285 if (index > env->cpuid_xlevel) {
2286 if (env->cpuid_xlevel2 > 0) {
2287 /* Handle the Centaur's CPUID instruction. */
2288 if (index > env->cpuid_xlevel2) {
2289 index = env->cpuid_xlevel2;
2290 } else if (index < 0xC0000000) {
2291 index = env->cpuid_xlevel;
2292 }
2293 } else {
2294 /* Intel documentation states that invalid EAX input will
2295 * return the same information as EAX=cpuid_level
2296 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2297 */
2298 index = env->cpuid_level;
2299 }
2300 }
2301 } else {
2302 if (index > env->cpuid_level)
2303 index = env->cpuid_level;
2304 }
2305
2306 switch(index) {
2307 case 0:
2308 *eax = env->cpuid_level;
2309 *ebx = env->cpuid_vendor1;
2310 *edx = env->cpuid_vendor2;
2311 *ecx = env->cpuid_vendor3;
2312 break;
2313 case 1:
2314 *eax = env->cpuid_version;
2315 *ebx = (cpu->apic_id << 24) |
2316 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2317 *ecx = env->features[FEAT_1_ECX];
2318 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2319 *ecx |= CPUID_EXT_OSXSAVE;
2320 }
2321 *edx = env->features[FEAT_1_EDX];
2322 if (cs->nr_cores * cs->nr_threads > 1) {
2323 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2324 *edx |= CPUID_HT;
2325 }
2326 break;
2327 case 2:
2328 /* cache info: needed for Pentium Pro compatibility */
2329 if (cpu->cache_info_passthrough) {
2330 host_cpuid(index, 0, eax, ebx, ecx, edx);
2331 break;
2332 }
2333 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2334 *ebx = 0;
2335 *ecx = 0;
2336 *edx = (L1D_DESCRIPTOR << 16) | \
2337 (L1I_DESCRIPTOR << 8) | \
2338 (L2_DESCRIPTOR);
2339 break;
2340 case 4:
2341 /* cache info: needed for Core compatibility */
2342 if (cpu->cache_info_passthrough) {
2343 host_cpuid(index, count, eax, ebx, ecx, edx);
2344 *eax &= ~0xFC000000;
2345 } else {
2346 *eax = 0;
2347 switch (count) {
2348 case 0: /* L1 dcache info */
2349 *eax |= CPUID_4_TYPE_DCACHE | \
2350 CPUID_4_LEVEL(1) | \
2351 CPUID_4_SELF_INIT_LEVEL;
2352 *ebx = (L1D_LINE_SIZE - 1) | \
2353 ((L1D_PARTITIONS - 1) << 12) | \
2354 ((L1D_ASSOCIATIVITY - 1) << 22);
2355 *ecx = L1D_SETS - 1;
2356 *edx = CPUID_4_NO_INVD_SHARING;
2357 break;
2358 case 1: /* L1 icache info */
2359 *eax |= CPUID_4_TYPE_ICACHE | \
2360 CPUID_4_LEVEL(1) | \
2361 CPUID_4_SELF_INIT_LEVEL;
2362 *ebx = (L1I_LINE_SIZE - 1) | \
2363 ((L1I_PARTITIONS - 1) << 12) | \
2364 ((L1I_ASSOCIATIVITY - 1) << 22);
2365 *ecx = L1I_SETS - 1;
2366 *edx = CPUID_4_NO_INVD_SHARING;
2367 break;
2368 case 2: /* L2 cache info */
2369 *eax |= CPUID_4_TYPE_UNIFIED | \
2370 CPUID_4_LEVEL(2) | \
2371 CPUID_4_SELF_INIT_LEVEL;
2372 if (cs->nr_threads > 1) {
2373 *eax |= (cs->nr_threads - 1) << 14;
2374 }
2375 *ebx = (L2_LINE_SIZE - 1) | \
2376 ((L2_PARTITIONS - 1) << 12) | \
2377 ((L2_ASSOCIATIVITY - 1) << 22);
2378 *ecx = L2_SETS - 1;
2379 *edx = CPUID_4_NO_INVD_SHARING;
2380 break;
2381 default: /* end of info */
2382 *eax = 0;
2383 *ebx = 0;
2384 *ecx = 0;
2385 *edx = 0;
2386 break;
2387 }
2388 }
2389
2390 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2391 if ((*eax & 31) && cs->nr_cores > 1) {
2392 *eax |= (cs->nr_cores - 1) << 26;
2393 }
2394 break;
2395 case 5:
2396 /* mwait info: needed for Core compatibility */
2397 *eax = 0; /* Smallest monitor-line size in bytes */
2398 *ebx = 0; /* Largest monitor-line size in bytes */
2399 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2400 *edx = 0;
2401 break;
2402 case 6:
2403 /* Thermal and Power Leaf */
2404 *eax = env->features[FEAT_6_EAX];
2405 *ebx = 0;
2406 *ecx = 0;
2407 *edx = 0;
2408 break;
2409 case 7:
2410 /* Structured Extended Feature Flags Enumeration Leaf */
2411 if (count == 0) {
2412 *eax = 0; /* Maximum ECX value for sub-leaves */
2413 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2414 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2415 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2416 *ecx |= CPUID_7_0_ECX_OSPKE;
2417 }
2418 *edx = 0; /* Reserved */
2419 } else {
2420 *eax = 0;
2421 *ebx = 0;
2422 *ecx = 0;
2423 *edx = 0;
2424 }
2425 break;
2426 case 9:
2427 /* Direct Cache Access Information Leaf */
2428 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2429 *ebx = 0;
2430 *ecx = 0;
2431 *edx = 0;
2432 break;
2433 case 0xA:
2434 /* Architectural Performance Monitoring Leaf */
2435 if (kvm_enabled() && cpu->enable_pmu) {
2436 KVMState *s = cs->kvm_state;
2437
2438 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2439 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2440 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2441 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2442 } else {
2443 *eax = 0;
2444 *ebx = 0;
2445 *ecx = 0;
2446 *edx = 0;
2447 }
2448 break;
2449 case 0xB:
2450 /* Extended Topology Enumeration Leaf */
2451 if (!cpu->enable_cpuid_0xb) {
2452 *eax = *ebx = *ecx = *edx = 0;
2453 break;
2454 }
2455
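/* For each sub-leaf, EAX[4:0] is the number of APIC ID bits to shift right
 * to reach the next topology level and EDX returns the full APIC ID; e.g.
 * with smp_threads=2 the SMT sub-leaf below reports a shift of 1.
 */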
2456 *ecx = count & 0xff;
2457 *edx = cpu->apic_id;
2458
2459 switch (count) {
2460 case 0:
2461 *eax = apicid_core_offset(smp_cores, smp_threads);
2462 *ebx = smp_threads;
2463 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2464 break;
2465 case 1:
2466 *eax = apicid_pkg_offset(smp_cores, smp_threads);
2467 *ebx = smp_cores * smp_threads;
2468 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2469 break;
2470 default:
2471 *eax = 0;
2472 *ebx = 0;
2473 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2474 }
2475
2476 assert(!(*eax & ~0x1f));
2477 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2478 break;
2479 case 0xD: {
2480 KVMState *s = cs->kvm_state;
2481 uint64_t ena_mask;
2482 int i;
2483
2484 /* Processor Extended State */
2485 *eax = 0;
2486 *ebx = 0;
2487 *ecx = 0;
2488 *edx = 0;
2489 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2490 break;
2491 }
2492 if (kvm_enabled()) {
2493 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2494 ena_mask <<= 32;
2495 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2496 } else {
2497 ena_mask = -1;
2498 }
2499
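/* Sub-leaf 0: EAX/EDX form the 64-bit bitmap of supported XSAVE state
 * components, and ECX/EBX report the XSAVE area size needed for them; the
 * 0x240 base covers the legacy FP/SSE region (512 bytes) plus the XSAVE
 * header (64 bytes).
 */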
2500 if (count == 0) {
2501 *ecx = 0x240;
2502 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2503 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2504 if ((env->features[esa->feature] & esa->bits) == esa->bits
2505 && ((ena_mask >> i) & 1) != 0) {
2506 if (i < 32) {
2507 *eax |= 1u << i;
2508 } else {
2509 *edx |= 1u << (i - 32);
2510 }
2511 *ecx = MAX(*ecx, esa->offset + esa->size);
2512 }
2513 }
2514 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2515 *ebx = *ecx;
2516 } else if (count == 1) {
2517 *eax = env->features[FEAT_XSAVE];
2518 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2519 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2520 if ((env->features[esa->feature] & esa->bits) == esa->bits
2521 && ((ena_mask >> count) & 1) != 0) {
2522 *eax = esa->size;
2523 *ebx = esa->offset;
2524 }
2525 }
2526 break;
2527 }
2528 case 0x80000000:
2529 *eax = env->cpuid_xlevel;
2530 *ebx = env->cpuid_vendor1;
2531 *edx = env->cpuid_vendor2;
2532 *ecx = env->cpuid_vendor3;
2533 break;
2534 case 0x80000001:
2535 *eax = env->cpuid_version;
2536 *ebx = 0;
2537 *ecx = env->features[FEAT_8000_0001_ECX];
2538 *edx = env->features[FEAT_8000_0001_EDX];
2539
2540 /* The Linux kernel checks for the CMPLegacy bit and
2541 * discards multiple thread information if it is set.
2542 * So don't set it here for Intel to make Linux guests happy.
2543 */
2544 if (cs->nr_cores * cs->nr_threads > 1) {
2545 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2546 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2547 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2548 *ecx |= 1 << 1; /* CmpLegacy bit */
2549 }
2550 }
2551 break;
2552 case 0x80000002:
2553 case 0x80000003:
2554 case 0x80000004:
2555 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2556 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2557 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2558 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2559 break;
2560 case 0x80000005:
2561 /* cache info (L1 cache) */
2562 if (cpu->cache_info_passthrough) {
2563 host_cpuid(index, 0, eax, ebx, ecx, edx);
2564 break;
2565 }
2566 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2567 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2568 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2569 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2570 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2571 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2572 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2573 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2574 break;
2575 case 0x80000006:
2576 /* cache info (L2 cache) */
2577 if (cpu->cache_info_passthrough) {
2578 host_cpuid(index, 0, eax, ebx, ecx, edx);
2579 break;
2580 }
2581 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2582 (L2_DTLB_2M_ENTRIES << 16) | \
2583 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2584 (L2_ITLB_2M_ENTRIES);
2585 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2586 (L2_DTLB_4K_ENTRIES << 16) | \
2587 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2588 (L2_ITLB_4K_ENTRIES);
2589 *ecx = (L2_SIZE_KB_AMD << 16) | \
2590 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2591 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2592 *edx = ((L3_SIZE_KB/512) << 18) | \
2593 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2594 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2595 break;
2596 case 0x80000007:
2597 *eax = 0;
2598 *ebx = 0;
2599 *ecx = 0;
2600 *edx = env->features[FEAT_8000_0007_EDX];
2601 break;
2602 case 0x80000008:
2603 /* Virtual & physical address sizes in the low 2 bytes: EAX[7:0] = physical bits, EAX[15:8] = virtual bits. */
2604 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2605 /* 64 bit processor, 48 bits virtual, configurable
2606 * physical bits.
2607 */
2608 *eax = 0x00003000 + cpu->phys_bits;
2609 } else {
2610 *eax = cpu->phys_bits;
2611 }
2612 *ebx = 0;
2613 *ecx = 0;
2614 *edx = 0;
2615 if (cs->nr_cores * cs->nr_threads > 1) {
2616 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2617 }
2618 break;
2619 case 0x8000000A:
2620 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2621 *eax = 0x00000001; /* SVM Revision */
2622 *ebx = 0x00000010; /* nr of ASIDs */
2623 *ecx = 0;
2624 *edx = env->features[FEAT_SVM]; /* optional features */
2625 } else {
2626 *eax = 0;
2627 *ebx = 0;
2628 *ecx = 0;
2629 *edx = 0;
2630 }
2631 break;
2632 case 0xC0000000:
2633 *eax = env->cpuid_xlevel2;
2634 *ebx = 0;
2635 *ecx = 0;
2636 *edx = 0;
2637 break;
2638 case 0xC0000001:
2639 /* Support for VIA CPU's CPUID instruction */
2640 *eax = env->cpuid_version;
2641 *ebx = 0;
2642 *ecx = 0;
2643 *edx = env->features[FEAT_C000_0001_EDX];
2644 break;
2645 case 0xC0000002:
2646 case 0xC0000003:
2647 case 0xC0000004:
2648 /* Reserved for future use; currently filled with zero */
2649 *eax = 0;
2650 *ebx = 0;
2651 *ecx = 0;
2652 *edx = 0;
2653 break;
2654 default:
2655 /* reserved values: zero */
2656 *eax = 0;
2657 *ebx = 0;
2658 *ecx = 0;
2659 *edx = 0;
2660 break;
2661 }
2662 }
2663
2664 /* CPUClass::reset() */
2665 static void x86_cpu_reset(CPUState *s)
2666 {
2667 X86CPU *cpu = X86_CPU(s);
2668 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2669 CPUX86State *env = &cpu->env;
2670 target_ulong cr4;
2671 uint64_t xcr0;
2672 int i;
2673
2674 xcc->parent_reset(s);
2675
2676 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2677
2678 tlb_flush(s, 1);
2679
2680 env->old_exception = -1;
2681
2682 /* init to reset state */
2683
2684 env->hflags2 |= HF2_GIF_MASK;
2685
2686 cpu_x86_update_cr0(env, 0x60000010);
2687 env->a20_mask = ~0x0;
2688 env->smbase = 0x30000;
2689
2690 env->idt.limit = 0xffff;
2691 env->gdt.limit = 0xffff;
2692 env->ldt.limit = 0xffff;
2693 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2694 env->tr.limit = 0xffff;
2695 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2696
2697 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2698 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2699 DESC_R_MASK | DESC_A_MASK);
2700 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2701 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2702 DESC_A_MASK);
2703 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2704 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2705 DESC_A_MASK);
2706 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2707 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2708 DESC_A_MASK);
2709 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2710 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2711 DESC_A_MASK);
2712 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2713 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2714 DESC_A_MASK);
2715
2716 env->eip = 0xfff0;
2717 env->regs[R_EDX] = env->cpuid_version;
2718
2719 env->eflags = 0x2;
2720
2721 /* FPU init */
2722 for (i = 0; i < 8; i++) {
2723 env->fptags[i] = 1;
2724 }
2725 cpu_set_fpuc(env, 0x37f);
2726
2727 env->mxcsr = 0x1f80;
2728 /* All units are in INIT state. */
2729 env->xstate_bv = 0;
2730
2731 env->pat = 0x0007040600070406ULL;
2732 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2733
2734 memset(env->dr, 0, sizeof(env->dr));
2735 env->dr[6] = DR6_FIXED_1;
2736 env->dr[7] = DR7_FIXED_1;
2737 cpu_breakpoint_remove_all(s, BP_CPU);
2738 cpu_watchpoint_remove_all(s, BP_CPU);
2739
2740 cr4 = 0;
2741 xcr0 = XSTATE_FP_MASK;
2742
2743 #ifdef CONFIG_USER_ONLY
2744 /* Enable all the features for user-mode. */
2745 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2746 xcr0 |= XSTATE_SSE_MASK;
2747 }
2748 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2749 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2750 if ((env->features[esa->feature] & esa->bits) == esa->bits) {
2751 xcr0 |= 1ull << i;
2752 }
2753 }
2754
2755 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2756 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2757 }
2758 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2759 cr4 |= CR4_FSGSBASE_MASK;
2760 }
2761 #endif
2762
2763 env->xcr0 = xcr0;
2764 cpu_x86_update_cr4(env, cr4);
2765
2766 /*
2767 * SDM 11.11.5 requires:
2768 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2769 * - IA32_MTRR_PHYSMASKn.V = 0
2770 * All other bits are undefined. For simplification, zero it all.
2771 */
2772 env->mtrr_deftype = 0;
2773 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2774 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2775
2776 #if !defined(CONFIG_USER_ONLY)
2777 /* We hard-wire the BSP to the first CPU. */
2778 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2779
2780 s->halted = !cpu_is_bsp(cpu);
2781
2782 if (kvm_enabled()) {
2783 kvm_arch_reset_vcpu(cpu);
2784 }
2785 #endif
2786 }
2787
2788 #ifndef CONFIG_USER_ONLY
2789 bool cpu_is_bsp(X86CPU *cpu)
2790 {
2791 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2792 }
2793
2794 /* TODO: remove me when reset over the QOM tree is implemented */
2795 static void x86_cpu_machine_reset_cb(void *opaque)
2796 {
2797 X86CPU *cpu = opaque;
2798 cpu_reset(CPU(cpu));
2799 }
2800 #endif
2801
2802 static void mce_init(X86CPU *cpu)
2803 {
2804 CPUX86State *cenv = &cpu->env;
2805 unsigned int bank;
2806
2807 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2808 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2809 (CPUID_MCE | CPUID_MCA)) {
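/* MCG_CAP encodes the bank count in its low bits plus capability flags;
 * LMCE capability (MCG_LMCE_P) is advertised only when enable_lmce is set.
 */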
2810 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2811 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2812 cenv->mcg_ctl = ~(uint64_t)0;
2813 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2814 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2815 }
2816 }
2817 }
2818
2819 #ifndef CONFIG_USER_ONLY
2820 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2821 {
2822 APICCommonState *apic;
2823 const char *apic_type = "apic";
2824
2825 if (kvm_apic_in_kernel()) {
2826 apic_type = "kvm-apic";
2827 } else if (xen_enabled()) {
2828 apic_type = "xen-apic";
2829 }
2830
2831 cpu->apic_state = DEVICE(object_new(apic_type));
2832
2833 object_property_add_child(OBJECT(cpu), "lapic",
2834 OBJECT(cpu->apic_state), &error_abort);
2835 object_unref(OBJECT(cpu->apic_state));
2836
2837 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2838 /* TODO: convert to link<> */
2839 apic = APIC_COMMON(cpu->apic_state);
2840 apic->cpu = cpu;
2841 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2842 }
2843
2844 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2845 {
2846 APICCommonState *apic;
2847 static bool apic_mmio_map_once;
2848
2849 if (cpu->apic_state == NULL) {
2850 return;
2851 }
2852 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2853 errp);
2854
2855 /* Map APIC MMIO area */
2856 apic = APIC_COMMON(cpu->apic_state);
2857 if (!apic_mmio_map_once) {
2858 memory_region_add_subregion_overlap(get_system_memory(),
2859 apic->apicbase &
2860 MSR_IA32_APICBASE_BASE,
2861 &apic->io_memory,
2862 0x1000);
2863 apic_mmio_map_once = true;
2864 }
2865 }
2866
2867 static void x86_cpu_machine_done(Notifier *n, void *unused)
2868 {
2869 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2870 MemoryRegion *smram =
2871 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2872
2873 if (smram) {
2874 cpu->smram = g_new(MemoryRegion, 1);
2875 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2876 smram, 0, 1ull << 32);
2877 memory_region_set_enabled(cpu->smram, false);
2878 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2879 }
2880 }
2881 #else
2882 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2883 {
2884 }
2885 #endif
2886
2887 /* Note: Only safe for use on x86(-64) hosts */
2888 static uint32_t x86_host_phys_bits(void)
2889 {
2890 uint32_t eax;
2891 uint32_t host_phys_bits;
2892
2893 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
2894 if (eax >= 0x80000008) {
2895 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
2896 /* Note: according to AMD doc 25481 rev 2.34, there is a field
2897 * at 23:16 that can specify the maximum physical address bits for
2898 * the guest and can override this value; but I've not seen
2899 * anything with that set.
2900 */
2901 host_phys_bits = eax & 0xff;
2902 } else {
2903 /* It's an odd 64-bit machine that doesn't have the leaf for
2904 * physical address bits; fall back to 36, which matches most
2905 * older Intel parts.
2906 */
2907 host_phys_bits = 36;
2908 }
2909
2910 return host_phys_bits;
2911 }
2912
2913 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2914 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2915 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2916 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2917 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2918 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2919 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2920 {
2921 CPUState *cs = CPU(dev);
2922 X86CPU *cpu = X86_CPU(dev);
2923 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2924 CPUX86State *env = &cpu->env;
2925 Error *local_err = NULL;
2926 static bool ht_warned;
2927 FeatureWord w;
2928
2929 if (xcc->kvm_required && !kvm_enabled()) {
2930 char *name = x86_cpu_class_get_model_name(xcc);
2931 error_setg(&local_err, "CPU model '%s' requires KVM", name);
2932 g_free(name);
2933 goto out;
2934 }
2935
2936 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
2937 error_setg(errp, "apic-id property was not initialized properly");
2938 return;
2939 }
2940
2941 /* TODO: cpu->host_features incorrectly overwrites features
2942 * set using "feat=on|off". Once we fix this, we can convert
2943 * plus_features & minus_features to global properties
2944 * inside x86_cpu_parse_featurestr() too.
2945 */
2946 if (cpu->host_features) {
2947 for (w = 0; w < FEATURE_WORDS; w++) {
2948 env->features[w] =
2949 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2950 }
2951 }
2952
2953 for (w = 0; w < FEATURE_WORDS; w++) {
2954 cpu->env.features[w] |= plus_features[w];
2955 cpu->env.features[w] &= ~minus_features[w];
2956 }
2957
2958 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2959 env->cpuid_level = 7;
2960 }
2961
2962 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2963 error_setg(&local_err,
2964 kvm_enabled() ?
2965 "Host doesn't support requested features" :
2966 "TCG doesn't support requested features");
2967 goto out;
2968 }
2969
2970 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2971 * CPUID[1].EDX.
2972 */
2973 if (IS_AMD_CPU(env)) {
2974 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2975 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2976 & CPUID_EXT2_AMD_ALIASES);
2977 }
2978
2979 /* For 64-bit systems, decide how many physical address bits to present.
2980 * Ideally this should match the host; anything other than matching
2981 * the host can cause incorrect guest behaviour.
2982 * QEMU used to pick the magic value of 40 bits, which corresponds to
2983 * consumer AMD devices but nothing else.
2984 */
2985 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2986 if (kvm_enabled()) {
2987 uint32_t host_phys_bits = x86_host_phys_bits();
2988 static bool warned;
2989
2990 if (cpu->host_phys_bits) {
2991 /* The user asked for us to use the host physical bits */
2992 cpu->phys_bits = host_phys_bits;
2993 }
2994
2995 /* Print a warning if the user set it to a value that's not the
2996 * host value.
2997 */
2998 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
2999 !warned) {
3000 error_report("Warning: Host physical bits (%u)"
3001 " does not match phys-bits property (%u)",
3002 host_phys_bits, cpu->phys_bits);
3003 warned = true;
3004 }
3005
3006 if (cpu->phys_bits &&
3007 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3008 cpu->phys_bits < 32)) {
3009 error_setg(errp, "phys-bits should be between 32 and %u "
3010 " (but is %u)",
3011 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
3012 return;
3013 }
3014 } else {
3015 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3016 error_setg(errp, "TCG only supports phys-bits=%u",
3017 TCG_PHYS_ADDR_BITS);
3018 return;
3019 }
3020 }
3021 /* 0 means it was not explicitly set by the user (or by machine
3022 * compat_props or by the host code above). In this case, the default
3023 * is the value used by TCG (40).
3024 */
3025 if (cpu->phys_bits == 0) {
3026 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3027 }
3028 } else {
3029 /* For 32-bit systems, don't use the user-set value, but keep
3030 * phys_bits consistent with what we tell the guest.
3031 */
3032 if (cpu->phys_bits != 0) {
3033 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
3034 return;
3035 }
3036
3037 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3038 cpu->phys_bits = 36;
3039 } else {
3040 cpu->phys_bits = 32;
3041 }
3042 }
3043 cpu_exec_init(cs, &error_abort);
3044
3045 if (tcg_enabled()) {
3046 tcg_x86_init();
3047 }
3048
3049 #ifndef CONFIG_USER_ONLY
3050 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3051
3052 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3053 x86_cpu_apic_create(cpu, &local_err);
3054 if (local_err != NULL) {
3055 goto out;
3056 }
3057 }
3058 #endif
3059
3060 mce_init(cpu);
3061
3062 #ifndef CONFIG_USER_ONLY
3063 if (tcg_enabled()) {
3064 AddressSpace *newas = g_new(AddressSpace, 1);
3065
3066 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3067 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3068
3069 /* Outer container... */
3070 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3071 memory_region_set_enabled(cpu->cpu_as_root, true);
3072
3073 /* ... with two regions inside: normal system memory with low
3074 * priority, and...
3075 */
3076 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3077 get_system_memory(), 0, ~0ull);
3078 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3079 memory_region_set_enabled(cpu->cpu_as_mem, true);
3080 address_space_init(newas, cpu->cpu_as_root, "CPU");
3081 cs->num_ases = 1;
3082 cpu_address_space_init(cs, newas, 0);
3083
3084 /* ... SMRAM with higher priority, linked from /machine/smram. */
3085 cpu->machine_done.notify = x86_cpu_machine_done;
3086 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3087 }
3088 #endif
3089
3090 qemu_init_vcpu(cs);
3091
3092 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3093 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3094 * based on the inputs (sockets, cores, threads), it is still better to
3095 * give users a warning.
3096 *
3097 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3098 * cs->nr_threads hasn't been populated yet and the check is incorrect.
3099 */
3100 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3101 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3102 " -smp options properly.");
3103 ht_warned = true;
3104 }
3105
3106 x86_cpu_apic_realize(cpu, &local_err);
3107 if (local_err != NULL) {
3108 goto out;
3109 }
3110 cpu_reset(cs);
3111
3112 xcc->parent_realize(dev, &local_err);
3113
3114 out:
3115 if (local_err != NULL) {
3116 error_propagate(errp, local_err);
3117 return;
3118 }
3119 }
3120
3121 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3122 {
3123 X86CPU *cpu = X86_CPU(dev);
3124
3125 #ifndef CONFIG_USER_ONLY
3126 cpu_remove_sync(CPU(dev));
3127 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3128 #endif
3129
3130 if (cpu->apic_state) {
3131 object_unparent(OBJECT(cpu->apic_state));
3132 cpu->apic_state = NULL;
3133 }
3134 }
3135
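/* Opaque state for the per-feature-bit QOM properties registered below:
 * ptr points into a feature word and mask selects the bit(s) the property
 * covers.
 */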
3136 typedef struct BitProperty {
3137 uint32_t *ptr;
3138 uint32_t mask;
3139 } BitProperty;
3140
3141 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3142 void *opaque, Error **errp)
3143 {
3144 BitProperty *fp = opaque;
3145 bool value = (*fp->ptr & fp->mask) == fp->mask;
3146 visit_type_bool(v, name, &value, errp);
3147 }
3148
3149 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3150 void *opaque, Error **errp)
3151 {
3152 DeviceState *dev = DEVICE(obj);
3153 BitProperty *fp = opaque;
3154 Error *local_err = NULL;
3155 bool value;
3156
3157 if (dev->realized) {
3158 qdev_prop_set_after_realize(dev, name, errp);
3159 return;
3160 }
3161
3162 visit_type_bool(v, name, &value, &local_err);
3163 if (local_err) {
3164 error_propagate(errp, local_err);
3165 return;
3166 }
3167
3168 if (value) {
3169 *fp->ptr |= fp->mask;
3170 } else {
3171 *fp->ptr &= ~fp->mask;
3172 }
3173 }
3174
3175 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3176 void *opaque)
3177 {
3178 BitProperty *prop = opaque;
3179 g_free(prop);
3180 }
3181
3182 /* Register a boolean property to get/set a single bit in a uint32_t field.
3183 *
3184 * The same property name can be registered multiple times to make it affect
3185 * multiple bits in the same FeatureWord. In that case, the getter will return
3186 * true only if all bits are set.
3187 */
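/* For example (illustrative only), a property such as "sse4.2" registered
 * for FEAT_1_ECX bit 20 lets "-cpu ...,sse4.2=off" clear that bit through
 * the generic QOM property machinery.
 */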
3188 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3189 const char *prop_name,
3190 uint32_t *field,
3191 int bitnr)
3192 {
3193 BitProperty *fp;
3194 ObjectProperty *op;
3195 uint32_t mask = (1UL << bitnr);
3196
3197 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3198 if (op) {
3199 fp = op->opaque;
3200 assert(fp->ptr == field);
3201 fp->mask |= mask;
3202 } else {
3203 fp = g_new0(BitProperty, 1);
3204 fp->ptr = field;
3205 fp->mask = mask;
3206 object_property_add(OBJECT(cpu), prop_name, "bool",
3207 x86_cpu_get_bit_prop,
3208 x86_cpu_set_bit_prop,
3209 x86_cpu_release_bit_prop, fp, &error_abort);
3210 }
3211 }
3212
3213 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3214 FeatureWord w,
3215 int bitnr)
3216 {
3217 Object *obj = OBJECT(cpu);
3218 int i;
3219 char **names;
3220 FeatureWordInfo *fi = &feature_word_info[w];
3221
3222 if (!fi->feat_names) {
3223 return;
3224 }
3225 if (!fi->feat_names[bitnr]) {
3226 return;
3227 }
3228
3229 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3230
3231 feat2prop(names[0]);
3232 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3233
3234 for (i = 1; names[i]; i++) {
3235 feat2prop(names[i]);
3236 object_property_add_alias(obj, names[i], obj, names[0],
3237 &error_abort);
3238 }
3239
3240 g_strfreev(names);
3241 }
3242
3243 static void x86_cpu_initfn(Object *obj)
3244 {
3245 CPUState *cs = CPU(obj);
3246 X86CPU *cpu = X86_CPU(obj);
3247 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3248 CPUX86State *env = &cpu->env;
3249 FeatureWord w;
3250
3251 cs->env_ptr = env;
3252
3253 object_property_add(obj, "family", "int",
3254 x86_cpuid_version_get_family,
3255 x86_cpuid_version_set_family, NULL, NULL, NULL);
3256 object_property_add(obj, "model", "int",
3257 x86_cpuid_version_get_model,
3258 x86_cpuid_version_set_model, NULL, NULL, NULL);
3259 object_property_add(obj, "stepping", "int",
3260 x86_cpuid_version_get_stepping,
3261 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3262 object_property_add_str(obj, "vendor",
3263 x86_cpuid_get_vendor,
3264 x86_cpuid_set_vendor, NULL);
3265 object_property_add_str(obj, "model-id",
3266 x86_cpuid_get_model_id,
3267 x86_cpuid_set_model_id, NULL);
3268 object_property_add(obj, "tsc-frequency", "int",
3269 x86_cpuid_get_tsc_freq,
3270 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3271 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3272 x86_cpu_get_feature_words,
3273 NULL, NULL, (void *)env->features, NULL);
3274 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3275 x86_cpu_get_feature_words,
3276 NULL, NULL, (void *)cpu->filtered_features, NULL);
3277
3278 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3279
3280 for (w = 0; w < FEATURE_WORDS; w++) {
3281 int bitnr;
3282
3283 for (bitnr = 0; bitnr < 32; bitnr++) {
3284 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3285 }
3286 }
3287
3288 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3289 }
3290
3291 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3292 {
3293 X86CPU *cpu = X86_CPU(cs);
3294
3295 return cpu->apic_id;
3296 }
3297
3298 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3299 {
3300 X86CPU *cpu = X86_CPU(cs);
3301
3302 return cpu->env.cr[0] & CR0_PG_MASK;
3303 }
3304
3305 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3306 {
3307 X86CPU *cpu = X86_CPU(cs);
3308
3309 cpu->env.eip = value;
3310 }
3311
3312 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3313 {
3314 X86CPU *cpu = X86_CPU(cs);
3315
3316 cpu->env.eip = tb->pc - tb->cs_base;
3317 }
3318
3319 static bool x86_cpu_has_work(CPUState *cs)
3320 {
3321 X86CPU *cpu = X86_CPU(cs);
3322 CPUX86State *env = &cpu->env;
3323
3324 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3325 CPU_INTERRUPT_POLL)) &&
3326 (env->eflags & IF_MASK)) ||
3327 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3328 CPU_INTERRUPT_INIT |
3329 CPU_INTERRUPT_SIPI |
3330 CPU_INTERRUPT_MCE)) ||
3331 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3332 !(env->hflags & HF_SMM_MASK));
3333 }
3334
3335 static Property x86_cpu_properties[] = {
3336 #ifdef CONFIG_USER_ONLY
3337 /* apic_id = 0 by default for *-user, see commit 9886e834 */
3338 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
3339 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
3340 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
3341 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
3342 #else
3343 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
3344 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
3345 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
3346 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
3347 #endif
3348 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3349 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3350 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3351 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3352 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3353 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3354 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3355 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3356 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3357 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3358 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3359 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3360 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3361 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3362 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
3363 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
3364 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
3365 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, 0),
3366 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, 0),
3367 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, 0),
3368 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3369 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
3370 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
3371 DEFINE_PROP_END_OF_LIST()
3372 };
3373
3374 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3375 {
3376 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3377 CPUClass *cc = CPU_CLASS(oc);
3378 DeviceClass *dc = DEVICE_CLASS(oc);
3379
3380 xcc->parent_realize = dc->realize;
3381 dc->realize = x86_cpu_realizefn;
3382 dc->unrealize = x86_cpu_unrealizefn;
3383 dc->props = x86_cpu_properties;
3384
3385 xcc->parent_reset = cc->reset;
3386 cc->reset = x86_cpu_reset;
3387 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3388
3389 cc->class_by_name = x86_cpu_class_by_name;
3390 cc->parse_features = x86_cpu_parse_featurestr;
3391 cc->has_work = x86_cpu_has_work;
3392 cc->do_interrupt = x86_cpu_do_interrupt;
3393 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3394 cc->dump_state = x86_cpu_dump_state;
3395 cc->set_pc = x86_cpu_set_pc;
3396 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3397 cc->gdb_read_register = x86_cpu_gdb_read_register;
3398 cc->gdb_write_register = x86_cpu_gdb_write_register;
3399 cc->get_arch_id = x86_cpu_get_arch_id;
3400 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3401 #ifdef CONFIG_USER_ONLY
3402 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3403 #else
3404 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3405 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3406 cc->write_elf64_note = x86_cpu_write_elf64_note;
3407 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3408 cc->write_elf32_note = x86_cpu_write_elf32_note;
3409 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3410 cc->vmsd = &vmstate_x86_cpu;
3411 #endif
3412 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3413 #ifndef CONFIG_USER_ONLY
3414 cc->debug_excp_handler = breakpoint_handler;
3415 #endif
3416 cc->cpu_exec_enter = x86_cpu_exec_enter;
3417 cc->cpu_exec_exit = x86_cpu_exec_exit;
3418
3419 dc->cannot_instantiate_with_device_add_yet = false;
3420 /*
3421 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3422 * object in cpus -> dangling pointer after final object_unref().
3423 */
3424 dc->cannot_destroy_with_object_finalize_yet = true;
3425 }
3426
3427 static const TypeInfo x86_cpu_type_info = {
3428 .name = TYPE_X86_CPU,
3429 .parent = TYPE_CPU,
3430 .instance_size = sizeof(X86CPU),
3431 .instance_init = x86_cpu_initfn,
3432 .abstract = true,
3433 .class_size = sizeof(X86CPUClass),
3434 .class_init = x86_cpu_common_class_init,
3435 };
3436
3437 static void x86_cpu_register_types(void)
3438 {
3439 int i;
3440
3441 type_register_static(&x86_cpu_type_info);
3442 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3443 x86_register_cpudef_type(&builtin_x86_defs[i]);
3444 }
3445 #ifdef CONFIG_KVM
3446 type_register_static(&host_x86_cpu_type_info);
3447 #endif
3448 }
3449
3450 type_init(x86_cpu_register_types)