1/*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19#include "qemu/osdep.h"
20#include "qemu/cutils.h"
21
22#include "cpu.h"
23#include "exec/exec-all.h"
24#include "sysemu/kvm.h"
25#include "sysemu/cpus.h"
26#include "kvm_i386.h"
27
28#include "qemu/error-report.h"
29#include "qemu/option.h"
30#include "qemu/config-file.h"
31#include "qapi/qmp/qerror.h"
32
33#include "qapi-types.h"
34#include "qapi-visit.h"
35#include "qapi/visitor.h"
36#include "sysemu/arch_init.h"
37
38#if defined(CONFIG_KVM)
39#include <linux/kvm_para.h>
40#endif
41
42#include "sysemu/sysemu.h"
43#include "hw/qdev-properties.h"
44#include "hw/i386/topology.h"
45#ifndef CONFIG_USER_ONLY
46#include "exec/address-spaces.h"
47#include "hw/hw.h"
48#include "hw/xen/xen.h"
49#include "hw/i386/apic_internal.h"
50#endif
51
52
53/* Cache topology CPUID constants: */
54
55/* CPUID Leaf 2 Descriptors */
56
57#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58#define CPUID_2_L1I_32KB_8WAY_64B 0x30
59#define CPUID_2_L2_2MB_8WAY_64B 0x7d
60#define CPUID_2_L3_16MB_16WAY_64B 0x4d
61
62
63/* CPUID Leaf 4 constants: */
64
65/* EAX: */
66#define CPUID_4_TYPE_DCACHE 1
67#define CPUID_4_TYPE_ICACHE 2
68#define CPUID_4_TYPE_UNIFIED 3
69
70#define CPUID_4_LEVEL(l) ((l) << 5)
71
72#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
73#define CPUID_4_FULLY_ASSOC (1 << 9)
74
75/* EDX: */
76#define CPUID_4_NO_INVD_SHARING (1 << 0)
77#define CPUID_4_INCLUSIVE (1 << 1)
78#define CPUID_4_COMPLEX_IDX (1 << 2)
79
80#define ASSOC_FULL 0xFF
81
82/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
83#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
84 a == 2 ? 0x2 : \
85 a == 4 ? 0x4 : \
86 a == 8 ? 0x6 : \
87 a == 16 ? 0x8 : \
88 a == 32 ? 0xA : \
89 a == 48 ? 0xB : \
90 a == 64 ? 0xC : \
91 a == 96 ? 0xD : \
92 a == 128 ? 0xE : \
93 a == ASSOC_FULL ? 0xF : \
94 0 /* invalid value */)
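/* For example, AMD_ENC_ASSOC(8) evaluates to 0x6 and
 * AMD_ENC_ASSOC(ASSOC_FULL) to 0xF; unlisted values encode as 0 (invalid).
 */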
95
96
97/* Definitions of the hardcoded cache entries we expose: */
98
99/* L1 data cache: */
100#define L1D_LINE_SIZE 64
101#define L1D_ASSOCIATIVITY 8
102#define L1D_SETS 64
103#define L1D_PARTITIONS 1
104/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
105#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
106/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
107#define L1D_LINES_PER_TAG 1
108#define L1D_SIZE_KB_AMD 64
109#define L1D_ASSOCIATIVITY_AMD 2
110
111/* L1 instruction cache: */
112#define L1I_LINE_SIZE 64
113#define L1I_ASSOCIATIVITY 8
114#define L1I_SETS 64
115#define L1I_PARTITIONS 1
116/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
117#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
118/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
119#define L1I_LINES_PER_TAG 1
120#define L1I_SIZE_KB_AMD 64
121#define L1I_ASSOCIATIVITY_AMD 2
122
123/* Level 2 unified cache: */
124#define L2_LINE_SIZE 64
125#define L2_ASSOCIATIVITY 16
126#define L2_SETS 4096
127#define L2_PARTITIONS 1
128/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
129/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
130#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
131/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
132#define L2_LINES_PER_TAG 1
133#define L2_SIZE_KB_AMD 512
134
135/* Level 3 unified cache: */
136#define L3_SIZE_KB 0 /* disabled */
137#define L3_ASSOCIATIVITY 0 /* disabled */
138#define L3_LINES_PER_TAG 0 /* disabled */
139#define L3_LINE_SIZE 0 /* disabled */
140#define L3_N_LINE_SIZE 64
141#define L3_N_ASSOCIATIVITY 16
142#define L3_N_SETS 16384
143#define L3_N_PARTITIONS 1
144#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
145#define L3_N_LINES_PER_TAG 1
146#define L3_N_SIZE_KB_AMD 16384
147
148/* TLB definitions: */
149
150#define L1_DTLB_2M_ASSOC 1
151#define L1_DTLB_2M_ENTRIES 255
152#define L1_DTLB_4K_ASSOC 1
153#define L1_DTLB_4K_ENTRIES 255
154
155#define L1_ITLB_2M_ASSOC 1
156#define L1_ITLB_2M_ENTRIES 255
157#define L1_ITLB_4K_ASSOC 1
158#define L1_ITLB_4K_ENTRIES 255
159
160#define L2_DTLB_2M_ASSOC 0 /* disabled */
161#define L2_DTLB_2M_ENTRIES 0 /* disabled */
162#define L2_DTLB_4K_ASSOC 4
163#define L2_DTLB_4K_ENTRIES 512
164
165#define L2_ITLB_2M_ASSOC 0 /* disabled */
166#define L2_ITLB_2M_ENTRIES 0 /* disabled */
167#define L2_ITLB_4K_ASSOC 4
168#define L2_ITLB_4K_ENTRIES 512
169
170
171
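/* Build the 12-character, NUL-terminated vendor string from the three CPUID
 * vendor register words (EBX, EDX, ECX, as returned by leaf 0).
 */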
172static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
174{
175 int i;
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
180 }
181 dst[CPUID_VENDOR_SZ] = '\0';
182}
183
184/* feature flags taken from "Intel Processor Identification and the CPUID
185 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
186 * between feature naming conventions, aliases may be added.
187 */
188static const char *feature_name[] = {
189 "fpu", "vme", "de", "pse",
190 "tsc", "msr", "pae", "mce",
191 "cx8", "apic", NULL, "sep",
192 "mtrr", "pge", "mca", "cmov",
193 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
194 NULL, "ds" /* Intel dts */, "acpi", "mmx",
195 "fxsr", "sse", "sse2", "ss",
196 "ht" /* Intel htt */, "tm", "ia64", "pbe",
197};
198static const char *ext_feature_name[] = {
199 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
200 "ds_cpl", "vmx", "smx", "est",
201 "tm2", "ssse3", "cid", NULL,
202 "fma", "cx16", "xtpr", "pdcm",
203 NULL, "pcid", "dca", "sse4.1|sse4_1",
204 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
205 "tsc-deadline", "aes", "xsave", "osxsave",
206 "avx", "f16c", "rdrand", "hypervisor",
207};
208/* Feature names that are already defined in feature_name[] but are set in
209 * CPUID[8000_0001].EDX on AMD CPUs don't have their names in
210 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
211 * if and only if the CPU vendor is AMD.
212 */
213static const char *ext2_feature_name[] = {
214 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
215 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
216 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
217 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
218 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
219 "nx|xd", NULL, "mmxext", NULL /* mmx */,
220 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
221 NULL, "lm|i64", "3dnowext", "3dnow",
222};
223static const char *ext3_feature_name[] = {
224 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
225 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
226 "3dnowprefetch", "osvw", "ibs", "xop",
227 "skinit", "wdt", NULL, "lwp",
228 "fma4", "tce", NULL, "nodeid_msr",
229 NULL, "tbm", "topoext", "perfctr_core",
230 "perfctr_nb", NULL, NULL, NULL,
231 NULL, NULL, NULL, NULL,
232};
233
234static const char *ext4_feature_name[] = {
235 NULL, NULL, "xstore", "xstore-en",
236 NULL, NULL, "xcrypt", "xcrypt-en",
237 "ace2", "ace2-en", "phe", "phe-en",
238 "pmm", "pmm-en", NULL, NULL,
239 NULL, NULL, NULL, NULL,
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243};
244
245static const char *kvm_feature_name[] = {
246 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
247 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
248 NULL, NULL, NULL, NULL,
249 NULL, NULL, NULL, NULL,
250 NULL, NULL, NULL, NULL,
251 NULL, NULL, NULL, NULL,
252 "kvmclock-stable-bit", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254};
255
256static const char *hyperv_priv_feature_name[] = {
257 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
258 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
259 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
260 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
261 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
262 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
263 NULL, NULL, NULL, NULL,
264 NULL, NULL, NULL, NULL,
265 NULL, NULL, NULL, NULL,
266 NULL, NULL, NULL, NULL,
267 NULL, NULL, NULL, NULL,
268};
269
270static const char *hyperv_ident_feature_name[] = {
271 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
272 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
273 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
274 NULL /* hv_create_port */, NULL /* hv_connect_port */,
275 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
276 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
277 NULL, NULL,
278 NULL, NULL, NULL, NULL,
279 NULL, NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282};
283
284static const char *hyperv_misc_feature_name[] = {
285 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
286 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
287 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
288 NULL, NULL,
289 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
290 NULL, NULL, NULL, NULL,
291 NULL, NULL, NULL, NULL,
292 NULL, NULL, NULL, NULL,
293 NULL, NULL, NULL, NULL,
294 NULL, NULL, NULL, NULL,
295};
296
297static const char *svm_feature_name[] = {
298 "npt", "lbrv", "svm_lock", "nrip_save",
299 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
300 NULL, NULL, "pause_filter", NULL,
301 "pfthreshold", NULL, NULL, NULL,
302 NULL, NULL, NULL, NULL,
303 NULL, NULL, NULL, NULL,
304 NULL, NULL, NULL, NULL,
305 NULL, NULL, NULL, NULL,
306};
307
308static const char *cpuid_7_0_ebx_feature_name[] = {
309 "fsgsbase", "tsc_adjust", NULL, "bmi1",
310 "hle", "avx2", NULL, "smep",
311 "bmi2", "erms", "invpcid", "rtm",
312 NULL, NULL, "mpx", NULL,
313 "avx512f", "avx512dq", "rdseed", "adx",
314 "smap", "avx512ifma", "pcommit", "clflushopt",
315 "clwb", NULL, "avx512pf", "avx512er",
316 "avx512cd", NULL, "avx512bw", "avx512vl",
317};
318
319static const char *cpuid_7_0_ecx_feature_name[] = {
320 NULL, "avx512vbmi", "umip", "pku",
321 "ospke", NULL, NULL, NULL,
322 NULL, NULL, NULL, NULL,
323 NULL, NULL, NULL, NULL,
324 NULL, NULL, NULL, NULL,
325 NULL, NULL, "rdpid", NULL,
326 NULL, NULL, NULL, NULL,
327 NULL, NULL, NULL, NULL,
328};
329
330static const char *cpuid_apm_edx_feature_name[] = {
331 NULL, NULL, NULL, NULL,
332 NULL, NULL, NULL, NULL,
333 "invtsc", NULL, NULL, NULL,
334 NULL, NULL, NULL, NULL,
335 NULL, NULL, NULL, NULL,
336 NULL, NULL, NULL, NULL,
337 NULL, NULL, NULL, NULL,
338 NULL, NULL, NULL, NULL,
339};
340
341static const char *cpuid_xsave_feature_name[] = {
342 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
343 NULL, NULL, NULL, NULL,
344 NULL, NULL, NULL, NULL,
345 NULL, NULL, NULL, NULL,
346 NULL, NULL, NULL, NULL,
347 NULL, NULL, NULL, NULL,
348 NULL, NULL, NULL, NULL,
349 NULL, NULL, NULL, NULL,
350};
351
352static const char *cpuid_6_feature_name[] = {
353 NULL, NULL, "arat", NULL,
354 NULL, NULL, NULL, NULL,
355 NULL, NULL, NULL, NULL,
356 NULL, NULL, NULL, NULL,
357 NULL, NULL, NULL, NULL,
358 NULL, NULL, NULL, NULL,
359 NULL, NULL, NULL, NULL,
360 NULL, NULL, NULL, NULL,
361};
362
363#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
364#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
365 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
366#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
367 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
368 CPUID_PSE36 | CPUID_FXSR)
369#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
370#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
371 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
372 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
373 CPUID_PAE | CPUID_SEP | CPUID_APIC)
374
375#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
376 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
377 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
378 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
379 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
380 /* partly implemented:
381 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
382 /* missing:
383 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
384#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
385 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
386 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
387 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
388 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
389 /* missing:
390 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
391 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
392 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
393 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
394 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
395
396#ifdef TARGET_X86_64
397#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
398#else
399#define TCG_EXT2_X86_64_FEATURES 0
400#endif
401
402#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
403 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
404 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
405 TCG_EXT2_X86_64_FEATURES)
406#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
407 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
408#define TCG_EXT4_FEATURES 0
409#define TCG_SVM_FEATURES 0
410#define TCG_KVM_FEATURES 0
411#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
412 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
413 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
414 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
415 CPUID_7_0_EBX_ERMS)
416 /* missing:
417 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
418 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
419 CPUID_7_0_EBX_RDSEED */
420#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
421#define TCG_APM_FEATURES 0
422#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
423#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
424 /* missing:
425 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
426
427typedef struct FeatureWordInfo {
428 const char **feat_names;
429 uint32_t cpuid_eax; /* Input EAX for CPUID */
430 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
431 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
432 int cpuid_reg; /* output register (R_* constant) */
433 uint32_t tcg_features; /* Feature flags supported by TCG */
434 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
435} FeatureWordInfo;
436
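/* Table describing every CPUID feature word known to QEMU, indexed by
 * FeatureWord: the CPUID leaf/register that holds it, its bit names,
 * and the subset of bits TCG can emulate.
 */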
437static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
438 [FEAT_1_EDX] = {
439 .feat_names = feature_name,
440 .cpuid_eax = 1, .cpuid_reg = R_EDX,
441 .tcg_features = TCG_FEATURES,
442 },
443 [FEAT_1_ECX] = {
444 .feat_names = ext_feature_name,
445 .cpuid_eax = 1, .cpuid_reg = R_ECX,
446 .tcg_features = TCG_EXT_FEATURES,
447 },
448 [FEAT_8000_0001_EDX] = {
449 .feat_names = ext2_feature_name,
450 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
451 .tcg_features = TCG_EXT2_FEATURES,
452 },
453 [FEAT_8000_0001_ECX] = {
454 .feat_names = ext3_feature_name,
455 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
456 .tcg_features = TCG_EXT3_FEATURES,
457 },
458 [FEAT_C000_0001_EDX] = {
459 .feat_names = ext4_feature_name,
460 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
461 .tcg_features = TCG_EXT4_FEATURES,
462 },
463 [FEAT_KVM] = {
464 .feat_names = kvm_feature_name,
465 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
466 .tcg_features = TCG_KVM_FEATURES,
467 },
468 [FEAT_HYPERV_EAX] = {
469 .feat_names = hyperv_priv_feature_name,
470 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
471 },
472 [FEAT_HYPERV_EBX] = {
473 .feat_names = hyperv_ident_feature_name,
474 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
475 },
476 [FEAT_HYPERV_EDX] = {
477 .feat_names = hyperv_misc_feature_name,
478 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
479 },
480 [FEAT_SVM] = {
481 .feat_names = svm_feature_name,
482 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
483 .tcg_features = TCG_SVM_FEATURES,
484 },
485 [FEAT_7_0_EBX] = {
486 .feat_names = cpuid_7_0_ebx_feature_name,
487 .cpuid_eax = 7,
488 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
489 .cpuid_reg = R_EBX,
490 .tcg_features = TCG_7_0_EBX_FEATURES,
491 },
492 [FEAT_7_0_ECX] = {
493 .feat_names = cpuid_7_0_ecx_feature_name,
494 .cpuid_eax = 7,
495 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
496 .cpuid_reg = R_ECX,
497 .tcg_features = TCG_7_0_ECX_FEATURES,
498 },
499 [FEAT_8000_0007_EDX] = {
500 .feat_names = cpuid_apm_edx_feature_name,
501 .cpuid_eax = 0x80000007,
502 .cpuid_reg = R_EDX,
503 .tcg_features = TCG_APM_FEATURES,
504 .unmigratable_flags = CPUID_APM_INVTSC,
505 },
506 [FEAT_XSAVE] = {
507 .feat_names = cpuid_xsave_feature_name,
508 .cpuid_eax = 0xd,
509 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
510 .cpuid_reg = R_EAX,
511 .tcg_features = TCG_XSAVE_FEATURES,
512 },
513 [FEAT_6_EAX] = {
514 .feat_names = cpuid_6_feature_name,
515 .cpuid_eax = 6, .cpuid_reg = R_EAX,
516 .tcg_features = TCG_6_EAX_FEATURES,
517 },
518};
519
520typedef struct X86RegisterInfo32 {
521 /* Name of register */
522 const char *name;
523 /* QAPI enum value register */
524 X86CPURegister32 qapi_enum;
525} X86RegisterInfo32;
526
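/* Mapping from R_* register indexes to register names and the
 * corresponding QAPI X86CPURegister32 enum values:
 */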
527#define REGISTER(reg) \
528 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
529static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
530 REGISTER(EAX),
531 REGISTER(ECX),
532 REGISTER(EDX),
533 REGISTER(EBX),
534 REGISTER(ESP),
535 REGISTER(EBP),
536 REGISTER(ESI),
537 REGISTER(EDI),
538};
539#undef REGISTER
540
541typedef struct ExtSaveArea {
542 uint32_t feature, bits;
543 uint32_t offset, size;
544} ExtSaveArea;
545
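/* XSAVE state components beyond the legacy FPU/SSE area, indexed by
 * XSTATE_*_BIT: the feature flag that enables each component and its
 * offset/size within X86XSaveArea.
 */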
546static const ExtSaveArea x86_ext_save_areas[] = {
547 [XSTATE_YMM_BIT] =
548 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
549 .offset = offsetof(X86XSaveArea, avx_state),
550 .size = sizeof(XSaveAVX) },
551 [XSTATE_BNDREGS_BIT] =
552 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
553 .offset = offsetof(X86XSaveArea, bndreg_state),
554 .size = sizeof(XSaveBNDREG) },
555 [XSTATE_BNDCSR_BIT] =
556 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
557 .offset = offsetof(X86XSaveArea, bndcsr_state),
558 .size = sizeof(XSaveBNDCSR) },
559 [XSTATE_OPMASK_BIT] =
560 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
561 .offset = offsetof(X86XSaveArea, opmask_state),
562 .size = sizeof(XSaveOpmask) },
563 [XSTATE_ZMM_Hi256_BIT] =
564 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
565 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
566 .size = sizeof(XSaveZMM_Hi256) },
567 [XSTATE_Hi16_ZMM_BIT] =
568 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
569 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
570 .size = sizeof(XSaveHi16_ZMM) },
571 [XSTATE_PKRU_BIT] =
572 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
573 .offset = offsetof(X86XSaveArea, pkru_state),
574 .size = sizeof(XSavePKRU) },
575};
576
577const char *get_register_name_32(unsigned int reg)
578{
579 if (reg >= CPU_NB_REGS32) {
580 return NULL;
581 }
582 return x86_reg_info_32[reg].name;
583}
584
585/*
586 * Returns the set of feature flags that are supported and migratable by
587 * QEMU, for a given FeatureWord.
588 */
589static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
590{
591 FeatureWordInfo *wi = &feature_word_info[w];
592 uint32_t r = 0;
593 int i;
594
595 for (i = 0; i < 32; i++) {
596 uint32_t f = 1U << i;
597 /* If the feature name is unknown, it is not supported by QEMU yet */
598 if (!wi->feat_names[i]) {
599 continue;
600 }
601 /* Skip features known to QEMU, but explicitly marked as unmigratable */
602 if (wi->unmigratable_flags & f) {
603 continue;
604 }
605 r |= f;
606 }
607 return r;
608}
609
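/* Execute the CPUID instruction on the host for the given leaf (function)
 * and subleaf (count). Any of the output pointers may be NULL.
 */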
610void host_cpuid(uint32_t function, uint32_t count,
611 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
612{
613 uint32_t vec[4];
614
615#ifdef __x86_64__
616 asm volatile("cpuid"
617 : "=a"(vec[0]), "=b"(vec[1]),
618 "=c"(vec[2]), "=d"(vec[3])
619 : "0"(function), "c"(count) : "cc");
620#elif defined(__i386__)
621 asm volatile("pusha \n\t"
622 "cpuid \n\t"
623 "mov %%eax, 0(%2) \n\t"
624 "mov %%ebx, 4(%2) \n\t"
625 "mov %%ecx, 8(%2) \n\t"
626 "mov %%edx, 12(%2) \n\t"
627 "popa"
628 : : "a"(function), "c"(count), "S"(vec)
629 : "memory", "cc");
630#else
631 abort();
632#endif
633
634 if (eax)
635 *eax = vec[0];
636 if (ebx)
637 *ebx = vec[1];
638 if (ecx)
639 *ecx = vec[2];
640 if (edx)
641 *edx = vec[3];
642}
643
644#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
645
646/* General substring compare of *[s1..e1) and *[s2..e2). sx is the start of
647 * a substring. ex, if not NULL, points to the first char after the substring;
648 * otherwise the string is assumed to be sized by a terminating nul.
649 * Return the lexical ordering of *s1:*s2.
650 */
651static int sstrcmp(const char *s1, const char *e1,
652 const char *s2, const char *e2)
653{
654 for (;;) {
655 if (!*s1 || !*s2 || *s1 != *s2)
656 return (*s1 - *s2);
657 ++s1, ++s2;
658 if (s1 == e1 && s2 == e2)
659 return (0);
660 else if (s1 == e1)
661 return (*s2);
662 else if (s2 == e2)
663 return (*s1);
664 }
665}
666
667/* Compare *[s..e) to *altstr. *altstr may be a simple string or multiple
668 * '|'-delimited (possibly empty) strings, in which case the search for a
669 * match within the alternatives proceeds left to right. Return 0 for success,
670 * non-zero otherwise.
671 */
672static int altcmp(const char *s, const char *e, const char *altstr)
673{
674 const char *p, *q;
675
676 for (q = p = altstr; ; ) {
677 while (*p && *p != '|')
678 ++p;
679 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
680 return (0);
681 if (!*p)
682 return (1);
683 else
684 q = ++p;
685 }
686}
687
688/* Search featureset for the flag *[s..e); if found, set the corresponding
689 * bit in *pval and return true, otherwise return false.
690 */
691static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
692 const char **featureset)
693{
694 uint32_t mask;
695 const char **ppc;
696 bool found = false;
697
698 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
699 if (*ppc && !altcmp(s, e, *ppc)) {
700 *pval |= mask;
701 found = true;
702 }
703 }
704 return found;
705}
706
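/* Set the bit for feature flag @flagname in the matching word of @words;
 * sets @errp if the flag is not known in any feature word.
 */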
707static void add_flagname_to_bitmaps(const char *flagname,
708 FeatureWordArray words,
709 Error **errp)
710{
711 FeatureWord w;
712 for (w = 0; w < FEATURE_WORDS; w++) {
713 FeatureWordInfo *wi = &feature_word_info[w];
714 if (wi->feat_names &&
715 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
716 break;
717 }
718 }
719 if (w == FEATURE_WORDS) {
720 error_setg(errp, "CPU feature %s not found", flagname);
721 }
722}
723
724/* CPU class name definitions: */
725
726#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
727#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
728
729/* Return the type name for a given CPU model name.
730 * Caller is responsible for freeing the returned string.
731 */
732static char *x86_cpu_type_name(const char *model_name)
733{
734 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
735}
736
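/* Look up the CPU class registered for model name @cpu_model.
 * Returns NULL if the model is unknown.
 */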
737static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
738{
739 ObjectClass *oc;
740 char *typename;
741
742 if (cpu_model == NULL) {
743 return NULL;
744 }
745
746 typename = x86_cpu_type_name(cpu_model);
747 oc = object_class_by_name(typename);
748 g_free(typename);
749 return oc;
750}
751
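/* Return the CPU model name for @cc, i.e. the class name with the
 * X86_CPU_TYPE_SUFFIX stripped. Caller must g_free() the result.
 */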
752static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
753{
754 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
755 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
756 return g_strndup(class_name,
757 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
758}
759
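/* Definition of a built-in CPU model: CPUID vendor/family/model/stepping,
 * CPUID levels, and the default feature-word bits.
 */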
760struct X86CPUDefinition {
761 const char *name;
762 uint32_t level;
763 uint32_t xlevel;
764 /* vendor is zero-terminated, 12 character ASCII string */
765 char vendor[CPUID_VENDOR_SZ + 1];
766 int family;
767 int model;
768 int stepping;
769 FeatureWordArray features;
770 char model_id[48];
771};
772
773static X86CPUDefinition builtin_x86_defs[] = {
774 {
775 .name = "qemu64",
776 .level = 0xd,
777 .vendor = CPUID_VENDOR_AMD,
778 .family = 6,
779 .model = 6,
780 .stepping = 3,
781 .features[FEAT_1_EDX] =
782 PPRO_FEATURES |
783 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
784 CPUID_PSE36,
785 .features[FEAT_1_ECX] =
786 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
787 .features[FEAT_8000_0001_EDX] =
788 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
789 .features[FEAT_8000_0001_ECX] =
790 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
791 .xlevel = 0x8000000A,
792 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
793 },
794 {
795 .name = "phenom",
796 .level = 5,
797 .vendor = CPUID_VENDOR_AMD,
798 .family = 16,
799 .model = 2,
800 .stepping = 3,
801 /* Missing: CPUID_HT */
802 .features[FEAT_1_EDX] =
803 PPRO_FEATURES |
804 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
805 CPUID_PSE36 | CPUID_VME,
806 .features[FEAT_1_ECX] =
807 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
808 CPUID_EXT_POPCNT,
809 .features[FEAT_8000_0001_EDX] =
810 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
811 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
812 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
813 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
814 CPUID_EXT3_CR8LEG,
815 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
816 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
817 .features[FEAT_8000_0001_ECX] =
818 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
819 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
820 /* Missing: CPUID_SVM_LBRV */
821 .features[FEAT_SVM] =
822 CPUID_SVM_NPT,
823 .xlevel = 0x8000001A,
824 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
825 },
826 {
827 .name = "core2duo",
828 .level = 10,
829 .vendor = CPUID_VENDOR_INTEL,
830 .family = 6,
831 .model = 15,
832 .stepping = 11,
833 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
834 .features[FEAT_1_EDX] =
835 PPRO_FEATURES |
836 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
837 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
838 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
839 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
840 .features[FEAT_1_ECX] =
841 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
842 CPUID_EXT_CX16,
843 .features[FEAT_8000_0001_EDX] =
844 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
845 .features[FEAT_8000_0001_ECX] =
846 CPUID_EXT3_LAHF_LM,
847 .xlevel = 0x80000008,
848 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
849 },
850 {
851 .name = "kvm64",
852 .level = 0xd,
853 .vendor = CPUID_VENDOR_INTEL,
854 .family = 15,
855 .model = 6,
856 .stepping = 1,
857 /* Missing: CPUID_HT */
858 .features[FEAT_1_EDX] =
859 PPRO_FEATURES | CPUID_VME |
860 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
861 CPUID_PSE36,
862 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
863 .features[FEAT_1_ECX] =
864 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
865 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
866 .features[FEAT_8000_0001_EDX] =
867 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
868 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
869 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
870 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
871 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
872 .features[FEAT_8000_0001_ECX] =
873 0,
874 .xlevel = 0x80000008,
875 .model_id = "Common KVM processor"
876 },
877 {
878 .name = "qemu32",
879 .level = 4,
880 .vendor = CPUID_VENDOR_INTEL,
881 .family = 6,
882 .model = 6,
883 .stepping = 3,
884 .features[FEAT_1_EDX] =
885 PPRO_FEATURES,
886 .features[FEAT_1_ECX] =
887 CPUID_EXT_SSE3,
888 .xlevel = 0x80000004,
889 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
890 },
891 {
892 .name = "kvm32",
893 .level = 5,
894 .vendor = CPUID_VENDOR_INTEL,
895 .family = 15,
896 .model = 6,
897 .stepping = 1,
898 .features[FEAT_1_EDX] =
899 PPRO_FEATURES | CPUID_VME |
900 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
901 .features[FEAT_1_ECX] =
902 CPUID_EXT_SSE3,
903 .features[FEAT_8000_0001_ECX] =
904 0,
905 .xlevel = 0x80000008,
906 .model_id = "Common 32-bit KVM processor"
907 },
908 {
909 .name = "coreduo",
910 .level = 10,
911 .vendor = CPUID_VENDOR_INTEL,
912 .family = 6,
913 .model = 14,
914 .stepping = 8,
915 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
916 .features[FEAT_1_EDX] =
917 PPRO_FEATURES | CPUID_VME |
918 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
919 CPUID_SS,
920 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
921 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
922 .features[FEAT_1_ECX] =
923 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
924 .features[FEAT_8000_0001_EDX] =
925 CPUID_EXT2_NX,
926 .xlevel = 0x80000008,
927 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
928 },
929 {
930 .name = "486",
931 .level = 1,
932 .vendor = CPUID_VENDOR_INTEL,
933 .family = 4,
934 .model = 8,
935 .stepping = 0,
936 .features[FEAT_1_EDX] =
937 I486_FEATURES,
938 .xlevel = 0,
939 },
940 {
941 .name = "pentium",
942 .level = 1,
943 .vendor = CPUID_VENDOR_INTEL,
944 .family = 5,
945 .model = 4,
946 .stepping = 3,
947 .features[FEAT_1_EDX] =
948 PENTIUM_FEATURES,
949 .xlevel = 0,
950 },
951 {
952 .name = "pentium2",
953 .level = 2,
954 .vendor = CPUID_VENDOR_INTEL,
955 .family = 6,
956 .model = 5,
957 .stepping = 2,
958 .features[FEAT_1_EDX] =
959 PENTIUM2_FEATURES,
960 .xlevel = 0,
961 },
962 {
963 .name = "pentium3",
964 .level = 3,
965 .vendor = CPUID_VENDOR_INTEL,
966 .family = 6,
967 .model = 7,
968 .stepping = 3,
969 .features[FEAT_1_EDX] =
970 PENTIUM3_FEATURES,
971 .xlevel = 0,
972 },
973 {
974 .name = "athlon",
975 .level = 2,
976 .vendor = CPUID_VENDOR_AMD,
977 .family = 6,
978 .model = 2,
979 .stepping = 3,
980 .features[FEAT_1_EDX] =
981 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
982 CPUID_MCA,
983 .features[FEAT_8000_0001_EDX] =
984 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
985 .xlevel = 0x80000008,
986 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
987 },
988 {
989 .name = "n270",
990 .level = 10,
991 .vendor = CPUID_VENDOR_INTEL,
992 .family = 6,
993 .model = 28,
994 .stepping = 2,
995 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
996 .features[FEAT_1_EDX] =
997 PPRO_FEATURES |
998 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
999 CPUID_ACPI | CPUID_SS,
1000 /* Some CPUs lack CPUID_SEP */
1001 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1002 * CPUID_EXT_XTPR */
1003 .features[FEAT_1_ECX] =
1004 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1005 CPUID_EXT_MOVBE,
1006 .features[FEAT_8000_0001_EDX] =
1007 CPUID_EXT2_NX,
1008 .features[FEAT_8000_0001_ECX] =
1009 CPUID_EXT3_LAHF_LM,
1010 .xlevel = 0x80000008,
1011 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1012 },
1013 {
1014 .name = "Conroe",
1015 .level = 10,
1016 .vendor = CPUID_VENDOR_INTEL,
1017 .family = 6,
1018 .model = 15,
1019 .stepping = 3,
1020 .features[FEAT_1_EDX] =
1021 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1022 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1023 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1024 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1025 CPUID_DE | CPUID_FP87,
1026 .features[FEAT_1_ECX] =
1027 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1028 .features[FEAT_8000_0001_EDX] =
1029 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1030 .features[FEAT_8000_0001_ECX] =
1031 CPUID_EXT3_LAHF_LM,
1032 .xlevel = 0x80000008,
1033 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1034 },
1035 {
1036 .name = "Penryn",
1037 .level = 10,
1038 .vendor = CPUID_VENDOR_INTEL,
1039 .family = 6,
1040 .model = 23,
1041 .stepping = 3,
1042 .features[FEAT_1_EDX] =
1043 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1044 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1045 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1046 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1047 CPUID_DE | CPUID_FP87,
1048 .features[FEAT_1_ECX] =
1049 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1050 CPUID_EXT_SSE3,
1051 .features[FEAT_8000_0001_EDX] =
1052 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1053 .features[FEAT_8000_0001_ECX] =
1054 CPUID_EXT3_LAHF_LM,
1055 .xlevel = 0x80000008,
1056 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1057 },
1058 {
1059 .name = "Nehalem",
1060 .level = 11,
1061 .vendor = CPUID_VENDOR_INTEL,
1062 .family = 6,
1063 .model = 26,
1064 .stepping = 3,
1065 .features[FEAT_1_EDX] =
1066 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1067 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1068 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1069 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1070 CPUID_DE | CPUID_FP87,
1071 .features[FEAT_1_ECX] =
1072 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1073 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1074 .features[FEAT_8000_0001_EDX] =
1075 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1076 .features[FEAT_8000_0001_ECX] =
1077 CPUID_EXT3_LAHF_LM,
1078 .xlevel = 0x80000008,
1079 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1080 },
1081 {
1082 .name = "Westmere",
1083 .level = 11,
1084 .vendor = CPUID_VENDOR_INTEL,
1085 .family = 6,
1086 .model = 44,
1087 .stepping = 1,
1088 .features[FEAT_1_EDX] =
1089 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1090 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1091 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1092 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1093 CPUID_DE | CPUID_FP87,
1094 .features[FEAT_1_ECX] =
1095 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1096 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1097 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1098 .features[FEAT_8000_0001_EDX] =
1099 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1100 .features[FEAT_8000_0001_ECX] =
1101 CPUID_EXT3_LAHF_LM,
1102 .features[FEAT_6_EAX] =
1103 CPUID_6_EAX_ARAT,
1104 .xlevel = 0x80000008,
1105 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1106 },
1107 {
1108 .name = "SandyBridge",
1109 .level = 0xd,
1110 .vendor = CPUID_VENDOR_INTEL,
1111 .family = 6,
1112 .model = 42,
1113 .stepping = 1,
1114 .features[FEAT_1_EDX] =
1115 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1116 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1117 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1118 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1119 CPUID_DE | CPUID_FP87,
1120 .features[FEAT_1_ECX] =
1121 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1122 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1123 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1124 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1125 CPUID_EXT_SSE3,
1126 .features[FEAT_8000_0001_EDX] =
1127 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1128 CPUID_EXT2_SYSCALL,
1129 .features[FEAT_8000_0001_ECX] =
1130 CPUID_EXT3_LAHF_LM,
1131 .features[FEAT_XSAVE] =
1132 CPUID_XSAVE_XSAVEOPT,
1133 .features[FEAT_6_EAX] =
1134 CPUID_6_EAX_ARAT,
1135 .xlevel = 0x80000008,
1136 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1137 },
1138 {
1139 .name = "IvyBridge",
1140 .level = 0xd,
1141 .vendor = CPUID_VENDOR_INTEL,
1142 .family = 6,
1143 .model = 58,
1144 .stepping = 9,
1145 .features[FEAT_1_EDX] =
1146 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1147 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1148 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1149 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1150 CPUID_DE | CPUID_FP87,
1151 .features[FEAT_1_ECX] =
1152 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1153 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1154 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1155 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1156 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1157 .features[FEAT_7_0_EBX] =
1158 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1159 CPUID_7_0_EBX_ERMS,
1160 .features[FEAT_8000_0001_EDX] =
1161 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1162 CPUID_EXT2_SYSCALL,
1163 .features[FEAT_8000_0001_ECX] =
1164 CPUID_EXT3_LAHF_LM,
1165 .features[FEAT_XSAVE] =
1166 CPUID_XSAVE_XSAVEOPT,
1167 .features[FEAT_6_EAX] =
1168 CPUID_6_EAX_ARAT,
1169 .xlevel = 0x80000008,
1170 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1171 },
1172 {
1173 .name = "Haswell-noTSX",
1174 .level = 0xd,
1175 .vendor = CPUID_VENDOR_INTEL,
1176 .family = 6,
1177 .model = 60,
1178 .stepping = 1,
1179 .features[FEAT_1_EDX] =
1180 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1181 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1182 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1183 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1184 CPUID_DE | CPUID_FP87,
1185 .features[FEAT_1_ECX] =
1186 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1187 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1188 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1189 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1190 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1191 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1192 .features[FEAT_8000_0001_EDX] =
1193 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1194 CPUID_EXT2_SYSCALL,
1195 .features[FEAT_8000_0001_ECX] =
1196 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1197 .features[FEAT_7_0_EBX] =
1198 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1199 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1200 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1201 .features[FEAT_XSAVE] =
1202 CPUID_XSAVE_XSAVEOPT,
1203 .features[FEAT_6_EAX] =
1204 CPUID_6_EAX_ARAT,
1205 .xlevel = 0x80000008,
1206 .model_id = "Intel Core Processor (Haswell, no TSX)",
1207 }, {
1208 .name = "Haswell",
1209 .level = 0xd,
1210 .vendor = CPUID_VENDOR_INTEL,
1211 .family = 6,
1212 .model = 60,
1213 .stepping = 1,
1214 .features[FEAT_1_EDX] =
1215 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1216 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1217 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1218 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1219 CPUID_DE | CPUID_FP87,
1220 .features[FEAT_1_ECX] =
1221 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1222 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1223 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1224 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1225 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1226 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1227 .features[FEAT_8000_0001_EDX] =
1228 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1229 CPUID_EXT2_SYSCALL,
1230 .features[FEAT_8000_0001_ECX] =
1231 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1232 .features[FEAT_7_0_EBX] =
1233 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1234 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1235 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1236 CPUID_7_0_EBX_RTM,
1237 .features[FEAT_XSAVE] =
1238 CPUID_XSAVE_XSAVEOPT,
1239 .features[FEAT_6_EAX] =
1240 CPUID_6_EAX_ARAT,
1241 .xlevel = 0x80000008,
1242 .model_id = "Intel Core Processor (Haswell)",
1243 },
1244 {
1245 .name = "Broadwell-noTSX",
1246 .level = 0xd,
1247 .vendor = CPUID_VENDOR_INTEL,
1248 .family = 6,
1249 .model = 61,
1250 .stepping = 2,
1251 .features[FEAT_1_EDX] =
1252 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1253 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1254 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1255 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1256 CPUID_DE | CPUID_FP87,
1257 .features[FEAT_1_ECX] =
1258 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1259 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1260 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1261 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1262 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1263 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1264 .features[FEAT_8000_0001_EDX] =
1265 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1266 CPUID_EXT2_SYSCALL,
1267 .features[FEAT_8000_0001_ECX] =
1268 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1269 .features[FEAT_7_0_EBX] =
1270 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1271 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1272 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1273 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1274 CPUID_7_0_EBX_SMAP,
1275 .features[FEAT_XSAVE] =
1276 CPUID_XSAVE_XSAVEOPT,
1277 .features[FEAT_6_EAX] =
1278 CPUID_6_EAX_ARAT,
1279 .xlevel = 0x80000008,
1280 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1281 },
1282 {
1283 .name = "Broadwell",
1284 .level = 0xd,
1285 .vendor = CPUID_VENDOR_INTEL,
1286 .family = 6,
1287 .model = 61,
1288 .stepping = 2,
1289 .features[FEAT_1_EDX] =
1290 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1291 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1292 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1293 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1294 CPUID_DE | CPUID_FP87,
1295 .features[FEAT_1_ECX] =
1296 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1297 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1298 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1299 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1300 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1301 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1302 .features[FEAT_8000_0001_EDX] =
1303 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1304 CPUID_EXT2_SYSCALL,
1305 .features[FEAT_8000_0001_ECX] =
1306 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1307 .features[FEAT_7_0_EBX] =
1308 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1309 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1310 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1311 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1312 CPUID_7_0_EBX_SMAP,
1313 .features[FEAT_XSAVE] =
1314 CPUID_XSAVE_XSAVEOPT,
1315 .features[FEAT_6_EAX] =
1316 CPUID_6_EAX_ARAT,
1317 .xlevel = 0x80000008,
1318 .model_id = "Intel Core Processor (Broadwell)",
1319 },
1320 {
1321 .name = "Skylake-Client",
1322 .level = 0xd,
1323 .vendor = CPUID_VENDOR_INTEL,
1324 .family = 6,
1325 .model = 94,
1326 .stepping = 3,
1327 .features[FEAT_1_EDX] =
1328 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1329 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1330 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1331 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1332 CPUID_DE | CPUID_FP87,
1333 .features[FEAT_1_ECX] =
1334 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1335 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1336 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1337 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1338 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1339 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1340 .features[FEAT_8000_0001_EDX] =
1341 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1342 CPUID_EXT2_SYSCALL,
1343 .features[FEAT_8000_0001_ECX] =
1344 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1345 .features[FEAT_7_0_EBX] =
1346 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1347 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1348 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1349 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1350 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1351 /* Missing: XSAVES (not supported by some Linux versions,
1352 * including v4.1 to v4.6).
1353 * KVM doesn't yet expose any XSAVES state save component,
1354 * and the only one defined in Skylake (processor tracing)
1355 * probably will block migration anyway.
1356 */
1357 .features[FEAT_XSAVE] =
1358 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1359 CPUID_XSAVE_XGETBV1,
1360 .features[FEAT_6_EAX] =
1361 CPUID_6_EAX_ARAT,
1362 .xlevel = 0x80000008,
1363 .model_id = "Intel Core Processor (Skylake)",
1364 },
1365 {
1366 .name = "Opteron_G1",
1367 .level = 5,
1368 .vendor = CPUID_VENDOR_AMD,
1369 .family = 15,
1370 .model = 6,
1371 .stepping = 1,
1372 .features[FEAT_1_EDX] =
1373 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1374 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1375 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1376 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1377 CPUID_DE | CPUID_FP87,
1378 .features[FEAT_1_ECX] =
1379 CPUID_EXT_SSE3,
1380 .features[FEAT_8000_0001_EDX] =
1381 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1382 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1383 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1384 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1385 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1386 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1387 .xlevel = 0x80000008,
1388 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1389 },
1390 {
1391 .name = "Opteron_G2",
1392 .level = 5,
1393 .vendor = CPUID_VENDOR_AMD,
1394 .family = 15,
1395 .model = 6,
1396 .stepping = 1,
1397 .features[FEAT_1_EDX] =
1398 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1399 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1400 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1401 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1402 CPUID_DE | CPUID_FP87,
1403 .features[FEAT_1_ECX] =
1404 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1405 /* Missing: CPUID_EXT2_RDTSCP */
1406 .features[FEAT_8000_0001_EDX] =
1407 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1408 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1409 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1410 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1411 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1412 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1413 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1414 .features[FEAT_8000_0001_ECX] =
1415 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1416 .xlevel = 0x80000008,
1417 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1418 },
1419 {
1420 .name = "Opteron_G3",
1421 .level = 5,
1422 .vendor = CPUID_VENDOR_AMD,
1423 .family = 15,
1424 .model = 6,
1425 .stepping = 1,
1426 .features[FEAT_1_EDX] =
1427 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1428 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1429 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1430 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1431 CPUID_DE | CPUID_FP87,
1432 .features[FEAT_1_ECX] =
1433 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1434 CPUID_EXT_SSE3,
1435 /* Missing: CPUID_EXT2_RDTSCP */
1436 .features[FEAT_8000_0001_EDX] =
1437 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1438 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1439 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1440 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1441 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1442 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1443 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1444 .features[FEAT_8000_0001_ECX] =
1445 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1446 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1447 .xlevel = 0x80000008,
1448 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1449 },
1450 {
1451 .name = "Opteron_G4",
1452 .level = 0xd,
1453 .vendor = CPUID_VENDOR_AMD,
1454 .family = 21,
1455 .model = 1,
1456 .stepping = 2,
1457 .features[FEAT_1_EDX] =
1458 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1459 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1460 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1461 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1462 CPUID_DE | CPUID_FP87,
1463 .features[FEAT_1_ECX] =
1464 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1465 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1466 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1467 CPUID_EXT_SSE3,
1468 /* Missing: CPUID_EXT2_RDTSCP */
1469 .features[FEAT_8000_0001_EDX] =
1470 CPUID_EXT2_LM |
1471 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1472 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1473 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1474 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1475 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1476 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1477 .features[FEAT_8000_0001_ECX] =
1478 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1479 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1480 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1481 CPUID_EXT3_LAHF_LM,
1482 /* no xsaveopt! */
1483 .xlevel = 0x8000001A,
1484 .model_id = "AMD Opteron 62xx class CPU",
1485 },
1486 {
1487 .name = "Opteron_G5",
1488 .level = 0xd,
1489 .vendor = CPUID_VENDOR_AMD,
1490 .family = 21,
1491 .model = 2,
1492 .stepping = 0,
1493 .features[FEAT_1_EDX] =
1494 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1495 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1496 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1497 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1498 CPUID_DE | CPUID_FP87,
1499 .features[FEAT_1_ECX] =
1500 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1501 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1502 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1503 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1504 /* Missing: CPUID_EXT2_RDTSCP */
1505 .features[FEAT_8000_0001_EDX] =
1506 CPUID_EXT2_LM |
1507 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1508 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1509 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1510 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1511 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1512 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1513 .features[FEAT_8000_0001_ECX] =
1514 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1515 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1516 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1517 CPUID_EXT3_LAHF_LM,
1518 /* no xsaveopt! */
1519 .xlevel = 0x8000001A,
1520 .model_id = "AMD Opteron 63xx class CPU",
1521 },
1522};
1523
1524typedef struct PropValue {
1525 const char *prop, *value;
1526} PropValue;
1527
1528/* KVM-specific features that are automatically added/removed
1529 * from all CPU models when KVM is enabled.
1530 */
1531static PropValue kvm_default_props[] = {
1532 { "kvmclock", "on" },
1533 { "kvm-nopiodelay", "on" },
1534 { "kvm-asyncpf", "on" },
1535 { "kvm-steal-time", "on" },
1536 { "kvm-pv-eoi", "on" },
1537 { "kvmclock-stable-bit", "on" },
1538 { "x2apic", "on" },
1539 { "acpi", "off" },
1540 { "monitor", "off" },
1541 { "svm", "off" },
1542 { NULL, NULL },
1543};
1544
1545void x86_cpu_change_kvm_default(const char *prop, const char *value)
1546{
1547 PropValue *pv;
1548 for (pv = kvm_default_props; pv->prop; pv++) {
1549 if (!strcmp(pv->prop, prop)) {
1550 pv->value = value;
1551 break;
1552 }
1553 }
1554
1555 /* It is valid to call this function only for properties that
1556 * are already present in the kvm_default_props table.
1557 */
1558 assert(pv->prop);
1559}
1560
1561static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1562 bool migratable_only);
1563
1564#ifdef CONFIG_KVM
1565
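/* Check whether the host KVM supports LMCE (Local Machine Check Exception). */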
1566static bool lmce_supported(void)
1567{
1568 uint64_t mce_cap;
1569
1570 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1571 return false;
1572 }
1573
1574 return !!(mce_cap & MCG_LMCE_P);
1575}
1576
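/* Copy the host's 48-byte model-ID string, read from CPUID leaves
 * 0x80000002..0x80000004, into @str.
 */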
1577static int cpu_x86_fill_model_id(char *str)
1578{
1579 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1580 int i;
1581
1582 for (i = 0; i < 3; i++) {
1583 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1584 memcpy(str + i * 16 + 0, &eax, 4);
1585 memcpy(str + i * 16 + 4, &ebx, 4);
1586 memcpy(str + i * 16 + 8, &ecx, 4);
1587 memcpy(str + i * 16 + 12, &edx, 4);
1588 }
1589 return 0;
1590}
1591
1592static X86CPUDefinition host_cpudef;
1593
1594static Property host_x86_cpu_properties[] = {
1595 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1596 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1597 DEFINE_PROP_END_OF_LIST()
1598};
1599
1600/* class_init for the "host" CPU model
1601 *
1602 * This function may be called before KVM is initialized.
1603 */
1604static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1605{
1606 DeviceClass *dc = DEVICE_CLASS(oc);
1607 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1608 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1609
1610 xcc->kvm_required = true;
1611
1612 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1613 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1614
1615 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1616 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1617 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1618 host_cpudef.stepping = eax & 0x0F;
1619
1620 cpu_x86_fill_model_id(host_cpudef.model_id);
1621
1622 xcc->cpu_def = &host_cpudef;
1623
1624 /* level, xlevel, xlevel2, and the feature words are initialized in
1625 * instance_init, because they require KVM to be initialized.
1626 */
1627
1628 dc->props = host_x86_cpu_properties;
1629 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1630 dc->cannot_destroy_with_object_finalize_yet = true;
1631}
1632
1633static void host_x86_cpu_initfn(Object *obj)
1634{
1635 X86CPU *cpu = X86_CPU(obj);
1636 CPUX86State *env = &cpu->env;
1637 KVMState *s = kvm_state;
1638
1639 /* We can't fill the features array here because we don't know yet if
1640 * "migratable" is true or false.
1641 */
1642 cpu->host_features = true;
1643
1644 /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1645 if (kvm_enabled()) {
1646 env->cpuid_min_level =
1647 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1648 env->cpuid_min_xlevel =
1649 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1650 env->cpuid_min_xlevel2 =
1651 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1652
1653 if (lmce_supported()) {
1654 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1655 }
1656 }
1657
1658 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1659}
1660
1661static const TypeInfo host_x86_cpu_type_info = {
1662 .name = X86_CPU_TYPE_NAME("host"),
1663 .parent = TYPE_X86_CPU,
1664 .instance_init = host_x86_cpu_initfn,
1665 .class_init = host_x86_cpu_class_init,
1666};
1667
1668#endif
1669
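/* Warn about every bit in @mask of feature word @w that the current
 * accelerator (host KVM or TCG) does not support.
 */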
1670static void report_unavailable_features(FeatureWord w, uint32_t mask)
1671{
1672 FeatureWordInfo *f = &feature_word_info[w];
1673 int i;
1674
1675 for (i = 0; i < 32; ++i) {
1676 if ((1UL << i) & mask) {
1677 const char *reg = get_register_name_32(f->cpuid_reg);
1678 assert(reg);
1679 fprintf(stderr, "warning: %s doesn't support requested feature: "
1680 "CPUID.%02XH:%s%s%s [bit %d]\n",
1681 kvm_enabled() ? "host" : "TCG",
1682 f->cpuid_eax, reg,
1683 f->feat_names[i] ? "." : "",
1684 f->feat_names[i] ? f->feat_names[i] : "", i);
1685 }
1686 }
1687}
1688
1689static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1690 const char *name, void *opaque,
1691 Error **errp)
1692{
1693 X86CPU *cpu = X86_CPU(obj);
1694 CPUX86State *env = &cpu->env;
1695 int64_t value;
1696
1697 value = (env->cpuid_version >> 8) & 0xf;
1698 if (value == 0xf) {
1699 value += (env->cpuid_version >> 20) & 0xff;
1700 }
1701 visit_type_int(v, name, &value, errp);
1702}
1703
1704static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1705 const char *name, void *opaque,
1706 Error **errp)
1707{
1708 X86CPU *cpu = X86_CPU(obj);
1709 CPUX86State *env = &cpu->env;
1710 const int64_t min = 0;
1711 const int64_t max = 0xff + 0xf;
1712 Error *local_err = NULL;
1713 int64_t value;
1714
1715 visit_type_int(v, name, &value, &local_err);
1716 if (local_err) {
1717 error_propagate(errp, local_err);
1718 return;
1719 }
1720 if (value < min || value > max) {
1721 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1722 name ? name : "null", value, min, max);
1723 return;
1724 }
1725
1726 env->cpuid_version &= ~0xff00f00;
1727 if (value > 0x0f) {
1728 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1729 } else {
1730 env->cpuid_version |= value << 8;
1731 }
1732}
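
/*
 * Worked example (illustrative, not part of the original source): the
 * "family" property round-trips through the base/extended encoding above.
 * Setting family 21 (0x15) stores base family 0xF plus extended family 0x6;
 * the getter adds the two fields back together.
 */
static void example_family_roundtrip(void)
{
    uint32_t cpuid_version = 0;
    const int64_t value = 21;      /* 0x15, above the base-field maximum */
    int64_t read_back;

    cpuid_version |= 0xf00 | ((value - 0x0f) << 20);    /* set path */

    read_back = (cpuid_version >> 8) & 0xf;             /* get path */
    if (read_back == 0xf) {
        read_back += (cpuid_version >> 20) & 0xff;
    }
    assert(read_back == 21);
}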
1733
1734static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1735 const char *name, void *opaque,
1736 Error **errp)
1737{
1738 X86CPU *cpu = X86_CPU(obj);
1739 CPUX86State *env = &cpu->env;
1740 int64_t value;
1741
1742 value = (env->cpuid_version >> 4) & 0xf;
1743 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1744 visit_type_int(v, name, &value, errp);
1745}
1746
1747static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1748 const char *name, void *opaque,
1749 Error **errp)
1750{
1751 X86CPU *cpu = X86_CPU(obj);
1752 CPUX86State *env = &cpu->env;
1753 const int64_t min = 0;
1754 const int64_t max = 0xff;
1755 Error *local_err = NULL;
1756 int64_t value;
1757
1758 visit_type_int(v, name, &value, &local_err);
1759 if (local_err) {
1760 error_propagate(errp, local_err);
1761 return;
1762 }
1763 if (value < min || value > max) {
1764 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1765 name ? name : "null", value, min, max);
1766 return;
1767 }
1768
1769 env->cpuid_version &= ~0xf00f0;
1770 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1771}
1772
1773static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1774 const char *name, void *opaque,
1775 Error **errp)
1776{
1777 X86CPU *cpu = X86_CPU(obj);
1778 CPUX86State *env = &cpu->env;
1779 int64_t value;
1780
1781 value = env->cpuid_version & 0xf;
1782 visit_type_int(v, name, &value, errp);
1783}
1784
1785static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1786 const char *name, void *opaque,
1787 Error **errp)
1788{
1789 X86CPU *cpu = X86_CPU(obj);
1790 CPUX86State *env = &cpu->env;
1791 const int64_t min = 0;
1792 const int64_t max = 0xf;
1793 Error *local_err = NULL;
1794 int64_t value;
1795
1796 visit_type_int(v, name, &value, &local_err);
1797 if (local_err) {
1798 error_propagate(errp, local_err);
1799 return;
1800 }
1801 if (value < min || value > max) {
1802 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1803 name ? name : "null", value, min, max);
1804 return;
1805 }
1806
1807 env->cpuid_version &= ~0xf;
1808 env->cpuid_version |= value & 0xf;
1809}
1810
1811static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1812{
1813 X86CPU *cpu = X86_CPU(obj);
1814 CPUX86State *env = &cpu->env;
1815 char *value;
1816
1817 value = g_malloc(CPUID_VENDOR_SZ + 1);
1818 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1819 env->cpuid_vendor3);
1820 return value;
1821}
1822
1823static void x86_cpuid_set_vendor(Object *obj, const char *value,
1824 Error **errp)
1825{
1826 X86CPU *cpu = X86_CPU(obj);
1827 CPUX86State *env = &cpu->env;
1828 int i;
1829
1830 if (strlen(value) != CPUID_VENDOR_SZ) {
1831 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1832 return;
1833 }
1834
1835 env->cpuid_vendor1 = 0;
1836 env->cpuid_vendor2 = 0;
1837 env->cpuid_vendor3 = 0;
1838 for (i = 0; i < 4; i++) {
1839 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1840 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1841 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1842 }
1843}
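
/*
 * Worked example (illustrative, not part of the original source): the
 * 12-character vendor string is packed little-endian into three words,
 * matching the CPUID.0 register order EBX, EDX, ECX. For "GenuineIntel":
 *   cpuid_vendor1 (EBX) = 0x756e6547  "Genu"
 *   cpuid_vendor2 (EDX) = 0x49656e69  "ineI"
 *   cpuid_vendor3 (ECX) = 0x6c65746e  "ntel"
 */
static uint32_t example_pack4(const char *s)
{
    /* four ASCII bytes, lowest-addressed byte in the least significant bits */
    return (uint32_t)(uint8_t)s[0] |
           ((uint32_t)(uint8_t)s[1] << 8) |
           ((uint32_t)(uint8_t)s[2] << 16) |
           ((uint32_t)(uint8_t)s[3] << 24);
}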
1844
1845static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1846{
1847 X86CPU *cpu = X86_CPU(obj);
1848 CPUX86State *env = &cpu->env;
1849 char *value;
1850 int i;
1851
1852 value = g_malloc(48 + 1);
1853 for (i = 0; i < 48; i++) {
1854 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1855 }
1856 value[48] = '\0';
1857 return value;
1858}
1859
1860static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1861 Error **errp)
1862{
1863 X86CPU *cpu = X86_CPU(obj);
1864 CPUX86State *env = &cpu->env;
1865 int c, len, i;
1866
1867 if (model_id == NULL) {
1868 model_id = "";
1869 }
1870 len = strlen(model_id);
1871 memset(env->cpuid_model, 0, 48);
1872 for (i = 0; i < 48; i++) {
1873 if (i >= len) {
1874 c = '\0';
1875 } else {
1876 c = (uint8_t)model_id[i];
1877 }
1878 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1879 }
1880}
1881
1882static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1883 void *opaque, Error **errp)
1884{
1885 X86CPU *cpu = X86_CPU(obj);
1886 int64_t value;
1887
1888 value = cpu->env.tsc_khz * 1000;
1889 visit_type_int(v, name, &value, errp);
1890}
1891
1892static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1893 void *opaque, Error **errp)
1894{
1895 X86CPU *cpu = X86_CPU(obj);
1896 const int64_t min = 0;
1897 const int64_t max = INT64_MAX;
1898 Error *local_err = NULL;
1899 int64_t value;
1900
1901 visit_type_int(v, name, &value, &local_err);
1902 if (local_err) {
1903 error_propagate(errp, local_err);
1904 return;
1905 }
1906 if (value < min || value > max) {
1907 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1908 name ? name : "null", value, min, max);
1909 return;
1910 }
1911
1912 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1913}
1914
1915/* Generic getter for "feature-words" and "filtered-features" properties */
1916static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1917 const char *name, void *opaque,
1918 Error **errp)
1919{
1920 uint32_t *array = (uint32_t *)opaque;
1921 FeatureWord w;
1922 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1923 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1924 X86CPUFeatureWordInfoList *list = NULL;
1925
1926 for (w = 0; w < FEATURE_WORDS; w++) {
1927 FeatureWordInfo *wi = &feature_word_info[w];
1928 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1929 qwi->cpuid_input_eax = wi->cpuid_eax;
1930 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1931 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1932 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1933 qwi->features = array[w];
1934
1935 /* List will be in reverse order, but order shouldn't matter */
1936 list_entries[w].next = list;
1937 list_entries[w].value = &word_infos[w];
1938 list = &list_entries[w];
1939 }
1940
1941 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
1942}
1943
1944static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1945 void *opaque, Error **errp)
1946{
1947 X86CPU *cpu = X86_CPU(obj);
1948 int64_t value = cpu->hyperv_spinlock_attempts;
1949
1950 visit_type_int(v, name, &value, errp);
1951}
1952
1953static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1954 void *opaque, Error **errp)
1955{
1956 const int64_t min = 0xFFF;
1957 const int64_t max = UINT_MAX;
1958 X86CPU *cpu = X86_CPU(obj);
1959 Error *err = NULL;
1960 int64_t value;
1961
1962 visit_type_int(v, name, &value, &err);
1963 if (err) {
1964 error_propagate(errp, err);
1965 return;
1966 }
1967
1968 if (value < min || value > max) {
1969 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1970 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1971 object_get_typename(obj), name ? name : "null",
1972 value, min, max);
1973 return;
1974 }
1975 cpu->hyperv_spinlock_attempts = value;
1976}
1977
1978static PropertyInfo qdev_prop_spinlocks = {
1979 .name = "int",
1980 .get = x86_get_hv_spinlocks,
1981 .set = x86_set_hv_spinlocks,
1982};
1983
1984/* Convert all '_' in a feature string option name to '-', to make feature
1985 * name conform to QOM property naming rule, which uses '-' instead of '_'.
1986 */
1987static inline void feat2prop(char *s)
1988{
1989 while ((s = strchr(s, '_'))) {
1990 *s = '-';
1991 }
1992}
1993
1994/* Compatibility hack to maintain the legacy +-feat semantics,
1995 * where +-feat overrides any feature set by
1996 * feat=on|off even if the latter is parsed after +-feat
1997 * (e.g. "-x2apic,x2apic=on" results in x2apic being disabled)
1998 */
1999static FeatureWordArray plus_features = { 0 };
2000static FeatureWordArray minus_features = { 0 };
2001
2002/* Parse "+feature,-feature,feature=foo" CPU feature string
2003 */
2004static void x86_cpu_parse_featurestr(const char *typename, char *features,
2005 Error **errp)
2006{
2007 char *featurestr; /* Single "key=value" string being parsed */
2008 Error *local_err = NULL;
2009 static bool cpu_globals_initialized;
2010
2011 if (cpu_globals_initialized) {
2012 return;
2013 }
2014 cpu_globals_initialized = true;
2015
2016 if (!features) {
2017 return;
2018 }
2019
2020 for (featurestr = strtok(features, ",");
2021 featurestr && !local_err;
2022 featurestr = strtok(NULL, ",")) {
2023 const char *name;
2024 const char *val = NULL;
2025 char *eq = NULL;
2026 char num[32];
2027 GlobalProperty *prop;
2028
2029 /* Compatibility syntax: */
2030 if (featurestr[0] == '+') {
2031 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
2032 continue;
2033 } else if (featurestr[0] == '-') {
2034 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
2035 continue;
2036 }
2037
2038 eq = strchr(featurestr, '=');
2039 if (eq) {
2040 *eq++ = 0;
2041 val = eq;
2042 } else {
2043 val = "on";
2044 }
2045
2046 feat2prop(featurestr);
2047 name = featurestr;
2048
2049 /* Special case: */
2050 if (!strcmp(name, "tsc-freq")) {
2051 int64_t tsc_freq;
2052 char *err;
2053
2054 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
2055 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
2056 if (tsc_freq < 0 || *err) {
2057 error_setg(errp, "bad numerical value %s", val);
2058 return;
2059 }
2060 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2061 val = num;
2062 name = "tsc-frequency";
2063 }
2064
2065 prop = g_new0(typeof(*prop), 1);
2066 prop->driver = typename;
2067 prop->property = g_strdup(name);
2068 prop->value = g_strdup(val);
2069 prop->errp = &error_fatal;
2070 qdev_prop_register_global(prop);
2071 }
2072
2073 if (local_err) {
2074 error_propagate(errp, local_err);
2075 }
2076}
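
/*
 * Illustrative sketch (not part of the original source): how a feature
 * string is split under the same rules as x86_cpu_parse_featurestr() above.
 * The input buffer below is a hypothetical "-cpu" suffix; "+feat"/"-feat"
 * are the legacy forms, everything else becomes a "name=value" global
 * property with "on" as the implied value.
 */
static void example_split_featurestr(void)
{
    char buf[] = "pmu=on,+avx2,-x2apic,tsc_freq=2.5G";
    char *tok;

    for (tok = strtok(buf, ","); tok; tok = strtok(NULL, ",")) {
        if (tok[0] == '+' || tok[0] == '-') {
            printf("legacy %c feature: %s\n", tok[0], tok + 1);
        } else {
            char *eq = strchr(tok, '=');
            const char *val = "on";

            if (eq) {
                *eq = '\0';
                val = eq + 1;
            }
            feat2prop(tok);            /* "tsc_freq" becomes "tsc-freq" */
            printf("property %s=%s\n", tok, val);
        }
    }
}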
2077
2078/* Print all cpuid feature names in featureset
2079 */
2080static void listflags(FILE *f, fprintf_function print, const char **featureset)
2081{
2082 int bit;
2083 bool first = true;
2084
2085 for (bit = 0; bit < 32; bit++) {
2086 if (featureset[bit]) {
2087 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2088 first = false;
2089 }
2090 }
2091}
2092
2093/* generate CPU information. */
2094void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2095{
2096 X86CPUDefinition *def;
2097 char buf[256];
2098 int i;
2099
2100 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2101 def = &builtin_x86_defs[i];
2102 snprintf(buf, sizeof(buf), "%s", def->name);
2103 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
2104 }
2105#ifdef CONFIG_KVM
2106 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
2107 "KVM processor with all supported host features "
2108 "(only available in KVM mode)");
2109#endif
2110
2111 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2112 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2113 FeatureWordInfo *fw = &feature_word_info[i];
2114
2115 (*cpu_fprintf)(f, " ");
2116 listflags(f, cpu_fprintf, fw->feat_names);
2117 (*cpu_fprintf)(f, "\n");
2118 }
2119}
2120
2121CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2122{
2123 CpuDefinitionInfoList *cpu_list = NULL;
2124 X86CPUDefinition *def;
2125 int i;
2126
2127 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2128 CpuDefinitionInfoList *entry;
2129 CpuDefinitionInfo *info;
2130
2131 def = &builtin_x86_defs[i];
2132 info = g_malloc0(sizeof(*info));
2133 info->name = g_strdup(def->name);
2134
2135 entry = g_malloc0(sizeof(*entry));
2136 entry->value = info;
2137 entry->next = cpu_list;
2138 cpu_list = entry;
2139 }
2140
2141 return cpu_list;
2142}
2143
2144static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2145 bool migratable_only)
2146{
2147 FeatureWordInfo *wi = &feature_word_info[w];
2148 uint32_t r;
2149
2150 if (kvm_enabled()) {
2151 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2152 wi->cpuid_ecx,
2153 wi->cpuid_reg);
2154 } else if (tcg_enabled()) {
2155 r = wi->tcg_features;
2156 } else {
2157 return ~0;
2158 }
2159 if (migratable_only) {
2160 r &= x86_cpu_get_migratable_flags(w);
2161 }
2162 return r;
2163}
2164
2165/*
2166 * Filters CPU feature words based on host availability of each feature.
2167 *
2168 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2169 */
2170static int x86_cpu_filter_features(X86CPU *cpu)
2171{
2172 CPUX86State *env = &cpu->env;
2173 FeatureWord w;
2174 int rv = 0;
2175
2176 for (w = 0; w < FEATURE_WORDS; w++) {
2177 uint32_t host_feat =
2178 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2179 uint32_t requested_features = env->features[w];
2180 env->features[w] &= host_feat;
2181 cpu->filtered_features[w] = requested_features & ~env->features[w];
2182 if (cpu->filtered_features[w]) {
2183 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2184 report_unavailable_features(w, cpu->filtered_features[w]);
2185 }
2186 rv = 1;
2187 }
2188 }
2189
2190 return rv;
2191}
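
/*
 * Illustrative sketch (not part of the original source): the masking
 * arithmetic used by x86_cpu_filter_features() above, in isolation. With a
 * requested word of 0b1011 and host support of 0b0011, the guest keeps
 * 0b0011 and bit 3 is recorded as filtered; "enforce" then turns a non-empty
 * filtered set into a realize failure.
 */
static void example_filter_word(void)
{
    uint32_t requested = 0xb;   /* 0b1011 */
    uint32_t host      = 0x3;   /* 0b0011 */
    uint32_t kept      = requested & host;
    uint32_t filtered  = requested & ~kept;

    assert(kept == 0x3);
    assert(filtered == 0x8);
}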
2192
2193static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2194{
2195 PropValue *pv;
2196 for (pv = props; pv->prop; pv++) {
2197 if (!pv->value) {
2198 continue;
2199 }
2200 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2201 &error_abort);
2202 }
2203}
2204
2205/* Load data from X86CPUDefinition
2206 */
2207static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2208{
2209 CPUX86State *env = &cpu->env;
2210 const char *vendor;
2211 char host_vendor[CPUID_VENDOR_SZ + 1];
2212 FeatureWord w;
2213
2214 /* CPU models only set _minimum_ values for level/xlevel: */
2215 object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
2216 object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2217
2218 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2219 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2220 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2221 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2222 for (w = 0; w < FEATURE_WORDS; w++) {
2223 env->features[w] = def->features[w];
2224 }
2225
2226 /* Special cases not set in the X86CPUDefinition structs: */
2227 if (kvm_enabled()) {
2228 if (!kvm_irqchip_in_kernel()) {
2229 x86_cpu_change_kvm_default("x2apic", "off");
2230 }
2231
2232 x86_cpu_apply_props(cpu, kvm_default_props);
2233 }
2234
2235 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2236
2237 /* sysenter isn't supported in compatibility mode on AMD,
2238 * syscall isn't supported in compatibility mode on Intel.
2239 * Normally we advertise the actual CPU vendor, but you can
2240 * override this using the 'vendor' property if you want to use
2241 * KVM's sysenter/syscall emulation in compatibility mode and
2242 * when doing cross-vendor migration.
2243 */
2244 vendor = def->vendor;
2245 if (kvm_enabled()) {
2246 uint32_t ebx = 0, ecx = 0, edx = 0;
2247 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2248 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2249 vendor = host_vendor;
2250 }
2251
2252 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2253
2254}
2255
2256X86CPU *cpu_x86_init(const char *cpu_model)
2257{
2258 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2259}
2260
2261static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2262{
2263 X86CPUDefinition *cpudef = data;
2264 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2265
2266 xcc->cpu_def = cpudef;
2267}
2268
2269static void x86_register_cpudef_type(X86CPUDefinition *def)
2270{
2271 char *typename = x86_cpu_type_name(def->name);
2272 TypeInfo ti = {
2273 .name = typename,
2274 .parent = TYPE_X86_CPU,
2275 .class_init = x86_cpu_cpudef_class_init,
2276 .class_data = def,
2277 };
2278
2279 type_register(&ti);
2280 g_free(typename);
2281}
2282
2283#if !defined(CONFIG_USER_ONLY)
2284
2285void cpu_clear_apic_feature(CPUX86State *env)
2286{
2287 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2288}
2289
2290#endif /* !CONFIG_USER_ONLY */
2291
2292void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2293 uint32_t *eax, uint32_t *ebx,
2294 uint32_t *ecx, uint32_t *edx)
2295{
2296 X86CPU *cpu = x86_env_get_cpu(env);
2297 CPUState *cs = CPU(cpu);
2298 uint32_t pkg_offset;
2299
2300 /* test if maximum index reached */
2301 if (index & 0x80000000) {
2302 if (index > env->cpuid_xlevel) {
2303 if (env->cpuid_xlevel2 > 0) {
2304 /* Handle the Centaur's CPUID instruction. */
2305 if (index > env->cpuid_xlevel2) {
2306 index = env->cpuid_xlevel2;
2307 } else if (index < 0xC0000000) {
2308 index = env->cpuid_xlevel;
2309 }
2310 } else {
2311 /* Intel documentation states that invalid EAX input will
2312 * return the same information as EAX=cpuid_level
2313 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2314 */
2315 index = env->cpuid_level;
2316 }
2317 }
2318 } else {
2319 if (index > env->cpuid_level)
2320 index = env->cpuid_level;
2321 }
2322
2323 switch(index) {
2324 case 0:
2325 *eax = env->cpuid_level;
2326 *ebx = env->cpuid_vendor1;
2327 *edx = env->cpuid_vendor2;
2328 *ecx = env->cpuid_vendor3;
2329 break;
2330 case 1:
2331 *eax = env->cpuid_version;
2332 *ebx = (cpu->apic_id << 24) |
2333 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2334 *ecx = env->features[FEAT_1_ECX];
2335 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2336 *ecx |= CPUID_EXT_OSXSAVE;
2337 }
2338 *edx = env->features[FEAT_1_EDX];
2339 if (cs->nr_cores * cs->nr_threads > 1) {
2340 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2341 *edx |= CPUID_HT;
2342 }
2343 break;
2344 case 2:
2345 /* cache info: needed for Pentium Pro compatibility */
2346 if (cpu->cache_info_passthrough) {
2347 host_cpuid(index, 0, eax, ebx, ecx, edx);
2348 break;
2349 }
2350 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2351 *ebx = 0;
2352 if (!cpu->enable_l3_cache) {
2353 *ecx = 0;
2354 } else {
2355 *ecx = L3_N_DESCRIPTOR;
2356 }
2357 *edx = (L1D_DESCRIPTOR << 16) | \
2358 (L1I_DESCRIPTOR << 8) | \
2359 (L2_DESCRIPTOR);
2360 break;
2361 case 4:
2362 /* cache info: needed for Core compatibility */
2363 if (cpu->cache_info_passthrough) {
2364 host_cpuid(index, count, eax, ebx, ecx, edx);
2365 *eax &= ~0xFC000000;
2366 } else {
2367 *eax = 0;
2368 switch (count) {
2369 case 0: /* L1 dcache info */
2370 *eax |= CPUID_4_TYPE_DCACHE | \
2371 CPUID_4_LEVEL(1) | \
2372 CPUID_4_SELF_INIT_LEVEL;
2373 *ebx = (L1D_LINE_SIZE - 1) | \
2374 ((L1D_PARTITIONS - 1) << 12) | \
2375 ((L1D_ASSOCIATIVITY - 1) << 22);
2376 *ecx = L1D_SETS - 1;
2377 *edx = CPUID_4_NO_INVD_SHARING;
2378 break;
2379 case 1: /* L1 icache info */
2380 *eax |= CPUID_4_TYPE_ICACHE | \
2381 CPUID_4_LEVEL(1) | \
2382 CPUID_4_SELF_INIT_LEVEL;
2383 *ebx = (L1I_LINE_SIZE - 1) | \
2384 ((L1I_PARTITIONS - 1) << 12) | \
2385 ((L1I_ASSOCIATIVITY - 1) << 22);
2386 *ecx = L1I_SETS - 1;
2387 *edx = CPUID_4_NO_INVD_SHARING;
2388 break;
2389 case 2: /* L2 cache info */
2390 *eax |= CPUID_4_TYPE_UNIFIED | \
2391 CPUID_4_LEVEL(2) | \
2392 CPUID_4_SELF_INIT_LEVEL;
2393 if (cs->nr_threads > 1) {
2394 *eax |= (cs->nr_threads - 1) << 14;
2395 }
2396 *ebx = (L2_LINE_SIZE - 1) | \
2397 ((L2_PARTITIONS - 1) << 12) | \
2398 ((L2_ASSOCIATIVITY - 1) << 22);
2399 *ecx = L2_SETS - 1;
2400 *edx = CPUID_4_NO_INVD_SHARING;
2401 break;
2402 case 3: /* L3 cache info */
2403 if (!cpu->enable_l3_cache) {
2404 *eax = 0;
2405 *ebx = 0;
2406 *ecx = 0;
2407 *edx = 0;
2408 break;
2409 }
2410 *eax |= CPUID_4_TYPE_UNIFIED | \
2411 CPUID_4_LEVEL(3) | \
2412 CPUID_4_SELF_INIT_LEVEL;
2413 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2414 *eax |= ((1 << pkg_offset) - 1) << 14;
2415 *ebx = (L3_N_LINE_SIZE - 1) | \
2416 ((L3_N_PARTITIONS - 1) << 12) | \
2417 ((L3_N_ASSOCIATIVITY - 1) << 22);
2418 *ecx = L3_N_SETS - 1;
2419 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
2420 break;
2421 default: /* end of info */
2422 *eax = 0;
2423 *ebx = 0;
2424 *ecx = 0;
2425 *edx = 0;
2426 break;
2427 }
2428 }
2429
2430 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2431 if ((*eax & 31) && cs->nr_cores > 1) {
2432 *eax |= (cs->nr_cores - 1) << 26;
2433 }
2434 break;
2435 case 5:
2436 /* mwait info: needed for Core compatibility */
2437 *eax = 0; /* Smallest monitor-line size in bytes */
2438 *ebx = 0; /* Largest monitor-line size in bytes */
2439 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2440 *edx = 0;
2441 break;
2442 case 6:
2443 /* Thermal and Power Leaf */
2444 *eax = env->features[FEAT_6_EAX];
2445 *ebx = 0;
2446 *ecx = 0;
2447 *edx = 0;
2448 break;
2449 case 7:
2450 /* Structured Extended Feature Flags Enumeration Leaf */
2451 if (count == 0) {
2452 *eax = 0; /* Maximum ECX value for sub-leaves */
2453 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2454 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2455 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2456 *ecx |= CPUID_7_0_ECX_OSPKE;
2457 }
2458 *edx = 0; /* Reserved */
2459 } else {
2460 *eax = 0;
2461 *ebx = 0;
2462 *ecx = 0;
2463 *edx = 0;
2464 }
2465 break;
2466 case 9:
2467 /* Direct Cache Access Information Leaf */
2468 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2469 *ebx = 0;
2470 *ecx = 0;
2471 *edx = 0;
2472 break;
2473 case 0xA:
2474 /* Architectural Performance Monitoring Leaf */
2475 if (kvm_enabled() && cpu->enable_pmu) {
2476 KVMState *s = cs->kvm_state;
2477
2478 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2479 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2480 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2481 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2482 } else {
2483 *eax = 0;
2484 *ebx = 0;
2485 *ecx = 0;
2486 *edx = 0;
2487 }
2488 break;
2489 case 0xB:
2490 /* Extended Topology Enumeration Leaf */
2491 if (!cpu->enable_cpuid_0xb) {
2492 *eax = *ebx = *ecx = *edx = 0;
2493 break;
2494 }
2495
2496 *ecx = count & 0xff;
2497 *edx = cpu->apic_id;
2498
2499 switch (count) {
2500 case 0:
2501 *eax = apicid_core_offset(smp_cores, smp_threads);
2502 *ebx = smp_threads;
2503 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2504 break;
2505 case 1:
2506 *eax = apicid_pkg_offset(smp_cores, smp_threads);
2507 *ebx = smp_cores * smp_threads;
2508 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2509 break;
2510 default:
2511 *eax = 0;
2512 *ebx = 0;
2513 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2514 }
2515
2516 assert(!(*eax & ~0x1f));
2517 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2518 break;
2519 case 0xD: {
2520 KVMState *s = cs->kvm_state;
2521 uint64_t ena_mask;
2522 int i;
2523
2524 /* Processor Extended State */
2525 *eax = 0;
2526 *ebx = 0;
2527 *ecx = 0;
2528 *edx = 0;
2529 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2530 break;
2531 }
2532 if (kvm_enabled()) {
2533 ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2534 ena_mask <<= 32;
2535 ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2536 } else {
2537 ena_mask = -1;
2538 }
2539
2540 if (count == 0) {
2541 *ecx = 0x240;
2542 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2543 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2544 if ((env->features[esa->feature] & esa->bits) == esa->bits
2545 && ((ena_mask >> i) & 1) != 0) {
2546 if (i < 32) {
2547 *eax |= 1u << i;
2548 } else {
2549 *edx |= 1u << (i - 32);
2550 }
2551 *ecx = MAX(*ecx, esa->offset + esa->size);
2552 }
2553 }
2554 *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2555 *ebx = *ecx;
2556 } else if (count == 1) {
2557 *eax = env->features[FEAT_XSAVE];
2558 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2559 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2560 if ((env->features[esa->feature] & esa->bits) == esa->bits
2561 && ((ena_mask >> count) & 1) != 0) {
2562 *eax = esa->size;
2563 *ebx = esa->offset;
2564 }
2565 }
2566 break;
2567 }
2568 case 0x80000000:
2569 *eax = env->cpuid_xlevel;
2570 *ebx = env->cpuid_vendor1;
2571 *edx = env->cpuid_vendor2;
2572 *ecx = env->cpuid_vendor3;
2573 break;
2574 case 0x80000001:
2575 *eax = env->cpuid_version;
2576 *ebx = 0;
2577 *ecx = env->features[FEAT_8000_0001_ECX];
2578 *edx = env->features[FEAT_8000_0001_EDX];
2579
2580 /* The Linux kernel checks for the CMPLegacy bit and
2581 * discards multiple thread information if it is set.
2582 * So don't set it here for Intel to make Linux guests happy.
2583 */
2584 if (cs->nr_cores * cs->nr_threads > 1) {
2585 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2586 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2587 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2588 *ecx |= 1 << 1; /* CmpLegacy bit */
2589 }
2590 }
2591 break;
2592 case 0x80000002:
2593 case 0x80000003:
2594 case 0x80000004:
2595 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2596 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2597 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2598 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2599 break;
2600 case 0x80000005:
2601 /* cache info (L1 cache) */
2602 if (cpu->cache_info_passthrough) {
2603 host_cpuid(index, 0, eax, ebx, ecx, edx);
2604 break;
2605 }
2606 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2607 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2608 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2609 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2610 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2611 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2612 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2613 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2614 break;
2615 case 0x80000006:
2616 /* cache info (L2 cache) */
2617 if (cpu->cache_info_passthrough) {
2618 host_cpuid(index, 0, eax, ebx, ecx, edx);
2619 break;
2620 }
2621 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2622 (L2_DTLB_2M_ENTRIES << 16) | \
2623 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2624 (L2_ITLB_2M_ENTRIES);
2625 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2626 (L2_DTLB_4K_ENTRIES << 16) | \
2627 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2628 (L2_ITLB_4K_ENTRIES);
2629 *ecx = (L2_SIZE_KB_AMD << 16) | \
2630 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2631 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2632 if (!cpu->enable_l3_cache) {
2633 *edx = ((L3_SIZE_KB / 512) << 18) | \
2634 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2635 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2636 } else {
2637 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
2638 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
2639 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
2640 }
2641 break;
2642 case 0x80000007:
2643 *eax = 0;
2644 *ebx = 0;
2645 *ecx = 0;
2646 *edx = env->features[FEAT_8000_0007_EDX];
2647 break;
2648 case 0x80000008:
2649 /* virtual & phys address size in low 2 bytes. */
2650 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2651 /* 64 bit processor, 48 bits virtual, configurable
2652 * physical bits.
2653 */
2654 *eax = 0x00003000 + cpu->phys_bits;
2655 } else {
2656 *eax = cpu->phys_bits;
2657 }
2658 *ebx = 0;
2659 *ecx = 0;
2660 *edx = 0;
2661 if (cs->nr_cores * cs->nr_threads > 1) {
2662 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2663 }
2664 break;
2665 case 0x8000000A:
2666 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2667 *eax = 0x00000001; /* SVM Revision */
2668 *ebx = 0x00000010; /* nr of ASIDs */
2669 *ecx = 0;
2670 *edx = env->features[FEAT_SVM]; /* optional features */
2671 } else {
2672 *eax = 0;
2673 *ebx = 0;
2674 *ecx = 0;
2675 *edx = 0;
2676 }
2677 break;
2678 case 0xC0000000:
2679 *eax = env->cpuid_xlevel2;
2680 *ebx = 0;
2681 *ecx = 0;
2682 *edx = 0;
2683 break;
2684 case 0xC0000001:
2685 /* Support for VIA CPU's CPUID instruction */
2686 *eax = env->cpuid_version;
2687 *ebx = 0;
2688 *ecx = 0;
2689 *edx = env->features[FEAT_C000_0001_EDX];
2690 break;
2691 case 0xC0000002:
2692 case 0xC0000003:
2693 case 0xC0000004:
2694 /* Reserved for the future, and now filled with zero */
2695 *eax = 0;
2696 *ebx = 0;
2697 *ecx = 0;
2698 *edx = 0;
2699 break;
2700 default:
2701 /* reserved values: zero */
2702 *eax = 0;
2703 *ebx = 0;
2704 *ecx = 0;
2705 *edx = 0;
2706 break;
2707 }
2708}
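
/*
 * Illustrative sketch (not part of the original source): recovering a cache
 * size from the CPUID leaf 4 encoding produced above (each geometry field is
 * stored as its value minus one). For the hardcoded L1D values this yields
 * 64 * 1 * 8 * 64 = 32 KiB.
 */
static uint32_t example_leaf4_cache_size(uint32_t ebx, uint32_t ecx)
{
    uint32_t line_size  = (ebx & 0xfff) + 1;         /* EBX bits 11:0  */
    uint32_t partitions = ((ebx >> 12) & 0x3ff) + 1; /* EBX bits 21:12 */
    uint32_t ways       = ((ebx >> 22) & 0x3ff) + 1; /* EBX bits 31:22 */
    uint32_t sets       = ecx + 1;

    return line_size * partitions * ways * sets;
}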
2709
2710/* CPUClass::reset() */
2711static void x86_cpu_reset(CPUState *s)
2712{
2713 X86CPU *cpu = X86_CPU(s);
2714 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2715 CPUX86State *env = &cpu->env;
2716 target_ulong cr4;
2717 uint64_t xcr0;
2718 int i;
2719
2720 xcc->parent_reset(s);
2721
2722 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
2723
2724 tlb_flush(s, 1);
2725
2726 env->old_exception = -1;
2727
2728 /* init to reset state */
2729
2730 env->hflags2 |= HF2_GIF_MASK;
2731
2732 cpu_x86_update_cr0(env, 0x60000010);
2733 env->a20_mask = ~0x0;
2734 env->smbase = 0x30000;
2735
2736 env->idt.limit = 0xffff;
2737 env->gdt.limit = 0xffff;
2738 env->ldt.limit = 0xffff;
2739 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2740 env->tr.limit = 0xffff;
2741 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2742
2743 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2744 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2745 DESC_R_MASK | DESC_A_MASK);
2746 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2747 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2748 DESC_A_MASK);
2749 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2750 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2751 DESC_A_MASK);
2752 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2753 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2754 DESC_A_MASK);
2755 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2756 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2757 DESC_A_MASK);
2758 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2759 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2760 DESC_A_MASK);
2761
2762 env->eip = 0xfff0;
2763 env->regs[R_EDX] = env->cpuid_version;
2764
2765 env->eflags = 0x2;
2766
2767 /* FPU init */
2768 for (i = 0; i < 8; i++) {
2769 env->fptags[i] = 1;
2770 }
2771 cpu_set_fpuc(env, 0x37f);
2772
2773 env->mxcsr = 0x1f80;
2774 /* All units are in INIT state. */
2775 env->xstate_bv = 0;
2776
2777 env->pat = 0x0007040600070406ULL;
2778 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2779
2780 memset(env->dr, 0, sizeof(env->dr));
2781 env->dr[6] = DR6_FIXED_1;
2782 env->dr[7] = DR7_FIXED_1;
2783 cpu_breakpoint_remove_all(s, BP_CPU);
2784 cpu_watchpoint_remove_all(s, BP_CPU);
2785
2786 cr4 = 0;
2787 xcr0 = XSTATE_FP_MASK;
2788
2789#ifdef CONFIG_USER_ONLY
2790 /* Enable all the features for user-mode. */
2791 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
2792 xcr0 |= XSTATE_SSE_MASK;
2793 }
2794 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2795 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2796 if ((env->features[esa->feature] & esa->bits) == esa->bits) {
2797 xcr0 |= 1ull << i;
2798 }
2799 }
2800
2801 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
2802 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
2803 }
2804 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
2805 cr4 |= CR4_FSGSBASE_MASK;
2806 }
2807#endif
2808
2809 env->xcr0 = xcr0;
2810 cpu_x86_update_cr4(env, cr4);
2811
2812 /*
2813 * SDM 11.11.5 requires:
2814 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2815 * - IA32_MTRR_PHYSMASKn.V = 0
2816 * All other bits are undefined. For simplification, zero it all.
2817 */
2818 env->mtrr_deftype = 0;
2819 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2820 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2821
2822#if !defined(CONFIG_USER_ONLY)
2823 /* We hard-wire the BSP to the first CPU. */
2824 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
2825
2826 s->halted = !cpu_is_bsp(cpu);
2827
2828 if (kvm_enabled()) {
2829 kvm_arch_reset_vcpu(cpu);
2830 }
2831#endif
2832}
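
/*
 * Note on the reset state above: CS selector 0xf000 is loaded with a cached
 * base of 0xffff0000 and EIP is set to 0xfff0, so the first instruction is
 * fetched from 0xffff0000 + 0xfff0 = 0xfffffff0, the architectural x86 reset
 * vector. CR0 0x60000010 leaves PE clear (real mode) with CD and NW set, and
 * EFLAGS 0x2 is just the always-one reserved bit.
 */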
2833
2834#ifndef CONFIG_USER_ONLY
2835bool cpu_is_bsp(X86CPU *cpu)
2836{
2837 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2838}
2839
2840/* TODO: remove me, when reset over QOM tree is implemented */
2841static void x86_cpu_machine_reset_cb(void *opaque)
2842{
2843 X86CPU *cpu = opaque;
2844 cpu_reset(CPU(cpu));
2845}
2846#endif
2847
2848static void mce_init(X86CPU *cpu)
2849{
2850 CPUX86State *cenv = &cpu->env;
2851 unsigned int bank;
2852
2853 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2854 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2855 (CPUID_MCE | CPUID_MCA)) {
2856 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2857 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2858 cenv->mcg_ctl = ~(uint64_t)0;
2859 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2860 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2861 }
2862 }
2863}
2864
2865#ifndef CONFIG_USER_ONLY
2866static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2867{
2868 APICCommonState *apic;
2869 const char *apic_type = "apic";
2870
2871 if (kvm_apic_in_kernel()) {
2872 apic_type = "kvm-apic";
2873 } else if (xen_enabled()) {
2874 apic_type = "xen-apic";
2875 }
2876
2877 cpu->apic_state = DEVICE(object_new(apic_type));
2878
2879 object_property_add_child(OBJECT(cpu), "lapic",
2880 OBJECT(cpu->apic_state), &error_abort);
2881 object_unref(OBJECT(cpu->apic_state));
2882
2883 qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
2884 /* TODO: convert to link<> */
2885 apic = APIC_COMMON(cpu->apic_state);
2886 apic->cpu = cpu;
2887 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
2888}
2889
2890static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2891{
2892 APICCommonState *apic;
2893 static bool apic_mmio_map_once;
2894
2895 if (cpu->apic_state == NULL) {
2896 return;
2897 }
2898 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
2899 errp);
2900
2901 /* Map APIC MMIO area */
2902 apic = APIC_COMMON(cpu->apic_state);
2903 if (!apic_mmio_map_once) {
2904 memory_region_add_subregion_overlap(get_system_memory(),
2905 apic->apicbase &
2906 MSR_IA32_APICBASE_BASE,
2907 &apic->io_memory,
2908 0x1000);
2909 apic_mmio_map_once = true;
2910 }
2911}
2912
2913static void x86_cpu_machine_done(Notifier *n, void *unused)
2914{
2915 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2916 MemoryRegion *smram =
2917 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2918
2919 if (smram) {
2920 cpu->smram = g_new(MemoryRegion, 1);
2921 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2922 smram, 0, 1ull << 32);
2923 memory_region_set_enabled(cpu->smram, false);
2924 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2925 }
2926}
2927#else
2928static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2929{
2930}
2931#endif
2932
2933/* Note: Only safe for use on x86(-64) hosts */
2934static uint32_t x86_host_phys_bits(void)
2935{
2936 uint32_t eax;
2937 uint32_t host_phys_bits;
2938
2939 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
2940 if (eax >= 0x80000008) {
2941 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
2942 /* Note: according to AMD doc 25481 rev 2.34 there is a field
2943 * at 23:16 that can specify the maximum number of physical address
2944 * bits for the guest, overriding this value; but I've not seen
2945 * anything with that set.
2946 */
2947 host_phys_bits = eax & 0xff;
2948 } else {
2949 /* It's an odd 64 bit machine that doesn't have the leaf for
2950 * physical address bits; fall back to 36, which matches most
2951 * older Intel CPUs.
2952 */
2953 host_phys_bits = 36;
2954 }
2955
2956 return host_phys_bits;
2957}
2958
2959static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
2960{
2961 if (*min < value) {
2962 *min = value;
2963 }
2964}
2965
2966/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
2967static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
2968{
2969 CPUX86State *env = &cpu->env;
2970 FeatureWordInfo *fi = &feature_word_info[w];
2971 uint32_t eax = fi->cpuid_eax;
2972 uint32_t region = eax & 0xF0000000;
2973
2974 if (!env->features[w]) {
2975 return;
2976 }
2977
2978 switch (region) {
2979 case 0x00000000:
2980 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
2981 break;
2982 case 0x80000000:
2983 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
2984 break;
2985 case 0xC0000000:
2986 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
2987 break;
2988 }
2989}
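
/*
 * Illustrative sketch (not part of the original source): the region test
 * above picks which minimum level a feature word raises. A word defined by
 * leaf 7 raises cpuid_min_level, one defined by leaf 0x80000007 raises
 * cpuid_min_xlevel, and one defined by leaf 0xC0000001 raises
 * cpuid_min_xlevel2.
 */
static uint32_t *example_min_level_for_leaf(CPUX86State *env, uint32_t eax)
{
    switch (eax & 0xF0000000) {
    case 0x00000000:
        return &env->cpuid_min_level;
    case 0x80000000:
        return &env->cpuid_min_xlevel;
    case 0xC0000000:
        return &env->cpuid_min_xlevel2;
    default:
        return NULL;
    }
}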
2990
2991#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2992 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2993 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2994#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2995 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2996 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2997static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2998{
2999 CPUState *cs = CPU(dev);
3000 X86CPU *cpu = X86_CPU(dev);
3001 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3002 CPUX86State *env = &cpu->env;
3003 Error *local_err = NULL;
3004 static bool ht_warned;
3005 FeatureWord w;
3006
3007 if (xcc->kvm_required && !kvm_enabled()) {
3008 char *name = x86_cpu_class_get_model_name(xcc);
3009 error_setg(&local_err, "CPU model '%s' requires KVM", name);
3010 g_free(name);
3011 goto out;
3012 }
3013
3014 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
3015 error_setg(errp, "apic-id property was not initialized properly");
3016 return;
3017 }
3018
3019 /* TODO: cpu->host_features incorrectly overwrites features
3020 * set using "feat=on|off". Once we fix this, we can convert
3021 * plus_features & minus_features to global properties
3022 * inside x86_cpu_parse_featurestr() too.
3023 */
3024 if (cpu->host_features) {
3025 for (w = 0; w < FEATURE_WORDS; w++) {
3026 env->features[w] =
3027 x86_cpu_get_supported_feature_word(w, cpu->migratable);
3028 }
3029 }
3030
3031 for (w = 0; w < FEATURE_WORDS; w++) {
3032 cpu->env.features[w] |= plus_features[w];
3033 cpu->env.features[w] &= ~minus_features[w];
3034 }
3035
3036
3037 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
3038 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
3039 if (cpu->full_cpuid_auto_level) {
3040 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
3041 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
3042 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
3043 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
3044 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
3045 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
3046 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
3047 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
3048 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
3049 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
3050 /* SVM requires CPUID[0x8000000A] */
3051 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3052 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
3053 }
3054 }
3055
3056 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
3057 if (env->cpuid_level == UINT32_MAX) {
3058 env->cpuid_level = env->cpuid_min_level;
3059 }
3060 if (env->cpuid_xlevel == UINT32_MAX) {
3061 env->cpuid_xlevel = env->cpuid_min_xlevel;
3062 }
3063 if (env->cpuid_xlevel2 == UINT32_MAX) {
3064 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
3065 }
3066
3067 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
3068 error_setg(&local_err,
3069 kvm_enabled() ?
3070 "Host doesn't support requested features" :
3071 "TCG doesn't support requested features");
3072 goto out;
3073 }
3074
3075 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
3076 * CPUID[1].EDX.
3077 */
3078 if (IS_AMD_CPU(env)) {
3079 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
3080 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
3081 & CPUID_EXT2_AMD_ALIASES);
3082 }
3083
3084 /* For 64-bit systems, think about the number of physical bits to present.
3085 * Ideally this should be the same as the host; anything other than matching
3086 * the host can cause incorrect guest behaviour.
3087 * QEMU used to pick the magic value of 40 bits, which corresponds to
3088 * consumer AMD devices but nothing else.
3089 */
3090 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3091 if (kvm_enabled()) {
3092 uint32_t host_phys_bits = x86_host_phys_bits();
3093 static bool warned;
3094
3095 if (cpu->host_phys_bits) {
3096 /* The user asked for us to use the host physical bits */
3097 cpu->phys_bits = host_phys_bits;
3098 }
3099
3100 /* Print a warning if the user set it to a value that's not the
3101 * host value.
3102 */
3103 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
3104 !warned) {
3105 error_report("Warning: Host physical bits (%u)"
3106 " does not match phys-bits property (%u)",
3107 host_phys_bits, cpu->phys_bits);
3108 warned = true;
3109 }
3110
3111 if (cpu->phys_bits &&
3112 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3113 cpu->phys_bits < 32)) {
3114 error_setg(errp, "phys-bits should be between 32 and %u "
3115 " (but is %u)",
3116 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
3117 return;
3118 }
3119 } else {
3120 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3121 error_setg(errp, "TCG only supports phys-bits=%u",
3122 TCG_PHYS_ADDR_BITS);
3123 return;
3124 }
3125 }
3126 /* 0 means it was not explicitly set by the user (or by machine
3127 * compat_props or by the host code above). In this case, the default
3128 * is the value used by TCG (40).
3129 */
3130 if (cpu->phys_bits == 0) {
3131 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3132 }
3133 } else {
3134 /* For 32-bit systems, don't use the user-set value, but keep
3135 * phys_bits consistent with what we tell the guest.
3136 */
3137 if (cpu->phys_bits != 0) {
3138 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
3139 return;
3140 }
3141
3142 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3143 cpu->phys_bits = 36;
3144 } else {
3145 cpu->phys_bits = 32;
3146 }
3147 }
3148 cpu_exec_init(cs, &error_abort);
3149
3150 if (tcg_enabled()) {
3151 tcg_x86_init();
3152 }
3153
3154#ifndef CONFIG_USER_ONLY
3155 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3156
3157 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3158 x86_cpu_apic_create(cpu, &local_err);
3159 if (local_err != NULL) {
3160 goto out;
3161 }
3162 }
3163#endif
3164
3165 mce_init(cpu);
3166
3167#ifndef CONFIG_USER_ONLY
3168 if (tcg_enabled()) {
3169 AddressSpace *newas = g_new(AddressSpace, 1);
3170
3171 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3172 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3173
3174 /* Outer container... */
3175 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3176 memory_region_set_enabled(cpu->cpu_as_root, true);
3177
3178 /* ... with two regions inside: normal system memory with low
3179 * priority, and...
3180 */
3181 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3182 get_system_memory(), 0, ~0ull);
3183 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3184 memory_region_set_enabled(cpu->cpu_as_mem, true);
3185 address_space_init(newas, cpu->cpu_as_root, "CPU");
3186 cs->num_ases = 1;
3187 cpu_address_space_init(cs, newas, 0);
3188
3189 /* ... SMRAM with higher priority, linked from /machine/smram. */
3190 cpu->machine_done.notify = x86_cpu_machine_done;
3191 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3192 }
3193#endif
3194
3195 qemu_init_vcpu(cs);
3196
3197 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3198 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3199 * based on inputs (sockets,cores,threads), it is still better to give
3200 * users a warning.
3201 *
3202 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3203 * cs->nr_threads hasn't been populated yet and the check is incorrect.
3204 */
3205 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3206 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3207 " -smp options properly.");
3208 ht_warned = true;
3209 }
3210
3211 x86_cpu_apic_realize(cpu, &local_err);
3212 if (local_err != NULL) {
3213 goto out;
3214 }
3215 cpu_reset(cs);
3216
3217 xcc->parent_realize(dev, &local_err);
3218
3219out:
3220 if (local_err != NULL) {
3221 error_propagate(errp, local_err);
3222 return;
3223 }
3224}
3225
3226static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3227{
3228 X86CPU *cpu = X86_CPU(dev);
3229
3230#ifndef CONFIG_USER_ONLY
3231 cpu_remove_sync(CPU(dev));
3232 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3233#endif
3234
3235 if (cpu->apic_state) {
3236 object_unparent(OBJECT(cpu->apic_state));
3237 cpu->apic_state = NULL;
3238 }
3239}
3240
3241typedef struct BitProperty {
3242 uint32_t *ptr;
3243 uint32_t mask;
3244} BitProperty;
3245
3246static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3247 void *opaque, Error **errp)
3248{
3249 BitProperty *fp = opaque;
3250 bool value = (*fp->ptr & fp->mask) == fp->mask;
3251 visit_type_bool(v, name, &value, errp);
3252}
3253
3254static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3255 void *opaque, Error **errp)
3256{
3257 DeviceState *dev = DEVICE(obj);
3258 BitProperty *fp = opaque;
3259 Error *local_err = NULL;
3260 bool value;
3261
3262 if (dev->realized) {
3263 qdev_prop_set_after_realize(dev, name, errp);
3264 return;
3265 }
3266
3267 visit_type_bool(v, name, &value, &local_err);
3268 if (local_err) {
3269 error_propagate(errp, local_err);
3270 return;
3271 }
3272
3273 if (value) {
3274 *fp->ptr |= fp->mask;
3275 } else {
3276 *fp->ptr &= ~fp->mask;
3277 }
3278}
3279
3280static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3281 void *opaque)
3282{
3283 BitProperty *prop = opaque;
3284 g_free(prop);
3285}
3286
3287/* Register a boolean property to get/set a single bit in a uint32_t field.
3288 *
3289 * The same property name can be registered multiple times to make it affect
3290 * multiple bits in the same FeatureWord. In that case, the getter will return
3291 * true only if all bits are set.
3292 */
3293static void x86_cpu_register_bit_prop(X86CPU *cpu,
3294 const char *prop_name,
3295 uint32_t *field,
3296 int bitnr)
3297{
3298 BitProperty *fp;
3299 ObjectProperty *op;
3300 uint32_t mask = (1UL << bitnr);
3301
3302 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3303 if (op) {
3304 fp = op->opaque;
3305 assert(fp->ptr == field);
3306 fp->mask |= mask;
3307 } else {
3308 fp = g_new0(BitProperty, 1);
3309 fp->ptr = field;
3310 fp->mask = mask;
3311 object_property_add(OBJECT(cpu), prop_name, "bool",
3312 x86_cpu_get_bit_prop,
3313 x86_cpu_set_bit_prop,
3314 x86_cpu_release_bit_prop, fp, &error_abort);
3315 }
3316}
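
/*
 * Worked example (illustrative, not part of the original source): if the
 * same property name is registered for bits 3 and 5 of one feature word, the
 * stored mask becomes (1 << 3) | (1 << 5) = 0x28, and the getter's test
 * (*fp->ptr & fp->mask) == fp->mask only reads as true when both bits are
 * set; setting the property to false clears both bits at once.
 */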
3317
3318static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3319 FeatureWord w,
3320 int bitnr)
3321{
3322 Object *obj = OBJECT(cpu);
3323 int i;
3324 char **names;
3325 FeatureWordInfo *fi = &feature_word_info[w];
3326
3327 if (!fi->feat_names) {
3328 return;
3329 }
3330 if (!fi->feat_names[bitnr]) {
3331 return;
3332 }
3333
3334 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3335
3336 feat2prop(names[0]);
3337 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3338
3339 for (i = 1; names[i]; i++) {
3340 feat2prop(names[i]);
3341 object_property_add_alias(obj, names[i], obj, names[0],
3342 &error_abort);
3343 }
3344
3345 g_strfreev(names);
3346}
3347
3348static void x86_cpu_initfn(Object *obj)
3349{
3350 CPUState *cs = CPU(obj);
3351 X86CPU *cpu = X86_CPU(obj);
3352 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3353 CPUX86State *env = &cpu->env;
3354 FeatureWord w;
3355
3356 cs->env_ptr = env;
3357
3358 object_property_add(obj, "family", "int",
3359 x86_cpuid_version_get_family,
3360 x86_cpuid_version_set_family, NULL, NULL, NULL);
3361 object_property_add(obj, "model", "int",
3362 x86_cpuid_version_get_model,
3363 x86_cpuid_version_set_model, NULL, NULL, NULL);
3364 object_property_add(obj, "stepping", "int",
3365 x86_cpuid_version_get_stepping,
3366 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3367 object_property_add_str(obj, "vendor",
3368 x86_cpuid_get_vendor,
3369 x86_cpuid_set_vendor, NULL);
3370 object_property_add_str(obj, "model-id",
3371 x86_cpuid_get_model_id,
3372 x86_cpuid_set_model_id, NULL);
3373 object_property_add(obj, "tsc-frequency", "int",
3374 x86_cpuid_get_tsc_freq,
3375 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3376 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3377 x86_cpu_get_feature_words,
3378 NULL, NULL, (void *)env->features, NULL);
3379 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3380 x86_cpu_get_feature_words,
3381 NULL, NULL, (void *)cpu->filtered_features, NULL);
3382
3383 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3384
3385 for (w = 0; w < FEATURE_WORDS; w++) {
3386 int bitnr;
3387
3388 for (bitnr = 0; bitnr < 32; bitnr++) {
3389 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3390 }
3391 }
3392
3393 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3394}
3395
3396static int64_t x86_cpu_get_arch_id(CPUState *cs)
3397{
3398 X86CPU *cpu = X86_CPU(cs);
3399
3400 return cpu->apic_id;
3401}
3402
3403static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3404{
3405 X86CPU *cpu = X86_CPU(cs);
3406
3407 return cpu->env.cr[0] & CR0_PG_MASK;
3408}
3409
3410static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3411{
3412 X86CPU *cpu = X86_CPU(cs);
3413
3414 cpu->env.eip = value;
3415}
3416
3417static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3418{
3419 X86CPU *cpu = X86_CPU(cs);
3420
3421 cpu->env.eip = tb->pc - tb->cs_base;
3422}
3423
3424static bool x86_cpu_has_work(CPUState *cs)
3425{
3426 X86CPU *cpu = X86_CPU(cs);
3427 CPUX86State *env = &cpu->env;
3428
3429 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3430 CPU_INTERRUPT_POLL)) &&
3431 (env->eflags & IF_MASK)) ||
3432 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3433 CPU_INTERRUPT_INIT |
3434 CPU_INTERRUPT_SIPI |
3435 CPU_INTERRUPT_MCE)) ||
3436 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3437 !(env->hflags & HF_SMM_MASK));
3438}
3439
3440static Property x86_cpu_properties[] = {
3441#ifdef CONFIG_USER_ONLY
3442 /* apic_id = 0 by default for *-user, see commit 9886e834 */
3443 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
3444 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
3445 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
3446 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
3447#else
3448 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
3449 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
3450 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
3451 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
3452#endif
3453 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
3454 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
3455 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
3456 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
3457 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
3458 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
3459 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
3460 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
3461 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
3462 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
3463 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
3464 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
3465 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
3466 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
3467 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
3468 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
3469 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
3470 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
3471 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
3472 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
3473 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
3474 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
3475 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
3476 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
3477 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
3478 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
3479 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
3480 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
3481 DEFINE_PROP_END_OF_LIST()
3482};
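
/*
 * Usage sketch (illustrative, not from the original source): these qdev
 * properties are what "-cpu" option suffixes resolve to, e.g.:
 *
 *   -cpu Haswell,pmu=on,hv-spinlocks=0x1fff,phys-bits=40,l3-cache=off
 *
 * Underscores in user-supplied feature names are normalized to '-' by
 * feat2prop() before the property lookup.
 */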
3483
3484static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3485{
3486 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3487 CPUClass *cc = CPU_CLASS(oc);
3488 DeviceClass *dc = DEVICE_CLASS(oc);
3489
3490 xcc->parent_realize = dc->realize;
3491 dc->realize = x86_cpu_realizefn;
3492 dc->unrealize = x86_cpu_unrealizefn;
3493 dc->props = x86_cpu_properties;
3494
3495 xcc->parent_reset = cc->reset;
3496 cc->reset = x86_cpu_reset;
3497 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3498
3499 cc->class_by_name = x86_cpu_class_by_name;
3500 cc->parse_features = x86_cpu_parse_featurestr;
3501 cc->has_work = x86_cpu_has_work;
3502 cc->do_interrupt = x86_cpu_do_interrupt;
3503 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3504 cc->dump_state = x86_cpu_dump_state;
3505 cc->set_pc = x86_cpu_set_pc;
3506 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3507 cc->gdb_read_register = x86_cpu_gdb_read_register;
3508 cc->gdb_write_register = x86_cpu_gdb_write_register;
3509 cc->get_arch_id = x86_cpu_get_arch_id;
3510 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3511#ifdef CONFIG_USER_ONLY
3512 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3513#else
3514 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3515 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3516 cc->write_elf64_note = x86_cpu_write_elf64_note;
3517 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3518 cc->write_elf32_note = x86_cpu_write_elf32_note;
3519 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3520 cc->vmsd = &vmstate_x86_cpu;
3521#endif
3522 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3523#ifndef CONFIG_USER_ONLY
3524 cc->debug_excp_handler = breakpoint_handler;
3525#endif
3526 cc->cpu_exec_enter = x86_cpu_exec_enter;
3527 cc->cpu_exec_exit = x86_cpu_exec_exit;
3528
3529 dc->cannot_instantiate_with_device_add_yet = false;
3530 /*
3531 * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
3532 * object in cpus -> dangling pointer after final object_unref().
3533 */
3534 dc->cannot_destroy_with_object_finalize_yet = true;
3535}
3536
3537static const TypeInfo x86_cpu_type_info = {
3538 .name = TYPE_X86_CPU,
3539 .parent = TYPE_CPU,
3540 .instance_size = sizeof(X86CPU),
3541 .instance_init = x86_cpu_initfn,
3542 .abstract = true,
3543 .class_size = sizeof(X86CPUClass),
3544 .class_init = x86_cpu_common_class_init,
3545};
3546
3547static void x86_cpu_register_types(void)
3548{
3549 int i;
3550
3551 type_register_static(&x86_cpu_type_info);
3552 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3553 x86_register_cpudef_type(&builtin_x86_defs[i]);
3554 }
3555#ifdef CONFIG_KVM
3556 type_register_static(&host_x86_cpu_type_info);
3557#endif
3558}
3559
3560type_init(x86_cpu_register_types)