1/*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19#include "qemu/osdep.h"
20#include "qemu/cutils.h"
21
22#include "cpu.h"
23#include "exec/exec-all.h"
24#include "sysemu/kvm.h"
25#include "sysemu/cpus.h"
26#include "kvm_i386.h"
27
28#include "qemu/error-report.h"
29#include "qemu/option.h"
30#include "qemu/config-file.h"
31#include "qapi/qmp/qerror.h"
32#include "qapi/qmp/types.h"
33
34#include "qapi-types.h"
35#include "qapi-visit.h"
36#include "qapi/visitor.h"
37#include "qom/qom-qobject.h"
38#include "sysemu/arch_init.h"
39
40#if defined(CONFIG_KVM)
41#include <linux/kvm_para.h>
42#endif
43
44#include "sysemu/sysemu.h"
45#include "hw/qdev-properties.h"
46#include "hw/i386/topology.h"
47#ifndef CONFIG_USER_ONLY
48#include "exec/address-spaces.h"
49#include "hw/hw.h"
50#include "hw/xen/xen.h"
51#include "hw/i386/apic_internal.h"
52#endif
53
54
55/* Cache topology CPUID constants: */
56
57/* CPUID Leaf 2 Descriptors */
58
59#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
60#define CPUID_2_L1I_32KB_8WAY_64B 0x30
61#define CPUID_2_L2_2MB_8WAY_64B 0x7d
62#define CPUID_2_L3_16MB_16WAY_64B 0x4d
63
64
65/* CPUID Leaf 4 constants: */
66
67/* EAX: */
68#define CPUID_4_TYPE_DCACHE 1
69#define CPUID_4_TYPE_ICACHE 2
70#define CPUID_4_TYPE_UNIFIED 3
71
72#define CPUID_4_LEVEL(l) ((l) << 5)
73
74#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
75#define CPUID_4_FULLY_ASSOC (1 << 9)
76
77/* EDX: */
78#define CPUID_4_NO_INVD_SHARING (1 << 0)
79#define CPUID_4_INCLUSIVE (1 << 1)
80#define CPUID_4_COMPLEX_IDX (1 << 2)
81
82#define ASSOC_FULL 0xFF
83
84/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
85#define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
86 a == 2 ? 0x2 : \
87 a == 4 ? 0x4 : \
88 a == 8 ? 0x6 : \
89 a == 16 ? 0x8 : \
90 a == 32 ? 0xA : \
91 a == 48 ? 0xB : \
92 a == 64 ? 0xC : \
93 a == 96 ? 0xD : \
94 a == 128 ? 0xE : \
95 a == ASSOC_FULL ? 0xF : \
96 0 /* invalid value */)
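/* Example: AMD_ENC_ASSOC(16) yields 0x8, the value reported in the 4-bit
 * associativity fields of CPUID leaf 0x80000006 (ECX[15:12] for L2,
 * EDX[15:12] for L3); way counts with no defined encoding map to 0 (invalid).
 */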
97
98
99/* Definitions of the hardcoded cache entries we expose: */
100
101/* L1 data cache: */
102#define L1D_LINE_SIZE 64
103#define L1D_ASSOCIATIVITY 8
104#define L1D_SETS 64
105#define L1D_PARTITIONS 1
106/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
107#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
108/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
109#define L1D_LINES_PER_TAG 1
110#define L1D_SIZE_KB_AMD 64
111#define L1D_ASSOCIATIVITY_AMD 2
112
113/* L1 instruction cache: */
114#define L1I_LINE_SIZE 64
115#define L1I_ASSOCIATIVITY 8
116#define L1I_SETS 64
117#define L1I_PARTITIONS 1
118/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
119#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
120/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
121#define L1I_LINES_PER_TAG 1
122#define L1I_SIZE_KB_AMD 64
123#define L1I_ASSOCIATIVITY_AMD 2
124
125/* Level 2 unified cache: */
126#define L2_LINE_SIZE 64
127#define L2_ASSOCIATIVITY 16
128#define L2_SETS 4096
129#define L2_PARTITIONS 1
130/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
131/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
132#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
133/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
134#define L2_LINES_PER_TAG 1
135#define L2_SIZE_KB_AMD 512
136
137/* Level 3 unified cache: */
138#define L3_SIZE_KB 0 /* disabled */
139#define L3_ASSOCIATIVITY 0 /* disabled */
140#define L3_LINES_PER_TAG 0 /* disabled */
141#define L3_LINE_SIZE 0 /* disabled */
142#define L3_N_LINE_SIZE 64
143#define L3_N_ASSOCIATIVITY 16
144#define L3_N_SETS 16384
145#define L3_N_PARTITIONS 1
146#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
147#define L3_N_LINES_PER_TAG 1
148#define L3_N_SIZE_KB_AMD 16384
149
150/* TLB definitions: */
151
152#define L1_DTLB_2M_ASSOC 1
153#define L1_DTLB_2M_ENTRIES 255
154#define L1_DTLB_4K_ASSOC 1
155#define L1_DTLB_4K_ENTRIES 255
156
157#define L1_ITLB_2M_ASSOC 1
158#define L1_ITLB_2M_ENTRIES 255
159#define L1_ITLB_4K_ASSOC 1
160#define L1_ITLB_4K_ENTRIES 255
161
162#define L2_DTLB_2M_ASSOC 0 /* disabled */
163#define L2_DTLB_2M_ENTRIES 0 /* disabled */
164#define L2_DTLB_4K_ASSOC 4
165#define L2_DTLB_4K_ENTRIES 512
166
167#define L2_ITLB_2M_ASSOC 0 /* disabled */
168#define L2_ITLB_2M_ENTRIES 0 /* disabled */
169#define L2_ITLB_4K_ASSOC 4
170#define L2_ITLB_4K_ENTRIES 512
171
172
173
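/* CPUID leaf 0 returns the 12-byte vendor string split across EBX, EDX and
 * ECX (in that order), least-significant byte first; e.g. "Genu"/"ineI"/"ntel"
 * is reassembled below into "GenuineIntel".
 */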
174static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
175 uint32_t vendor2, uint32_t vendor3)
176{
177 int i;
178 for (i = 0; i < 4; i++) {
179 dst[i] = vendor1 >> (8 * i);
180 dst[i + 4] = vendor2 >> (8 * i);
181 dst[i + 8] = vendor3 >> (8 * i);
182 }
183 dst[CPUID_VENDOR_SZ] = '\0';
184}
185
186#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
187#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
188 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
189#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
190 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
191 CPUID_PSE36 | CPUID_FXSR)
192#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
193#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
194 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
195 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
196 CPUID_PAE | CPUID_SEP | CPUID_APIC)
197
198#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
199 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
200 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
201 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
202 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
203 /* partly implemented:
204 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
205 /* missing:
206 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
207#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
208 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
209 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
210 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
211 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
212 /* missing:
213 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
214 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
215 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
216 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
217 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
218
219#ifdef TARGET_X86_64
220#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
221#else
222#define TCG_EXT2_X86_64_FEATURES 0
223#endif
224
225#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
226 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
227 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
228 TCG_EXT2_X86_64_FEATURES)
229#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
230 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
231#define TCG_EXT4_FEATURES 0
232#define TCG_SVM_FEATURES 0
233#define TCG_KVM_FEATURES 0
234#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
235 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
236 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
237 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
238 CPUID_7_0_EBX_ERMS)
239 /* missing:
240 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
241 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
242 CPUID_7_0_EBX_RDSEED */
243#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
244 CPUID_7_0_ECX_LA57)
245#define TCG_7_0_EDX_FEATURES 0
246#define TCG_APM_FEATURES 0
247#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
248#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
249 /* missing:
250 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
251
252typedef struct FeatureWordInfo {
 253 /* Feature flag names are taken from "Intel Processor Identification and
254 * the CPUID Instruction" and AMD's "CPUID Specification".
255 * In cases of disagreement between feature naming conventions,
256 * aliases may be added.
257 */
258 const char *feat_names[32];
259 uint32_t cpuid_eax; /* Input EAX for CPUID */
260 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
261 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
262 int cpuid_reg; /* output register (R_* constant) */
263 uint32_t tcg_features; /* Feature flags supported by TCG */
264 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
265 uint32_t migratable_flags; /* Feature flags known to be migratable */
266} FeatureWordInfo;
267
268static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
269 [FEAT_1_EDX] = {
270 .feat_names = {
271 "fpu", "vme", "de", "pse",
272 "tsc", "msr", "pae", "mce",
273 "cx8", "apic", NULL, "sep",
274 "mtrr", "pge", "mca", "cmov",
275 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
276 NULL, "ds" /* Intel dts */, "acpi", "mmx",
277 "fxsr", "sse", "sse2", "ss",
278 "ht" /* Intel htt */, "tm", "ia64", "pbe",
279 },
280 .cpuid_eax = 1, .cpuid_reg = R_EDX,
281 .tcg_features = TCG_FEATURES,
282 },
283 [FEAT_1_ECX] = {
284 .feat_names = {
285 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
286 "ds-cpl", "vmx", "smx", "est",
287 "tm2", "ssse3", "cid", NULL,
288 "fma", "cx16", "xtpr", "pdcm",
289 NULL, "pcid", "dca", "sse4.1",
290 "sse4.2", "x2apic", "movbe", "popcnt",
291 "tsc-deadline", "aes", "xsave", "osxsave",
292 "avx", "f16c", "rdrand", "hypervisor",
293 },
294 .cpuid_eax = 1, .cpuid_reg = R_ECX,
295 .tcg_features = TCG_EXT_FEATURES,
296 },
 297 /* Feature names that are already defined for CPUID[1].EDX above but
 298 * are also set in CPUID[8000_0001].EDX on AMD CPUs do not have their
 299 * names repeated in feat_names below. They are copied automatically
 300 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
 301 */
302 [FEAT_8000_0001_EDX] = {
303 .feat_names = {
304 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
305 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
306 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
307 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
308 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
309 "nx", NULL, "mmxext", NULL /* mmx */,
310 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
311 NULL, "lm", "3dnowext", "3dnow",
312 },
313 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
314 .tcg_features = TCG_EXT2_FEATURES,
315 },
316 [FEAT_8000_0001_ECX] = {
317 .feat_names = {
318 "lahf-lm", "cmp-legacy", "svm", "extapic",
319 "cr8legacy", "abm", "sse4a", "misalignsse",
320 "3dnowprefetch", "osvw", "ibs", "xop",
321 "skinit", "wdt", NULL, "lwp",
322 "fma4", "tce", NULL, "nodeid-msr",
323 NULL, "tbm", "topoext", "perfctr-core",
324 "perfctr-nb", NULL, NULL, NULL,
325 NULL, NULL, NULL, NULL,
326 },
327 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
328 .tcg_features = TCG_EXT3_FEATURES,
329 },
330 [FEAT_C000_0001_EDX] = {
331 .feat_names = {
332 NULL, NULL, "xstore", "xstore-en",
333 NULL, NULL, "xcrypt", "xcrypt-en",
334 "ace2", "ace2-en", "phe", "phe-en",
335 "pmm", "pmm-en", NULL, NULL,
336 NULL, NULL, NULL, NULL,
337 NULL, NULL, NULL, NULL,
338 NULL, NULL, NULL, NULL,
339 NULL, NULL, NULL, NULL,
340 },
341 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
342 .tcg_features = TCG_EXT4_FEATURES,
343 },
344 [FEAT_KVM] = {
345 .feat_names = {
346 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
347 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
348 NULL, NULL, NULL, NULL,
349 NULL, NULL, NULL, NULL,
350 NULL, NULL, NULL, NULL,
351 NULL, NULL, NULL, NULL,
352 "kvmclock-stable-bit", NULL, NULL, NULL,
353 NULL, NULL, NULL, NULL,
354 },
355 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
356 .tcg_features = TCG_KVM_FEATURES,
357 },
358 [FEAT_HYPERV_EAX] = {
359 .feat_names = {
360 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
361 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
362 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
363 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
364 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
365 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
366 NULL, NULL, NULL, NULL,
367 NULL, NULL, NULL, NULL,
368 NULL, NULL, NULL, NULL,
369 NULL, NULL, NULL, NULL,
370 NULL, NULL, NULL, NULL,
371 },
372 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
373 },
374 [FEAT_HYPERV_EBX] = {
375 .feat_names = {
376 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
377 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
378 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
379 NULL /* hv_create_port */, NULL /* hv_connect_port */,
380 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
381 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
382 NULL, NULL,
383 NULL, NULL, NULL, NULL,
384 NULL, NULL, NULL, NULL,
385 NULL, NULL, NULL, NULL,
386 NULL, NULL, NULL, NULL,
387 },
388 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
389 },
390 [FEAT_HYPERV_EDX] = {
391 .feat_names = {
392 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
393 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
394 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
395 NULL, NULL,
396 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
397 NULL, NULL, NULL, NULL,
398 NULL, NULL, NULL, NULL,
399 NULL, NULL, NULL, NULL,
400 NULL, NULL, NULL, NULL,
401 NULL, NULL, NULL, NULL,
402 },
403 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
404 },
405 [FEAT_SVM] = {
406 .feat_names = {
407 "npt", "lbrv", "svm-lock", "nrip-save",
408 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
409 NULL, NULL, "pause-filter", NULL,
410 "pfthreshold", NULL, NULL, NULL,
411 NULL, NULL, NULL, NULL,
412 NULL, NULL, NULL, NULL,
413 NULL, NULL, NULL, NULL,
414 NULL, NULL, NULL, NULL,
415 },
416 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
417 .tcg_features = TCG_SVM_FEATURES,
418 },
419 [FEAT_7_0_EBX] = {
420 .feat_names = {
421 "fsgsbase", "tsc-adjust", NULL, "bmi1",
422 "hle", "avx2", NULL, "smep",
423 "bmi2", "erms", "invpcid", "rtm",
424 NULL, NULL, "mpx", NULL,
425 "avx512f", "avx512dq", "rdseed", "adx",
426 "smap", "avx512ifma", "pcommit", "clflushopt",
427 "clwb", NULL, "avx512pf", "avx512er",
428 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
429 },
430 .cpuid_eax = 7,
431 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
432 .cpuid_reg = R_EBX,
433 .tcg_features = TCG_7_0_EBX_FEATURES,
434 },
435 [FEAT_7_0_ECX] = {
436 .feat_names = {
437 NULL, "avx512vbmi", "umip", "pku",
438 "ospke", NULL, NULL, NULL,
439 NULL, NULL, NULL, NULL,
440 NULL, NULL, "avx512-vpopcntdq", NULL,
441 "la57", NULL, NULL, NULL,
442 NULL, NULL, "rdpid", NULL,
443 NULL, NULL, NULL, NULL,
444 NULL, NULL, NULL, NULL,
445 },
446 .cpuid_eax = 7,
447 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
448 .cpuid_reg = R_ECX,
449 .tcg_features = TCG_7_0_ECX_FEATURES,
450 },
451 [FEAT_7_0_EDX] = {
452 .feat_names = {
453 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
454 NULL, NULL, NULL, NULL,
455 NULL, NULL, NULL, NULL,
456 NULL, NULL, NULL, NULL,
457 NULL, NULL, NULL, NULL,
458 NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL,
461 },
462 .cpuid_eax = 7,
463 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
464 .cpuid_reg = R_EDX,
465 .tcg_features = TCG_7_0_EDX_FEATURES,
466 },
467 [FEAT_8000_0007_EDX] = {
468 .feat_names = {
469 NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL,
471 "invtsc", NULL, NULL, NULL,
472 NULL, NULL, NULL, NULL,
473 NULL, NULL, NULL, NULL,
474 NULL, NULL, NULL, NULL,
475 NULL, NULL, NULL, NULL,
476 NULL, NULL, NULL, NULL,
477 },
478 .cpuid_eax = 0x80000007,
479 .cpuid_reg = R_EDX,
480 .tcg_features = TCG_APM_FEATURES,
481 .unmigratable_flags = CPUID_APM_INVTSC,
482 },
483 [FEAT_XSAVE] = {
484 .feat_names = {
485 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
486 NULL, NULL, NULL, NULL,
487 NULL, NULL, NULL, NULL,
488 NULL, NULL, NULL, NULL,
489 NULL, NULL, NULL, NULL,
490 NULL, NULL, NULL, NULL,
491 NULL, NULL, NULL, NULL,
492 NULL, NULL, NULL, NULL,
493 },
494 .cpuid_eax = 0xd,
495 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
496 .cpuid_reg = R_EAX,
497 .tcg_features = TCG_XSAVE_FEATURES,
498 },
499 [FEAT_6_EAX] = {
500 .feat_names = {
501 NULL, NULL, "arat", NULL,
502 NULL, NULL, NULL, NULL,
503 NULL, NULL, NULL, NULL,
504 NULL, NULL, NULL, NULL,
505 NULL, NULL, NULL, NULL,
506 NULL, NULL, NULL, NULL,
507 NULL, NULL, NULL, NULL,
508 NULL, NULL, NULL, NULL,
509 },
510 .cpuid_eax = 6, .cpuid_reg = R_EAX,
511 .tcg_features = TCG_6_EAX_FEATURES,
512 },
513 [FEAT_XSAVE_COMP_LO] = {
514 .cpuid_eax = 0xD,
515 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
516 .cpuid_reg = R_EAX,
517 .tcg_features = ~0U,
518 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
519 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
520 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
521 XSTATE_PKRU_MASK,
522 },
523 [FEAT_XSAVE_COMP_HI] = {
524 .cpuid_eax = 0xD,
525 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
526 .cpuid_reg = R_EDX,
527 .tcg_features = ~0U,
528 },
529};
530
531typedef struct X86RegisterInfo32 {
532 /* Name of register */
533 const char *name;
534 /* QAPI enum value register */
535 X86CPURegister32 qapi_enum;
536} X86RegisterInfo32;
537
538#define REGISTER(reg) \
539 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
540static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
541 REGISTER(EAX),
542 REGISTER(ECX),
543 REGISTER(EDX),
544 REGISTER(EBX),
545 REGISTER(ESP),
546 REGISTER(EBP),
547 REGISTER(ESI),
548 REGISTER(EDI),
549};
550#undef REGISTER
551
552typedef struct ExtSaveArea {
553 uint32_t feature, bits;
554 uint32_t offset, size;
555} ExtSaveArea;
556
557static const ExtSaveArea x86_ext_save_areas[] = {
558 [XSTATE_FP_BIT] = {
559 /* x87 FP state component is always enabled if XSAVE is supported */
560 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
561 /* x87 state is in the legacy region of the XSAVE area */
562 .offset = 0,
563 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
564 },
565 [XSTATE_SSE_BIT] = {
566 /* SSE state component is always enabled if XSAVE is supported */
567 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
568 /* SSE state is in the legacy region of the XSAVE area */
569 .offset = 0,
570 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
571 },
572 [XSTATE_YMM_BIT] =
573 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
574 .offset = offsetof(X86XSaveArea, avx_state),
575 .size = sizeof(XSaveAVX) },
576 [XSTATE_BNDREGS_BIT] =
577 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
578 .offset = offsetof(X86XSaveArea, bndreg_state),
579 .size = sizeof(XSaveBNDREG) },
580 [XSTATE_BNDCSR_BIT] =
581 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
582 .offset = offsetof(X86XSaveArea, bndcsr_state),
583 .size = sizeof(XSaveBNDCSR) },
584 [XSTATE_OPMASK_BIT] =
585 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
586 .offset = offsetof(X86XSaveArea, opmask_state),
587 .size = sizeof(XSaveOpmask) },
588 [XSTATE_ZMM_Hi256_BIT] =
589 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
590 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
591 .size = sizeof(XSaveZMM_Hi256) },
592 [XSTATE_Hi16_ZMM_BIT] =
593 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
594 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
595 .size = sizeof(XSaveHi16_ZMM) },
596 [XSTATE_PKRU_BIT] =
597 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
598 .offset = offsetof(X86XSaveArea, pkru_state),
599 .size = sizeof(XSavePKRU) },
600};
601
602static uint32_t xsave_area_size(uint64_t mask)
603{
604 int i;
605 uint64_t ret = 0;
606
607 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
608 const ExtSaveArea *esa = &x86_ext_save_areas[i];
609 if ((mask >> i) & 1) {
610 ret = MAX(ret, esa->offset + esa->size);
611 }
612 }
613 return ret;
614}
615
616static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
617{
618 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
619 cpu->env.features[FEAT_XSAVE_COMP_LO];
620}
621
622const char *get_register_name_32(unsigned int reg)
623{
624 if (reg >= CPU_NB_REGS32) {
625 return NULL;
626 }
627 return x86_reg_info_32[reg].name;
628}
629
630/*
631 * Returns the set of feature flags that are supported and migratable by
632 * QEMU, for a given FeatureWord.
633 */
634static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
635{
636 FeatureWordInfo *wi = &feature_word_info[w];
637 uint32_t r = 0;
638 int i;
639
640 for (i = 0; i < 32; i++) {
641 uint32_t f = 1U << i;
642
643 /* If the feature name is known, it is implicitly considered migratable,
644 * unless it is explicitly set in unmigratable_flags */
645 if ((wi->migratable_flags & f) ||
646 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
647 r |= f;
648 }
649 }
650 return r;
651}
652
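/* On 32-bit builds the asm below saves/restores all registers with pusha/popa
 * and stores the results through a pointer, so EBX (which may be reserved as
 * the PIC register) never has to appear in the output or clobber lists.
 */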
653void host_cpuid(uint32_t function, uint32_t count,
654 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
655{
656 uint32_t vec[4];
657
658#ifdef __x86_64__
659 asm volatile("cpuid"
660 : "=a"(vec[0]), "=b"(vec[1]),
661 "=c"(vec[2]), "=d"(vec[3])
662 : "0"(function), "c"(count) : "cc");
663#elif defined(__i386__)
664 asm volatile("pusha \n\t"
665 "cpuid \n\t"
666 "mov %%eax, 0(%2) \n\t"
667 "mov %%ebx, 4(%2) \n\t"
668 "mov %%ecx, 8(%2) \n\t"
669 "mov %%edx, 12(%2) \n\t"
670 "popa"
671 : : "a"(function), "c"(count), "S"(vec)
672 : "memory", "cc");
673#else
674 abort();
675#endif
676
677 if (eax)
678 *eax = vec[0];
679 if (ebx)
680 *ebx = vec[1];
681 if (ecx)
682 *ecx = vec[2];
683 if (edx)
684 *edx = vec[3];
685}
686
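/* CPUID.01H:EAX encodes stepping in bits [3:0], model in [7:4], family in
 * [11:8], extended model in [19:16] and extended family in [27:20]; the
 * function below folds the extended family/model fields in unconditionally.
 */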
687void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
688{
689 uint32_t eax, ebx, ecx, edx;
690
691 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
692 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
693
694 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
695 if (family) {
696 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
697 }
698 if (model) {
699 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
700 }
701 if (stepping) {
702 *stepping = eax & 0x0F;
703 }
704}
705
706/* CPU class name definitions: */
707
708#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
709#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
710
 711/* Return the type name for a given CPU model name.
 712 * The caller is responsible for freeing the returned string.
 713 */
714static char *x86_cpu_type_name(const char *model_name)
715{
716 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
717}
718
719static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
720{
721 ObjectClass *oc;
722 char *typename;
723
724 if (cpu_model == NULL) {
725 return NULL;
726 }
727
728 typename = x86_cpu_type_name(cpu_model);
729 oc = object_class_by_name(typename);
730 g_free(typename);
731 return oc;
732}
733
734static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
735{
736 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
737 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
738 return g_strndup(class_name,
739 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
740}
741
742struct X86CPUDefinition {
743 const char *name;
744 uint32_t level;
745 uint32_t xlevel;
746 /* vendor is zero-terminated, 12 character ASCII string */
747 char vendor[CPUID_VENDOR_SZ + 1];
748 int family;
749 int model;
750 int stepping;
751 FeatureWordArray features;
752 char model_id[48];
753};
754
755static X86CPUDefinition builtin_x86_defs[] = {
756 {
757 .name = "qemu64",
758 .level = 0xd,
759 .vendor = CPUID_VENDOR_AMD,
760 .family = 6,
761 .model = 6,
762 .stepping = 3,
763 .features[FEAT_1_EDX] =
764 PPRO_FEATURES |
765 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
766 CPUID_PSE36,
767 .features[FEAT_1_ECX] =
768 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
769 .features[FEAT_8000_0001_EDX] =
770 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
771 .features[FEAT_8000_0001_ECX] =
772 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
773 .xlevel = 0x8000000A,
774 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
775 },
776 {
777 .name = "phenom",
778 .level = 5,
779 .vendor = CPUID_VENDOR_AMD,
780 .family = 16,
781 .model = 2,
782 .stepping = 3,
783 /* Missing: CPUID_HT */
784 .features[FEAT_1_EDX] =
785 PPRO_FEATURES |
786 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
787 CPUID_PSE36 | CPUID_VME,
788 .features[FEAT_1_ECX] =
789 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
790 CPUID_EXT_POPCNT,
791 .features[FEAT_8000_0001_EDX] =
792 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
793 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
794 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
795 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
796 CPUID_EXT3_CR8LEG,
797 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
798 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
799 .features[FEAT_8000_0001_ECX] =
800 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
801 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
802 /* Missing: CPUID_SVM_LBRV */
803 .features[FEAT_SVM] =
804 CPUID_SVM_NPT,
805 .xlevel = 0x8000001A,
806 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
807 },
808 {
809 .name = "core2duo",
810 .level = 10,
811 .vendor = CPUID_VENDOR_INTEL,
812 .family = 6,
813 .model = 15,
814 .stepping = 11,
815 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
816 .features[FEAT_1_EDX] =
817 PPRO_FEATURES |
818 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
819 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
820 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
821 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
822 .features[FEAT_1_ECX] =
823 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
824 CPUID_EXT_CX16,
825 .features[FEAT_8000_0001_EDX] =
826 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
827 .features[FEAT_8000_0001_ECX] =
828 CPUID_EXT3_LAHF_LM,
829 .xlevel = 0x80000008,
830 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
831 },
832 {
833 .name = "kvm64",
834 .level = 0xd,
835 .vendor = CPUID_VENDOR_INTEL,
836 .family = 15,
837 .model = 6,
838 .stepping = 1,
839 /* Missing: CPUID_HT */
840 .features[FEAT_1_EDX] =
841 PPRO_FEATURES | CPUID_VME |
842 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
843 CPUID_PSE36,
844 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
845 .features[FEAT_1_ECX] =
846 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
847 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
848 .features[FEAT_8000_0001_EDX] =
849 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
850 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
851 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
852 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
853 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
854 .features[FEAT_8000_0001_ECX] =
855 0,
856 .xlevel = 0x80000008,
857 .model_id = "Common KVM processor"
858 },
859 {
860 .name = "qemu32",
861 .level = 4,
862 .vendor = CPUID_VENDOR_INTEL,
863 .family = 6,
864 .model = 6,
865 .stepping = 3,
866 .features[FEAT_1_EDX] =
867 PPRO_FEATURES,
868 .features[FEAT_1_ECX] =
869 CPUID_EXT_SSE3,
870 .xlevel = 0x80000004,
871 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
872 },
873 {
874 .name = "kvm32",
875 .level = 5,
876 .vendor = CPUID_VENDOR_INTEL,
877 .family = 15,
878 .model = 6,
879 .stepping = 1,
880 .features[FEAT_1_EDX] =
881 PPRO_FEATURES | CPUID_VME |
882 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
883 .features[FEAT_1_ECX] =
884 CPUID_EXT_SSE3,
885 .features[FEAT_8000_0001_ECX] =
886 0,
887 .xlevel = 0x80000008,
888 .model_id = "Common 32-bit KVM processor"
889 },
890 {
891 .name = "coreduo",
892 .level = 10,
893 .vendor = CPUID_VENDOR_INTEL,
894 .family = 6,
895 .model = 14,
896 .stepping = 8,
897 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
898 .features[FEAT_1_EDX] =
899 PPRO_FEATURES | CPUID_VME |
900 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
901 CPUID_SS,
902 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
903 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
904 .features[FEAT_1_ECX] =
905 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
906 .features[FEAT_8000_0001_EDX] =
907 CPUID_EXT2_NX,
908 .xlevel = 0x80000008,
909 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
910 },
911 {
912 .name = "486",
913 .level = 1,
914 .vendor = CPUID_VENDOR_INTEL,
915 .family = 4,
916 .model = 8,
917 .stepping = 0,
918 .features[FEAT_1_EDX] =
919 I486_FEATURES,
920 .xlevel = 0,
921 },
922 {
923 .name = "pentium",
924 .level = 1,
925 .vendor = CPUID_VENDOR_INTEL,
926 .family = 5,
927 .model = 4,
928 .stepping = 3,
929 .features[FEAT_1_EDX] =
930 PENTIUM_FEATURES,
931 .xlevel = 0,
932 },
933 {
934 .name = "pentium2",
935 .level = 2,
936 .vendor = CPUID_VENDOR_INTEL,
937 .family = 6,
938 .model = 5,
939 .stepping = 2,
940 .features[FEAT_1_EDX] =
941 PENTIUM2_FEATURES,
942 .xlevel = 0,
943 },
944 {
945 .name = "pentium3",
946 .level = 3,
947 .vendor = CPUID_VENDOR_INTEL,
948 .family = 6,
949 .model = 7,
950 .stepping = 3,
951 .features[FEAT_1_EDX] =
952 PENTIUM3_FEATURES,
953 .xlevel = 0,
954 },
955 {
956 .name = "athlon",
957 .level = 2,
958 .vendor = CPUID_VENDOR_AMD,
959 .family = 6,
960 .model = 2,
961 .stepping = 3,
962 .features[FEAT_1_EDX] =
963 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
964 CPUID_MCA,
965 .features[FEAT_8000_0001_EDX] =
966 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
967 .xlevel = 0x80000008,
968 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
969 },
970 {
971 .name = "n270",
972 .level = 10,
973 .vendor = CPUID_VENDOR_INTEL,
974 .family = 6,
975 .model = 28,
976 .stepping = 2,
977 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
978 .features[FEAT_1_EDX] =
979 PPRO_FEATURES |
980 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
981 CPUID_ACPI | CPUID_SS,
 982 /* Some CPUs have no CPUID_SEP */
983 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
984 * CPUID_EXT_XTPR */
985 .features[FEAT_1_ECX] =
986 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
987 CPUID_EXT_MOVBE,
988 .features[FEAT_8000_0001_EDX] =
989 CPUID_EXT2_NX,
990 .features[FEAT_8000_0001_ECX] =
991 CPUID_EXT3_LAHF_LM,
992 .xlevel = 0x80000008,
993 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
994 },
995 {
996 .name = "Conroe",
997 .level = 10,
998 .vendor = CPUID_VENDOR_INTEL,
999 .family = 6,
1000 .model = 15,
1001 .stepping = 3,
1002 .features[FEAT_1_EDX] =
1003 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1004 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1005 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1006 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1007 CPUID_DE | CPUID_FP87,
1008 .features[FEAT_1_ECX] =
1009 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1010 .features[FEAT_8000_0001_EDX] =
1011 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1012 .features[FEAT_8000_0001_ECX] =
1013 CPUID_EXT3_LAHF_LM,
1014 .xlevel = 0x80000008,
1015 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1016 },
1017 {
1018 .name = "Penryn",
1019 .level = 10,
1020 .vendor = CPUID_VENDOR_INTEL,
1021 .family = 6,
1022 .model = 23,
1023 .stepping = 3,
1024 .features[FEAT_1_EDX] =
1025 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1026 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1027 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1028 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1029 CPUID_DE | CPUID_FP87,
1030 .features[FEAT_1_ECX] =
1031 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1032 CPUID_EXT_SSE3,
1033 .features[FEAT_8000_0001_EDX] =
1034 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1035 .features[FEAT_8000_0001_ECX] =
1036 CPUID_EXT3_LAHF_LM,
1037 .xlevel = 0x80000008,
1038 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1039 },
1040 {
1041 .name = "Nehalem",
1042 .level = 11,
1043 .vendor = CPUID_VENDOR_INTEL,
1044 .family = 6,
1045 .model = 26,
1046 .stepping = 3,
1047 .features[FEAT_1_EDX] =
1048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1052 CPUID_DE | CPUID_FP87,
1053 .features[FEAT_1_ECX] =
1054 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1055 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1056 .features[FEAT_8000_0001_EDX] =
1057 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1058 .features[FEAT_8000_0001_ECX] =
1059 CPUID_EXT3_LAHF_LM,
1060 .xlevel = 0x80000008,
1061 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1062 },
1063 {
1064 .name = "Westmere",
1065 .level = 11,
1066 .vendor = CPUID_VENDOR_INTEL,
1067 .family = 6,
1068 .model = 44,
1069 .stepping = 1,
1070 .features[FEAT_1_EDX] =
1071 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1072 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1073 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1074 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1075 CPUID_DE | CPUID_FP87,
1076 .features[FEAT_1_ECX] =
1077 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1078 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1079 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1080 .features[FEAT_8000_0001_EDX] =
1081 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1082 .features[FEAT_8000_0001_ECX] =
1083 CPUID_EXT3_LAHF_LM,
1084 .features[FEAT_6_EAX] =
1085 CPUID_6_EAX_ARAT,
1086 .xlevel = 0x80000008,
1087 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1088 },
1089 {
1090 .name = "SandyBridge",
1091 .level = 0xd,
1092 .vendor = CPUID_VENDOR_INTEL,
1093 .family = 6,
1094 .model = 42,
1095 .stepping = 1,
1096 .features[FEAT_1_EDX] =
1097 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1098 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1099 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1100 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1101 CPUID_DE | CPUID_FP87,
1102 .features[FEAT_1_ECX] =
1103 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1104 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1105 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1106 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1107 CPUID_EXT_SSE3,
1108 .features[FEAT_8000_0001_EDX] =
1109 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1110 CPUID_EXT2_SYSCALL,
1111 .features[FEAT_8000_0001_ECX] =
1112 CPUID_EXT3_LAHF_LM,
1113 .features[FEAT_XSAVE] =
1114 CPUID_XSAVE_XSAVEOPT,
1115 .features[FEAT_6_EAX] =
1116 CPUID_6_EAX_ARAT,
1117 .xlevel = 0x80000008,
1118 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1119 },
1120 {
1121 .name = "IvyBridge",
1122 .level = 0xd,
1123 .vendor = CPUID_VENDOR_INTEL,
1124 .family = 6,
1125 .model = 58,
1126 .stepping = 9,
1127 .features[FEAT_1_EDX] =
1128 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1129 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1130 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1131 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1132 CPUID_DE | CPUID_FP87,
1133 .features[FEAT_1_ECX] =
1134 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1135 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1136 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1137 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1138 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1139 .features[FEAT_7_0_EBX] =
1140 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1141 CPUID_7_0_EBX_ERMS,
1142 .features[FEAT_8000_0001_EDX] =
1143 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1144 CPUID_EXT2_SYSCALL,
1145 .features[FEAT_8000_0001_ECX] =
1146 CPUID_EXT3_LAHF_LM,
1147 .features[FEAT_XSAVE] =
1148 CPUID_XSAVE_XSAVEOPT,
1149 .features[FEAT_6_EAX] =
1150 CPUID_6_EAX_ARAT,
1151 .xlevel = 0x80000008,
1152 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1153 },
1154 {
1155 .name = "Haswell-noTSX",
1156 .level = 0xd,
1157 .vendor = CPUID_VENDOR_INTEL,
1158 .family = 6,
1159 .model = 60,
1160 .stepping = 1,
1161 .features[FEAT_1_EDX] =
1162 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1163 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1164 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1165 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1166 CPUID_DE | CPUID_FP87,
1167 .features[FEAT_1_ECX] =
1168 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1169 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1170 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1171 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1172 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1173 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1174 .features[FEAT_8000_0001_EDX] =
1175 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1176 CPUID_EXT2_SYSCALL,
1177 .features[FEAT_8000_0001_ECX] =
1178 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1179 .features[FEAT_7_0_EBX] =
1180 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1181 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1182 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1183 .features[FEAT_XSAVE] =
1184 CPUID_XSAVE_XSAVEOPT,
1185 .features[FEAT_6_EAX] =
1186 CPUID_6_EAX_ARAT,
1187 .xlevel = 0x80000008,
1188 .model_id = "Intel Core Processor (Haswell, no TSX)",
1189 }, {
1190 .name = "Haswell",
1191 .level = 0xd,
1192 .vendor = CPUID_VENDOR_INTEL,
1193 .family = 6,
1194 .model = 60,
1195 .stepping = 4,
1196 .features[FEAT_1_EDX] =
1197 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1198 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1199 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1200 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1201 CPUID_DE | CPUID_FP87,
1202 .features[FEAT_1_ECX] =
1203 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1204 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1205 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1206 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1207 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1208 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1209 .features[FEAT_8000_0001_EDX] =
1210 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1211 CPUID_EXT2_SYSCALL,
1212 .features[FEAT_8000_0001_ECX] =
1213 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1214 .features[FEAT_7_0_EBX] =
1215 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1216 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1217 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1218 CPUID_7_0_EBX_RTM,
1219 .features[FEAT_XSAVE] =
1220 CPUID_XSAVE_XSAVEOPT,
1221 .features[FEAT_6_EAX] =
1222 CPUID_6_EAX_ARAT,
1223 .xlevel = 0x80000008,
1224 .model_id = "Intel Core Processor (Haswell)",
1225 },
1226 {
1227 .name = "Broadwell-noTSX",
1228 .level = 0xd,
1229 .vendor = CPUID_VENDOR_INTEL,
1230 .family = 6,
1231 .model = 61,
1232 .stepping = 2,
1233 .features[FEAT_1_EDX] =
1234 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1235 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1236 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1237 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1238 CPUID_DE | CPUID_FP87,
1239 .features[FEAT_1_ECX] =
1240 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1241 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1242 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1243 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1244 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1245 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1246 .features[FEAT_8000_0001_EDX] =
1247 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1248 CPUID_EXT2_SYSCALL,
1249 .features[FEAT_8000_0001_ECX] =
1250 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1251 .features[FEAT_7_0_EBX] =
1252 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1253 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1254 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1255 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1256 CPUID_7_0_EBX_SMAP,
1257 .features[FEAT_XSAVE] =
1258 CPUID_XSAVE_XSAVEOPT,
1259 .features[FEAT_6_EAX] =
1260 CPUID_6_EAX_ARAT,
1261 .xlevel = 0x80000008,
1262 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1263 },
1264 {
1265 .name = "Broadwell",
1266 .level = 0xd,
1267 .vendor = CPUID_VENDOR_INTEL,
1268 .family = 6,
1269 .model = 61,
1270 .stepping = 2,
1271 .features[FEAT_1_EDX] =
1272 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1273 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1274 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1275 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1276 CPUID_DE | CPUID_FP87,
1277 .features[FEAT_1_ECX] =
1278 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1279 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1280 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1281 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1282 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1283 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1284 .features[FEAT_8000_0001_EDX] =
1285 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1286 CPUID_EXT2_SYSCALL,
1287 .features[FEAT_8000_0001_ECX] =
1288 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1289 .features[FEAT_7_0_EBX] =
1290 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1291 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1292 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1293 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1294 CPUID_7_0_EBX_SMAP,
1295 .features[FEAT_XSAVE] =
1296 CPUID_XSAVE_XSAVEOPT,
1297 .features[FEAT_6_EAX] =
1298 CPUID_6_EAX_ARAT,
1299 .xlevel = 0x80000008,
1300 .model_id = "Intel Core Processor (Broadwell)",
1301 },
1302 {
1303 .name = "Skylake-Client",
1304 .level = 0xd,
1305 .vendor = CPUID_VENDOR_INTEL,
1306 .family = 6,
1307 .model = 94,
1308 .stepping = 3,
1309 .features[FEAT_1_EDX] =
1310 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1311 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1312 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1313 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1314 CPUID_DE | CPUID_FP87,
1315 .features[FEAT_1_ECX] =
1316 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1317 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1318 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1319 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1320 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1321 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1322 .features[FEAT_8000_0001_EDX] =
1323 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1324 CPUID_EXT2_SYSCALL,
1325 .features[FEAT_8000_0001_ECX] =
1326 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1327 .features[FEAT_7_0_EBX] =
1328 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1329 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1330 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1331 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1332 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1333 /* Missing: XSAVES (not supported by some Linux versions,
1334 * including v4.1 to v4.12).
1335 * KVM doesn't yet expose any XSAVES state save component,
1336 * and the only one defined in Skylake (processor tracing)
1337 * probably will block migration anyway.
1338 */
1339 .features[FEAT_XSAVE] =
1340 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1341 CPUID_XSAVE_XGETBV1,
1342 .features[FEAT_6_EAX] =
1343 CPUID_6_EAX_ARAT,
1344 .xlevel = 0x80000008,
1345 .model_id = "Intel Core Processor (Skylake)",
1346 },
1347 {
1348 .name = "Skylake-Server",
1349 .level = 0xd,
1350 .vendor = CPUID_VENDOR_INTEL,
1351 .family = 6,
1352 .model = 85,
1353 .stepping = 4,
1354 .features[FEAT_1_EDX] =
1355 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1356 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1357 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1358 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1359 CPUID_DE | CPUID_FP87,
1360 .features[FEAT_1_ECX] =
1361 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1362 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1363 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1364 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1365 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1366 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1367 .features[FEAT_8000_0001_EDX] =
1368 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
1369 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1370 .features[FEAT_8000_0001_ECX] =
1371 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1372 .features[FEAT_7_0_EBX] =
1373 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1374 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1375 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1376 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1377 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
1378 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
1379 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
1380 CPUID_7_0_EBX_AVX512VL,
1381 /* Missing: XSAVES (not supported by some Linux versions,
1382 * including v4.1 to v4.12).
1383 * KVM doesn't yet expose any XSAVES state save component,
1384 * and the only one defined in Skylake (processor tracing)
1385 * probably will block migration anyway.
1386 */
1387 .features[FEAT_XSAVE] =
1388 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1389 CPUID_XSAVE_XGETBV1,
1390 .features[FEAT_6_EAX] =
1391 CPUID_6_EAX_ARAT,
1392 .xlevel = 0x80000008,
1393 .model_id = "Intel Xeon Processor (Skylake)",
1394 },
1395 {
1396 .name = "Opteron_G1",
1397 .level = 5,
1398 .vendor = CPUID_VENDOR_AMD,
1399 .family = 15,
1400 .model = 6,
1401 .stepping = 1,
1402 .features[FEAT_1_EDX] =
1403 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1404 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1405 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1406 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1407 CPUID_DE | CPUID_FP87,
1408 .features[FEAT_1_ECX] =
1409 CPUID_EXT_SSE3,
1410 .features[FEAT_8000_0001_EDX] =
1411 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1412 .xlevel = 0x80000008,
1413 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1414 },
1415 {
1416 .name = "Opteron_G2",
1417 .level = 5,
1418 .vendor = CPUID_VENDOR_AMD,
1419 .family = 15,
1420 .model = 6,
1421 .stepping = 1,
1422 .features[FEAT_1_EDX] =
1423 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1424 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1425 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1426 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1427 CPUID_DE | CPUID_FP87,
1428 .features[FEAT_1_ECX] =
1429 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1430 /* Missing: CPUID_EXT2_RDTSCP */
1431 .features[FEAT_8000_0001_EDX] =
1432 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1433 .features[FEAT_8000_0001_ECX] =
1434 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1435 .xlevel = 0x80000008,
1436 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1437 },
1438 {
1439 .name = "Opteron_G3",
1440 .level = 5,
1441 .vendor = CPUID_VENDOR_AMD,
1442 .family = 16,
1443 .model = 2,
1444 .stepping = 3,
1445 .features[FEAT_1_EDX] =
1446 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1447 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1448 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1449 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1450 CPUID_DE | CPUID_FP87,
1451 .features[FEAT_1_ECX] =
1452 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1453 CPUID_EXT_SSE3,
1454 /* Missing: CPUID_EXT2_RDTSCP */
1455 .features[FEAT_8000_0001_EDX] =
1456 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1457 .features[FEAT_8000_0001_ECX] =
1458 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1459 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1460 .xlevel = 0x80000008,
1461 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1462 },
1463 {
1464 .name = "Opteron_G4",
1465 .level = 0xd,
1466 .vendor = CPUID_VENDOR_AMD,
1467 .family = 21,
1468 .model = 1,
1469 .stepping = 2,
1470 .features[FEAT_1_EDX] =
1471 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1472 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1473 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1474 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1475 CPUID_DE | CPUID_FP87,
1476 .features[FEAT_1_ECX] =
1477 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1478 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1479 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1480 CPUID_EXT_SSE3,
1481 /* Missing: CPUID_EXT2_RDTSCP */
1482 .features[FEAT_8000_0001_EDX] =
1483 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1484 CPUID_EXT2_SYSCALL,
1485 .features[FEAT_8000_0001_ECX] =
1486 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1487 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1488 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1489 CPUID_EXT3_LAHF_LM,
1490 /* no xsaveopt! */
1491 .xlevel = 0x8000001A,
1492 .model_id = "AMD Opteron 62xx class CPU",
1493 },
1494 {
1495 .name = "Opteron_G5",
1496 .level = 0xd,
1497 .vendor = CPUID_VENDOR_AMD,
1498 .family = 21,
1499 .model = 2,
1500 .stepping = 0,
1501 .features[FEAT_1_EDX] =
1502 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1503 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1504 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1505 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1506 CPUID_DE | CPUID_FP87,
1507 .features[FEAT_1_ECX] =
1508 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1509 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1510 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1511 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1512 /* Missing: CPUID_EXT2_RDTSCP */
1513 .features[FEAT_8000_0001_EDX] =
1514 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
1515 CPUID_EXT2_SYSCALL,
1516 .features[FEAT_8000_0001_ECX] =
1517 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1518 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1519 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1520 CPUID_EXT3_LAHF_LM,
1521 /* no xsaveopt! */
1522 .xlevel = 0x8000001A,
1523 .model_id = "AMD Opteron 63xx class CPU",
1524 },
1525 {
1526 .name = "EPYC",
1527 .level = 0xd,
1528 .vendor = CPUID_VENDOR_AMD,
1529 .family = 23,
1530 .model = 1,
1531 .stepping = 2,
1532 .features[FEAT_1_EDX] =
1533 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
1534 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
1535 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
1536 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
1537 CPUID_VME | CPUID_FP87,
1538 .features[FEAT_1_ECX] =
1539 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
1540 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
1541 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1542 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
1543 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1544 .features[FEAT_8000_0001_EDX] =
1545 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
1546 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
1547 CPUID_EXT2_SYSCALL,
1548 .features[FEAT_8000_0001_ECX] =
1549 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
1550 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
1551 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1552 .features[FEAT_7_0_EBX] =
1553 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
1554 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
1555 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
1556 CPUID_7_0_EBX_SHA_NI,
1557 /* Missing: XSAVES (not supported by some Linux versions,
1558 * including v4.1 to v4.12).
1559 * KVM doesn't yet expose any XSAVES state save component.
1560 */
1561 .features[FEAT_XSAVE] =
1562 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1563 CPUID_XSAVE_XGETBV1,
1564 .features[FEAT_6_EAX] =
1565 CPUID_6_EAX_ARAT,
1566 .xlevel = 0x8000000A,
1567 .model_id = "AMD EPYC Processor",
1568 },
1569};
1570
1571typedef struct PropValue {
1572 const char *prop, *value;
1573} PropValue;
1574
1575/* KVM-specific features that are automatically added/removed
1576 * from all CPU models when KVM is enabled.
1577 */
1578static PropValue kvm_default_props[] = {
1579 { "kvmclock", "on" },
1580 { "kvm-nopiodelay", "on" },
1581 { "kvm-asyncpf", "on" },
1582 { "kvm-steal-time", "on" },
1583 { "kvm-pv-eoi", "on" },
1584 { "kvmclock-stable-bit", "on" },
1585 { "x2apic", "on" },
1586 { "acpi", "off" },
1587 { "monitor", "off" },
1588 { "svm", "off" },
1589 { NULL, NULL },
1590};
1591
1592/* TCG-specific defaults that override all CPU models when using TCG
1593 */
1594static PropValue tcg_default_props[] = {
1595 { "vme", "off" },
1596 { NULL, NULL },
1597};
1598
1599
1600void x86_cpu_change_kvm_default(const char *prop, const char *value)
1601{
1602 PropValue *pv;
1603 for (pv = kvm_default_props; pv->prop; pv++) {
1604 if (!strcmp(pv->prop, prop)) {
1605 pv->value = value;
1606 break;
1607 }
1608 }
1609
1610 /* It is valid to call this function only for properties that
1611 * are already present in the kvm_default_props table.
1612 */
1613 assert(pv->prop);
1614}
1615
1616static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1617 bool migratable_only);
1618
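/* LMCE (local machine-check exception) can only be enabled for a guest when
 * the host kernel reports MCG_LMCE_P among the IA32_MCG_CAP bits it supports
 * (queried via KVM_X86_GET_MCE_CAP_SUPPORTED).
 */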
1619static bool lmce_supported(void)
1620{
1621 uint64_t mce_cap = 0;
1622
1623#ifdef CONFIG_KVM
1624 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1625 return false;
1626 }
1627#endif
1628
1629 return !!(mce_cap & MCG_LMCE_P);
1630}
1631
1632#define CPUID_MODEL_ID_SZ 48
1633
1634/**
1635 * cpu_x86_fill_model_id:
1636 * Get CPUID model ID string from host CPU.
1637 *
1638 * @str should have at least CPUID_MODEL_ID_SZ bytes
1639 *
1640 * The function does NOT add a null terminator to the string
1641 * automatically.
1642 */
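/* Leaves 0x80000002..0x80000004 each return 16 bytes of the 48-byte brand
 * string in EAX, EBX, ECX and EDX, in that order. */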
1643static int cpu_x86_fill_model_id(char *str)
1644{
1645 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1646 int i;
1647
1648 for (i = 0; i < 3; i++) {
1649 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1650 memcpy(str + i * 16 + 0, &eax, 4);
1651 memcpy(str + i * 16 + 4, &ebx, 4);
1652 memcpy(str + i * 16 + 8, &ecx, 4);
1653 memcpy(str + i * 16 + 12, &edx, 4);
1654 }
1655 return 0;
1656}
1657
1658static Property max_x86_cpu_properties[] = {
1659 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1660 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
1661 DEFINE_PROP_END_OF_LIST()
1662};
1663
1664static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
1665{
1666 DeviceClass *dc = DEVICE_CLASS(oc);
1667 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1668
1669 xcc->ordering = 9;
1670
1671 xcc->model_description =
1672 "Enables all features supported by the accelerator in the current host";
1673
1674 dc->props = max_x86_cpu_properties;
1675}
1676
1677static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
1678
1679static void max_x86_cpu_initfn(Object *obj)
1680{
1681 X86CPU *cpu = X86_CPU(obj);
1682 CPUX86State *env = &cpu->env;
1683 KVMState *s = kvm_state;
1684
1685 /* We can't fill the features array here because we don't know yet if
1686 * "migratable" is true or false.
1687 */
1688 cpu->max_features = true;
1689
1690 if (kvm_enabled()) {
1691 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
1692 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
1693 int family, model, stepping;
1694
1695 host_vendor_fms(vendor, &family, &model, &stepping);
1696
1697 cpu_x86_fill_model_id(model_id);
1698
1699 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
1700 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
1701 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
1702 object_property_set_int(OBJECT(cpu), stepping, "stepping",
1703 &error_abort);
1704 object_property_set_str(OBJECT(cpu), model_id, "model-id",
1705 &error_abort);
1706
1707 env->cpuid_min_level =
1708 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1709 env->cpuid_min_xlevel =
1710 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1711 env->cpuid_min_xlevel2 =
1712 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1713
1714 if (lmce_supported()) {
1715 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1716 }
1717 } else {
1718 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
1719 "vendor", &error_abort);
1720 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
1721 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
1722 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
1723 object_property_set_str(OBJECT(cpu),
1724 "QEMU TCG CPU version " QEMU_HW_VERSION,
1725 "model-id", &error_abort);
1726 }
1727
1728 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1729}
1730
1731static const TypeInfo max_x86_cpu_type_info = {
1732 .name = X86_CPU_TYPE_NAME("max"),
1733 .parent = TYPE_X86_CPU,
1734 .instance_init = max_x86_cpu_initfn,
1735 .class_init = max_x86_cpu_class_init,
1736};
1737
1738#ifdef CONFIG_KVM
1739
1740static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1741{
1742 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1743
1744 xcc->kvm_required = true;
1745 xcc->ordering = 8;
1746
1747 xcc->model_description =
1748 "KVM processor with all supported host features "
1749 "(only available in KVM mode)";
1750}
1751
1752static const TypeInfo host_x86_cpu_type_info = {
1753 .name = X86_CPU_TYPE_NAME("host"),
1754 .parent = X86_CPU_TYPE_NAME("max"),
1755 .class_init = host_x86_cpu_class_init,
1756};
1757
1758#endif
1759
1760static void report_unavailable_features(FeatureWord w, uint32_t mask)
1761{
1762 FeatureWordInfo *f = &feature_word_info[w];
1763 int i;
1764
1765 for (i = 0; i < 32; ++i) {
1766 if ((1UL << i) & mask) {
1767 const char *reg = get_register_name_32(f->cpuid_reg);
1768 assert(reg);
1769 warn_report("%s doesn't support requested feature: "
1770 "CPUID.%02XH:%s%s%s [bit %d]",
1771 kvm_enabled() ? "host" : "TCG",
1772 f->cpuid_eax, reg,
1773 f->feat_names[i] ? "." : "",
1774 f->feat_names[i] ? f->feat_names[i] : "", i);
1775 }
1776 }
1777}
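/* Sample of what the warning above looks like (illustrative only; the exact
 * feature, leaf and bit depend on what was filtered):
 *
 *   warning: host doesn't support requested feature: CPUID.07H:EBX.hle [bit 4]
 */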
1778
1779static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1780 const char *name, void *opaque,
1781 Error **errp)
1782{
1783 X86CPU *cpu = X86_CPU(obj);
1784 CPUX86State *env = &cpu->env;
1785 int64_t value;
1786
1787 value = (env->cpuid_version >> 8) & 0xf;
1788 if (value == 0xf) {
1789 value += (env->cpuid_version >> 20) & 0xff;
1790 }
1791 visit_type_int(v, name, &value, errp);
1792}
1793
1794static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1795 const char *name, void *opaque,
1796 Error **errp)
1797{
1798 X86CPU *cpu = X86_CPU(obj);
1799 CPUX86State *env = &cpu->env;
1800 const int64_t min = 0;
1801 const int64_t max = 0xff + 0xf;
1802 Error *local_err = NULL;
1803 int64_t value;
1804
1805 visit_type_int(v, name, &value, &local_err);
1806 if (local_err) {
1807 error_propagate(errp, local_err);
1808 return;
1809 }
1810 if (value < min || value > max) {
1811 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1812 name ? name : "null", value, min, max);
1813 return;
1814 }
1815
1816 env->cpuid_version &= ~0xff00f00;
1817 if (value > 0x0f) {
1818 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1819 } else {
1820 env->cpuid_version |= value << 8;
1821 }
1822}
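/* Worked example of the encoding above: family 0x17 is larger than 0x0f, so
 * it is stored as base family 0xf plus extended family 0x08, i.e.
 * cpuid_version bits [11:8] = 0xf and bits [27:20] = 0x08; the getter above
 * adds the two fields back together and returns 0x17.
 */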
1823
1824static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1825 const char *name, void *opaque,
1826 Error **errp)
1827{
1828 X86CPU *cpu = X86_CPU(obj);
1829 CPUX86State *env = &cpu->env;
1830 int64_t value;
1831
1832 value = (env->cpuid_version >> 4) & 0xf;
1833 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1834 visit_type_int(v, name, &value, errp);
1835}
1836
1837static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1838 const char *name, void *opaque,
1839 Error **errp)
1840{
1841 X86CPU *cpu = X86_CPU(obj);
1842 CPUX86State *env = &cpu->env;
1843 const int64_t min = 0;
1844 const int64_t max = 0xff;
1845 Error *local_err = NULL;
1846 int64_t value;
1847
1848 visit_type_int(v, name, &value, &local_err);
1849 if (local_err) {
1850 error_propagate(errp, local_err);
1851 return;
1852 }
1853 if (value < min || value > max) {
1854 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1855 name ? name : "null", value, min, max);
1856 return;
1857 }
1858
1859 env->cpuid_version &= ~0xf00f0;
1860 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1861}
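/* Worked example: model 0x4f is split across two nibbles as encoded above:
 * the low nibble goes to cpuid_version bits [7:4] (0xf) and the high nibble
 * to the extended-model field in bits [19:16] (0x4); the getter recombines
 * them into 0x4f.
 */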
1862
1863static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1864 const char *name, void *opaque,
1865 Error **errp)
1866{
1867 X86CPU *cpu = X86_CPU(obj);
1868 CPUX86State *env = &cpu->env;
1869 int64_t value;
1870
1871 value = env->cpuid_version & 0xf;
1872 visit_type_int(v, name, &value, errp);
1873}
1874
1875static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1876 const char *name, void *opaque,
1877 Error **errp)
1878{
1879 X86CPU *cpu = X86_CPU(obj);
1880 CPUX86State *env = &cpu->env;
1881 const int64_t min = 0;
1882 const int64_t max = 0xf;
1883 Error *local_err = NULL;
1884 int64_t value;
1885
1886 visit_type_int(v, name, &value, &local_err);
1887 if (local_err) {
1888 error_propagate(errp, local_err);
1889 return;
1890 }
1891 if (value < min || value > max) {
1892 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1893 name ? name : "null", value, min, max);
1894 return;
1895 }
1896
1897 env->cpuid_version &= ~0xf;
1898 env->cpuid_version |= value & 0xf;
1899}
1900
1901static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1902{
1903 X86CPU *cpu = X86_CPU(obj);
1904 CPUX86State *env = &cpu->env;
1905 char *value;
1906
1907 value = g_malloc(CPUID_VENDOR_SZ + 1);
1908 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1909 env->cpuid_vendor3);
1910 return value;
1911}
1912
1913static void x86_cpuid_set_vendor(Object *obj, const char *value,
1914 Error **errp)
1915{
1916 X86CPU *cpu = X86_CPU(obj);
1917 CPUX86State *env = &cpu->env;
1918 int i;
1919
1920 if (strlen(value) != CPUID_VENDOR_SZ) {
1921 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1922 return;
1923 }
1924
1925 env->cpuid_vendor1 = 0;
1926 env->cpuid_vendor2 = 0;
1927 env->cpuid_vendor3 = 0;
1928 for (i = 0; i < 4; i++) {
1929 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1930 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1931 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1932 }
1933}
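/* Example of the packing above: the 12-character string "GenuineIntel" ends
 * up as cpuid_vendor1 = "Genu" (little-endian, reported in EBX),
 * cpuid_vendor2 = "ineI" (EDX) and cpuid_vendor3 = "ntel" (ECX), matching how
 * CPUID leaf 0 is assembled in cpu_x86_cpuid() below.
 */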
1934
1935static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1936{
1937 X86CPU *cpu = X86_CPU(obj);
1938 CPUX86State *env = &cpu->env;
1939 char *value;
1940 int i;
1941
1942 value = g_malloc(48 + 1);
1943 for (i = 0; i < 48; i++) {
1944 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1945 }
1946 value[48] = '\0';
1947 return value;
1948}
1949
1950static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1951 Error **errp)
1952{
1953 X86CPU *cpu = X86_CPU(obj);
1954 CPUX86State *env = &cpu->env;
1955 int c, len, i;
1956
1957 if (model_id == NULL) {
1958 model_id = "";
1959 }
1960 len = strlen(model_id);
1961 memset(env->cpuid_model, 0, 48);
1962 for (i = 0; i < 48; i++) {
1963 if (i >= len) {
1964 c = '\0';
1965 } else {
1966 c = (uint8_t)model_id[i];
1967 }
1968 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1969 }
1970}
1971
1972static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1973 void *opaque, Error **errp)
1974{
1975 X86CPU *cpu = X86_CPU(obj);
1976 int64_t value;
1977
1978 value = cpu->env.tsc_khz * 1000;
1979 visit_type_int(v, name, &value, errp);
1980}
1981
1982static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1983 void *opaque, Error **errp)
1984{
1985 X86CPU *cpu = X86_CPU(obj);
1986 const int64_t min = 0;
1987 const int64_t max = INT64_MAX;
1988 Error *local_err = NULL;
1989 int64_t value;
1990
1991 visit_type_int(v, name, &value, &local_err);
1992 if (local_err) {
1993 error_propagate(errp, local_err);
1994 return;
1995 }
1996 if (value < min || value > max) {
1997 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1998 name ? name : "null", value, min, max);
1999 return;
2000 }
2001
2002 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2003}
2004
2005/* Generic getter for "feature-words" and "filtered-features" properties */
2006static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2007 const char *name, void *opaque,
2008 Error **errp)
2009{
2010 uint32_t *array = (uint32_t *)opaque;
2011 FeatureWord w;
2012 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2013 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2014 X86CPUFeatureWordInfoList *list = NULL;
2015
2016 for (w = 0; w < FEATURE_WORDS; w++) {
2017 FeatureWordInfo *wi = &feature_word_info[w];
2018 X86CPUFeatureWordInfo *qwi = &word_infos[w];
2019 qwi->cpuid_input_eax = wi->cpuid_eax;
2020 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2021 qwi->cpuid_input_ecx = wi->cpuid_ecx;
2022 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2023 qwi->features = array[w];
2024
2025 /* List will be in reverse order, but order shouldn't matter */
2026 list_entries[w].next = list;
2027 list_entries[w].value = &word_infos[w];
2028 list = &list_entries[w];
2029 }
2030
2031 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
2032}
2033
2034static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2035 void *opaque, Error **errp)
2036{
2037 X86CPU *cpu = X86_CPU(obj);
2038 int64_t value = cpu->hyperv_spinlock_attempts;
2039
2040 visit_type_int(v, name, &value, errp);
2041}
2042
2043static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2044 void *opaque, Error **errp)
2045{
2046 const int64_t min = 0xFFF;
2047 const int64_t max = UINT_MAX;
2048 X86CPU *cpu = X86_CPU(obj);
2049 Error *err = NULL;
2050 int64_t value;
2051
2052 visit_type_int(v, name, &value, &err);
2053 if (err) {
2054 error_propagate(errp, err);
2055 return;
2056 }
2057
2058 if (value < min || value > max) {
2059 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2060 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2061 object_get_typename(obj), name ? name : "null",
2062 value, min, max);
2063 return;
2064 }
2065 cpu->hyperv_spinlock_attempts = value;
2066}
2067
2068static const PropertyInfo qdev_prop_spinlocks = {
2069 .name = "int",
2070 .get = x86_get_hv_spinlocks,
2071 .set = x86_set_hv_spinlocks,
2072};
2073
2074/* Convert all '_' in a feature string option name to '-', to make the feature
2075 * name conform to the QOM property naming rule, which uses '-' instead of '_'.
2076 */
2077static inline void feat2prop(char *s)
2078{
2079 while ((s = strchr(s, '_'))) {
2080 *s = '-';
2081 }
2082}
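/* e.g. a user-supplied "kvm_pv_eoi" becomes "kvm-pv-eoi", matching the
 * property name used in kvm_default_props above.
 */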
2083
2084/* Return the feature property name for a feature flag bit */
2085static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2086{
2087 /* XSAVE components are automatically enabled by other features,
2088 * so return the original feature name instead
2089 */
2090 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2091 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2092
2093 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2094 x86_ext_save_areas[comp].bits) {
2095 w = x86_ext_save_areas[comp].feature;
2096 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2097 }
2098 }
2099
2100 assert(bitnr < 32);
2101 assert(w < FEATURE_WORDS);
2102 return feature_word_info[w].feat_names[bitnr];
2103}
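/* For instance, a filtered bit in FEAT_XSAVE_COMP_LO that corresponds to the
 * AVX state component is reported using the name of the feature that enables
 * it ("avx" in FEAT_1_ECX) rather than the unnamed XSAVE component bit.
 */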
2104
2105/* Compatibility hack to maintain the legacy +-feat semantics,
2106 * where +-feat overwrites any feature set by
2107 * feat=on|feat even if the latter is parsed after +-feat
2108 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
2109 */
2110static GList *plus_features, *minus_features;
2111
2112static gint compare_string(gconstpointer a, gconstpointer b)
2113{
2114 return g_strcmp0(a, b);
2115}
2116
2117/* Parse "+feature,-feature,feature=foo" CPU feature string
2118 */
2119static void x86_cpu_parse_featurestr(const char *typename, char *features,
2120 Error **errp)
2121{
2122 char *featurestr; /* Single "key=value" string being parsed */
2123 static bool cpu_globals_initialized;
2124 bool ambiguous = false;
2125
2126 if (cpu_globals_initialized) {
2127 return;
2128 }
2129 cpu_globals_initialized = true;
2130
2131 if (!features) {
2132 return;
2133 }
2134
2135 for (featurestr = strtok(features, ",");
2136 featurestr;
2137 featurestr = strtok(NULL, ",")) {
2138 const char *name;
2139 const char *val = NULL;
2140 char *eq = NULL;
2141 char num[32];
2142 GlobalProperty *prop;
2143
2144 /* Compatibility syntax: */
2145 if (featurestr[0] == '+') {
2146 plus_features = g_list_append(plus_features,
2147 g_strdup(featurestr + 1));
2148 continue;
2149 } else if (featurestr[0] == '-') {
2150 minus_features = g_list_append(minus_features,
2151 g_strdup(featurestr + 1));
2152 continue;
2153 }
2154
2155 eq = strchr(featurestr, '=');
2156 if (eq) {
2157 *eq++ = 0;
2158 val = eq;
2159 } else {
2160 val = "on";
2161 }
2162
2163 feat2prop(featurestr);
2164 name = featurestr;
2165
2166 if (g_list_find_custom(plus_features, name, compare_string)) {
2167 warn_report("Ambiguous CPU model string. "
2168 "Don't mix both \"+%s\" and \"%s=%s\"",
2169 name, name, val);
2170 ambiguous = true;
2171 }
2172 if (g_list_find_custom(minus_features, name, compare_string)) {
2173 warn_report("Ambiguous CPU model string. "
2174 "Don't mix both \"-%s\" and \"%s=%s\"",
2175 name, name, val);
2176 ambiguous = true;
2177 }
2178
2179 /* Special case: */
2180 if (!strcmp(name, "tsc-freq")) {
2181 int ret;
2182 uint64_t tsc_freq;
2183
2184 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
2185 if (ret < 0 || tsc_freq > INT64_MAX) {
2186 error_setg(errp, "bad numerical value %s", val);
2187 return;
2188 }
2189 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2190 val = num;
2191 name = "tsc-frequency";
2192 }
2193
2194 prop = g_new0(typeof(*prop), 1);
2195 prop->driver = typename;
2196 prop->property = g_strdup(name);
2197 prop->value = g_strdup(val);
2198 prop->errp = &error_fatal;
2199 qdev_prop_register_global(prop);
2200 }
2201
2202 if (ambiguous) {
2203 warn_report("Compatibility of ambiguous CPU model "
2204 "strings won't be kept on future QEMU versions");
2205 }
2206}
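/* Illustrative input for the parser above (feature names chosen as examples):
 * "+avx2,tsc-freq=2500M" puts "avx2" on the plus_features list, which is
 * applied later in x86_cpu_expand_features(), and converts the metric-suffixed
 * "2500M" via qemu_strtosz_metric() into a "tsc-frequency=2500000000" global
 * property.
 */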
2207
2208static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
2209static int x86_cpu_filter_features(X86CPU *cpu);
2210
2211/* Check for missing features that may prevent the CPU class from
2212 * running using the current machine and accelerator.
2213 */
2214static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
2215 strList **missing_feats)
2216{
2217 X86CPU *xc;
2218 FeatureWord w;
2219 Error *err = NULL;
2220 strList **next = missing_feats;
2221
2222 if (xcc->kvm_required && !kvm_enabled()) {
2223 strList *new = g_new0(strList, 1);
2224 new->value = g_strdup("kvm");
2225 *missing_feats = new;
2226 return;
2227 }
2228
2229 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2230
2231 x86_cpu_expand_features(xc, &err);
2232 if (err) {
2233 /* Errors at x86_cpu_expand_features should never happen,
2234 * but in case they do, just report the model as not
2235 * runnable at all using the "type" property.
2236 */
2237 strList *new = g_new0(strList, 1);
2238 new->value = g_strdup("type");
2239 *next = new;
2240 next = &new->next;
2241 }
2242
2243 x86_cpu_filter_features(xc);
2244
2245 for (w = 0; w < FEATURE_WORDS; w++) {
2246 uint32_t filtered = xc->filtered_features[w];
2247 int i;
2248 for (i = 0; i < 32; i++) {
2249 if (filtered & (1UL << i)) {
2250 strList *new = g_new0(strList, 1);
2251 new->value = g_strdup(x86_cpu_feature_name(w, i));
2252 *next = new;
2253 next = &new->next;
2254 }
2255 }
2256 }
2257
2258 object_unref(OBJECT(xc));
2259}
2260
2261/* Print all cpuid feature names in featureset
2262 */
2263static void listflags(FILE *f, fprintf_function print, const char **featureset)
2264{
2265 int bit;
2266 bool first = true;
2267
2268 for (bit = 0; bit < 32; bit++) {
2269 if (featureset[bit]) {
2270 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2271 first = false;
2272 }
2273 }
2274}
2275
2276/* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
2277static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2278{
2279 ObjectClass *class_a = (ObjectClass *)a;
2280 ObjectClass *class_b = (ObjectClass *)b;
2281 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2282 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2283 const char *name_a, *name_b;
2284
2285 if (cc_a->ordering != cc_b->ordering) {
2286 return cc_a->ordering - cc_b->ordering;
2287 } else {
2288 name_a = object_class_get_name(class_a);
2289 name_b = object_class_get_name(class_b);
2290 return strcmp(name_a, name_b);
2291 }
2292}
2293
2294static GSList *get_sorted_cpu_model_list(void)
2295{
2296 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2297 list = g_slist_sort(list, x86_cpu_list_compare);
2298 return list;
2299}
2300
2301static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2302{
2303 ObjectClass *oc = data;
2304 X86CPUClass *cc = X86_CPU_CLASS(oc);
2305 CPUListState *s = user_data;
2306 char *name = x86_cpu_class_get_model_name(cc);
2307 const char *desc = cc->model_description;
2308 if (!desc && cc->cpu_def) {
2309 desc = cc->cpu_def->model_id;
2310 }
2311
2312 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2313 name, desc);
2314 g_free(name);
2315}
2316
2317/* list available CPU models and flags */
2318void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2319{
2320 int i;
2321 CPUListState s = {
2322 .file = f,
2323 .cpu_fprintf = cpu_fprintf,
2324 };
2325 GSList *list;
2326
2327 (*cpu_fprintf)(f, "Available CPUs:\n");
2328 list = get_sorted_cpu_model_list();
2329 g_slist_foreach(list, x86_cpu_list_entry, &s);
2330 g_slist_free(list);
2331
2332 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2333 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2334 FeatureWordInfo *fw = &feature_word_info[i];
2335
2336 (*cpu_fprintf)(f, " ");
2337 listflags(f, cpu_fprintf, fw->feat_names);
2338 (*cpu_fprintf)(f, "\n");
2339 }
2340}
2341
2342static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2343{
2344 ObjectClass *oc = data;
2345 X86CPUClass *cc = X86_CPU_CLASS(oc);
2346 CpuDefinitionInfoList **cpu_list = user_data;
2347 CpuDefinitionInfoList *entry;
2348 CpuDefinitionInfo *info;
2349
2350 info = g_malloc0(sizeof(*info));
2351 info->name = x86_cpu_class_get_model_name(cc);
2352 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
2353 info->has_unavailable_features = true;
2354 info->q_typename = g_strdup(object_class_get_name(oc));
2355 info->migration_safe = cc->migration_safe;
2356 info->has_migration_safe = true;
2357 info->q_static = cc->static_model;
2358
2359 entry = g_malloc0(sizeof(*entry));
2360 entry->value = info;
2361 entry->next = *cpu_list;
2362 *cpu_list = entry;
2363}
2364
2365CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2366{
2367 CpuDefinitionInfoList *cpu_list = NULL;
2368 GSList *list = get_sorted_cpu_model_list();
2369 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2370 g_slist_free(list);
2371 return cpu_list;
2372}
2373
2374static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2375 bool migratable_only)
2376{
2377 FeatureWordInfo *wi = &feature_word_info[w];
2378 uint32_t r;
2379
2380 if (kvm_enabled()) {
2381 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2382 wi->cpuid_ecx,
2383 wi->cpuid_reg);
2384 } else if (tcg_enabled()) {
2385 r = wi->tcg_features;
2386 } else {
2387 return ~0;
2388 }
2389 if (migratable_only) {
2390 r &= x86_cpu_get_migratable_flags(w);
2391 }
2392 return r;
2393}
2394
2395static void x86_cpu_report_filtered_features(X86CPU *cpu)
2396{
2397 FeatureWord w;
2398
2399 for (w = 0; w < FEATURE_WORDS; w++) {
2400 report_unavailable_features(w, cpu->filtered_features[w]);
2401 }
2402}
2403
2404static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2405{
2406 PropValue *pv;
2407 for (pv = props; pv->prop; pv++) {
2408 if (!pv->value) {
2409 continue;
2410 }
2411 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2412 &error_abort);
2413 }
2414}
2415
2416/* Load data from X86CPUDefinition into a X86CPU object
2417 */
2418static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2419{
2420 CPUX86State *env = &cpu->env;
2421 const char *vendor;
2422 char host_vendor[CPUID_VENDOR_SZ + 1];
2423 FeatureWord w;
2424
2425 /*NOTE: any property set by this function should be returned by
2426 * x86_cpu_static_props(), so static expansion of
2427 * query-cpu-model-expansion is always complete.
2428 */
2429
2430 /* CPU models only set _minimum_ values for level/xlevel: */
2431 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
2432 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
2433
2434 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2435 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2436 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2437 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2438 for (w = 0; w < FEATURE_WORDS; w++) {
2439 env->features[w] = def->features[w];
2440 }
2441
2442 /* Special cases not set in the X86CPUDefinition structs: */
2443 if (kvm_enabled()) {
2444 if (!kvm_irqchip_in_kernel()) {
2445 x86_cpu_change_kvm_default("x2apic", "off");
2446 }
2447
2448 x86_cpu_apply_props(cpu, kvm_default_props);
2449 } else if (tcg_enabled()) {
2450 x86_cpu_apply_props(cpu, tcg_default_props);
2451 }
2452
2453 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2454
2455 /* sysenter isn't supported in compatibility mode on AMD,
2456 * syscall isn't supported in compatibility mode on Intel.
2457 * Normally we advertise the actual CPU vendor, but you can
2458 * override this using the 'vendor' property if you want to use
2459 * KVM's sysenter/syscall emulation in compatibility mode and
2460 * when doing cross vendor migration
2461 */
2462 vendor = def->vendor;
2463 if (kvm_enabled()) {
2464 uint32_t ebx = 0, ecx = 0, edx = 0;
2465 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2466 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2467 vendor = host_vendor;
2468 }
2469
2470 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2471
2472}
2473
2474/* Return a QDict containing keys for all properties that can be included
2475 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
2476 * must be included in the dictionary.
2477 */
2478static QDict *x86_cpu_static_props(void)
2479{
2480 FeatureWord w;
2481 int i;
2482 static const char *props[] = {
2483 "min-level",
2484 "min-xlevel",
2485 "family",
2486 "model",
2487 "stepping",
2488 "model-id",
2489 "vendor",
2490 "lmce",
2491 NULL,
2492 };
2493 static QDict *d;
2494
2495 if (d) {
2496 return d;
2497 }
2498
2499 d = qdict_new();
2500 for (i = 0; props[i]; i++) {
2501 qdict_put_null(d, props[i]);
2502 }
2503
2504 for (w = 0; w < FEATURE_WORDS; w++) {
2505 FeatureWordInfo *fi = &feature_word_info[w];
2506 int bit;
2507 for (bit = 0; bit < 32; bit++) {
2508 if (!fi->feat_names[bit]) {
2509 continue;
2510 }
2511 qdict_put_null(d, fi->feat_names[bit]);
2512 }
2513 }
2514
2515 return d;
2516}
2517
2518/* Add an entry to @props dict, with the value for property. */
2519static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
2520{
2521 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
2522 &error_abort);
2523
2524 qdict_put_obj(props, prop, value);
2525}
2526
2527/* Convert CPU model data from X86CPU object to a property dictionary
2528 * that can recreate exactly the same CPU model.
2529 */
2530static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
2531{
2532 QDict *sprops = x86_cpu_static_props();
2533 const QDictEntry *e;
2534
2535 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
2536 const char *prop = qdict_entry_key(e);
2537 x86_cpu_expand_prop(cpu, props, prop);
2538 }
2539}
2540
2541/* Convert CPU model data from X86CPU object to a property dictionary
2542 * that can recreate exactly the same CPU model, including every
2543 * writeable QOM property.
2544 */
2545static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
2546{
2547 ObjectPropertyIterator iter;
2548 ObjectProperty *prop;
2549
2550 object_property_iter_init(&iter, OBJECT(cpu));
2551 while ((prop = object_property_iter_next(&iter))) {
2552 /* skip read-only or write-only properties */
2553 if (!prop->get || !prop->set) {
2554 continue;
2555 }
2556
2557 /* "hotplugged" is the only property that is configurable
2558 * on the command-line but will be set differently on CPUs
2559 * created using "-cpu ... -smp ..." and by CPUs created
2560 * on the fly by x86_cpu_from_model() for querying. Skip it.
2561 */
2562 if (!strcmp(prop->name, "hotplugged")) {
2563 continue;
2564 }
2565 x86_cpu_expand_prop(cpu, props, prop->name);
2566 }
2567}
2568
2569static void object_apply_props(Object *obj, QDict *props, Error **errp)
2570{
2571 const QDictEntry *prop;
2572 Error *err = NULL;
2573
2574 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
2575 object_property_set_qobject(obj, qdict_entry_value(prop),
2576 qdict_entry_key(prop), &err);
2577 if (err) {
2578 break;
2579 }
2580 }
2581
2582 error_propagate(errp, err);
2583}
2584
2585/* Create X86CPU object according to model+props specification */
2586static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
2587{
2588 X86CPU *xc = NULL;
2589 X86CPUClass *xcc;
2590 Error *err = NULL;
2591
2592 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
2593 if (xcc == NULL) {
2594 error_setg(&err, "CPU model '%s' not found", model);
2595 goto out;
2596 }
2597
2598 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
2599 if (props) {
2600 object_apply_props(OBJECT(xc), props, &err);
2601 if (err) {
2602 goto out;
2603 }
2604 }
2605
2606 x86_cpu_expand_features(xc, &err);
2607 if (err) {
2608 goto out;
2609 }
2610
2611out:
2612 if (err) {
2613 error_propagate(errp, err);
2614 object_unref(OBJECT(xc));
2615 xc = NULL;
2616 }
2617 return xc;
2618}
2619
2620CpuModelExpansionInfo *
2621arch_query_cpu_model_expansion(CpuModelExpansionType type,
2622 CpuModelInfo *model,
2623 Error **errp)
2624{
2625 X86CPU *xc = NULL;
2626 Error *err = NULL;
2627 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
2628 QDict *props = NULL;
2629 const char *base_name;
2630
2631 xc = x86_cpu_from_model(model->name,
2632 model->has_props ?
2633 qobject_to_qdict(model->props) :
2634 NULL, &err);
2635 if (err) {
2636 goto out;
2637 }
2638
2639 props = qdict_new();
2640
2641 switch (type) {
2642 case CPU_MODEL_EXPANSION_TYPE_STATIC:
2643 /* Static expansion will be based on "base" only */
2644 base_name = "base";
2645 x86_cpu_to_dict(xc, props);
2646 break;
2647 case CPU_MODEL_EXPANSION_TYPE_FULL:
2648 /* As we don't return every single property, full expansion needs
2649 * to keep the original model name+props, and add extra
2650 * properties on top of that.
2651 */
2652 base_name = model->name;
2653 x86_cpu_to_dict_full(xc, props);
2654 break;
2655 default:
2656 error_setg(&err, "Unsupported expansion type");
2657 goto out;
2658 }
2659
2660 if (!props) {
2661 props = qdict_new();
2662 }
2663 x86_cpu_to_dict(xc, props);
2664
2665 ret->model = g_new0(CpuModelInfo, 1);
2666 ret->model->name = g_strdup(base_name);
2667 ret->model->props = QOBJECT(props);
2668 ret->model->has_props = true;
2669
2670out:
2671 object_unref(OBJECT(xc));
2672 if (err) {
2673 error_propagate(errp, err);
2674 qapi_free_CpuModelExpansionInfo(ret);
2675 ret = NULL;
2676 }
2677 return ret;
2678}
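/* This backs the query-cpu-model-expansion QMP command; an illustrative
 * invocation (model name is only an example) would be:
 *
 *   { "execute": "query-cpu-model-expansion",
 *     "arguments": { "type": "static", "model": { "name": "EPYC" } } }
 *
 * which returns a "base"-based model plus the explicit property list built
 * by x86_cpu_to_dict() above.
 */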
2679
2680static gchar *x86_gdb_arch_name(CPUState *cs)
2681{
2682#ifdef TARGET_X86_64
2683 return g_strdup("i386:x86-64");
2684#else
2685 return g_strdup("i386");
2686#endif
2687}
2688
2689static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2690{
2691 X86CPUDefinition *cpudef = data;
2692 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2693
2694 xcc->cpu_def = cpudef;
2695 xcc->migration_safe = true;
2696}
2697
2698static void x86_register_cpudef_type(X86CPUDefinition *def)
2699{
2700 char *typename = x86_cpu_type_name(def->name);
2701 TypeInfo ti = {
2702 .name = typename,
2703 .parent = TYPE_X86_CPU,
2704 .class_init = x86_cpu_cpudef_class_init,
2705 .class_data = def,
2706 };
2707
2708 /* AMD aliases are handled at runtime based on CPUID vendor, so
2709 * they shouldn't be set in the CPU model table.
2710 */
2711 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
2712
2713 type_register(&ti);
2714 g_free(typename);
2715}
2716
2717#if !defined(CONFIG_USER_ONLY)
2718
2719void cpu_clear_apic_feature(CPUX86State *env)
2720{
2721 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2722}
2723
2724#endif /* !CONFIG_USER_ONLY */
2725
2726void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2727 uint32_t *eax, uint32_t *ebx,
2728 uint32_t *ecx, uint32_t *edx)
2729{
2730 X86CPU *cpu = x86_env_get_cpu(env);
2731 CPUState *cs = CPU(cpu);
2732 uint32_t pkg_offset;
2733 uint32_t limit;
2734 uint32_t signature[3];
2735
2736 /* Calculate & apply limits for different index ranges */
2737 if (index >= 0xC0000000) {
2738 limit = env->cpuid_xlevel2;
2739 } else if (index >= 0x80000000) {
2740 limit = env->cpuid_xlevel;
2741 } else if (index >= 0x40000000) {
2742 limit = 0x40000001;
2743 } else {
2744 limit = env->cpuid_level;
2745 }
2746
2747 if (index > limit) {
2748 /* Intel documentation states that invalid EAX input will
2749 * return the same information as EAX=cpuid_level
2750 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2751 */
2752 index = env->cpuid_level;
2753 }
2754
2755 switch(index) {
2756 case 0:
2757 *eax = env->cpuid_level;
2758 *ebx = env->cpuid_vendor1;
2759 *edx = env->cpuid_vendor2;
2760 *ecx = env->cpuid_vendor3;
2761 break;
2762 case 1:
2763 *eax = env->cpuid_version;
2764 *ebx = (cpu->apic_id << 24) |
2765 8 << 8; /* CLFLUSH size in quad words (8 * 8 = 64 bytes), Linux wants it. */
2766 *ecx = env->features[FEAT_1_ECX];
2767 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2768 *ecx |= CPUID_EXT_OSXSAVE;
2769 }
2770 *edx = env->features[FEAT_1_EDX];
2771 if (cs->nr_cores * cs->nr_threads > 1) {
2772 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2773 *edx |= CPUID_HT;
2774 }
2775 break;
2776 case 2:
2777 /* cache info: needed for Pentium Pro compatibility */
2778 if (cpu->cache_info_passthrough) {
2779 host_cpuid(index, 0, eax, ebx, ecx, edx);
2780 break;
2781 }
2782 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2783 *ebx = 0;
2784 if (!cpu->enable_l3_cache) {
2785 *ecx = 0;
2786 } else {
2787 *ecx = L3_N_DESCRIPTOR;
2788 }
2789 *edx = (L1D_DESCRIPTOR << 16) | \
2790 (L1I_DESCRIPTOR << 8) | \
2791 (L2_DESCRIPTOR);
2792 break;
2793 case 4:
2794 /* cache info: needed for Core compatibility */
2795 if (cpu->cache_info_passthrough) {
2796 host_cpuid(index, count, eax, ebx, ecx, edx);
2797 *eax &= ~0xFC000000;
2798 } else {
2799 *eax = 0;
2800 switch (count) {
2801 case 0: /* L1 dcache info */
2802 *eax |= CPUID_4_TYPE_DCACHE | \
2803 CPUID_4_LEVEL(1) | \
2804 CPUID_4_SELF_INIT_LEVEL;
2805 *ebx = (L1D_LINE_SIZE - 1) | \
2806 ((L1D_PARTITIONS - 1) << 12) | \
2807 ((L1D_ASSOCIATIVITY - 1) << 22);
2808 *ecx = L1D_SETS - 1;
2809 *edx = CPUID_4_NO_INVD_SHARING;
2810 break;
2811 case 1: /* L1 icache info */
2812 *eax |= CPUID_4_TYPE_ICACHE | \
2813 CPUID_4_LEVEL(1) | \
2814 CPUID_4_SELF_INIT_LEVEL;
2815 *ebx = (L1I_LINE_SIZE - 1) | \
2816 ((L1I_PARTITIONS - 1) << 12) | \
2817 ((L1I_ASSOCIATIVITY - 1) << 22);
2818 *ecx = L1I_SETS - 1;
2819 *edx = CPUID_4_NO_INVD_SHARING;
2820 break;
2821 case 2: /* L2 cache info */
2822 *eax |= CPUID_4_TYPE_UNIFIED | \
2823 CPUID_4_LEVEL(2) | \
2824 CPUID_4_SELF_INIT_LEVEL;
2825 if (cs->nr_threads > 1) {
2826 *eax |= (cs->nr_threads - 1) << 14;
2827 }
2828 *ebx = (L2_LINE_SIZE - 1) | \
2829 ((L2_PARTITIONS - 1) << 12) | \
2830 ((L2_ASSOCIATIVITY - 1) << 22);
2831 *ecx = L2_SETS - 1;
2832 *edx = CPUID_4_NO_INVD_SHARING;
2833 break;
2834 case 3: /* L3 cache info */
2835 if (!cpu->enable_l3_cache) {
2836 *eax = 0;
2837 *ebx = 0;
2838 *ecx = 0;
2839 *edx = 0;
2840 break;
2841 }
2842 *eax |= CPUID_4_TYPE_UNIFIED | \
2843 CPUID_4_LEVEL(3) | \
2844 CPUID_4_SELF_INIT_LEVEL;
2845 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2846 *eax |= ((1 << pkg_offset) - 1) << 14;
2847 *ebx = (L3_N_LINE_SIZE - 1) | \
2848 ((L3_N_PARTITIONS - 1) << 12) | \
2849 ((L3_N_ASSOCIATIVITY - 1) << 22);
2850 *ecx = L3_N_SETS - 1;
2851 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
2852 break;
2853 default: /* end of info */
2854 *eax = 0;
2855 *ebx = 0;
2856 *ecx = 0;
2857 *edx = 0;
2858 break;
2859 }
2860 }
2861
2862 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2863 if ((*eax & 31) && cs->nr_cores > 1) {
2864 *eax |= (cs->nr_cores - 1) << 26;
2865 }
2866 break;
2867 case 5:
2868 /* mwait info: needed for Core compatibility */
2869 *eax = 0; /* Smallest monitor-line size in bytes */
2870 *ebx = 0; /* Largest monitor-line size in bytes */
2871 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2872 *edx = 0;
2873 break;
2874 case 6:
2875 /* Thermal and Power Leaf */
2876 *eax = env->features[FEAT_6_EAX];
2877 *ebx = 0;
2878 *ecx = 0;
2879 *edx = 0;
2880 break;
2881 case 7:
2882 /* Structured Extended Feature Flags Enumeration Leaf */
2883 if (count == 0) {
2884 *eax = 0; /* Maximum ECX value for sub-leaves */
2885 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2886 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2887 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2888 *ecx |= CPUID_7_0_ECX_OSPKE;
2889 }
2890 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
2891 } else {
2892 *eax = 0;
2893 *ebx = 0;
2894 *ecx = 0;
2895 *edx = 0;
2896 }
2897 break;
2898 case 9:
2899 /* Direct Cache Access Information Leaf */
2900 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2901 *ebx = 0;
2902 *ecx = 0;
2903 *edx = 0;
2904 break;
2905 case 0xA:
2906 /* Architectural Performance Monitoring Leaf */
2907 if (kvm_enabled() && cpu->enable_pmu) {
2908 KVMState *s = cs->kvm_state;
2909
2910 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2911 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2912 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2913 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2914 } else {
2915 *eax = 0;
2916 *ebx = 0;
2917 *ecx = 0;
2918 *edx = 0;
2919 }
2920 break;
2921 case 0xB:
2922 /* Extended Topology Enumeration Leaf */
2923 if (!cpu->enable_cpuid_0xb) {
2924 *eax = *ebx = *ecx = *edx = 0;
2925 break;
2926 }
2927
2928 *ecx = count & 0xff;
2929 *edx = cpu->apic_id;
2930
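        /* Sub-leaf sketch: with e.g. 4 cores x 2 threads, sub-leaf 0 (SMT
         * level) returns EAX=1 (bits to shift right to get the core ID) and
         * EBX=2 logical processors, while sub-leaf 1 (core level) returns
         * EAX=3 (bits to shift right to get the package ID) and EBX=8.
         */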
2931 switch (count) {
2932 case 0:
2933 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
2934 *ebx = cs->nr_threads;
2935 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2936 break;
2937 case 1:
2938 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2939 *ebx = cs->nr_cores * cs->nr_threads;
2940 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2941 break;
2942 default:
2943 *eax = 0;
2944 *ebx = 0;
2945 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2946 }
2947
2948 assert(!(*eax & ~0x1f));
2949 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2950 break;
2951 case 0xD: {
2952 /* Processor Extended State */
2953 *eax = 0;
2954 *ebx = 0;
2955 *ecx = 0;
2956 *edx = 0;
2957 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2958 break;
2959 }
2960
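        /* Sub-leaf sketch: ECX=0 reports the supported component bitmap
         * (EAX/EDX) and the save-area size for the enabled set; ECX=1 reports
         * the XSAVE extension flags (FEAT_XSAVE); ECX>=2 reports the size and
         * offset of each individual state component, e.g. the AVX component
         * (index 2) is 256 bytes at offset 576 in the standard layout
         * described by x86_ext_save_areas[].
         */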
2961 if (count == 0) {
2962 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
2963 *eax = env->features[FEAT_XSAVE_COMP_LO];
2964 *edx = env->features[FEAT_XSAVE_COMP_HI];
2965 *ebx = *ecx;
2966 } else if (count == 1) {
2967 *eax = env->features[FEAT_XSAVE];
2968 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2969 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
2970 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2971 *eax = esa->size;
2972 *ebx = esa->offset;
2973 }
2974 }
2975 break;
2976 }
2977 case 0x40000000:
2978 /*
2979 * CPUID code in kvm_arch_init_vcpu() ignores stuff
2980 * set here, but we restrict to TCG nonetheless.
2981 */
2982 if (tcg_enabled() && cpu->expose_tcg) {
2983 memcpy(signature, "TCGTCGTCGTCG", 12);
2984 *eax = 0x40000001;
2985 *ebx = signature[0];
2986 *ecx = signature[1];
2987 *edx = signature[2];
2988 } else {
2989 *eax = 0;
2990 *ebx = 0;
2991 *ecx = 0;
2992 *edx = 0;
2993 }
2994 break;
2995 case 0x40000001:
2996 *eax = 0;
2997 *ebx = 0;
2998 *ecx = 0;
2999 *edx = 0;
3000 break;
3001 case 0x80000000:
3002 *eax = env->cpuid_xlevel;
3003 *ebx = env->cpuid_vendor1;
3004 *edx = env->cpuid_vendor2;
3005 *ecx = env->cpuid_vendor3;
3006 break;
3007 case 0x80000001:
3008 *eax = env->cpuid_version;
3009 *ebx = 0;
3010 *ecx = env->features[FEAT_8000_0001_ECX];
3011 *edx = env->features[FEAT_8000_0001_EDX];
3012
3013 /* The Linux kernel checks for the CMPLegacy bit and
3014 * discards multiple thread information if it is set.
3015 * So don't set it here for Intel to make Linux guests happy.
3016 */
3017 if (cs->nr_cores * cs->nr_threads > 1) {
3018 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3019 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3020 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3021 *ecx |= 1 << 1; /* CmpLegacy bit */
3022 }
3023 }
3024 break;
3025 case 0x80000002:
3026 case 0x80000003:
3027 case 0x80000004:
3028 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3029 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3030 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3031 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3032 break;
3033 case 0x80000005:
3034 /* cache info (L1 cache) */
3035 if (cpu->cache_info_passthrough) {
3036 host_cpuid(index, 0, eax, ebx, ecx, edx);
3037 break;
3038 }
3039 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3040 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3041 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3042 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3043 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
3044 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
3045 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
3046 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
3047 break;
3048 case 0x80000006:
3049 /* cache info (L2 cache) */
3050 if (cpu->cache_info_passthrough) {
3051 host_cpuid(index, 0, eax, ebx, ecx, edx);
3052 break;
3053 }
3054 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3055 (L2_DTLB_2M_ENTRIES << 16) | \
3056 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3057 (L2_ITLB_2M_ENTRIES);
3058 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3059 (L2_DTLB_4K_ENTRIES << 16) | \
3060 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3061 (L2_ITLB_4K_ENTRIES);
3062 *ecx = (L2_SIZE_KB_AMD << 16) | \
3063 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
3064 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
3065 if (!cpu->enable_l3_cache) {
3066 *edx = ((L3_SIZE_KB / 512) << 18) | \
3067 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
3068 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
3069 } else {
3070 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
3071 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
3072 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
3073 }
3074 break;
3075 case 0x80000007:
3076 *eax = 0;
3077 *ebx = 0;
3078 *ecx = 0;
3079 *edx = env->features[FEAT_8000_0007_EDX];
3080 break;
3081 case 0x80000008:
3082 /* virtual & phys address size in low 2 bytes. */
3083 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3084 /* 64 bit processor */
3085 *eax = cpu->phys_bits; /* configurable physical bits */
3086 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3087 *eax |= 0x00003900; /* 57 bits virtual */
3088 } else {
3089 *eax |= 0x00003000; /* 48 bits virtual */
3090 }
3091 } else {
3092 *eax = cpu->phys_bits;
3093 }
3094 *ebx = 0;
3095 *ecx = 0;
3096 *edx = 0;
3097 if (cs->nr_cores * cs->nr_threads > 1) {
3098 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
3099 }
3100 break;
3101 case 0x8000000A:
3102 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3103 *eax = 0x00000001; /* SVM Revision */
3104 *ebx = 0x00000010; /* nr of ASIDs */
3105 *ecx = 0;
3106 *edx = env->features[FEAT_SVM]; /* optional features */
3107 } else {
3108 *eax = 0;
3109 *ebx = 0;
3110 *ecx = 0;
3111 *edx = 0;
3112 }
3113 break;
3114 case 0xC0000000:
3115 *eax = env->cpuid_xlevel2;
3116 *ebx = 0;
3117 *ecx = 0;
3118 *edx = 0;
3119 break;
3120 case 0xC0000001:
3121 /* Support for VIA CPU's CPUID instruction */
3122 *eax = env->cpuid_version;
3123 *ebx = 0;
3124 *ecx = 0;
3125 *edx = env->features[FEAT_C000_0001_EDX];
3126 break;
3127 case 0xC0000002:
3128 case 0xC0000003:
3129 case 0xC0000004:
3130 /* Reserved for the future, and now filled with zero */
3131 *eax = 0;
3132 *ebx = 0;
3133 *ecx = 0;
3134 *edx = 0;
3135 break;
3136 default:
3137 /* reserved values: zero */
3138 *eax = 0;
3139 *ebx = 0;
3140 *ecx = 0;
3141 *edx = 0;
3142 break;
3143 }
3144}
3145
3146/* CPUClass::reset() */
3147static void x86_cpu_reset(CPUState *s)
3148{
3149 X86CPU *cpu = X86_CPU(s);
3150 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
3151 CPUX86State *env = &cpu->env;
3152 target_ulong cr4;
3153 uint64_t xcr0;
3154 int i;
3155
3156 xcc->parent_reset(s);
3157
3158 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
3159
3160 env->old_exception = -1;
3161
3162 /* init to reset state */
3163
3164 env->hflags2 |= HF2_GIF_MASK;
3165
3166 cpu_x86_update_cr0(env, 0x60000010);
3167 env->a20_mask = ~0x0;
3168 env->smbase = 0x30000;
3169
3170 env->idt.limit = 0xffff;
3171 env->gdt.limit = 0xffff;
3172 env->ldt.limit = 0xffff;
3173 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
3174 env->tr.limit = 0xffff;
3175 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
3176
3177 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
3178 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
3179 DESC_R_MASK | DESC_A_MASK);
3180 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
3181 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3182 DESC_A_MASK);
3183 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
3184 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3185 DESC_A_MASK);
3186 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
3187 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3188 DESC_A_MASK);
3189 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
3190 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3191 DESC_A_MASK);
3192 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
3193 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
3194 DESC_A_MASK);
3195
3196 env->eip = 0xfff0;
3197 env->regs[R_EDX] = env->cpuid_version;
3198
3199 env->eflags = 0x2;
3200
3201 /* FPU init */
3202 for (i = 0; i < 8; i++) {
3203 env->fptags[i] = 1;
3204 }
3205 cpu_set_fpuc(env, 0x37f);
3206
3207 env->mxcsr = 0x1f80;
3208 /* All units are in INIT state. */
3209 env->xstate_bv = 0;
3210
3211 env->pat = 0x0007040600070406ULL;
3212 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
3213
3214 memset(env->dr, 0, sizeof(env->dr));
3215 env->dr[6] = DR6_FIXED_1;
3216 env->dr[7] = DR7_FIXED_1;
3217 cpu_breakpoint_remove_all(s, BP_CPU);
3218 cpu_watchpoint_remove_all(s, BP_CPU);
3219
3220 cr4 = 0;
3221 xcr0 = XSTATE_FP_MASK;
3222
3223#ifdef CONFIG_USER_ONLY
3224 /* Enable all the features for user-mode. */
3225 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
3226 xcr0 |= XSTATE_SSE_MASK;
3227 }
3228 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3229 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3230 if (env->features[esa->feature] & esa->bits) {
3231 xcr0 |= 1ull << i;
3232 }
3233 }
3234
3235 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
3236 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
3237 }
3238 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
3239 cr4 |= CR4_FSGSBASE_MASK;
3240 }
3241#endif
3242
3243 env->xcr0 = xcr0;
3244 cpu_x86_update_cr4(env, cr4);
3245
3246 /*
3247 * SDM 11.11.5 requires:
3248 * - IA32_MTRR_DEF_TYPE MSR.E = 0
3249 * - IA32_MTRR_PHYSMASKn.V = 0
3250 * All other bits are undefined. For simplification, zero it all.
3251 */
3252 env->mtrr_deftype = 0;
3253 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
3254 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
3255
3256#if !defined(CONFIG_USER_ONLY)
3257 /* We hard-wire the BSP to the first CPU. */
3258 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
3259
3260 s->halted = !cpu_is_bsp(cpu);
3261
3262 if (kvm_enabled()) {
3263 kvm_arch_reset_vcpu(cpu);
3264 }
3265#endif
3266}
3267
3268#ifndef CONFIG_USER_ONLY
3269bool cpu_is_bsp(X86CPU *cpu)
3270{
3271 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
3272}
3273
3274/* TODO: remove me, when reset over QOM tree is implemented */
3275static void x86_cpu_machine_reset_cb(void *opaque)
3276{
3277 X86CPU *cpu = opaque;
3278 cpu_reset(CPU(cpu));
3279}
3280#endif
3281
3282static void mce_init(X86CPU *cpu)
3283{
3284 CPUX86State *cenv = &cpu->env;
3285 unsigned int bank;
3286
3287 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
3288 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
3289 (CPUID_MCE | CPUID_MCA)) {
3290 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
3291 (cpu->enable_lmce ? MCG_LMCE_P : 0);
3292 cenv->mcg_ctl = ~(uint64_t)0;
3293 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
3294 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
3295 }
3296 }
3297}
3298
3299#ifndef CONFIG_USER_ONLY
3300APICCommonClass *apic_get_class(void)
3301{
3302 const char *apic_type = "apic";
3303
3304 if (kvm_apic_in_kernel()) {
3305 apic_type = "kvm-apic";
3306 } else if (xen_enabled()) {
3307 apic_type = "xen-apic";
3308 }
3309
3310 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
3311}
3312
3313static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
3314{
3315 APICCommonState *apic;
3316 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
3317
3318 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
3319
3320 object_property_add_child(OBJECT(cpu), "lapic",
3321 OBJECT(cpu->apic_state), &error_abort);
3322 object_unref(OBJECT(cpu->apic_state));
3323
3324 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
3325 /* TODO: convert to link<> */
3326 apic = APIC_COMMON(cpu->apic_state);
3327 apic->cpu = cpu;
3328 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
3329}
3330
3331static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3332{
3333 APICCommonState *apic;
3334 static bool apic_mmio_map_once;
3335
3336 if (cpu->apic_state == NULL) {
3337 return;
3338 }
3339 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
3340 errp);
3341
3342 /* Map APIC MMIO area */
3343 apic = APIC_COMMON(cpu->apic_state);
3344 if (!apic_mmio_map_once) {
3345 memory_region_add_subregion_overlap(get_system_memory(),
3346 apic->apicbase &
3347 MSR_IA32_APICBASE_BASE,
3348 &apic->io_memory,
3349 0x1000);
3350 apic_mmio_map_once = true;
3351 }
3352}
3353
3354static void x86_cpu_machine_done(Notifier *n, void *unused)
3355{
3356 X86CPU *cpu = container_of(n, X86CPU, machine_done);
3357 MemoryRegion *smram =
3358 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
3359
3360 if (smram) {
3361 cpu->smram = g_new(MemoryRegion, 1);
3362 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
3363 smram, 0, 1ull << 32);
3364 memory_region_set_enabled(cpu->smram, true);
3365 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
3366 }
3367}
3368#else
3369static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
3370{
3371}
3372#endif
3373
3374/* Note: Only safe for use on x86(-64) hosts */
3375static uint32_t x86_host_phys_bits(void)
3376{
3377 uint32_t eax;
3378 uint32_t host_phys_bits;
3379
3380 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
3381 if (eax >= 0x80000008) {
3382 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
3383 /* Note: According to AMD doc 25481 rev 2.34 they have a field
3384 * at 23:16 that can specify the maximum physical address bits for
3385 * the guest and can override this value; but I've not seen
3386 * anything with that set.
3387 */
3388 host_phys_bits = eax & 0xff;
3389 } else {
3390 /* It's an odd 64-bit machine that doesn't have the leaf for
3391 * physical address bits; fall back to 36, which matches most
3392 * older Intel CPUs.
3393 */
3394 host_phys_bits = 36;
3395 }
3396
3397 return host_phys_bits;
3398}
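/* For reference only (the value is host-dependent): many desktop Intel parts
 * report 39 here and server parts 46, while recent AMD parts report 48; the
 * fallback of 36 only triggers on hosts lacking leaf 0x80000008.
 */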
3399
3400static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
3401{
3402 if (*min < value) {
3403 *min = value;
3404 }
3405}
3406
3407/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
3408static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
3409{
3410 CPUX86State *env = &cpu->env;
3411 FeatureWordInfo *fi = &feature_word_info[w];
3412 uint32_t eax = fi->cpuid_eax;
3413 uint32_t region = eax & 0xF0000000;
3414
3415 if (!env->features[w]) {
3416 return;
3417 }
3418
3419 switch (region) {
3420 case 0x00000000:
3421 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
3422 break;
3423 case 0x80000000:
3424 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
3425 break;
3426 case 0xC0000000:
3427 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
3428 break;
3429 }
3430}
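/* Example: if any bit of FEAT_7_0_EBX is set, its cpuid_eax of 7 falls in the
 * 0x00000000 region, so cpuid_min_level is raised to at least 7; likewise a
 * non-zero FEAT_8000_0007_EDX raises cpuid_min_xlevel to at least 0x80000007.
 */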
3431
3432/* Calculate XSAVE components based on the configured CPU feature flags */
3433static void x86_cpu_enable_xsave_components(X86CPU *cpu)
3434{
3435 CPUX86State *env = &cpu->env;
3436 int i;
3437 uint64_t mask;
3438
3439 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3440 return;
3441 }
3442
3443 mask = 0;
3444 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
3445 const ExtSaveArea *esa = &x86_ext_save_areas[i];
3446 if (env->features[esa->feature] & esa->bits) {
3447 mask |= (1ULL << i);
3448 }
3449 }
3450
3451 env->features[FEAT_XSAVE_COMP_LO] = mask;
3452 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3453}
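/* Worked example of the mask above: a guest whose only XSAVE-managed extended
 * state is AVX ends up with component bits 0 (x87), 1 (SSE) and 2 (AVX) set,
 * i.e. FEAT_XSAVE_COMP_LO == 0x7 and FEAT_XSAVE_COMP_HI == 0.
 */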
3454
3455 /***** Steps involved in loading and filtering CPUID data
3456 *
3457 * When initializing and realizing a CPU object, the steps
3458 * involved in setting up CPUID data are:
3459 *
3460 * 1) Loading CPU model definition (X86CPUDefinition). This is
3461 * implemented by x86_cpu_load_def() and should be completely
3462 * transparent, as it is done automatically by instance_init.
3463 * No code should need to look at X86CPUDefinition structs
3464 * outside instance_init.
3465 *
3466 * 2) CPU expansion. This is done by realize before CPUID
3467 * filtering, and will make sure host/accelerator data is
3468 * loaded for CPU models that depend on host capabilities
3469 * (e.g. "host"). Done by x86_cpu_expand_features().
3470 *
3471 * 3) CPUID filtering. This initializes extra data related to
3472 * CPUID, and checks if the host supports all capabilities
3473 * required by the CPU. Runnability of a CPU model is
3474 * determined at this step. Done by x86_cpu_filter_features().
3475 *
3476 * Some operations don't require all steps to be performed.
3477 * More precisely:
3478 *
3479 * - CPU instance creation (instance_init) will run only CPU
3480 * model loading. CPU expansion can't run at instance_init-time
3481 * because host/accelerator data may not be available yet.
3482 * - CPU realization will perform both CPU model expansion and CPUID
3483 * filtering, and return an error in case one of them fails.
3484 * - query-cpu-definitions needs to run all 3 steps. It needs
3485 * to run CPUID filtering, as the 'unavailable-features'
3486 * field is set based on the filtering results.
3487 * - The query-cpu-model-expansion QMP command only needs to run
3488 * CPU model loading and CPU expansion. It should not filter
3489 * any CPUID data based on host capabilities.
3490 */
3491
3492/* Expand CPU configuration data, based on configured features
3493 * and host/accelerator capabilities when appropriate.
3494 */
3495static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
3496{
3497 CPUX86State *env = &cpu->env;
3498 FeatureWord w;
3499 GList *l;
3500 Error *local_err = NULL;
3501
3502 /*TODO: Now cpu->max_features doesn't overwrite features
3503 * set using QOM properties, and we can convert
3504 * plus_features & minus_features to global properties
3505 * inside x86_cpu_parse_featurestr() too.
3506 */
3507 if (cpu->max_features) {
3508 for (w = 0; w < FEATURE_WORDS; w++) {
3509 /* Override only features that weren't set explicitly
3510 * by the user.
3511 */
3512 env->features[w] |=
3513 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
3514 ~env->user_features[w];
3515 }
3516 }
3517
3518 for (l = plus_features; l; l = l->next) {
3519 const char *prop = l->data;
3520 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
3521 if (local_err) {
3522 goto out;
3523 }
3524 }
3525
3526 for (l = minus_features; l; l = l->next) {
3527 const char *prop = l->data;
3528 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
3529 if (local_err) {
3530 goto out;
3531 }
3532 }
3533
3534 if (!kvm_enabled() || !cpu->expose_kvm) {
3535 env->features[FEAT_KVM] = 0;
3536 }
3537
3538 x86_cpu_enable_xsave_components(cpu);
3539
3540 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
3541 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
3542 if (cpu->full_cpuid_auto_level) {
3543 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
3544 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
3545 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
3546 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
3547 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
3548 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
3549 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
3550 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
3551 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
3552 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
3553 /* SVM requires CPUID[0x8000000A] */
3554 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3555 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
3556 }
3557 }
3558
3559 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
3560 if (env->cpuid_level == UINT32_MAX) {
3561 env->cpuid_level = env->cpuid_min_level;
3562 }
3563 if (env->cpuid_xlevel == UINT32_MAX) {
3564 env->cpuid_xlevel = env->cpuid_min_xlevel;
3565 }
3566 if (env->cpuid_xlevel2 == UINT32_MAX) {
3567 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
3568 }
3569
3570out:
3571 if (local_err != NULL) {
3572 error_propagate(errp, local_err);
3573 }
3574}
3575
3576/*
3577 * Finishes initialization of CPUID data, filters CPU feature
3578 * words based on host availability of each feature.
3579 *
3580 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
3581 */
3582static int x86_cpu_filter_features(X86CPU *cpu)
3583{
3584 CPUX86State *env = &cpu->env;
3585 FeatureWord w;
3586 int rv = 0;
3587
3588 for (w = 0; w < FEATURE_WORDS; w++) {
3589 uint32_t host_feat =
3590 x86_cpu_get_supported_feature_word(w, false);
3591 uint32_t requested_features = env->features[w];
3592 env->features[w] &= host_feat;
3593 cpu->filtered_features[w] = requested_features & ~env->features[w];
3594 if (cpu->filtered_features[w]) {
3595 rv = 1;
3596 }
3597 }
3598
3599 return rv;
3600}
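/* Example: if a model requests avx512f but the accelerator's supported word
 * for FEAT_7_0_EBX lacks it, the bit is dropped from env->features and
 * recorded in cpu->filtered_features; realize then warns (check) or fails
 * (enforce) based on those filtered bits.
 */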
3601
3602#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
3603 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
3604 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
3605#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
3606 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
3607 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
3608static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
3609{
3610 CPUState *cs = CPU(dev);
3611 X86CPU *cpu = X86_CPU(dev);
3612 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3613 CPUX86State *env = &cpu->env;
3614 Error *local_err = NULL;
3615 static bool ht_warned;
3616
3617 if (xcc->kvm_required && !kvm_enabled()) {
3618 char *name = x86_cpu_class_get_model_name(xcc);
3619 error_setg(&local_err, "CPU model '%s' requires KVM", name);
3620 g_free(name);
3621 goto out;
3622 }
3623
3624 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
3625 error_setg(errp, "apic-id property was not initialized properly");
3626 return;
3627 }
3628
3629 x86_cpu_expand_features(cpu, &local_err);
3630 if (local_err) {
3631 goto out;
3632 }
3633
3634 if (x86_cpu_filter_features(cpu) &&
3635 (cpu->check_cpuid || cpu->enforce_cpuid)) {
3636 x86_cpu_report_filtered_features(cpu);
3637 if (cpu->enforce_cpuid) {
3638 error_setg(&local_err,
3639 kvm_enabled() ?
3640 "Host doesn't support requested features" :
3641 "TCG doesn't support requested features");
3642 goto out;
3643 }
3644 }
3645
3646 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
3647 * CPUID[1].EDX.
3648 */
3649 if (IS_AMD_CPU(env)) {
3650 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
3651 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
3652 & CPUID_EXT2_AMD_ALIASES);
3653 }
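/* Note: CPUID_EXT2_AMD_ALIASES covers the CPUID[8000_0001].EDX bits that
 * AMD defines as mirrors of the corresponding CPUID[1].EDX bits (FPU, PSE,
 * PAE, ...), so they are copied from FEAT_1_EDX here instead of being
 * configurable on their own.
 */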
3654
3655 /* For 64-bit systems, think about the number of physical bits to present.
3656 * Ideally this should be the same as the host; anything other than matching
3657 * the host can cause incorrect guest behaviour.
3658 * QEMU used to pick the magic value of 40 bits, which corresponds to
3659 * consumer AMD devices but nothing else.
3660 */
3661 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3662 if (kvm_enabled()) {
3663 uint32_t host_phys_bits = x86_host_phys_bits();
3664 static bool warned;
3665
3666 if (cpu->host_phys_bits) {
3667 /* The user asked for us to use the host physical bits */
3668 cpu->phys_bits = host_phys_bits;
3669 }
3670
3671 /* Print a warning if the user set it to a value that's not the
3672 * host value.
3673 */
3674 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
3675 !warned) {
3676 warn_report("Host physical bits (%u)"
3677 " does not match phys-bits property (%u)",
3678 host_phys_bits, cpu->phys_bits);
3679 warned = true;
3680 }
3681
3682 if (cpu->phys_bits &&
3683 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3684 cpu->phys_bits < 32)) {
3685 error_setg(errp, "phys-bits should be between 32 and %u "
3686 "(but is %u)",
3687 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
3688 return;
3689 }
3690 } else {
3691 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3692 error_setg(errp, "TCG only supports phys-bits=%u",
3693 TCG_PHYS_ADDR_BITS);
3694 return;
3695 }
3696 }
3697 /* 0 means it was not explicitly set by the user (or by machine
3698 * compat_props or by the host code above). In this case, the default
3699 * is the value used by TCG (40).
3700 */
3701 if (cpu->phys_bits == 0) {
3702 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3703 }
3704 } else {
3705 /* For 32-bit systems, don't use the user-set value, but keep
3706 * phys_bits consistent with what we tell the guest.
3707 */
3708 if (cpu->phys_bits != 0) {
3709 error_setg(errp, "phys-bits is not user-configurable in 32-bit mode");
3710 return;
3711 }
3712
3713 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3714 cpu->phys_bits = 36;
3715 } else {
3716 cpu->phys_bits = 32;
3717 }
3718 }
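/* Usage notes (illustrative, based on the checks above): under KVM,
 * "-cpu host,host-phys-bits=on" follows the host's physical address width,
 * and an explicit phys-bits that merely differs from the host only triggers
 * the warning above; under TCG, only phys-bits=40 (TCG_PHYS_ADDR_BITS) or
 * the default of 0 is accepted.
 */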
3719 cpu_exec_realizefn(cs, &local_err);
3720 if (local_err != NULL) {
3721 error_propagate(errp, local_err);
3722 return;
3723 }
3724
3725 if (tcg_enabled()) {
3726 tcg_x86_init();
3727 }
3728
3729#ifndef CONFIG_USER_ONLY
3730 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3731
3732 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3733 x86_cpu_apic_create(cpu, &local_err);
3734 if (local_err != NULL) {
3735 goto out;
3736 }
3737 }
3738#endif
3739
3740 mce_init(cpu);
3741
3742#ifndef CONFIG_USER_ONLY
3743 if (tcg_enabled()) {
3744 AddressSpace *as_normal = address_space_init_shareable(cs->memory,
3745 "cpu-memory");
3746 AddressSpace *as_smm = g_new(AddressSpace, 1);
3747
3748 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3749 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3750
3751 /* Outer container... */
3752 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3753 memory_region_set_enabled(cpu->cpu_as_root, true);
3754
3755 /* ... with two regions inside: normal system memory with low
3756 * priority, and...
3757 */
3758 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3759 get_system_memory(), 0, ~0ull);
3760 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3761 memory_region_set_enabled(cpu->cpu_as_mem, true);
3762 address_space_init(as_smm, cpu->cpu_as_root, "CPU");
3763
3764 cs->num_ases = 2;
3765 cpu_address_space_init(cs, as_normal, 0);
3766 cpu_address_space_init(cs, as_smm, 1);
3767
3768 /* ... SMRAM with higher priority, linked from /machine/smram. */
3769 cpu->machine_done.notify = x86_cpu_machine_done;
3770 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3771 }
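/* Note: the TCG-only block above gives each CPU two address spaces:
 * index 0 is plain system memory, index 1 is the "CPU" view rooted at
 * cpu_as_root, into which x86_cpu_machine_done() later maps SMRAM with
 * higher priority.
 */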
3772#endif
3773
3774 qemu_init_vcpu(cs);
3775
3776 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3777 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3778 * based on inputs (sockets, cores, threads), it is still better to give
3779 * users a warning.
3780 *
3781 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3782 * cs->nr_threads hasn't been populated yet and the check is incorrect.
3783 */
3784 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3785 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3786 " -smp options properly.");
3787 ht_warned = true;
3788 }
3789
3790 x86_cpu_apic_realize(cpu, &local_err);
3791 if (local_err != NULL) {
3792 goto out;
3793 }
3794 cpu_reset(cs);
3795
3796 xcc->parent_realize(dev, &local_err);
3797
3798out:
3799 if (local_err != NULL) {
3800 error_propagate(errp, local_err);
3801 return;
3802 }
3803}
3804
3805static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3806{
3807 X86CPU *cpu = X86_CPU(dev);
3808 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
3809 Error *local_err = NULL;
3810
3811#ifndef CONFIG_USER_ONLY
3812 cpu_remove_sync(CPU(dev));
3813 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3814#endif
3815
3816 if (cpu->apic_state) {
3817 object_unparent(OBJECT(cpu->apic_state));
3818 cpu->apic_state = NULL;
3819 }
3820
3821 xcc->parent_unrealize(dev, &local_err);
3822 if (local_err != NULL) {
3823 error_propagate(errp, local_err);
3824 return;
3825 }
3826}
3827
3828typedef struct BitProperty {
3829 FeatureWord w;
3830 uint32_t mask;
3831} BitProperty;
3832
3833static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3834 void *opaque, Error **errp)
3835{
3836 X86CPU *cpu = X86_CPU(obj);
3837 BitProperty *fp = opaque;
3838 uint32_t f = cpu->env.features[fp->w];
3839 bool value = (f & fp->mask) == fp->mask;
3840 visit_type_bool(v, name, &value, errp);
3841}
3842
3843static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3844 void *opaque, Error **errp)
3845{
3846 DeviceState *dev = DEVICE(obj);
3847 X86CPU *cpu = X86_CPU(obj);
3848 BitProperty *fp = opaque;
3849 Error *local_err = NULL;
3850 bool value;
3851
3852 if (dev->realized) {
3853 qdev_prop_set_after_realize(dev, name, errp);
3854 return;
3855 }
3856
3857 visit_type_bool(v, name, &value, &local_err);
3858 if (local_err) {
3859 error_propagate(errp, local_err);
3860 return;
3861 }
3862
3863 if (value) {
3864 cpu->env.features[fp->w] |= fp->mask;
3865 } else {
3866 cpu->env.features[fp->w] &= ~fp->mask;
3867 }
3868 cpu->env.user_features[fp->w] |= fp->mask;
3869}
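/* Note: recording the mask in env->user_features above marks the bit as
 * explicitly chosen by the user, which is what keeps
 * x86_cpu_expand_features() from overriding it when max_features is set.
 */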
3870
3871static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3872 void *opaque)
3873{
3874 BitProperty *prop = opaque;
3875 g_free(prop);
3876}
3877
3878/* Register a boolean property to get/set a single bit in a uint32_t field.
3879 *
3880 * The same property name can be registered multiple times to make it affect
3881 * multiple bits in the same FeatureWord. In that case, the getter will return
3882 * true only if all bits are set.
3883 */
3884static void x86_cpu_register_bit_prop(X86CPU *cpu,
3885 const char *prop_name,
3886 FeatureWord w,
3887 int bitnr)
3888{
3889 BitProperty *fp;
3890 ObjectProperty *op;
3891 uint32_t mask = (1UL << bitnr);
3892
3893 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3894 if (op) {
3895 fp = op->opaque;
3896 assert(fp->w == w);
3897 fp->mask |= mask;
3898 } else {
3899 fp = g_new0(BitProperty, 1);
3900 fp->w = w;
3901 fp->mask = mask;
3902 object_property_add(OBJECT(cpu), prop_name, "bool",
3903 x86_cpu_get_bit_prop,
3904 x86_cpu_set_bit_prop,
3905 x86_cpu_release_bit_prop, fp, &error_abort);
3906 }
3907}
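/* For illustration: if a hypothetical property "foo" were registered here
 * for bits 3 and 5 of the same feature word, fp->mask would end up as
 * (1 << 3) | (1 << 5); the getter then reports "foo" as true only when both
 * bits are set, and the setter flips both bits together.
 */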
3908
3909static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3910 FeatureWord w,
3911 int bitnr)
3912{
3913 FeatureWordInfo *fi = &feature_word_info[w];
3914 const char *name = fi->feat_names[bitnr];
3915
3916 if (!name) {
3917 return;
3918 }
3919
3920 /* Property names should use "-" instead of "_".
3921 * Old names containing underscores are registered as aliases
3922 * using object_property_add_alias()
3923 */
3924 assert(!strchr(name, '_'));
3925 /* Aliases don't use "|" delimiters anymore; they are registered
3926 * manually using object_property_add_alias(). */
3927 assert(!strchr(name, '|'));
3928 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
3929}
3930
3931static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
3932{
3933 X86CPU *cpu = X86_CPU(cs);
3934 CPUX86State *env = &cpu->env;
3935 GuestPanicInformation *panic_info = NULL;
3936
3937 if (env->features[FEAT_HYPERV_EDX] & HV_X64_GUEST_CRASH_MSR_AVAILABLE) {
3938 panic_info = g_malloc0(sizeof(GuestPanicInformation));
3939
3940 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
3941
3942 assert(HV_X64_MSR_CRASH_PARAMS >= 5);
3943 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
3944 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
3945 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
3946 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
3947 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
3948 }
3949
3950 return panic_info;
3951}
3952static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
3953 const char *name, void *opaque,
3954 Error **errp)
3955{
3956 CPUState *cs = CPU(obj);
3957 GuestPanicInformation *panic_info;
3958
3959 if (!cs->crash_occurred) {
3960 error_setg(errp, "No crash occurred");
3961 return;
3962 }
3963
3964 panic_info = x86_cpu_get_crash_info(cs);
3965 if (panic_info == NULL) {
3966 error_setg(errp, "No crash information");
3967 return;
3968 }
3969
3970 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
3971 errp);
3972 qapi_free_GuestPanicInformation(panic_info);
3973}
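/* Note: this is exposed through the "crash-information" QOM property
 * registered in x86_cpu_initfn() below, so (assuming a crash has been
 * recorded) it can be read at runtime, e.g. with the QMP qom-get command
 * on the vCPU's object path.
 */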
3974
3975static void x86_cpu_initfn(Object *obj)
3976{
3977 CPUState *cs = CPU(obj);
3978 X86CPU *cpu = X86_CPU(obj);
3979 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3980 CPUX86State *env = &cpu->env;
3981 FeatureWord w;
3982
3983 cs->env_ptr = env;
3984
3985 object_property_add(obj, "family", "int",
3986 x86_cpuid_version_get_family,
3987 x86_cpuid_version_set_family, NULL, NULL, NULL);
3988 object_property_add(obj, "model", "int",
3989 x86_cpuid_version_get_model,
3990 x86_cpuid_version_set_model, NULL, NULL, NULL);
3991 object_property_add(obj, "stepping", "int",
3992 x86_cpuid_version_get_stepping,
3993 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3994 object_property_add_str(obj, "vendor",
3995 x86_cpuid_get_vendor,
3996 x86_cpuid_set_vendor, NULL);
3997 object_property_add_str(obj, "model-id",
3998 x86_cpuid_get_model_id,
3999 x86_cpuid_set_model_id, NULL);
4000 object_property_add(obj, "tsc-frequency", "int",
4001 x86_cpuid_get_tsc_freq,
4002 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4003 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4004 x86_cpu_get_feature_words,
4005 NULL, NULL, (void *)env->features, NULL);
4006 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4007 x86_cpu_get_feature_words,
4008 NULL, NULL, (void *)cpu->filtered_features, NULL);
4009
4010 object_property_add(obj, "crash-information", "GuestPanicInformation",
4011 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4012
4013 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4014
4015 for (w = 0; w < FEATURE_WORDS; w++) {
4016 int bitnr;
4017
4018 for (bitnr = 0; bitnr < 32; bitnr++) {
4019 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4020 }
4021 }
4022
4023 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4024 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4025 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4026 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4027 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4028 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4029 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4030
4031 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4032 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
4033 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
4034 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
4035 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
4036 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
4037 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
4038 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
4039 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
4040 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
4041 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
4042 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
4043 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
4044 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
4045 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
4046 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
4047 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
4048 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
4049 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
4050 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
4051 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
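/* For illustration: the aliases above keep old command lines working, e.g.
 * "-cpu qemu64,+lahf_lm" resolves to the canonical "lahf-lm" property, and
 * both "sse4_1" and "sse4-1" map to the "sse4.1" feature bit.
 */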
4052
4053 if (xcc->cpu_def) {
4054 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
4055 }
4056}
4057
4058static int64_t x86_cpu_get_arch_id(CPUState *cs)
4059{
4060 X86CPU *cpu = X86_CPU(cs);
4061
4062 return cpu->apic_id;
4063}
4064
4065static bool x86_cpu_get_paging_enabled(const CPUState *cs)
4066{
4067 X86CPU *cpu = X86_CPU(cs);
4068
4069 return cpu->env.cr[0] & CR0_PG_MASK;
4070}
4071
4072static void x86_cpu_set_pc(CPUState *cs, vaddr value)
4073{
4074 X86CPU *cpu = X86_CPU(cs);
4075
4076 cpu->env.eip = value;
4077}
4078
4079static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
4080{
4081 X86CPU *cpu = X86_CPU(cs);
4082
4083 cpu->env.eip = tb->pc - tb->cs_base;
4084}
4085
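/* A vCPU has work when a hard/poll interrupt is pending and EFLAGS.IF is
 * set, when an NMI/INIT/SIPI/MCE is pending regardless of IF, or when an
 * SMI is pending and the CPU is not already in SMM.
 */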
4086static bool x86_cpu_has_work(CPUState *cs)
4087{
4088 X86CPU *cpu = X86_CPU(cs);
4089 CPUX86State *env = &cpu->env;
4090
4091 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
4092 CPU_INTERRUPT_POLL)) &&
4093 (env->eflags & IF_MASK)) ||
4094 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
4095 CPU_INTERRUPT_INIT |
4096 CPU_INTERRUPT_SIPI |
4097 CPU_INTERRUPT_MCE)) ||
4098 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
4099 !(env->hflags & HF_SMM_MASK));
4100}
4101
4102static Property x86_cpu_properties[] = {
4103#ifdef CONFIG_USER_ONLY
4104 /* apic_id = 0 by default for *-user, see commit 9886e834 */
4105 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
4106 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
4107 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
4108 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
4109#else
4110 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
4111 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
4112 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
4113 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
4114#endif
4115 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
4116 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
4117 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
4118 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
4119 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
4120 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
4121 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
4122 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
4123 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
4124 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
4125 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
4126 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
4127 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
4128 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
4129 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
4130 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
4131 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
4132 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
4133 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
4134 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
4135 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
4136 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
4137 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
4138 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
4139 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
4140 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
4141 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
4142 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
4143 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
4144 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
4145 false),
4146 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
4147 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
4148 DEFINE_PROP_END_OF_LIST()
4149};
4150
4151static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
4152{
4153 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4154 CPUClass *cc = CPU_CLASS(oc);
4155 DeviceClass *dc = DEVICE_CLASS(oc);
4156
4157 xcc->parent_realize = dc->realize;
4158 xcc->parent_unrealize = dc->unrealize;
4159 dc->realize = x86_cpu_realizefn;
4160 dc->unrealize = x86_cpu_unrealizefn;
4161 dc->props = x86_cpu_properties;
4162
4163 xcc->parent_reset = cc->reset;
4164 cc->reset = x86_cpu_reset;
4165 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
4166
4167 cc->class_by_name = x86_cpu_class_by_name;
4168 cc->parse_features = x86_cpu_parse_featurestr;
4169 cc->has_work = x86_cpu_has_work;
4170#ifdef CONFIG_TCG
4171 cc->do_interrupt = x86_cpu_do_interrupt;
4172 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
4173#endif
4174 cc->dump_state = x86_cpu_dump_state;
4175 cc->get_crash_info = x86_cpu_get_crash_info;
4176 cc->set_pc = x86_cpu_set_pc;
4177 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
4178 cc->gdb_read_register = x86_cpu_gdb_read_register;
4179 cc->gdb_write_register = x86_cpu_gdb_write_register;
4180 cc->get_arch_id = x86_cpu_get_arch_id;
4181 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
4182#ifdef CONFIG_USER_ONLY
4183 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
4184#else
4185 cc->asidx_from_attrs = x86_asidx_from_attrs;
4186 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
4187 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
4188 cc->write_elf64_note = x86_cpu_write_elf64_note;
4189 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
4190 cc->write_elf32_note = x86_cpu_write_elf32_note;
4191 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
4192 cc->vmsd = &vmstate_x86_cpu;
4193#endif
4194 cc->gdb_arch_name = x86_gdb_arch_name;
4195#ifdef TARGET_X86_64
4196 cc->gdb_core_xml_file = "i386-64bit.xml";
4197 cc->gdb_num_core_regs = 57;
4198#else
4199 cc->gdb_core_xml_file = "i386-32bit.xml";
4200 cc->gdb_num_core_regs = 41;
4201#endif
4202#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
4203 cc->debug_excp_handler = breakpoint_handler;
4204#endif
4205 cc->cpu_exec_enter = x86_cpu_exec_enter;
4206 cc->cpu_exec_exit = x86_cpu_exec_exit;
4207
4208 dc->user_creatable = true;
4209}
4210
4211static const TypeInfo x86_cpu_type_info = {
4212 .name = TYPE_X86_CPU,
4213 .parent = TYPE_CPU,
4214 .instance_size = sizeof(X86CPU),
4215 .instance_init = x86_cpu_initfn,
4216 .abstract = true,
4217 .class_size = sizeof(X86CPUClass),
4218 .class_init = x86_cpu_common_class_init,
4219};
4220
4221
4222/* "base" CPU model, used by query-cpu-model-expansion */
4223static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
4224{
4225 X86CPUClass *xcc = X86_CPU_CLASS(oc);
4226
4227 xcc->static_model = true;
4228 xcc->migration_safe = true;
4229 xcc->model_description = "base CPU model type with no features enabled";
4230 xcc->ordering = 8;
4231}
4232
4233static const TypeInfo x86_base_cpu_type_info = {
4234 .name = X86_CPU_TYPE_NAME("base"),
4235 .parent = TYPE_X86_CPU,
4236 .class_init = x86_cpu_base_class_init,
4237};
4238
4239static void x86_cpu_register_types(void)
4240{
4241 int i;
4242
4243 type_register_static(&x86_cpu_type_info);
4244 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
4245 x86_register_cpudef_type(&builtin_x86_defs[i]);
4246 }
4247 type_register_static(&max_x86_cpu_type_info);
4248 type_register_static(&x86_base_cpu_type_info);
4249#ifdef CONFIG_KVM
4250 type_register_static(&host_x86_cpu_type_info);
4251#endif
4252}
4253
4254type_init(x86_cpu_register_types)