/*
 * NOTE(review): the following is QEMU's target-i386/cpu.c as mirrored on
 * git.proxmox.com (mirror_qemu.git), around the "target-i386:
 * x86_cpu_load_features() function" commit.
 */
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B   0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d


/* CPUID Leaf 4 constants: */

/* EAX: */
#define CPUID_4_TYPE_DCACHE  1
#define CPUID_4_TYPE_ICACHE  2
#define CPUID_4_TYPE_UNIFIED 3

/* Cache level is encoded in EAX bits 7:5 */
#define CPUID_4_LEVEL(l)          ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC     (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE       (1 << 1)
#define CPUID_4_COMPLEX_IDX     (1 << 2)

#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)


/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE         64
#define L1D_ASSOCIATIVITY      8
#define L1D_SETS              64
#define L1D_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG      1
#define L1D_SIZE_KB_AMD       64
#define L1D_ASSOCIATIVITY_AMD  2

/* L1 instruction cache: */
#define L1I_LINE_SIZE         64
#define L1I_ASSOCIATIVITY      8
#define L1I_SETS              64
#define L1I_PARTITIONS         1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG      1
#define L1I_SIZE_KB_AMD       64
#define L1I_ASSOCIATIVITY_AMD  2

/* Level 2 unified cache: */
#define L2_LINE_SIZE          64
#define L2_ASSOCIATIVITY      16
#define L2_SETS             4096
#define L2_PARTITIONS          1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG       1
#define L2_SIZE_KB_AMD       512

/* Level 3 unified cache: */
#define L3_SIZE_KB             0 /* disabled */
#define L3_ASSOCIATIVITY       0 /* disabled */
#define L3_LINES_PER_TAG       0 /* disabled */
#define L3_LINE_SIZE           0 /* disabled */
/* L3_N_* variants: the L3 cache actually exposed when enabled */
#define L3_N_LINE_SIZE         64
#define L3_N_ASSOCIATIVITY     16
#define L3_N_SETS           16384
#define L3_N_PARTITIONS         1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG      1
#define L3_N_SIZE_KB_AMD    16384

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
170
171
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
174 {
175 int i;
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
180 }
181 dst[CPUID_VENDOR_SZ] = '\0';
182 }
183
/* Cumulative CPUID[1].EDX feature sets for the historical CPU models,
 * plus the TCG_* masks listing which bits the TCG emulator can honor. */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* CPUID[1].EDX bits TCG can emulate: */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
/* CPUID[1].ECX bits TCG can emulate: */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

/* CPUID[8000_0001].EDX bits TCG can emulate; many bits alias
 * CPUID[1].EDX on AMD, hence the mask with CPUID_EXT2_AMD_ALIASES. */
#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
247
/* Per-FeatureWord metadata: how to query the corresponding CPUID leaf
 * and which bits have names / are supported / are migratable. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];  /* one name per bit; NULL = unnamed bit */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
263
/* Table describing every FeatureWord: bit names (index == bit number),
 * the CPUID leaf/register it comes from, and TCG/migration capability
 * masks.  NULL names with a trailing comment are known bits that are
 * deliberately not user-configurable here. */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        /* NOTE: bits 0 and 3 are both named "kvmclock" on purpose —
         * presumably the two KVM clocksource feature bits share one
         * user-visible name; verify against linux/kvm_para.h. */
        .feat_names = {
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", NULL, "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    /* XSAVE component bitmap, low/high 32 bits (CPUID[EAX=0xD,ECX=0]).
     * No feat_names: these bits are not user-settable flags. */
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};
510
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Expands to a designated initializer mapping R_<reg> to its printable
 * name and the matching QAPI enum constant. */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* The 32-bit general-purpose registers, indexed by R_* constant. */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
531
/* Describes one XSAVE state component: the CPUID feature bit that
 * enables it, and its offset/size inside the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;  /* FeatureWord index + flag mask gating it */
    uint32_t offset, size;   /* location within the XSAVE area */
} ExtSaveArea;

/* XSAVE state components, indexed by XSTATE_*_BIT. */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
581
582 static uint32_t xsave_area_size(uint64_t mask)
583 {
584 int i;
585 uint64_t ret = 0;
586
587 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
588 const ExtSaveArea *esa = &x86_ext_save_areas[i];
589 if ((mask >> i) & 1) {
590 ret = MAX(ret, esa->offset + esa->size);
591 }
592 }
593 return ret;
594 }
595
596 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
597 {
598 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
599 cpu->env.features[FEAT_XSAVE_COMP_LO];
600 }
601
602 const char *get_register_name_32(unsigned int reg)
603 {
604 if (reg >= CPU_NB_REGS32) {
605 return NULL;
606 }
607 return x86_reg_info_32[reg].name;
608 }
609
610 /*
611 * Returns the set of feature flags that are supported and migratable by
612 * QEMU, for a given FeatureWord.
613 */
614 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
615 {
616 FeatureWordInfo *wi = &feature_word_info[w];
617 uint32_t r = 0;
618 int i;
619
620 for (i = 0; i < 32; i++) {
621 uint32_t f = 1U << i;
622
623 /* If the feature name is known, it is implicitly considered migratable,
624 * unless it is explicitly set in unmigratable_flags */
625 if ((wi->migratable_flags & f) ||
626 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
627 r |= f;
628 }
629 }
630 return r;
631 }
632
/* Execute the CPUID instruction on the *host* CPU for leaf `function`
 * and subleaf `count`, storing EAX/EBX/ECX/EDX into the corresponding
 * output pointers.  Any output pointer may be NULL if the caller does
 * not need that register.  Aborts when built for a non-x86 host. */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* 32-bit variant saves/restores all registers with pusha/popa and
     * writes the results through %esi ("%2" = vec) instead of using
     * register output constraints — NOTE(review): presumably to avoid
     * clobbering %ebx, which PIC code reserves; confirm before changing. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
666
/* CPU class name definitions: */

/* QOM type names for CPU models are "<model>-" TYPE_X86_CPU */
#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
679
680 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
681 {
682 ObjectClass *oc;
683 char *typename;
684
685 if (cpu_model == NULL) {
686 return NULL;
687 }
688
689 typename = x86_cpu_type_name(cpu_model);
690 oc = object_class_by_name(typename);
691 g_free(typename);
692 return oc;
693 }
694
/* Recover the CPU model name from an X86CPUClass by stripping the
 * X86_CPU_TYPE_SUFFIX from its QOM type name.
 * Caller is responsible for freeing the returned string. */
static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}
702
/* Static description of one built-in CPU model (see builtin_x86_defs). */
struct X86CPUDefinition {
    const char *name;       /* model name, e.g. "qemu64" */
    uint32_t level;         /* CPUID[0].EAX — NOTE(review): presumably the
                             * max basic leaf, judging by builtin defs */
    uint32_t xlevel;        /* max extended leaf (0x8000xxxx values below) */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;  /* per-FeatureWord flag bits */
    char model_id[48];          /* CPUID brand string */
};
715
716 static X86CPUDefinition builtin_x86_defs[] = {
717 {
718 .name = "qemu64",
719 .level = 0xd,
720 .vendor = CPUID_VENDOR_AMD,
721 .family = 6,
722 .model = 6,
723 .stepping = 3,
724 .features[FEAT_1_EDX] =
725 PPRO_FEATURES |
726 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
727 CPUID_PSE36,
728 .features[FEAT_1_ECX] =
729 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
730 .features[FEAT_8000_0001_EDX] =
731 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
732 .features[FEAT_8000_0001_ECX] =
733 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
734 .xlevel = 0x8000000A,
735 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
736 },
737 {
738 .name = "phenom",
739 .level = 5,
740 .vendor = CPUID_VENDOR_AMD,
741 .family = 16,
742 .model = 2,
743 .stepping = 3,
744 /* Missing: CPUID_HT */
745 .features[FEAT_1_EDX] =
746 PPRO_FEATURES |
747 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
748 CPUID_PSE36 | CPUID_VME,
749 .features[FEAT_1_ECX] =
750 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
751 CPUID_EXT_POPCNT,
752 .features[FEAT_8000_0001_EDX] =
753 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
754 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
755 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
756 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
757 CPUID_EXT3_CR8LEG,
758 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
759 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
760 .features[FEAT_8000_0001_ECX] =
761 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
762 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
763 /* Missing: CPUID_SVM_LBRV */
764 .features[FEAT_SVM] =
765 CPUID_SVM_NPT,
766 .xlevel = 0x8000001A,
767 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
768 },
769 {
770 .name = "core2duo",
771 .level = 10,
772 .vendor = CPUID_VENDOR_INTEL,
773 .family = 6,
774 .model = 15,
775 .stepping = 11,
776 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
777 .features[FEAT_1_EDX] =
778 PPRO_FEATURES |
779 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
780 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
781 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
782 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
783 .features[FEAT_1_ECX] =
784 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
785 CPUID_EXT_CX16,
786 .features[FEAT_8000_0001_EDX] =
787 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
788 .features[FEAT_8000_0001_ECX] =
789 CPUID_EXT3_LAHF_LM,
790 .xlevel = 0x80000008,
791 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
792 },
793 {
794 .name = "kvm64",
795 .level = 0xd,
796 .vendor = CPUID_VENDOR_INTEL,
797 .family = 15,
798 .model = 6,
799 .stepping = 1,
800 /* Missing: CPUID_HT */
801 .features[FEAT_1_EDX] =
802 PPRO_FEATURES | CPUID_VME |
803 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
804 CPUID_PSE36,
805 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
806 .features[FEAT_1_ECX] =
807 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
808 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
809 .features[FEAT_8000_0001_EDX] =
810 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
811 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
812 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
813 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
814 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
815 .features[FEAT_8000_0001_ECX] =
816 0,
817 .xlevel = 0x80000008,
818 .model_id = "Common KVM processor"
819 },
820 {
821 .name = "qemu32",
822 .level = 4,
823 .vendor = CPUID_VENDOR_INTEL,
824 .family = 6,
825 .model = 6,
826 .stepping = 3,
827 .features[FEAT_1_EDX] =
828 PPRO_FEATURES,
829 .features[FEAT_1_ECX] =
830 CPUID_EXT_SSE3,
831 .xlevel = 0x80000004,
832 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
833 },
834 {
835 .name = "kvm32",
836 .level = 5,
837 .vendor = CPUID_VENDOR_INTEL,
838 .family = 15,
839 .model = 6,
840 .stepping = 1,
841 .features[FEAT_1_EDX] =
842 PPRO_FEATURES | CPUID_VME |
843 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
844 .features[FEAT_1_ECX] =
845 CPUID_EXT_SSE3,
846 .features[FEAT_8000_0001_ECX] =
847 0,
848 .xlevel = 0x80000008,
849 .model_id = "Common 32-bit KVM processor"
850 },
851 {
852 .name = "coreduo",
853 .level = 10,
854 .vendor = CPUID_VENDOR_INTEL,
855 .family = 6,
856 .model = 14,
857 .stepping = 8,
858 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
859 .features[FEAT_1_EDX] =
860 PPRO_FEATURES | CPUID_VME |
861 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
862 CPUID_SS,
863 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
864 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
865 .features[FEAT_1_ECX] =
866 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
867 .features[FEAT_8000_0001_EDX] =
868 CPUID_EXT2_NX,
869 .xlevel = 0x80000008,
870 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
871 },
872 {
873 .name = "486",
874 .level = 1,
875 .vendor = CPUID_VENDOR_INTEL,
876 .family = 4,
877 .model = 8,
878 .stepping = 0,
879 .features[FEAT_1_EDX] =
880 I486_FEATURES,
881 .xlevel = 0,
882 },
883 {
884 .name = "pentium",
885 .level = 1,
886 .vendor = CPUID_VENDOR_INTEL,
887 .family = 5,
888 .model = 4,
889 .stepping = 3,
890 .features[FEAT_1_EDX] =
891 PENTIUM_FEATURES,
892 .xlevel = 0,
893 },
894 {
895 .name = "pentium2",
896 .level = 2,
897 .vendor = CPUID_VENDOR_INTEL,
898 .family = 6,
899 .model = 5,
900 .stepping = 2,
901 .features[FEAT_1_EDX] =
902 PENTIUM2_FEATURES,
903 .xlevel = 0,
904 },
905 {
906 .name = "pentium3",
907 .level = 3,
908 .vendor = CPUID_VENDOR_INTEL,
909 .family = 6,
910 .model = 7,
911 .stepping = 3,
912 .features[FEAT_1_EDX] =
913 PENTIUM3_FEATURES,
914 .xlevel = 0,
915 },
916 {
917 .name = "athlon",
918 .level = 2,
919 .vendor = CPUID_VENDOR_AMD,
920 .family = 6,
921 .model = 2,
922 .stepping = 3,
923 .features[FEAT_1_EDX] =
924 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
925 CPUID_MCA,
926 .features[FEAT_8000_0001_EDX] =
927 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
928 .xlevel = 0x80000008,
929 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
930 },
931 {
932 .name = "n270",
933 .level = 10,
934 .vendor = CPUID_VENDOR_INTEL,
935 .family = 6,
936 .model = 28,
937 .stepping = 2,
938 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
939 .features[FEAT_1_EDX] =
940 PPRO_FEATURES |
941 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
942 CPUID_ACPI | CPUID_SS,
943 /* Some CPUs got no CPUID_SEP */
944 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
945 * CPUID_EXT_XTPR */
946 .features[FEAT_1_ECX] =
947 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
948 CPUID_EXT_MOVBE,
949 .features[FEAT_8000_0001_EDX] =
950 CPUID_EXT2_NX,
951 .features[FEAT_8000_0001_ECX] =
952 CPUID_EXT3_LAHF_LM,
953 .xlevel = 0x80000008,
954 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
955 },
956 {
957 .name = "Conroe",
958 .level = 10,
959 .vendor = CPUID_VENDOR_INTEL,
960 .family = 6,
961 .model = 15,
962 .stepping = 3,
963 .features[FEAT_1_EDX] =
964 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
965 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
966 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
967 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
968 CPUID_DE | CPUID_FP87,
969 .features[FEAT_1_ECX] =
970 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
971 .features[FEAT_8000_0001_EDX] =
972 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
973 .features[FEAT_8000_0001_ECX] =
974 CPUID_EXT3_LAHF_LM,
975 .xlevel = 0x80000008,
976 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
977 },
978 {
979 .name = "Penryn",
980 .level = 10,
981 .vendor = CPUID_VENDOR_INTEL,
982 .family = 6,
983 .model = 23,
984 .stepping = 3,
985 .features[FEAT_1_EDX] =
986 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
987 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
988 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
989 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
990 CPUID_DE | CPUID_FP87,
991 .features[FEAT_1_ECX] =
992 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
993 CPUID_EXT_SSE3,
994 .features[FEAT_8000_0001_EDX] =
995 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
996 .features[FEAT_8000_0001_ECX] =
997 CPUID_EXT3_LAHF_LM,
998 .xlevel = 0x80000008,
999 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1000 },
1001 {
1002 .name = "Nehalem",
1003 .level = 11,
1004 .vendor = CPUID_VENDOR_INTEL,
1005 .family = 6,
1006 .model = 26,
1007 .stepping = 3,
1008 .features[FEAT_1_EDX] =
1009 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1010 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1011 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1012 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1013 CPUID_DE | CPUID_FP87,
1014 .features[FEAT_1_ECX] =
1015 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1016 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1017 .features[FEAT_8000_0001_EDX] =
1018 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1019 .features[FEAT_8000_0001_ECX] =
1020 CPUID_EXT3_LAHF_LM,
1021 .xlevel = 0x80000008,
1022 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1023 },
1024 {
1025 .name = "Westmere",
1026 .level = 11,
1027 .vendor = CPUID_VENDOR_INTEL,
1028 .family = 6,
1029 .model = 44,
1030 .stepping = 1,
1031 .features[FEAT_1_EDX] =
1032 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1033 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1034 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1035 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1036 CPUID_DE | CPUID_FP87,
1037 .features[FEAT_1_ECX] =
1038 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1039 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1040 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1041 .features[FEAT_8000_0001_EDX] =
1042 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1043 .features[FEAT_8000_0001_ECX] =
1044 CPUID_EXT3_LAHF_LM,
1045 .features[FEAT_6_EAX] =
1046 CPUID_6_EAX_ARAT,
1047 .xlevel = 0x80000008,
1048 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1049 },
1050 {
1051 .name = "SandyBridge",
1052 .level = 0xd,
1053 .vendor = CPUID_VENDOR_INTEL,
1054 .family = 6,
1055 .model = 42,
1056 .stepping = 1,
1057 .features[FEAT_1_EDX] =
1058 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1059 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1060 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1061 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1062 CPUID_DE | CPUID_FP87,
1063 .features[FEAT_1_ECX] =
1064 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1065 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1066 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1067 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1068 CPUID_EXT_SSE3,
1069 .features[FEAT_8000_0001_EDX] =
1070 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1071 CPUID_EXT2_SYSCALL,
1072 .features[FEAT_8000_0001_ECX] =
1073 CPUID_EXT3_LAHF_LM,
1074 .features[FEAT_XSAVE] =
1075 CPUID_XSAVE_XSAVEOPT,
1076 .features[FEAT_6_EAX] =
1077 CPUID_6_EAX_ARAT,
1078 .xlevel = 0x80000008,
1079 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1080 },
1081 {
1082 .name = "IvyBridge",
1083 .level = 0xd,
1084 .vendor = CPUID_VENDOR_INTEL,
1085 .family = 6,
1086 .model = 58,
1087 .stepping = 9,
1088 .features[FEAT_1_EDX] =
1089 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1090 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1091 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1092 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1093 CPUID_DE | CPUID_FP87,
1094 .features[FEAT_1_ECX] =
1095 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1096 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1097 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1098 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1099 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1100 .features[FEAT_7_0_EBX] =
1101 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1102 CPUID_7_0_EBX_ERMS,
1103 .features[FEAT_8000_0001_EDX] =
1104 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1105 CPUID_EXT2_SYSCALL,
1106 .features[FEAT_8000_0001_ECX] =
1107 CPUID_EXT3_LAHF_LM,
1108 .features[FEAT_XSAVE] =
1109 CPUID_XSAVE_XSAVEOPT,
1110 .features[FEAT_6_EAX] =
1111 CPUID_6_EAX_ARAT,
1112 .xlevel = 0x80000008,
1113 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1114 },
1115 {
1116 .name = "Haswell-noTSX",
1117 .level = 0xd,
1118 .vendor = CPUID_VENDOR_INTEL,
1119 .family = 6,
1120 .model = 60,
1121 .stepping = 1,
1122 .features[FEAT_1_EDX] =
1123 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1124 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1125 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1126 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1127 CPUID_DE | CPUID_FP87,
1128 .features[FEAT_1_ECX] =
1129 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1130 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1131 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1132 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1133 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1134 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1135 .features[FEAT_8000_0001_EDX] =
1136 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1137 CPUID_EXT2_SYSCALL,
1138 .features[FEAT_8000_0001_ECX] =
1139 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1140 .features[FEAT_7_0_EBX] =
1141 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1142 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1143 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1144 .features[FEAT_XSAVE] =
1145 CPUID_XSAVE_XSAVEOPT,
1146 .features[FEAT_6_EAX] =
1147 CPUID_6_EAX_ARAT,
1148 .xlevel = 0x80000008,
1149 .model_id = "Intel Core Processor (Haswell, no TSX)",
1150 }, {
1151 .name = "Haswell",
1152 .level = 0xd,
1153 .vendor = CPUID_VENDOR_INTEL,
1154 .family = 6,
1155 .model = 60,
1156 .stepping = 1,
1157 .features[FEAT_1_EDX] =
1158 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1159 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1160 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1161 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1162 CPUID_DE | CPUID_FP87,
1163 .features[FEAT_1_ECX] =
1164 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1165 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1166 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1167 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1168 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1169 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1170 .features[FEAT_8000_0001_EDX] =
1171 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1172 CPUID_EXT2_SYSCALL,
1173 .features[FEAT_8000_0001_ECX] =
1174 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1175 .features[FEAT_7_0_EBX] =
1176 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1177 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1178 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1179 CPUID_7_0_EBX_RTM,
1180 .features[FEAT_XSAVE] =
1181 CPUID_XSAVE_XSAVEOPT,
1182 .features[FEAT_6_EAX] =
1183 CPUID_6_EAX_ARAT,
1184 .xlevel = 0x80000008,
1185 .model_id = "Intel Core Processor (Haswell)",
1186 },
1187 {
1188 .name = "Broadwell-noTSX",
1189 .level = 0xd,
1190 .vendor = CPUID_VENDOR_INTEL,
1191 .family = 6,
1192 .model = 61,
1193 .stepping = 2,
1194 .features[FEAT_1_EDX] =
1195 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1196 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1197 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1198 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1199 CPUID_DE | CPUID_FP87,
1200 .features[FEAT_1_ECX] =
1201 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1202 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1203 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1204 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1205 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1206 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1207 .features[FEAT_8000_0001_EDX] =
1208 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1209 CPUID_EXT2_SYSCALL,
1210 .features[FEAT_8000_0001_ECX] =
1211 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1212 .features[FEAT_7_0_EBX] =
1213 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1214 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1215 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1216 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1217 CPUID_7_0_EBX_SMAP,
1218 .features[FEAT_XSAVE] =
1219 CPUID_XSAVE_XSAVEOPT,
1220 .features[FEAT_6_EAX] =
1221 CPUID_6_EAX_ARAT,
1222 .xlevel = 0x80000008,
1223 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1224 },
1225 {
1226 .name = "Broadwell",
1227 .level = 0xd,
1228 .vendor = CPUID_VENDOR_INTEL,
1229 .family = 6,
1230 .model = 61,
1231 .stepping = 2,
1232 .features[FEAT_1_EDX] =
1233 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1234 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1235 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1236 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1237 CPUID_DE | CPUID_FP87,
1238 .features[FEAT_1_ECX] =
1239 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1240 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1241 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1242 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1243 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1244 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1245 .features[FEAT_8000_0001_EDX] =
1246 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1247 CPUID_EXT2_SYSCALL,
1248 .features[FEAT_8000_0001_ECX] =
1249 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1250 .features[FEAT_7_0_EBX] =
1251 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1252 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1253 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1254 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1255 CPUID_7_0_EBX_SMAP,
1256 .features[FEAT_XSAVE] =
1257 CPUID_XSAVE_XSAVEOPT,
1258 .features[FEAT_6_EAX] =
1259 CPUID_6_EAX_ARAT,
1260 .xlevel = 0x80000008,
1261 .model_id = "Intel Core Processor (Broadwell)",
1262 },
1263 {
1264 .name = "Skylake-Client",
1265 .level = 0xd,
1266 .vendor = CPUID_VENDOR_INTEL,
1267 .family = 6,
1268 .model = 94,
1269 .stepping = 3,
1270 .features[FEAT_1_EDX] =
1271 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1272 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1273 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1274 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1275 CPUID_DE | CPUID_FP87,
1276 .features[FEAT_1_ECX] =
1277 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1278 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1279 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1280 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1281 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1282 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1283 .features[FEAT_8000_0001_EDX] =
1284 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1285 CPUID_EXT2_SYSCALL,
1286 .features[FEAT_8000_0001_ECX] =
1287 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1288 .features[FEAT_7_0_EBX] =
1289 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1290 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1291 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1292 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1293 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1294 /* Missing: XSAVES (not supported by some Linux versions,
1295 * including v4.1 to v4.6).
1296 * KVM doesn't yet expose any XSAVES state save component,
1297 * and the only one defined in Skylake (processor tracing)
1298 * probably will block migration anyway.
1299 */
1300 .features[FEAT_XSAVE] =
1301 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1302 CPUID_XSAVE_XGETBV1,
1303 .features[FEAT_6_EAX] =
1304 CPUID_6_EAX_ARAT,
1305 .xlevel = 0x80000008,
1306 .model_id = "Intel Core Processor (Skylake)",
1307 },
1308 {
1309 .name = "Opteron_G1",
1310 .level = 5,
1311 .vendor = CPUID_VENDOR_AMD,
1312 .family = 15,
1313 .model = 6,
1314 .stepping = 1,
1315 .features[FEAT_1_EDX] =
1316 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1317 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1318 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1319 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1320 CPUID_DE | CPUID_FP87,
1321 .features[FEAT_1_ECX] =
1322 CPUID_EXT_SSE3,
1323 .features[FEAT_8000_0001_EDX] =
1324 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1325 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1326 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1327 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1328 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1329 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1330 .xlevel = 0x80000008,
1331 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1332 },
1333 {
1334 .name = "Opteron_G2",
1335 .level = 5,
1336 .vendor = CPUID_VENDOR_AMD,
1337 .family = 15,
1338 .model = 6,
1339 .stepping = 1,
1340 .features[FEAT_1_EDX] =
1341 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1342 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1343 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1344 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1345 CPUID_DE | CPUID_FP87,
1346 .features[FEAT_1_ECX] =
1347 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1348 /* Missing: CPUID_EXT2_RDTSCP */
1349 .features[FEAT_8000_0001_EDX] =
1350 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1351 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1352 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1353 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1354 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1355 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1356 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1357 .features[FEAT_8000_0001_ECX] =
1358 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1359 .xlevel = 0x80000008,
1360 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1361 },
1362 {
1363 .name = "Opteron_G3",
1364 .level = 5,
1365 .vendor = CPUID_VENDOR_AMD,
1366 .family = 16,
1367 .model = 2,
1368 .stepping = 3,
1369 .features[FEAT_1_EDX] =
1370 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1371 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1372 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1373 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1374 CPUID_DE | CPUID_FP87,
1375 .features[FEAT_1_ECX] =
1376 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1377 CPUID_EXT_SSE3,
1378 /* Missing: CPUID_EXT2_RDTSCP */
1379 .features[FEAT_8000_0001_EDX] =
1380 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1381 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1382 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1383 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1384 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1385 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1386 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1387 .features[FEAT_8000_0001_ECX] =
1388 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1389 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1390 .xlevel = 0x80000008,
1391 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1392 },
1393 {
1394 .name = "Opteron_G4",
1395 .level = 0xd,
1396 .vendor = CPUID_VENDOR_AMD,
1397 .family = 21,
1398 .model = 1,
1399 .stepping = 2,
1400 .features[FEAT_1_EDX] =
1401 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1402 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1403 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1404 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1405 CPUID_DE | CPUID_FP87,
1406 .features[FEAT_1_ECX] =
1407 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1408 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1409 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1410 CPUID_EXT_SSE3,
1411 /* Missing: CPUID_EXT2_RDTSCP */
1412 .features[FEAT_8000_0001_EDX] =
1413 CPUID_EXT2_LM |
1414 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1415 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1416 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1417 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1418 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1419 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1420 .features[FEAT_8000_0001_ECX] =
1421 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1422 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1423 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1424 CPUID_EXT3_LAHF_LM,
1425 /* no xsaveopt! */
1426 .xlevel = 0x8000001A,
1427 .model_id = "AMD Opteron 62xx class CPU",
1428 },
1429 {
1430 .name = "Opteron_G5",
1431 .level = 0xd,
1432 .vendor = CPUID_VENDOR_AMD,
1433 .family = 21,
1434 .model = 2,
1435 .stepping = 0,
1436 .features[FEAT_1_EDX] =
1437 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1438 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1439 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1440 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1441 CPUID_DE | CPUID_FP87,
1442 .features[FEAT_1_ECX] =
1443 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1444 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1445 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1446 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1447 /* Missing: CPUID_EXT2_RDTSCP */
1448 .features[FEAT_8000_0001_EDX] =
1449 CPUID_EXT2_LM |
1450 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1451 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1452 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1453 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1454 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1455 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1456 .features[FEAT_8000_0001_ECX] =
1457 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1458 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1459 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1460 CPUID_EXT3_LAHF_LM,
1461 /* no xsaveopt! */
1462 .xlevel = 0x8000001A,
1463 .model_id = "AMD Opteron 63xx class CPU",
1464 },
1465 };
1466
/* A (property name, value string) pair, used for tables of qdev property
 * defaults such as kvm_default_props and tcg_default_props. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1470
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * NULL-terminated table; individual entries may be overridden at
 * runtime through x86_cpu_change_kvm_default().
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
1487
/* TCG-specific defaults that override all CPU models when using TCG.
 * NULL-terminated, same format as kvm_default_props.
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },
    { NULL, NULL },
};
1494
1495
1496 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1497 {
1498 PropValue *pv;
1499 for (pv = kvm_default_props; pv->prop; pv++) {
1500 if (!strcmp(pv->prop, prop)) {
1501 pv->value = value;
1502 break;
1503 }
1504 }
1505
1506 /* It is valid to call this function only for properties that
1507 * are already present in the kvm_default_props table.
1508 */
1509 assert(pv->prop);
1510 }
1511
1512 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1513 bool migratable_only);
1514
1515 #ifdef CONFIG_KVM
1516
/* Return true if the host KVM supports LMCE (local machine-check exception).
 *
 * Queries KVM_X86_GET_MCE_CAP_SUPPORTED and tests the MCG_LMCE_P bit;
 * returns false if the ioctl itself fails.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap;

    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        return false;
    }

    return !!(mce_cap & MCG_LMCE_P);
}
1527
/* Fill @str (at least 48 bytes) with the host CPU model-id string taken
 * from CPUID leaves 0x80000002..0x80000004 (16 bytes per leaf, in
 * EAX/EBX/ECX/EDX order).  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        uint32_t regs[4] = { 0, 0, 0, 0 };

        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* regs[] is contiguous, so one copy covers all four registers */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
1542
1543 static X86CPUDefinition host_cpudef;
1544
/* qdev properties specific to the "host" CPU model */
static Property host_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1550
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    xcc->kvm_required = true;

    /* Vendor string comes from host CPUID leaf 0, in EBX/EDX/ECX order */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* Family/model/stepping are decoded from host CPUID leaf 1 EAX:
     * family = base family (bits 11:8) + extended family (bits 27:20),
     * model = extended model (bits 19:16) << 4 | base model (bits 7:4). */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;
    xcc->model_description =
        "KVM processor with all supported host features "
        "(only available in KVM mode)";

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
1586
/* instance_init for the "host" CPU model: mark the CPU as wanting all
 * host features and pick up the KVM-supported CPUID level limits.
 */
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
    if (kvm_enabled()) {
        /* Highest CPUID leaf KVM supports in each of the three leaf ranges
         * (basic 0x0..., extended 0x8000..., and 0xC000...). */
        env->cpuid_min_level =
            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_min_xlevel =
            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_min_xlevel2 =
            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        /* Only enable LMCE when the host kernel can actually back it */
        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    }

    /* The "host" model enables the PMU by default */
    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1614
/* QOM type registration for the KVM-only "host" CPU model */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1621
1622 #endif
1623
1624 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1625 {
1626 FeatureWordInfo *f = &feature_word_info[w];
1627 int i;
1628
1629 for (i = 0; i < 32; ++i) {
1630 if ((1UL << i) & mask) {
1631 const char *reg = get_register_name_32(f->cpuid_reg);
1632 assert(reg);
1633 fprintf(stderr, "warning: %s doesn't support requested feature: "
1634 "CPUID.%02XH:%s%s%s [bit %d]\n",
1635 kvm_enabled() ? "host" : "TCG",
1636 f->cpuid_eax, reg,
1637 f->feat_names[i] ? "." : "",
1638 f->feat_names[i] ? f->feat_names[i] : "", i);
1639 }
1640 }
1641 }
1642
1643 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1644 const char *name, void *opaque,
1645 Error **errp)
1646 {
1647 X86CPU *cpu = X86_CPU(obj);
1648 CPUX86State *env = &cpu->env;
1649 int64_t value;
1650
1651 value = (env->cpuid_version >> 8) & 0xf;
1652 if (value == 0xf) {
1653 value += (env->cpuid_version >> 20) & 0xff;
1654 }
1655 visit_type_int(v, name, &value, errp);
1656 }
1657
/* QOM setter for "family": encodes the value into the family (bits 11:8)
 * and extended-family (bits 27:20) fields of env->cpuid_version.
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;  /* extended family + base family */
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear both the family and extended-family fields */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        /* Families above 0xf: base family saturates at 0xf, remainder
         * goes into the extended-family field. */
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
1687
1688 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1689 const char *name, void *opaque,
1690 Error **errp)
1691 {
1692 X86CPU *cpu = X86_CPU(obj);
1693 CPUX86State *env = &cpu->env;
1694 int64_t value;
1695
1696 value = (env->cpuid_version >> 4) & 0xf;
1697 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1698 visit_type_int(v, name, &value, errp);
1699 }
1700
/* QOM setter for "model": the low nibble of the value goes into the base
 * model field (cpuid_version bits 7:4), the high nibble into the
 * extended-model field (bits 19:16).
 */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear model (bits 7:4) and extended model (bits 19:16), then re-pack */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
1726
1727 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1728 const char *name, void *opaque,
1729 Error **errp)
1730 {
1731 X86CPU *cpu = X86_CPU(obj);
1732 CPUX86State *env = &cpu->env;
1733 int64_t value;
1734
1735 value = env->cpuid_version & 0xf;
1736 visit_type_int(v, name, &value, errp);
1737 }
1738
/* QOM setter for "stepping": stores the value into cpuid_version bits 3:0.
 * Accepted range is [0, 0xf].
 */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
1764
1765 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1766 {
1767 X86CPU *cpu = X86_CPU(obj);
1768 CPUX86State *env = &cpu->env;
1769 char *value;
1770
1771 value = g_malloc(CPUID_VENDOR_SZ + 1);
1772 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1773 env->cpuid_vendor3);
1774 return value;
1775 }
1776
/* QOM setter for "vendor": @value must be exactly CPUID_VENDOR_SZ (12)
 * characters; it is packed 4 bytes at a time, little-endian, into the
 * three vendor words cpuid_vendor1..3.
 */
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        /* byte i of each 4-char group lands in bits [8i+7:8i] of its word */
        env->cpuid_vendor1 |= ((uint8_t)value[i    ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}
1798
1799 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1800 {
1801 X86CPU *cpu = X86_CPU(obj);
1802 CPUX86State *env = &cpu->env;
1803 char *value;
1804 int i;
1805
1806 value = g_malloc(48 + 1);
1807 for (i = 0; i < 48; i++) {
1808 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1809 }
1810 value[48] = '\0';
1811 return value;
1812 }
1813
1814 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1815 Error **errp)
1816 {
1817 X86CPU *cpu = X86_CPU(obj);
1818 CPUX86State *env = &cpu->env;
1819 int c, len, i;
1820
1821 if (model_id == NULL) {
1822 model_id = "";
1823 }
1824 len = strlen(model_id);
1825 memset(env->cpuid_model, 0, 48);
1826 for (i = 0; i < 48; i++) {
1827 if (i >= len) {
1828 c = '\0';
1829 } else {
1830 c = (uint8_t)model_id[i];
1831 }
1832 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1833 }
1834 }
1835
1836 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1837 void *opaque, Error **errp)
1838 {
1839 X86CPU *cpu = X86_CPU(obj);
1840 int64_t value;
1841
1842 value = cpu->env.tsc_khz * 1000;
1843 visit_type_int(v, name, &value, errp);
1844 }
1845
/* QOM setter for "tsc-frequency": input is in Hz, stored internally in
 * kHz.  Sets both tsc_khz and user_tsc_khz (the latter records that the
 * value was explicitly requested by the user).
 * NOTE(review): presumably user_tsc_khz distinguishes user-set values
 * from auto-detected ones — confirm against the consumers of this field.
 */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Hz -> kHz; sub-kHz precision is dropped */
    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
1868
/* Generic getter for "feature-words" and "filtered-features" properties
 *
 * @opaque points at the uint32_t feature-word array to report (chosen
 * when the property was registered).  A temporary linked list of
 * X86CPUFeatureWordInfo is built entirely on the stack; it only needs
 * to outlive the visit_type_* call below.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
1897
1898 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1899 void *opaque, Error **errp)
1900 {
1901 X86CPU *cpu = X86_CPU(obj);
1902 int64_t value = cpu->hyperv_spinlock_attempts;
1903
1904 visit_type_int(v, name, &value, errp);
1905 }
1906
/* QOM setter for "hv-spinlocks": stores hyperv_spinlock_attempts.
 * Accepted range is [0xFFF, UINT_MAX].
 * NOTE(review): the 0xFFF lower bound presumably matches the minimum
 * retry count the Hyper-V enlightenment allows — confirm against the
 * Hyper-V TLFS before changing it.
 */
static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}
1931
/* qdev property type backing the Hyper-V spinlock property: a plain "int"
 * whose range checking happens in x86_set_hv_spinlocks(). */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1937
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = strchr(s, '_'); p != NULL; p = strchr(p, '_')) {
        *p = '-';
    }
}
1947
/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;
1954
1955 /* Parse "+feature,-feature,feature=foo" CPU feature string
1956 */
1957 static void x86_cpu_parse_featurestr(const char *typename, char *features,
1958 Error **errp)
1959 {
1960 char *featurestr; /* Single 'key=value" string being parsed */
1961 Error *local_err = NULL;
1962 static bool cpu_globals_initialized;
1963
1964 if (cpu_globals_initialized) {
1965 return;
1966 }
1967 cpu_globals_initialized = true;
1968
1969 if (!features) {
1970 return;
1971 }
1972
1973 for (featurestr = strtok(features, ",");
1974 featurestr && !local_err;
1975 featurestr = strtok(NULL, ",")) {
1976 const char *name;
1977 const char *val = NULL;
1978 char *eq = NULL;
1979 char num[32];
1980 GlobalProperty *prop;
1981
1982 /* Compatibility syntax: */
1983 if (featurestr[0] == '+') {
1984 plus_features = g_list_append(plus_features,
1985 g_strdup(featurestr + 1));
1986 continue;
1987 } else if (featurestr[0] == '-') {
1988 minus_features = g_list_append(minus_features,
1989 g_strdup(featurestr + 1));
1990 continue;
1991 }
1992
1993 eq = strchr(featurestr, '=');
1994 if (eq) {
1995 *eq++ = 0;
1996 val = eq;
1997 } else {
1998 val = "on";
1999 }
2000
2001 feat2prop(featurestr);
2002 name = featurestr;
2003
2004 /* Special case: */
2005 if (!strcmp(name, "tsc-freq")) {
2006 int64_t tsc_freq;
2007 char *err;
2008
2009 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
2010 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
2011 if (tsc_freq < 0 || *err) {
2012 error_setg(errp, "bad numerical value %s", val);
2013 return;
2014 }
2015 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2016 val = num;
2017 name = "tsc-frequency";
2018 }
2019
2020 prop = g_new0(typeof(*prop), 1);
2021 prop->driver = typename;
2022 prop->property = g_strdup(name);
2023 prop->value = g_strdup(val);
2024 prop->errp = &error_fatal;
2025 qdev_prop_register_global(prop);
2026 }
2027
2028 if (local_err) {
2029 error_propagate(errp, local_err);
2030 }
2031 }
2032
2033 /* Print all cpuid feature names in featureset
2034 */
2035 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2036 {
2037 int bit;
2038 bool first = true;
2039
2040 for (bit = 0; bit < 32; bit++) {
2041 if (featureset[bit]) {
2042 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2043 first = false;
2044 }
2045 }
2046 }
2047
2048 /* Sort alphabetically by type name, listing kvm_required models last. */
2049 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2050 {
2051 ObjectClass *class_a = (ObjectClass *)a;
2052 ObjectClass *class_b = (ObjectClass *)b;
2053 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2054 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2055 const char *name_a, *name_b;
2056
2057 if (cc_a->kvm_required != cc_b->kvm_required) {
2058 /* kvm_required items go last */
2059 return cc_a->kvm_required ? 1 : -1;
2060 } else {
2061 name_a = object_class_get_name(class_a);
2062 name_b = object_class_get_name(class_b);
2063 return strcmp(name_a, name_b);
2064 }
2065 }
2066
2067 static GSList *get_sorted_cpu_model_list(void)
2068 {
2069 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2070 list = g_slist_sort(list, x86_cpu_list_compare);
2071 return list;
2072 }
2073
2074 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2075 {
2076 ObjectClass *oc = data;
2077 X86CPUClass *cc = X86_CPU_CLASS(oc);
2078 CPUListState *s = user_data;
2079 char *name = x86_cpu_class_get_model_name(cc);
2080 const char *desc = cc->model_description;
2081 if (!desc) {
2082 desc = cc->cpu_def->model_id;
2083 }
2084
2085 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2086 name, desc);
2087 g_free(name);
2088 }
2089
2090 /* list available CPU models and flags */
2091 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2092 {
2093 int i;
2094 CPUListState s = {
2095 .file = f,
2096 .cpu_fprintf = cpu_fprintf,
2097 };
2098 GSList *list;
2099
2100 (*cpu_fprintf)(f, "Available CPUs:\n");
2101 list = get_sorted_cpu_model_list();
2102 g_slist_foreach(list, x86_cpu_list_entry, &s);
2103 g_slist_free(list);
2104
2105 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2106 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2107 FeatureWordInfo *fw = &feature_word_info[i];
2108
2109 (*cpu_fprintf)(f, " ");
2110 listflags(f, cpu_fprintf, fw->feat_names);
2111 (*cpu_fprintf)(f, "\n");
2112 }
2113 }
2114
2115 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2116 {
2117 ObjectClass *oc = data;
2118 X86CPUClass *cc = X86_CPU_CLASS(oc);
2119 CpuDefinitionInfoList **cpu_list = user_data;
2120 CpuDefinitionInfoList *entry;
2121 CpuDefinitionInfo *info;
2122
2123 info = g_malloc0(sizeof(*info));
2124 info->name = x86_cpu_class_get_model_name(cc);
2125
2126 entry = g_malloc0(sizeof(*entry));
2127 entry->value = info;
2128 entry->next = *cpu_list;
2129 *cpu_list = entry;
2130 }
2131
/* Build the QMP CPU-definition list from the sorted CPU class list.
 * Returns a newly allocated list; ownership passes to the caller.
 */
CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
{
    CpuDefinitionInfoList *cpu_list = NULL;
    GSList *list = get_sorted_cpu_model_list();
    g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
    g_slist_free(list);
    return cpu_list;
}
2140
2141 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2142 bool migratable_only)
2143 {
2144 FeatureWordInfo *wi = &feature_word_info[w];
2145 uint32_t r;
2146
2147 if (kvm_enabled()) {
2148 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2149 wi->cpuid_ecx,
2150 wi->cpuid_reg);
2151 } else if (tcg_enabled()) {
2152 r = wi->tcg_features;
2153 } else {
2154 return ~0;
2155 }
2156 if (migratable_only) {
2157 r &= x86_cpu_get_migratable_flags(w);
2158 }
2159 return r;
2160 }
2161
2162 /*
2163 * Filters CPU feature words based on host availability of each feature.
2164 *
2165 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2166 */
2167 static int x86_cpu_filter_features(X86CPU *cpu)
2168 {
2169 CPUX86State *env = &cpu->env;
2170 FeatureWord w;
2171 int rv = 0;
2172
2173 for (w = 0; w < FEATURE_WORDS; w++) {
2174 uint32_t host_feat =
2175 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2176 uint32_t requested_features = env->features[w];
2177 env->features[w] &= host_feat;
2178 cpu->filtered_features[w] = requested_features & ~env->features[w];
2179 if (cpu->filtered_features[w]) {
2180 rv = 1;
2181 }
2182 }
2183
2184 return rv;
2185 }
2186
2187 static void x86_cpu_report_filtered_features(X86CPU *cpu)
2188 {
2189 FeatureWord w;
2190
2191 for (w = 0; w < FEATURE_WORDS; w++) {
2192 report_unavailable_features(w, cpu->filtered_features[w]);
2193 }
2194 }
2195
2196 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2197 {
2198 PropValue *pv;
2199 for (pv = props; pv->prop; pv++) {
2200 if (!pv->value) {
2201 continue;
2202 }
2203 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2204 &error_abort);
2205 }
2206 }
2207
/* Load data from X86CPUDefinition
 *
 * Copies the model definition's level/xlevel minimums, family/model/stepping,
 * model-id and feature words into @cpu via QOM properties, then applies
 * accelerator-specific defaults.  Property-set errors accumulate in @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        /* Without an in-kernel irqchip, x2apic defaults to off */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise that the guest runs under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        /* Under KVM, default to the host's real vendor string */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2260
/* Build an X86 CPU object for @cpu_model; thin wrapper around
 * cpu_generic_init() for TYPE_X86_CPU. */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
2265
2266 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2267 {
2268 X86CPUDefinition *cpudef = data;
2269 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2270
2271 xcc->cpu_def = cpudef;
2272 }
2273
2274 static void x86_register_cpudef_type(X86CPUDefinition *def)
2275 {
2276 char *typename = x86_cpu_type_name(def->name);
2277 TypeInfo ti = {
2278 .name = typename,
2279 .parent = TYPE_X86_CPU,
2280 .class_init = x86_cpu_cpudef_class_init,
2281 .class_data = def,
2282 };
2283
2284 type_register(&ti);
2285 g_free(typename);
2286 }
2287
2288 #if !defined(CONFIG_USER_ONLY)
2289
/* Remove CPUID_APIC from the CPUID[1].EDX feature word */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2294
2295 #endif /* !CONFIG_USER_ONLY */
2296
/* Compute the guest-visible CPUID result for leaf @index, subleaf @count.
 *
 * The four output registers are filled from the configured feature words in
 * env->features[] plus topology data from the CPUState.  Out-of-range leaves
 * are first clamped to cpuid_level / cpuid_xlevel / cpuid_xlevel2.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE is dynamic: it mirrors CR4.OSXSAVE when XSAVE exists */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU has its own core/thread topology; mask out the host's */
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE is dynamic: it mirrors CR4.PKE when PKU exists */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component sub-leaves: size/offset of enabled areas only */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        /* Maximum extended leaf and vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor, 48 bits virtual, configurable
             * physical bits.
             */
            *eax = 0x00003000 + cpu->phys_bits;
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2691
/* CPUClass::reset()
 *
 * Put the CPU back into its architectural power-on state: zero the env up
 * to end_reset_fields, then rebuild the real-mode register/segment state.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Reset vector: CS selector 0xf000 with base 0xffff0000, so with
     * EIP=0xfff0 (set below) execution starts at 0xfffffff0. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* APs start halted, waiting for INIT/SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2815
2816 #ifndef CONFIG_USER_ONLY
/* Return true if this CPU's APIC base MSR has the BSP bit set */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
2821
/* TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    /* Machine-reset callback; @opaque is the X86CPU registered for reset */
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
2828 #endif
2829
2830 static void mce_init(X86CPU *cpu)
2831 {
2832 CPUX86State *cenv = &cpu->env;
2833 unsigned int bank;
2834
2835 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2836 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2837 (CPUID_MCE | CPUID_MCA)) {
2838 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2839 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2840 cenv->mcg_ctl = ~(uint64_t)0;
2841 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2842 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2843 }
2844 }
2845 }
2846
2847 #ifndef CONFIG_USER_ONLY
2848 APICCommonClass *apic_get_class(void)
2849 {
2850 const char *apic_type = "apic";
2851
2852 if (kvm_apic_in_kernel()) {
2853 apic_type = "kvm-apic";
2854 } else if (xen_enabled()) {
2855 apic_type = "xen-apic";
2856 }
2857
2858 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
2859 }
2860
/* Create the local APIC device for @cpu; its type is chosen by
 * apic_get_class(). */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    /* The "lapic" child property keeps the long-lived reference; drop
     * the one returned by object_new(). */
    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
2878
/* Realize the CPU's APIC device (no-op if no APIC was created) and map the
 * shared APIC MMIO region once, for the first CPU realized. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
     }
}
2901
/* machine_done notifier: if the machine exposes "/machine/smram", create a
 * (disabled-by-default) alias of it in this CPU's address space. */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
2916 #else
/* User-mode emulation has no APIC device to realize */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
2920 #endif
2921
/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax < 0x80000008) {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        return 36;
    }

    host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
    /* Note: According to AMD doc 25481 rev 2.34 they have a field
     * at 23:16 that can specify a maximum physical address bits for
     * the guest that can override this value; but I've not seen
     * anything with that set.
     */
    return eax & 0xff;
}
2947
2948 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
2949 {
2950 if (*min < value) {
2951 *min = value;
2952 }
2953 }
2954
2955 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
2956 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
2957 {
2958 CPUX86State *env = &cpu->env;
2959 FeatureWordInfo *fi = &feature_word_info[w];
2960 uint32_t eax = fi->cpuid_eax;
2961 uint32_t region = eax & 0xF0000000;
2962
2963 if (!env->features[w]) {
2964 return;
2965 }
2966
2967 switch (region) {
2968 case 0x00000000:
2969 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
2970 break;
2971 case 0x80000000:
2972 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
2973 break;
2974 case 0xC0000000:
2975 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
2976 break;
2977 }
2978 }
2979
2980 /* Calculate XSAVE components based on the configured CPU feature flags */
2981 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
2982 {
2983 CPUX86State *env = &cpu->env;
2984 int i;
2985 uint64_t mask;
2986
2987 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2988 return;
2989 }
2990
2991 mask = 0;
2992 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2993 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2994 if (env->features[esa->feature] & esa->bits) {
2995 mask |= (1ULL << i);
2996 }
2997 }
2998
2999 env->features[FEAT_XSAVE_COMP_LO] = mask;
3000 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
3001 }
3002
/* Load CPUID data based on configured features
 *
 * Resolves the final env->features[] contents: host-feature passthrough
 * (when requested), then legacy +feat/-feat overrides, then derived words
 * (KVM paravirt features, XSAVE components) and the automatic
 * cpuid_level/xlevel/xlevel2 adjustment.  Order matters: +-feat must win
 * over feat=on|off, and level adjustment must see the final feature words.
 */
static void x86_cpu_load_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: cpu->host_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->host_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Legacy "+feat" entries: force the property on */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* Legacy "-feat" entries: force the property off */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are only visible when running under KVM
     * and the user didn't disable them */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
3080
/* Vendor checks: all three CPUID[0] vendor-string registers must match the
 * corresponding vendor signature constants.
 */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* Realize callback for TYPE_X86_CPU.
 *
 * Resolves the final CPUID feature set, validates/derives phys-bits,
 * creates the APIC and per-CPU address space where applicable, starts the
 * vCPU and finally chains to the parent class realize.  The order of the
 * steps below matters (see the notes on qemu_init_vcpu() further down).
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    /* e.g. the "host" CPU model is KVM-only */
    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    /* NOTE(review): this check reports through errp directly instead of
     * the local_err/goto-out pattern used elsewhere in this function.
     */
    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_load_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Drop features the accelerator cannot provide; warn or fail
     * depending on the "check"/"enforce" properties.
     */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       kvm_enabled() ?
                           "Host doesn't support requested features" :
                           "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                                 " does not match phys-bits property (%u)",
                                 host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            /* TCG only supports one fixed physical-address width */
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_init(cs, &error_abort);

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC device is created when the CPU advertises one, or whenever
     * there is more than one CPU.
     */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3275
/* Unrealize callback for TYPE_X86_CPU: stop the vCPU, drop the reset
 * handler registered at realize time, and tear down the APIC device.
 */
static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
{
    X86CPU *cpu = X86_CPU(dev);

#ifndef CONFIG_USER_ONLY
    cpu_remove_sync(CPU(dev));
    qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
#endif

    /* Undo x86_cpu_apic_create(), if an APIC was created */
    if (cpu->apic_state) {
        object_unparent(OBJECT(cpu->apic_state));
        cpu->apic_state = NULL;
    }
}
3290
/* Opaque state for the per-feature-bit boolean properties: the uint32_t
 * feature word being controlled and the bit mask within it (more than one
 * bit when the same property name is registered multiple times).
 */
typedef struct BitProperty {
    uint32_t *ptr;
    uint32_t mask;
} BitProperty;
3295
3296 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3297 void *opaque, Error **errp)
3298 {
3299 BitProperty *fp = opaque;
3300 bool value = (*fp->ptr & fp->mask) == fp->mask;
3301 visit_type_bool(v, name, &value, errp);
3302 }
3303
3304 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3305 void *opaque, Error **errp)
3306 {
3307 DeviceState *dev = DEVICE(obj);
3308 BitProperty *fp = opaque;
3309 Error *local_err = NULL;
3310 bool value;
3311
3312 if (dev->realized) {
3313 qdev_prop_set_after_realize(dev, name, errp);
3314 return;
3315 }
3316
3317 visit_type_bool(v, name, &value, &local_err);
3318 if (local_err) {
3319 error_propagate(errp, local_err);
3320 return;
3321 }
3322
3323 if (value) {
3324 *fp->ptr |= fp->mask;
3325 } else {
3326 *fp->ptr &= ~fp->mask;
3327 }
3328 }
3329
3330 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3331 void *opaque)
3332 {
3333 BitProperty *prop = opaque;
3334 g_free(prop);
3335 }
3336
3337 /* Register a boolean property to get/set a single bit in a uint32_t field.
3338 *
3339 * The same property name can be registered multiple times to make it affect
3340 * multiple bits in the same FeatureWord. In that case, the getter will return
3341 * true only if all bits are set.
3342 */
3343 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3344 const char *prop_name,
3345 uint32_t *field,
3346 int bitnr)
3347 {
3348 BitProperty *fp;
3349 ObjectProperty *op;
3350 uint32_t mask = (1UL << bitnr);
3351
3352 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3353 if (op) {
3354 fp = op->opaque;
3355 assert(fp->ptr == field);
3356 fp->mask |= mask;
3357 } else {
3358 fp = g_new0(BitProperty, 1);
3359 fp->ptr = field;
3360 fp->mask = mask;
3361 object_property_add(OBJECT(cpu), prop_name, "bool",
3362 x86_cpu_get_bit_prop,
3363 x86_cpu_set_bit_prop,
3364 x86_cpu_release_bit_prop, fp, &error_abort);
3365 }
3366 }
3367
3368 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3369 FeatureWord w,
3370 int bitnr)
3371 {
3372 FeatureWordInfo *fi = &feature_word_info[w];
3373 const char *name = fi->feat_names[bitnr];
3374
3375 if (!name) {
3376 return;
3377 }
3378
3379 /* Property names should use "-" instead of "_".
3380 * Old names containing underscores are registered as aliases
3381 * using object_property_add_alias()
3382 */
3383 assert(!strchr(name, '_'));
3384 /* aliases don't use "|" delimiters anymore, they are registered
3385 * manually using object_property_add_alias() */
3386 assert(!strchr(name, '|'));
3387 x86_cpu_register_bit_prop(cpu, name, &cpu->env.features[w], bitnr);
3388 }
3389
/* Instance init for TYPE_X86_CPU: registers the QOM properties of an x86
 * CPU object (versioning fields, feature-word introspection, one boolean
 * property per named feature bit, plus legacy-name aliases) and loads the
 * class's CPU model definition.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* CPUID version/identification properties */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only introspection of the enabled and filtered feature words */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Aliases for alternate spellings of feature names */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Aliases keeping the old underscore spellings working */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    /* Load the defaults for the CPU model implemented by this class */
    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3467
3468 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3469 {
3470 X86CPU *cpu = X86_CPU(cs);
3471
3472 return cpu->apic_id;
3473 }
3474
3475 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3476 {
3477 X86CPU *cpu = X86_CPU(cs);
3478
3479 return cpu->env.cr[0] & CR0_PG_MASK;
3480 }
3481
3482 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3483 {
3484 X86CPU *cpu = X86_CPU(cs);
3485
3486 cpu->env.eip = value;
3487 }
3488
3489 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3490 {
3491 X86CPU *cpu = X86_CPU(cs);
3492
3493 cpu->env.eip = tb->pc - tb->cs_base;
3494 }
3495
3496 static bool x86_cpu_has_work(CPUState *cs)
3497 {
3498 X86CPU *cpu = X86_CPU(cs);
3499 CPUX86State *env = &cpu->env;
3500
3501 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3502 CPU_INTERRUPT_POLL)) &&
3503 (env->eflags & IF_MASK)) ||
3504 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3505 CPU_INTERRUPT_INIT |
3506 CPU_INTERRUPT_SIPI |
3507 CPU_INTERRUPT_MCE)) ||
3508 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3509 !(env->hflags & HF_SMM_MASK));
3510 }
3511
/* qdev properties of TYPE_X86_CPU.
 * Note: "level"/"xlevel"/"xlevel2" default to UINT32_MAX, meaning "not set
 * by the user"; x86_cpu_load_features() then substitutes the min-*level
 * values computed from the enabled features.
 */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    /* Hyper-V enlightenments */
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* Feature-filtering policy: warn ("check") or fail ("enforce") */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_END_OF_LIST()
};
3555
/* Class init for TYPE_X86_CPU: wires up the device hooks (saving the parent
 * realize/reset so they can be chained) and fills in the CPUClass method
 * table.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chainable overrides of the parent class hooks */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    /* x86 CPUs can be hot-added with device_add */
    dc->cannot_instantiate_with_device_add_yet = false;
}
3603
/* Abstract base QOM type for all x86 CPU models; concrete models are
 * registered separately (see x86_cpu_register_types below).
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3613
3614 static void x86_cpu_register_types(void)
3615 {
3616 int i;
3617
3618 type_register_static(&x86_cpu_type_info);
3619 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3620 x86_register_cpudef_type(&builtin_x86_defs[i]);
3621 }
3622 #ifdef CONFIG_KVM
3623 type_register_static(&host_x86_cpu_type_info);
3624 #endif
3625 }
3626
3627 type_init(x86_cpu_register_types)