]> git.proxmox.com Git - mirror_qemu.git/blob - target-i386/cpu.c
target-i386: xsave: Calculate enabled components only once
[mirror_qemu.git] / target-i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors */

/* One-byte descriptors returned in CPUID[2]; each value identifies a
 * specific cache configuration (size/ways/line size as named below). */
#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B 0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d


/* CPUID Leaf 4 constants: */

/* EAX: */
/* Cache type field (EAX bits 4:0) */
#define CPUID_4_TYPE_DCACHE 1
#define CPUID_4_TYPE_ICACHE 2
#define CPUID_4_TYPE_UNIFIED 3

/* Cache level field (EAX bits 7:5) */
#define CPUID_4_LEVEL(l) ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE (1 << 1)
#define CPUID_4_COMPLEX_IDX (1 << 2)

/* Associativity field value meaning "fully associative": */
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a way count to the 4-bit field encoding; way counts with no
 * defined encoding map to 0 (invalid value).
 * Every use of the parameter and the whole expansion are parenthesized
 * so that expression arguments (e.g. conditionals) and surrounding
 * lower-precedence operators expand correctly. */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)  : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
95
96
/* Definitions of the hardcoded cache entries we expose: */

/* L1 data cache: */
#define L1D_LINE_SIZE 64
#define L1D_ASSOCIATIVITY 8
#define L1D_SETS 64
#define L1D_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG 1
#define L1D_SIZE_KB_AMD 64
#define L1D_ASSOCIATIVITY_AMD 2

/* L1 instruction cache: */
#define L1I_LINE_SIZE 64
#define L1I_ASSOCIATIVITY 8
#define L1I_SETS 64
#define L1I_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG 1
#define L1I_SIZE_KB_AMD 64
#define L1I_ASSOCIATIVITY_AMD 2

/* Level 2 unified cache: */
#define L2_LINE_SIZE 64
#define L2_ASSOCIATIVITY 16
#define L2_SETS 4096
#define L2_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG 1
#define L2_SIZE_KB_AMD 512

/* Level 3 unified cache: */
/* The plain L3_* constants describe the default (disabled) L3 cache;
 * the L3_N_* constants describe the 16MiB L3 exposed when enabled. */
#define L3_SIZE_KB 0 /* disabled */
#define L3_ASSOCIATIVITY 0 /* disabled */
#define L3_LINES_PER_TAG 0 /* disabled */
#define L3_LINE_SIZE 0 /* disabled */
#define L3_N_LINE_SIZE 64
#define L3_N_ASSOCIATIVITY 16
#define L3_N_SETS 16384
#define L3_N_PARTITIONS 1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG 1
#define L3_N_SIZE_KB_AMD 16384

/* TLB definitions: */

/* L1 TLBs are reported as fully associative (assoc == 1 means "direct
 * mapped" in some encodings — NOTE(review): these values feed the AMD
 * CPUID 0x80000005/6 leaves; verify encoding against the APM). */
#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512

169
170
171
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
174 {
175 int i;
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
180 }
181 dst[CPUID_VENDOR_SZ] = '\0';
182 }
183
/* Baseline feature sets for the built-in CPU models: */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* Feature bits the TCG emulation actually implements, per feature word.
 * Bits not listed here are filtered out when running without KVM. */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */

247
/* Describes one feature word: its bit names, the CPUID leaf/register
 * that reports it, and which bits TCG supports or are unmigratable. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
262
/* Per-feature-word descriptions, indexed by FeatureWord.  Bit names
 * are indexed by bit position; NULL entries are bits QEMU does not
 * know a name for (and therefore cannot enable by name). */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
            "ds_cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1|sse4_1",
            "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx|xd", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb", "rdtscp",
            NULL, "lm|i64", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf_lm", "cmp_legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid_msr",
            NULL, "tbm", "topoext", "perfctr_core",
            "perfctr_nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    /* VIA/Centaur extended features: */
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    /* KVM paravirtual features.  NOTE(review): "kvmclock" appears at
     * bits 0 and 3 — presumably both clocksource feature bits select
     * the same user-visible flag; confirm against linux/kvm_para.h. */
    [FEAT_KVM] = {
        .feat_names = {
            "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
            "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    /* Hyper-V enlightenments (no flag names registered yet): */
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    /* AMD SVM (nested virtualization) features: */
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm_lock", "nrip_save",
            "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause_filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc_adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", NULL, "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    /* Advanced Power Management; invtsc is named but unmigratable: */
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
};
493
/* Maps an R_* register index to its name and QAPI enum value. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* Info for the 32-bit general-purpose registers, indexed by R_* constant */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
514
/* Describes one XSAVE state component: the CPUID feature bit that
 * enables it, and its offset/size within the XSAVE area layout. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

/* XSAVE state components, indexed by XSTATE_*_BIT component number.
 * Offsets/sizes come from the X86XSaveArea layout declared in cpu.h. */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
550
551 const char *get_register_name_32(unsigned int reg)
552 {
553 if (reg >= CPU_NB_REGS32) {
554 return NULL;
555 }
556 return x86_reg_info_32[reg].name;
557 }
558
559 /*
560 * Returns the set of feature flags that are supported and migratable by
561 * QEMU, for a given FeatureWord.
562 */
563 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
564 {
565 FeatureWordInfo *wi = &feature_word_info[w];
566 uint32_t r = 0;
567 int i;
568
569 for (i = 0; i < 32; i++) {
570 uint32_t f = 1U << i;
571 /* If the feature name is unknown, it is not supported by QEMU yet */
572 if (!wi->feat_names[i]) {
573 continue;
574 }
575 /* Skip features known to QEMU, but explicitly marked as unmigratable */
576 if (wi->unmigratable_flags & f) {
577 continue;
578 }
579 r |= f;
580 }
581 return r;
582 }
583
584 void host_cpuid(uint32_t function, uint32_t count,
585 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
586 {
587 uint32_t vec[4];
588
589 #ifdef __x86_64__
590 asm volatile("cpuid"
591 : "=a"(vec[0]), "=b"(vec[1]),
592 "=c"(vec[2]), "=d"(vec[3])
593 : "0"(function), "c"(count) : "cc");
594 #elif defined(__i386__)
595 asm volatile("pusha \n\t"
596 "cpuid \n\t"
597 "mov %%eax, 0(%2) \n\t"
598 "mov %%ebx, 4(%2) \n\t"
599 "mov %%ecx, 8(%2) \n\t"
600 "mov %%edx, 12(%2) \n\t"
601 "popa"
602 : : "a"(function), "c"(count), "S"(vec)
603 : "memory", "cc");
604 #else
605 abort();
606 #endif
607
608 if (eax)
609 *eax = vec[0];
610 if (ebx)
611 *ebx = vec[1];
612 if (ecx)
613 *ecx = vec[2];
614 if (edx)
615 *edx = vec[3];
616 }
617
618 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
619
/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    while (*s1 && *s2 && *s1 == *s2) {
        s1++;
        s2++;
        if (s1 == e1 && s2 == e2) {
            /* both substrings exhausted simultaneously: equal */
            return 0;
        }
        if (s1 == e1) {
            /* s1 exhausted first: order by next char of s2 */
            return *s2;
        }
        if (s2 == e2) {
            /* s2 exhausted first: order by next char of s1 */
            return *s1;
        }
    }
    /* NUL hit or mismatching characters */
    return *s1 - *s2;
}
640
/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right. Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *start = altstr;

    for (;;) {
        /* find the end of the current alternative */
        const char *end = start;
        while (*end != '\0' && *end != '|') {
            end++;
        }
        if (start == end) {
            /* empty alternative matches only the empty flag name */
            if (!*s) {
                return 0;
            }
        } else if (sstrcmp(s, e, start, end) == 0) {
            return 0;
        }
        if (*end == '\0') {
            return 1; /* no more alternatives: no match */
        }
        start = end + 1; /* skip the '|' separator */
    }
}
661
662 /* search featureset for flag *[s..e), if found set corresponding bit in
663 * *pval and return true, otherwise return false
664 */
665 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
666 const char **featureset)
667 {
668 uint32_t mask;
669 const char **ppc;
670 bool found = false;
671
672 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
673 if (*ppc && !altcmp(s, e, *ppc)) {
674 *pval |= mask;
675 found = true;
676 }
677 }
678 return found;
679 }
680
681 static void add_flagname_to_bitmaps(const char *flagname,
682 FeatureWordArray words,
683 Error **errp)
684 {
685 FeatureWord w;
686 for (w = 0; w < FEATURE_WORDS; w++) {
687 FeatureWordInfo *wi = &feature_word_info[w];
688 if (lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
689 break;
690 }
691 }
692 if (w == FEATURE_WORDS) {
693 error_setg(errp, "CPU feature %s not found", flagname);
694 }
695 }
696
697 /* CPU class name definitions: */
698
699 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
700 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
701
702 /* Return type name for a given CPU model name
703 * Caller is responsible for freeing the returned string.
704 */
705 static char *x86_cpu_type_name(const char *model_name)
706 {
707 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
708 }
709
710 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
711 {
712 ObjectClass *oc;
713 char *typename;
714
715 if (cpu_model == NULL) {
716 return NULL;
717 }
718
719 typename = x86_cpu_type_name(cpu_model);
720 oc = object_class_by_name(typename);
721 g_free(typename);
722 return oc;
723 }
724
725 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
726 {
727 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
728 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
729 return g_strndup(class_name,
730 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
731 }
732
/* Static definition of one built-in CPU model. */
struct X86CPUDefinition {
    const char *name;   /* CPU model name, e.g. "qemu64" */
    uint32_t level;     /* maximum basic CPUID leaf */
    uint32_t xlevel;    /* maximum extended CPUID leaf (0x8000xxxx) */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features; /* default feature bits per feature word */
    char model_id[48];  /* human-readable model ID string */
};
745
746 static X86CPUDefinition builtin_x86_defs[] = {
747 {
748 .name = "qemu64",
749 .level = 0xd,
750 .vendor = CPUID_VENDOR_AMD,
751 .family = 6,
752 .model = 6,
753 .stepping = 3,
754 .features[FEAT_1_EDX] =
755 PPRO_FEATURES |
756 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
757 CPUID_PSE36,
758 .features[FEAT_1_ECX] =
759 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
760 .features[FEAT_8000_0001_EDX] =
761 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
762 .features[FEAT_8000_0001_ECX] =
763 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
764 .xlevel = 0x8000000A,
765 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
766 },
767 {
768 .name = "phenom",
769 .level = 5,
770 .vendor = CPUID_VENDOR_AMD,
771 .family = 16,
772 .model = 2,
773 .stepping = 3,
774 /* Missing: CPUID_HT */
775 .features[FEAT_1_EDX] =
776 PPRO_FEATURES |
777 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
778 CPUID_PSE36 | CPUID_VME,
779 .features[FEAT_1_ECX] =
780 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
781 CPUID_EXT_POPCNT,
782 .features[FEAT_8000_0001_EDX] =
783 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
784 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
785 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
786 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
787 CPUID_EXT3_CR8LEG,
788 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
789 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
790 .features[FEAT_8000_0001_ECX] =
791 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
792 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
793 /* Missing: CPUID_SVM_LBRV */
794 .features[FEAT_SVM] =
795 CPUID_SVM_NPT,
796 .xlevel = 0x8000001A,
797 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
798 },
799 {
800 .name = "core2duo",
801 .level = 10,
802 .vendor = CPUID_VENDOR_INTEL,
803 .family = 6,
804 .model = 15,
805 .stepping = 11,
806 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
807 .features[FEAT_1_EDX] =
808 PPRO_FEATURES |
809 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
810 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
811 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
812 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
813 .features[FEAT_1_ECX] =
814 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
815 CPUID_EXT_CX16,
816 .features[FEAT_8000_0001_EDX] =
817 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
818 .features[FEAT_8000_0001_ECX] =
819 CPUID_EXT3_LAHF_LM,
820 .xlevel = 0x80000008,
821 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
822 },
823 {
824 .name = "kvm64",
825 .level = 0xd,
826 .vendor = CPUID_VENDOR_INTEL,
827 .family = 15,
828 .model = 6,
829 .stepping = 1,
830 /* Missing: CPUID_HT */
831 .features[FEAT_1_EDX] =
832 PPRO_FEATURES | CPUID_VME |
833 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
834 CPUID_PSE36,
835 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
836 .features[FEAT_1_ECX] =
837 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
838 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
839 .features[FEAT_8000_0001_EDX] =
840 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
841 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
842 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
843 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
844 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
845 .features[FEAT_8000_0001_ECX] =
846 0,
847 .xlevel = 0x80000008,
848 .model_id = "Common KVM processor"
849 },
850 {
851 .name = "qemu32",
852 .level = 4,
853 .vendor = CPUID_VENDOR_INTEL,
854 .family = 6,
855 .model = 6,
856 .stepping = 3,
857 .features[FEAT_1_EDX] =
858 PPRO_FEATURES,
859 .features[FEAT_1_ECX] =
860 CPUID_EXT_SSE3,
861 .xlevel = 0x80000004,
862 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
863 },
864 {
865 .name = "kvm32",
866 .level = 5,
867 .vendor = CPUID_VENDOR_INTEL,
868 .family = 15,
869 .model = 6,
870 .stepping = 1,
871 .features[FEAT_1_EDX] =
872 PPRO_FEATURES | CPUID_VME |
873 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
874 .features[FEAT_1_ECX] =
875 CPUID_EXT_SSE3,
876 .features[FEAT_8000_0001_ECX] =
877 0,
878 .xlevel = 0x80000008,
879 .model_id = "Common 32-bit KVM processor"
880 },
881 {
882 .name = "coreduo",
883 .level = 10,
884 .vendor = CPUID_VENDOR_INTEL,
885 .family = 6,
886 .model = 14,
887 .stepping = 8,
888 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
889 .features[FEAT_1_EDX] =
890 PPRO_FEATURES | CPUID_VME |
891 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
892 CPUID_SS,
893 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
894 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
895 .features[FEAT_1_ECX] =
896 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
897 .features[FEAT_8000_0001_EDX] =
898 CPUID_EXT2_NX,
899 .xlevel = 0x80000008,
900 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
901 },
902 {
903 .name = "486",
904 .level = 1,
905 .vendor = CPUID_VENDOR_INTEL,
906 .family = 4,
907 .model = 8,
908 .stepping = 0,
909 .features[FEAT_1_EDX] =
910 I486_FEATURES,
911 .xlevel = 0,
912 },
913 {
914 .name = "pentium",
915 .level = 1,
916 .vendor = CPUID_VENDOR_INTEL,
917 .family = 5,
918 .model = 4,
919 .stepping = 3,
920 .features[FEAT_1_EDX] =
921 PENTIUM_FEATURES,
922 .xlevel = 0,
923 },
924 {
925 .name = "pentium2",
926 .level = 2,
927 .vendor = CPUID_VENDOR_INTEL,
928 .family = 6,
929 .model = 5,
930 .stepping = 2,
931 .features[FEAT_1_EDX] =
932 PENTIUM2_FEATURES,
933 .xlevel = 0,
934 },
935 {
936 .name = "pentium3",
937 .level = 3,
938 .vendor = CPUID_VENDOR_INTEL,
939 .family = 6,
940 .model = 7,
941 .stepping = 3,
942 .features[FEAT_1_EDX] =
943 PENTIUM3_FEATURES,
944 .xlevel = 0,
945 },
946 {
947 .name = "athlon",
948 .level = 2,
949 .vendor = CPUID_VENDOR_AMD,
950 .family = 6,
951 .model = 2,
952 .stepping = 3,
953 .features[FEAT_1_EDX] =
954 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
955 CPUID_MCA,
956 .features[FEAT_8000_0001_EDX] =
957 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
958 .xlevel = 0x80000008,
959 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
960 },
961 {
962 .name = "n270",
963 .level = 10,
964 .vendor = CPUID_VENDOR_INTEL,
965 .family = 6,
966 .model = 28,
967 .stepping = 2,
968 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
969 .features[FEAT_1_EDX] =
970 PPRO_FEATURES |
971 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
972 CPUID_ACPI | CPUID_SS,
973 /* Some CPUs got no CPUID_SEP */
974 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
975 * CPUID_EXT_XTPR */
976 .features[FEAT_1_ECX] =
977 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
978 CPUID_EXT_MOVBE,
979 .features[FEAT_8000_0001_EDX] =
980 CPUID_EXT2_NX,
981 .features[FEAT_8000_0001_ECX] =
982 CPUID_EXT3_LAHF_LM,
983 .xlevel = 0x80000008,
984 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
985 },
986 {
987 .name = "Conroe",
988 .level = 10,
989 .vendor = CPUID_VENDOR_INTEL,
990 .family = 6,
991 .model = 15,
992 .stepping = 3,
993 .features[FEAT_1_EDX] =
994 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
995 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
996 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
997 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
998 CPUID_DE | CPUID_FP87,
999 .features[FEAT_1_ECX] =
1000 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1001 .features[FEAT_8000_0001_EDX] =
1002 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1003 .features[FEAT_8000_0001_ECX] =
1004 CPUID_EXT3_LAHF_LM,
1005 .xlevel = 0x80000008,
1006 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1007 },
1008 {
1009 .name = "Penryn",
1010 .level = 10,
1011 .vendor = CPUID_VENDOR_INTEL,
1012 .family = 6,
1013 .model = 23,
1014 .stepping = 3,
1015 .features[FEAT_1_EDX] =
1016 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1017 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1018 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1019 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1020 CPUID_DE | CPUID_FP87,
1021 .features[FEAT_1_ECX] =
1022 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1023 CPUID_EXT_SSE3,
1024 .features[FEAT_8000_0001_EDX] =
1025 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1026 .features[FEAT_8000_0001_ECX] =
1027 CPUID_EXT3_LAHF_LM,
1028 .xlevel = 0x80000008,
1029 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1030 },
1031 {
1032 .name = "Nehalem",
1033 .level = 11,
1034 .vendor = CPUID_VENDOR_INTEL,
1035 .family = 6,
1036 .model = 26,
1037 .stepping = 3,
1038 .features[FEAT_1_EDX] =
1039 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1040 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1041 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1042 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1043 CPUID_DE | CPUID_FP87,
1044 .features[FEAT_1_ECX] =
1045 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1046 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1047 .features[FEAT_8000_0001_EDX] =
1048 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1049 .features[FEAT_8000_0001_ECX] =
1050 CPUID_EXT3_LAHF_LM,
1051 .xlevel = 0x80000008,
1052 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1053 },
1054 {
1055 .name = "Westmere",
1056 .level = 11,
1057 .vendor = CPUID_VENDOR_INTEL,
1058 .family = 6,
1059 .model = 44,
1060 .stepping = 1,
1061 .features[FEAT_1_EDX] =
1062 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1063 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1064 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1065 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1066 CPUID_DE | CPUID_FP87,
1067 .features[FEAT_1_ECX] =
1068 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1069 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1070 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1071 .features[FEAT_8000_0001_EDX] =
1072 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1073 .features[FEAT_8000_0001_ECX] =
1074 CPUID_EXT3_LAHF_LM,
1075 .features[FEAT_6_EAX] =
1076 CPUID_6_EAX_ARAT,
1077 .xlevel = 0x80000008,
1078 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1079 },
1080 {
1081 .name = "SandyBridge",
1082 .level = 0xd,
1083 .vendor = CPUID_VENDOR_INTEL,
1084 .family = 6,
1085 .model = 42,
1086 .stepping = 1,
1087 .features[FEAT_1_EDX] =
1088 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1089 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1090 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1091 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1092 CPUID_DE | CPUID_FP87,
1093 .features[FEAT_1_ECX] =
1094 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1095 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1096 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1097 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1098 CPUID_EXT_SSE3,
1099 .features[FEAT_8000_0001_EDX] =
1100 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1101 CPUID_EXT2_SYSCALL,
1102 .features[FEAT_8000_0001_ECX] =
1103 CPUID_EXT3_LAHF_LM,
1104 .features[FEAT_XSAVE] =
1105 CPUID_XSAVE_XSAVEOPT,
1106 .features[FEAT_6_EAX] =
1107 CPUID_6_EAX_ARAT,
1108 .xlevel = 0x80000008,
1109 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1110 },
1111 {
1112 .name = "IvyBridge",
1113 .level = 0xd,
1114 .vendor = CPUID_VENDOR_INTEL,
1115 .family = 6,
1116 .model = 58,
1117 .stepping = 9,
1118 .features[FEAT_1_EDX] =
1119 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1120 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1121 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1122 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1123 CPUID_DE | CPUID_FP87,
1124 .features[FEAT_1_ECX] =
1125 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1126 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1127 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1128 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1129 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1130 .features[FEAT_7_0_EBX] =
1131 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1132 CPUID_7_0_EBX_ERMS,
1133 .features[FEAT_8000_0001_EDX] =
1134 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1135 CPUID_EXT2_SYSCALL,
1136 .features[FEAT_8000_0001_ECX] =
1137 CPUID_EXT3_LAHF_LM,
1138 .features[FEAT_XSAVE] =
1139 CPUID_XSAVE_XSAVEOPT,
1140 .features[FEAT_6_EAX] =
1141 CPUID_6_EAX_ARAT,
1142 .xlevel = 0x80000008,
1143 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1144 },
1145 {
1146 .name = "Haswell-noTSX",
1147 .level = 0xd,
1148 .vendor = CPUID_VENDOR_INTEL,
1149 .family = 6,
1150 .model = 60,
1151 .stepping = 1,
1152 .features[FEAT_1_EDX] =
1153 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1154 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1155 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1156 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1157 CPUID_DE | CPUID_FP87,
1158 .features[FEAT_1_ECX] =
1159 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1160 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1161 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1162 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1163 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1164 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1165 .features[FEAT_8000_0001_EDX] =
1166 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1167 CPUID_EXT2_SYSCALL,
1168 .features[FEAT_8000_0001_ECX] =
1169 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1170 .features[FEAT_7_0_EBX] =
1171 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1172 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1173 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1174 .features[FEAT_XSAVE] =
1175 CPUID_XSAVE_XSAVEOPT,
1176 .features[FEAT_6_EAX] =
1177 CPUID_6_EAX_ARAT,
1178 .xlevel = 0x80000008,
1179 .model_id = "Intel Core Processor (Haswell, no TSX)",
1180 }, {
1181 .name = "Haswell",
1182 .level = 0xd,
1183 .vendor = CPUID_VENDOR_INTEL,
1184 .family = 6,
1185 .model = 60,
1186 .stepping = 1,
1187 .features[FEAT_1_EDX] =
1188 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1189 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1190 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1191 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1192 CPUID_DE | CPUID_FP87,
1193 .features[FEAT_1_ECX] =
1194 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1195 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1196 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1197 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1198 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1199 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1200 .features[FEAT_8000_0001_EDX] =
1201 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1202 CPUID_EXT2_SYSCALL,
1203 .features[FEAT_8000_0001_ECX] =
1204 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1205 .features[FEAT_7_0_EBX] =
1206 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1207 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1208 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1209 CPUID_7_0_EBX_RTM,
1210 .features[FEAT_XSAVE] =
1211 CPUID_XSAVE_XSAVEOPT,
1212 .features[FEAT_6_EAX] =
1213 CPUID_6_EAX_ARAT,
1214 .xlevel = 0x80000008,
1215 .model_id = "Intel Core Processor (Haswell)",
1216 },
1217 {
1218 .name = "Broadwell-noTSX",
1219 .level = 0xd,
1220 .vendor = CPUID_VENDOR_INTEL,
1221 .family = 6,
1222 .model = 61,
1223 .stepping = 2,
1224 .features[FEAT_1_EDX] =
1225 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1226 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1227 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1228 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1229 CPUID_DE | CPUID_FP87,
1230 .features[FEAT_1_ECX] =
1231 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1232 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1233 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1234 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1235 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1236 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1237 .features[FEAT_8000_0001_EDX] =
1238 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1239 CPUID_EXT2_SYSCALL,
1240 .features[FEAT_8000_0001_ECX] =
1241 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1242 .features[FEAT_7_0_EBX] =
1243 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1244 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1245 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1246 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1247 CPUID_7_0_EBX_SMAP,
1248 .features[FEAT_XSAVE] =
1249 CPUID_XSAVE_XSAVEOPT,
1250 .features[FEAT_6_EAX] =
1251 CPUID_6_EAX_ARAT,
1252 .xlevel = 0x80000008,
1253 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1254 },
1255 {
1256 .name = "Broadwell",
1257 .level = 0xd,
1258 .vendor = CPUID_VENDOR_INTEL,
1259 .family = 6,
1260 .model = 61,
1261 .stepping = 2,
1262 .features[FEAT_1_EDX] =
1263 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1264 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1265 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1266 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1267 CPUID_DE | CPUID_FP87,
1268 .features[FEAT_1_ECX] =
1269 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1270 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1271 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1272 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1273 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1274 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1275 .features[FEAT_8000_0001_EDX] =
1276 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1277 CPUID_EXT2_SYSCALL,
1278 .features[FEAT_8000_0001_ECX] =
1279 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1280 .features[FEAT_7_0_EBX] =
1281 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1282 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1283 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1284 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1285 CPUID_7_0_EBX_SMAP,
1286 .features[FEAT_XSAVE] =
1287 CPUID_XSAVE_XSAVEOPT,
1288 .features[FEAT_6_EAX] =
1289 CPUID_6_EAX_ARAT,
1290 .xlevel = 0x80000008,
1291 .model_id = "Intel Core Processor (Broadwell)",
1292 },
1293 {
1294 .name = "Skylake-Client",
1295 .level = 0xd,
1296 .vendor = CPUID_VENDOR_INTEL,
1297 .family = 6,
1298 .model = 94,
1299 .stepping = 3,
1300 .features[FEAT_1_EDX] =
1301 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1302 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1303 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1304 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1305 CPUID_DE | CPUID_FP87,
1306 .features[FEAT_1_ECX] =
1307 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1308 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1309 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1310 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1311 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1312 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1313 .features[FEAT_8000_0001_EDX] =
1314 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1315 CPUID_EXT2_SYSCALL,
1316 .features[FEAT_8000_0001_ECX] =
1317 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1318 .features[FEAT_7_0_EBX] =
1319 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1320 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1321 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1322 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1323 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1324 /* Missing: XSAVES (not supported by some Linux versions,
1325 * including v4.1 to v4.6).
1326 * KVM doesn't yet expose any XSAVES state save component,
1327 * and the only one defined in Skylake (processor tracing)
1328 * probably will block migration anyway.
1329 */
1330 .features[FEAT_XSAVE] =
1331 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1332 CPUID_XSAVE_XGETBV1,
1333 .features[FEAT_6_EAX] =
1334 CPUID_6_EAX_ARAT,
1335 .xlevel = 0x80000008,
1336 .model_id = "Intel Core Processor (Skylake)",
1337 },
1338 {
1339 .name = "Opteron_G1",
1340 .level = 5,
1341 .vendor = CPUID_VENDOR_AMD,
1342 .family = 15,
1343 .model = 6,
1344 .stepping = 1,
1345 .features[FEAT_1_EDX] =
1346 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1347 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1348 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1349 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1350 CPUID_DE | CPUID_FP87,
1351 .features[FEAT_1_ECX] =
1352 CPUID_EXT_SSE3,
1353 .features[FEAT_8000_0001_EDX] =
1354 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1355 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1356 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1357 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1358 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1359 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1360 .xlevel = 0x80000008,
1361 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1362 },
1363 {
1364 .name = "Opteron_G2",
1365 .level = 5,
1366 .vendor = CPUID_VENDOR_AMD,
1367 .family = 15,
1368 .model = 6,
1369 .stepping = 1,
1370 .features[FEAT_1_EDX] =
1371 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1372 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1373 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1374 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1375 CPUID_DE | CPUID_FP87,
1376 .features[FEAT_1_ECX] =
1377 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1378 /* Missing: CPUID_EXT2_RDTSCP */
1379 .features[FEAT_8000_0001_EDX] =
1380 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1381 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1382 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1383 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1384 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1385 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1386 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1387 .features[FEAT_8000_0001_ECX] =
1388 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1389 .xlevel = 0x80000008,
1390 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1391 },
1392 {
1393 .name = "Opteron_G3",
1394 .level = 5,
1395 .vendor = CPUID_VENDOR_AMD,
1396 .family = 15,
1397 .model = 6,
1398 .stepping = 1,
1399 .features[FEAT_1_EDX] =
1400 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1401 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1402 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1403 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1404 CPUID_DE | CPUID_FP87,
1405 .features[FEAT_1_ECX] =
1406 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1407 CPUID_EXT_SSE3,
1408 /* Missing: CPUID_EXT2_RDTSCP */
1409 .features[FEAT_8000_0001_EDX] =
1410 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1411 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1412 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1413 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1414 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1415 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1416 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1417 .features[FEAT_8000_0001_ECX] =
1418 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1419 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1420 .xlevel = 0x80000008,
1421 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1422 },
1423 {
1424 .name = "Opteron_G4",
1425 .level = 0xd,
1426 .vendor = CPUID_VENDOR_AMD,
1427 .family = 21,
1428 .model = 1,
1429 .stepping = 2,
1430 .features[FEAT_1_EDX] =
1431 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1432 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1433 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1434 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1435 CPUID_DE | CPUID_FP87,
1436 .features[FEAT_1_ECX] =
1437 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1438 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1439 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1440 CPUID_EXT_SSE3,
1441 /* Missing: CPUID_EXT2_RDTSCP */
1442 .features[FEAT_8000_0001_EDX] =
1443 CPUID_EXT2_LM |
1444 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1445 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1446 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1447 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1448 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1449 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1450 .features[FEAT_8000_0001_ECX] =
1451 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1452 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1453 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1454 CPUID_EXT3_LAHF_LM,
1455 /* no xsaveopt! */
1456 .xlevel = 0x8000001A,
1457 .model_id = "AMD Opteron 62xx class CPU",
1458 },
1459 {
1460 .name = "Opteron_G5",
1461 .level = 0xd,
1462 .vendor = CPUID_VENDOR_AMD,
1463 .family = 21,
1464 .model = 2,
1465 .stepping = 0,
1466 .features[FEAT_1_EDX] =
1467 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1468 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1469 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1470 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1471 CPUID_DE | CPUID_FP87,
1472 .features[FEAT_1_ECX] =
1473 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1474 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1475 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1476 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1477 /* Missing: CPUID_EXT2_RDTSCP */
1478 .features[FEAT_8000_0001_EDX] =
1479 CPUID_EXT2_LM |
1480 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1481 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1482 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1483 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1484 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1485 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1486 .features[FEAT_8000_0001_ECX] =
1487 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1488 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1489 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1490 CPUID_EXT3_LAHF_LM,
1491 /* no xsaveopt! */
1492 .xlevel = 0x8000001A,
1493 .model_id = "AMD Opteron 63xx class CPU",
1494 },
1495 };
1496
/* A (property name, value) pair, used for tables of default property
 * settings such as kvm_default_props below.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1500
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * NULL-terminated table; individual entries can be overridden at
 * runtime through x86_cpu_change_kvm_default().
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
1517
1518 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1519 {
1520 PropValue *pv;
1521 for (pv = kvm_default_props; pv->prop; pv++) {
1522 if (!strcmp(pv->prop, prop)) {
1523 pv->value = value;
1524 break;
1525 }
1526 }
1527
1528 /* It is valid to call this function only for properties that
1529 * are already present in the kvm_default_props table.
1530 */
1531 assert(pv->prop);
1532 }
1533
1534 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1535 bool migratable_only);
1536
1537 #ifdef CONFIG_KVM
1538
1539 static bool lmce_supported(void)
1540 {
1541 uint64_t mce_cap;
1542
1543 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1544 return false;
1545 }
1546
1547 return !!(mce_cap & MCG_LMCE_P);
1548 }
1549
/* Fill str (at least 48 bytes) with the host CPU's model string.
 *
 * The model string is returned by CPUID leaves 0x80000002..0x80000004,
 * 16 bytes per leaf in EAX/EBX/ECX/EDX order.  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4];
    int i;

    for (i = 0; i < 3; i++) {
        regs[0] = regs[1] = regs[2] = regs[3] = 0;
        host_cpuid(0x80000002 + i, 0, &regs[0], &regs[1], &regs[2], &regs[3]);
        /* regs[] is contiguous, so one 16-byte copy equals four 4-byte ones */
        memcpy(str + i * 16, regs, sizeof(regs));
    }
    return 0;
}
1564
1565 static X86CPUDefinition host_cpudef;
1566
/* qdev properties specific to the "host" CPU model
 * (installed on the class by host_x86_cpu_class_init below).
 */
static Property host_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1572
1573 /* class_init for the "host" CPU model
1574 *
1575 * This function may be called before KVM is initialized.
1576 */
1577 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1578 {
1579 DeviceClass *dc = DEVICE_CLASS(oc);
1580 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1581 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1582
1583 xcc->kvm_required = true;
1584
1585 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1586 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1587
1588 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1589 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1590 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1591 host_cpudef.stepping = eax & 0x0F;
1592
1593 cpu_x86_fill_model_id(host_cpudef.model_id);
1594
1595 xcc->cpu_def = &host_cpudef;
1596
1597 /* level, xlevel, xlevel2, and the feature words are initialized on
1598 * instance_init, because they require KVM to be initialized.
1599 */
1600
1601 dc->props = host_x86_cpu_properties;
1602 /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
1603 dc->cannot_destroy_with_object_finalize_yet = true;
1604 }
1605
1606 static void host_x86_cpu_initfn(Object *obj)
1607 {
1608 X86CPU *cpu = X86_CPU(obj);
1609 CPUX86State *env = &cpu->env;
1610 KVMState *s = kvm_state;
1611
1612 /* We can't fill the features array here because we don't know yet if
1613 * "migratable" is true or false.
1614 */
1615 cpu->host_features = true;
1616
1617 /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1618 if (kvm_enabled()) {
1619 env->cpuid_min_level =
1620 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1621 env->cpuid_min_xlevel =
1622 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1623 env->cpuid_min_xlevel2 =
1624 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1625
1626 if (lmce_supported()) {
1627 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1628 }
1629 }
1630
1631 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1632 }
1633
/* QOM type registration for the KVM-only "host" CPU model */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1640
1641 #endif
1642
1643 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1644 {
1645 FeatureWordInfo *f = &feature_word_info[w];
1646 int i;
1647
1648 for (i = 0; i < 32; ++i) {
1649 if ((1UL << i) & mask) {
1650 const char *reg = get_register_name_32(f->cpuid_reg);
1651 assert(reg);
1652 fprintf(stderr, "warning: %s doesn't support requested feature: "
1653 "CPUID.%02XH:%s%s%s [bit %d]\n",
1654 kvm_enabled() ? "host" : "TCG",
1655 f->cpuid_eax, reg,
1656 f->feat_names[i] ? "." : "",
1657 f->feat_names[i] ? f->feat_names[i] : "", i);
1658 }
1659 }
1660 }
1661
1662 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1663 const char *name, void *opaque,
1664 Error **errp)
1665 {
1666 X86CPU *cpu = X86_CPU(obj);
1667 CPUX86State *env = &cpu->env;
1668 int64_t value;
1669
1670 value = (env->cpuid_version >> 8) & 0xf;
1671 if (value == 0xf) {
1672 value += (env->cpuid_version >> 20) & 0xff;
1673 }
1674 visit_type_int(v, name, &value, errp);
1675 }
1676
1677 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1678 const char *name, void *opaque,
1679 Error **errp)
1680 {
1681 X86CPU *cpu = X86_CPU(obj);
1682 CPUX86State *env = &cpu->env;
1683 const int64_t min = 0;
1684 const int64_t max = 0xff + 0xf;
1685 Error *local_err = NULL;
1686 int64_t value;
1687
1688 visit_type_int(v, name, &value, &local_err);
1689 if (local_err) {
1690 error_propagate(errp, local_err);
1691 return;
1692 }
1693 if (value < min || value > max) {
1694 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1695 name ? name : "null", value, min, max);
1696 return;
1697 }
1698
1699 env->cpuid_version &= ~0xff00f00;
1700 if (value > 0x0f) {
1701 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1702 } else {
1703 env->cpuid_version |= value << 8;
1704 }
1705 }
1706
1707 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1708 const char *name, void *opaque,
1709 Error **errp)
1710 {
1711 X86CPU *cpu = X86_CPU(obj);
1712 CPUX86State *env = &cpu->env;
1713 int64_t value;
1714
1715 value = (env->cpuid_version >> 4) & 0xf;
1716 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1717 visit_type_int(v, name, &value, errp);
1718 }
1719
1720 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1721 const char *name, void *opaque,
1722 Error **errp)
1723 {
1724 X86CPU *cpu = X86_CPU(obj);
1725 CPUX86State *env = &cpu->env;
1726 const int64_t min = 0;
1727 const int64_t max = 0xff;
1728 Error *local_err = NULL;
1729 int64_t value;
1730
1731 visit_type_int(v, name, &value, &local_err);
1732 if (local_err) {
1733 error_propagate(errp, local_err);
1734 return;
1735 }
1736 if (value < min || value > max) {
1737 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1738 name ? name : "null", value, min, max);
1739 return;
1740 }
1741
1742 env->cpuid_version &= ~0xf00f0;
1743 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1744 }
1745
1746 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1747 const char *name, void *opaque,
1748 Error **errp)
1749 {
1750 X86CPU *cpu = X86_CPU(obj);
1751 CPUX86State *env = &cpu->env;
1752 int64_t value;
1753
1754 value = env->cpuid_version & 0xf;
1755 visit_type_int(v, name, &value, errp);
1756 }
1757
1758 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1759 const char *name, void *opaque,
1760 Error **errp)
1761 {
1762 X86CPU *cpu = X86_CPU(obj);
1763 CPUX86State *env = &cpu->env;
1764 const int64_t min = 0;
1765 const int64_t max = 0xf;
1766 Error *local_err = NULL;
1767 int64_t value;
1768
1769 visit_type_int(v, name, &value, &local_err);
1770 if (local_err) {
1771 error_propagate(errp, local_err);
1772 return;
1773 }
1774 if (value < min || value > max) {
1775 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1776 name ? name : "null", value, min, max);
1777 return;
1778 }
1779
1780 env->cpuid_version &= ~0xf;
1781 env->cpuid_version |= value & 0xf;
1782 }
1783
1784 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1785 {
1786 X86CPU *cpu = X86_CPU(obj);
1787 CPUX86State *env = &cpu->env;
1788 char *value;
1789
1790 value = g_malloc(CPUID_VENDOR_SZ + 1);
1791 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1792 env->cpuid_vendor3);
1793 return value;
1794 }
1795
1796 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1797 Error **errp)
1798 {
1799 X86CPU *cpu = X86_CPU(obj);
1800 CPUX86State *env = &cpu->env;
1801 int i;
1802
1803 if (strlen(value) != CPUID_VENDOR_SZ) {
1804 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1805 return;
1806 }
1807
1808 env->cpuid_vendor1 = 0;
1809 env->cpuid_vendor2 = 0;
1810 env->cpuid_vendor3 = 0;
1811 for (i = 0; i < 4; i++) {
1812 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1813 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1814 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1815 }
1816 }
1817
1818 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1819 {
1820 X86CPU *cpu = X86_CPU(obj);
1821 CPUX86State *env = &cpu->env;
1822 char *value;
1823 int i;
1824
1825 value = g_malloc(48 + 1);
1826 for (i = 0; i < 48; i++) {
1827 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1828 }
1829 value[48] = '\0';
1830 return value;
1831 }
1832
1833 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1834 Error **errp)
1835 {
1836 X86CPU *cpu = X86_CPU(obj);
1837 CPUX86State *env = &cpu->env;
1838 int c, len, i;
1839
1840 if (model_id == NULL) {
1841 model_id = "";
1842 }
1843 len = strlen(model_id);
1844 memset(env->cpuid_model, 0, 48);
1845 for (i = 0; i < 48; i++) {
1846 if (i >= len) {
1847 c = '\0';
1848 } else {
1849 c = (uint8_t)model_id[i];
1850 }
1851 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1852 }
1853 }
1854
1855 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1856 void *opaque, Error **errp)
1857 {
1858 X86CPU *cpu = X86_CPU(obj);
1859 int64_t value;
1860
1861 value = cpu->env.tsc_khz * 1000;
1862 visit_type_int(v, name, &value, errp);
1863 }
1864
1865 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1866 void *opaque, Error **errp)
1867 {
1868 X86CPU *cpu = X86_CPU(obj);
1869 const int64_t min = 0;
1870 const int64_t max = INT64_MAX;
1871 Error *local_err = NULL;
1872 int64_t value;
1873
1874 visit_type_int(v, name, &value, &local_err);
1875 if (local_err) {
1876 error_propagate(errp, local_err);
1877 return;
1878 }
1879 if (value < min || value > max) {
1880 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1881 name ? name : "null", value, min, max);
1882 return;
1883 }
1884
1885 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1886 }
1887
1888 /* Generic getter for "feature-words" and "filtered-features" properties */
1889 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
1890 const char *name, void *opaque,
1891 Error **errp)
1892 {
1893 uint32_t *array = (uint32_t *)opaque;
1894 FeatureWord w;
1895 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1896 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1897 X86CPUFeatureWordInfoList *list = NULL;
1898
1899 for (w = 0; w < FEATURE_WORDS; w++) {
1900 FeatureWordInfo *wi = &feature_word_info[w];
1901 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1902 qwi->cpuid_input_eax = wi->cpuid_eax;
1903 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1904 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1905 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1906 qwi->features = array[w];
1907
1908 /* List will be in reverse order, but order shouldn't matter */
1909 list_entries[w].next = list;
1910 list_entries[w].value = &word_infos[w];
1911 list = &list_entries[w];
1912 }
1913
1914 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
1915 }
1916
1917 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1918 void *opaque, Error **errp)
1919 {
1920 X86CPU *cpu = X86_CPU(obj);
1921 int64_t value = cpu->hyperv_spinlock_attempts;
1922
1923 visit_type_int(v, name, &value, errp);
1924 }
1925
1926 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1927 void *opaque, Error **errp)
1928 {
1929 const int64_t min = 0xFFF;
1930 const int64_t max = UINT_MAX;
1931 X86CPU *cpu = X86_CPU(obj);
1932 Error *err = NULL;
1933 int64_t value;
1934
1935 visit_type_int(v, name, &value, &err);
1936 if (err) {
1937 error_propagate(errp, err);
1938 return;
1939 }
1940
1941 if (value < min || value > max) {
1942 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1943 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1944 object_get_typename(obj), name ? name : "null",
1945 value, min, max);
1946 return;
1947 }
1948 cpu->hyperv_spinlock_attempts = value;
1949 }
1950
/* qdev property type backing the "hv-spinlocks" CPU property; wires the
 * visitor-based getter/setter above into the property framework. */
static PropertyInfo qdev_prop_spinlocks = {
    .name  = "int",
    .get   = x86_get_hv_spinlocks,
    .set   = x86_set_hv_spinlocks,
};
1956
/* Convert all '_' in a feature string option name to '-', in place, so the
 * name conforms to the QOM property naming rule ('-' instead of '_').
 */
static inline void feat2prop(char *s)
{
    char *p = s;

    while ((p = strchr(p, '_')) != NULL) {
        *p++ = '-';
    }
}
1966
/* Compatibility hack to maintain the legacy +feat/-feat semantics, where
 * +-feat overwrites any feature set by feat=on|off even if the latter is
 * parsed after +-feat (i.e. "-x2apic,x2apic=on" results in x2apic disabled).
 * Bits accumulated here are applied on top of the parsed properties later.
 */
static FeatureWordArray plus_features = { 0 };
static FeatureWordArray minus_features = { 0 };
1974
/* Parse "+feature,-feature,feature=foo" CPU feature string.
 *
 * Each "feature=value" (or bare "feature", meaning "feature=on") entry is
 * registered as a global property on 'typename', so it applies to every
 * CPU created afterwards.  Legacy "+feat"/"-feat" entries are accumulated
 * into plus_features/minus_features instead.  Only the first call does any
 * work; subsequent calls return immediately (run-once guard below).
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    Error *local_err = NULL;
    static bool cpu_globals_initialized;

    /* Global properties must not be registered twice */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    /* NOTE(review): strtok() is not reentrant and mutates 'features';
     * this assumes feature-string parsing is single-threaded — confirm. */
    for (featurestr = strtok(features, ",");
         featurestr && !local_err;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
            continue;
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
            continue;
        }

        /* Split "name=value"; a bare name means "name=on" */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Special case: "tsc-freq" accepts size suffixes and is mapped to
         * the "tsc-frequency" property with a plain decimal value. */
        if (!strcmp(name, "tsc-freq")) {
            int64_t tsc_freq;
            char *err;

            tsc_freq = qemu_strtosz_suffix_unit(val, &err,
                                           QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
            if (tsc_freq < 0 || *err) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        /* Register the pair as a global property; any later failure to
         * apply it is fatal (prop->errp = &error_fatal). */
        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (local_err) {
        error_propagate(errp, local_err);
    }
}
2050
2051 /* Print all cpuid feature names in featureset
2052 */
2053 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2054 {
2055 int bit;
2056 bool first = true;
2057
2058 for (bit = 0; bit < 32; bit++) {
2059 if (featureset[bit]) {
2060 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2061 first = false;
2062 }
2063 }
2064 }
2065
/* generate CPU information: print one line per built-in CPU model
 * ("x86 <name> <model-id>"), the KVM-only "host" model when built with
 * KVM support, and then every recognized CPUID flag name per feature word.
 */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    X86CPUDefinition *def;
    char buf[256];
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        def = &builtin_x86_defs[i];
        snprintf(buf, sizeof(buf), "%s", def->name);
        (*cpu_fprintf)(f, "x86 %16s  %-48s\n", buf, def->model_id);
    }
#ifdef CONFIG_KVM
    /* "-cpu host" exists only in KVM builds */
    (*cpu_fprintf)(f, "x86 %16s  %-48s\n", "host",
                   "KVM processor with all supported host features "
                   "(only available in KVM mode)");
#endif

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        (*cpu_fprintf)(f, "  ");
        listflags(f, cpu_fprintf, fw->feat_names);
        (*cpu_fprintf)(f, "\n");
    }
}
2093
2094 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2095 {
2096 CpuDefinitionInfoList *cpu_list = NULL;
2097 X86CPUDefinition *def;
2098 int i;
2099
2100 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2101 CpuDefinitionInfoList *entry;
2102 CpuDefinitionInfo *info;
2103
2104 def = &builtin_x86_defs[i];
2105 info = g_malloc0(sizeof(*info));
2106 info->name = g_strdup(def->name);
2107
2108 entry = g_malloc0(sizeof(*entry));
2109 entry->value = info;
2110 entry->next = cpu_list;
2111 cpu_list = entry;
2112 }
2113
2114 return cpu_list;
2115 }
2116
/* Return the bits of feature word 'w' that the current accelerator can
 * provide: KVM asks the kernel via GET_SUPPORTED_CPUID, TCG uses its
 * static tcg_features mask, and any other accelerator gets no filtering
 * at all (~0).  When 'migratable_only' is set, the result is additionally
 * masked with x86_cpu_get_migratable_flags().
 */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
2137
2138 /*
2139 * Filters CPU feature words based on host availability of each feature.
2140 *
2141 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2142 */
2143 static int x86_cpu_filter_features(X86CPU *cpu)
2144 {
2145 CPUX86State *env = &cpu->env;
2146 FeatureWord w;
2147 int rv = 0;
2148
2149 for (w = 0; w < FEATURE_WORDS; w++) {
2150 uint32_t host_feat =
2151 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2152 uint32_t requested_features = env->features[w];
2153 env->features[w] &= host_feat;
2154 cpu->filtered_features[w] = requested_features & ~env->features[w];
2155 if (cpu->filtered_features[w]) {
2156 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2157 report_unavailable_features(w, cpu->filtered_features[w]);
2158 }
2159 rv = 1;
2160 }
2161 }
2162
2163 return rv;
2164 }
2165
2166 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2167 {
2168 PropValue *pv;
2169 for (pv = props; pv->prop; pv++) {
2170 if (!pv->value) {
2171 continue;
2172 }
2173 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2174 &error_abort);
2175 }
2176 }
2177
/* Load data from X86CPUDefinition.
 *
 * Copies the model definition's CPUID level floors, family/model/stepping,
 * model-id, feature words and vendor string onto 'cpu' via QOM properties.
 * Errors from the individual property sets accumulate in *errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        /* Without an in-kernel irqchip the in-kernel x2apic default
         * cannot work, so flip it off before applying KVM defaults. */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    /* Guests always see the hypervisor bit set in CPUID[1].ECX */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        /* Under KVM, default to the host's vendor string */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2228
/* Convenience wrapper: create and realize a TYPE_X86_CPU from a CPU model
 * string via cpu_generic_init(). */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
2233
/* class_init for the per-model CPU types: stash the model definition
 * (passed as class_data) on the class so realize can use it later. */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
}
2241
2242 static void x86_register_cpudef_type(X86CPUDefinition *def)
2243 {
2244 char *typename = x86_cpu_type_name(def->name);
2245 TypeInfo ti = {
2246 .name = typename,
2247 .parent = TYPE_X86_CPU,
2248 .class_init = x86_cpu_cpudef_class_init,
2249 .class_data = def,
2250 };
2251
2252 type_register(&ti);
2253 g_free(typename);
2254 }
2255
2256 #if !defined(CONFIG_USER_ONLY)
2257
/* Strip the APIC feature bit from CPUID[1].EDX (for boards that cannot
 * provide a local APIC). */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2262
2263 #endif /* !CONFIG_USER_ONLY */
2264
/* Compute the guest-visible CPUID result for leaf 'index', subleaf
 * 'count', writing it through eax/ebx/ecx/edx (every branch sets all
 * four outputs).  'index' is first clamped against the configured
 * maximum basic, extended or Centaur leaf.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, APIC ID and basic feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE mirrors CR4.OSXSAVE, so it is computed at read time */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            /* QEMU has its own core/thread topology; drop the host's
             * "cores per package" field (bits 31..26). */
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                /* L3 is shared by the whole package */
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE mirrors CR4.PKE, so it is computed at read time */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        uint64_t ena_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        /* Components enabled for this CPU: x87 FP and SSE are always
         * present; the rest depend on the configured feature words. */
        ena_mask = (XSTATE_FP_MASK | XSTATE_SSE_MASK);
        for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
            const ExtSaveArea *esa = &x86_ext_save_areas[i];
            if (env->features[esa->feature] & esa->bits) {
                ena_mask |= (1ULL << i);
            }
        }

        if (kvm_enabled()) {
            /* Mask off components the host kernel cannot save/restore;
             * the supported mask is split across CPUID[0xD,0].EDX:EAX. */
            KVMState *s = cs->kvm_state;
            uint64_t kvm_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
            kvm_mask <<= 32;
            kvm_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
            ena_mask &= kvm_mask;
        }

        if (count == 0) {
            /* ECX: size of the area needed for the enabled components;
             * starts at 0x240 (legacy FXSAVE region + XSAVE header). */
            *ecx = 0x240;
            for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
                const ExtSaveArea *esa = &x86_ext_save_areas[i];
                if ((ena_mask >> i) & 1) {
                    if (i < 32) {
                        *eax |= 1u << i;
                    } else {
                        *edx |= 1u << (i - 32);
                    }
                    *ecx = MAX(*ecx, esa->offset + esa->size);
                }
            }
            *eax |= ena_mask & (XSTATE_FP_MASK | XSTATE_SSE_MASK);
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component sub-leaf: size and offset of component
             * 'count' within the XSAVE area, if it is enabled. */
            const ExtSaveArea *esa = &x86_ext_save_areas[count];
            if ((ena_mask >> count) & 1) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor, 48 bits virtual, configurable
             * physical bits.
             */
            *eax = 0x00003000 + cpu->phys_bits;
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2688
/* CPUClass::reset(): bring the CPU to its architectural RESET state. */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero everything up to (but excluding) end_reset_fields; fields
     * after that marker survive reset. */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + EIP 0xfff0 puts the first fetch at the
     * conventional reset vector 0xfffffff0. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init: all tags empty, default control word */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Non-BSP (application) processors start halted */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2812
2813 #ifndef CONFIG_USER_ONLY
/* Return true if this CPU is the bootstrap processor, as indicated by the
 * BSP bit of its APIC base MSR. */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
2818
/* Machine-reset callback: reset the CPU passed as 'opaque'.
 * TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
2825 #endif
2826
2827 static void mce_init(X86CPU *cpu)
2828 {
2829 CPUX86State *cenv = &cpu->env;
2830 unsigned int bank;
2831
2832 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2833 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2834 (CPUID_MCE | CPUID_MCA)) {
2835 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2836 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2837 cenv->mcg_ctl = ~(uint64_t)0;
2838 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2839 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2840 }
2841 }
2842 }
2843
2844 #ifndef CONFIG_USER_ONLY
/* Create the local APIC device for 'cpu', choosing the implementation
 * matching the accelerator (in-kernel KVM APIC, Xen APIC, or the fully
 * emulated "apic"), and attach it as the "lapic" child of the CPU object.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = DEVICE(object_new(apic_type));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* The CPU holds its reference through the child property; drop ours */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
2868
/* Realize the CPU's APIC device (if one was created) and, for the first
 * APIC only, map the shared APIC MMIO region into system memory.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    /* The MMIO region is shared between all APICs; map it only once */
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
2891
/* machine-done notifier: if the machine exposes an SMRAM region
 * ("/machine/smram"), alias it into this CPU's address space, initially
 * disabled (enabled later when the CPU enters SMM elsewhere).
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
2906 #else
/* No APIC device model in user-mode emulation: nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
2910 #endif
2911
2912 /* Note: Only safe for use on x86(-64) hosts */
2913 static uint32_t x86_host_phys_bits(void)
2914 {
2915 uint32_t eax;
2916 uint32_t host_phys_bits;
2917
2918 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
2919 if (eax >= 0x80000008) {
2920 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
2921 /* Note: According to AMD doc 25481 rev 2.34 they have a field
2922 * at 23:16 that can specify a maximum physical address bits for
2923 * the guest that can override this value; but I've not seen
2924 * anything with that set.
2925 */
2926 host_phys_bits = eax & 0xff;
2927 } else {
2928 /* It's an odd 64 bit machine that doesn't have the leaf for
2929 * physical address bits; fall back to 36 that's most older
2930 * Intel.
2931 */
2932 host_phys_bits = 36;
2933 }
2934
2935 return host_phys_bits;
2936 }
2937
2938 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
2939 {
2940 if (*min < value) {
2941 *min = value;
2942 }
2943 }
2944
/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate:
 * if any bit of feature word 'w' is enabled, raise the level floor of the
 * CPUID region (basic/extended/Centaur) that the word's leaf belongs to.
 */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid_eax;
    uint32_t region = eax & 0xF0000000;

    /* Nothing to do if no feature in this word is enabled */
    if (!env->features[w]) {
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
    break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
    break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
    break;
    }
}
2969
/* True if the guest's CPUID vendor words spell "GenuineIntel" */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
/* True if the guest's CPUID vendor words spell "AuthenticAMD" */
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/*
 * Realize the X86CPU device: resolve feature words, auto-adjust CPUID
 * levels, validate/derive the physical address width, create the APIC
 * and per-CPU address space where applicable, then start the vCPU.
 *
 * Errors are reported through @errp; on failure the CPU is left
 * unrealized.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;
    FeatureWord w;

    /* e.g. the "host" CPU model is meaningless without KVM */
    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /*TODO: cpu->host_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->host_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Apply "+feat"/"-feat" overrides from the -cpu command line. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        cpu->env.features[w] |= plus_features[w];
        cpu->env.features[w] &= ~minus_features[w];
    }


    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        /* Raise cpuid_min_*level so every enabled feature word is
         * reachable via its CPUID leaf.
         */
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

    /* With "enforce", refuse to start if the accelerator had to drop
     * any requested feature; otherwise features are silently filtered.
     */
    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                                 " does not match phys-bits property (%u)",
                                 host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_init(cs, &error_abort);

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed if the CPUID advertises one or SMP is in use. */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram.  */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3204
3205 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3206 {
3207 X86CPU *cpu = X86_CPU(dev);
3208
3209 #ifndef CONFIG_USER_ONLY
3210 cpu_remove_sync(CPU(dev));
3211 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3212 #endif
3213
3214 if (cpu->apic_state) {
3215 object_unparent(OBJECT(cpu->apic_state));
3216 cpu->apic_state = NULL;
3217 }
3218 }
3219
/*
 * Backing state for a boolean feature-bit property: the feature word it
 * controls and the bit mask within that word.  The mask may cover more
 * than one bit when the same property name is registered several times
 * (see x86_cpu_register_bit_prop()).
 */
typedef struct BitProperty {
    uint32_t *ptr;  /* feature word the property reads/writes */
    uint32_t mask;  /* bit(s) within *ptr controlled by the property */
} BitProperty;
3224
3225 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3226 void *opaque, Error **errp)
3227 {
3228 BitProperty *fp = opaque;
3229 bool value = (*fp->ptr & fp->mask) == fp->mask;
3230 visit_type_bool(v, name, &value, errp);
3231 }
3232
3233 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3234 void *opaque, Error **errp)
3235 {
3236 DeviceState *dev = DEVICE(obj);
3237 BitProperty *fp = opaque;
3238 Error *local_err = NULL;
3239 bool value;
3240
3241 if (dev->realized) {
3242 qdev_prop_set_after_realize(dev, name, errp);
3243 return;
3244 }
3245
3246 visit_type_bool(v, name, &value, &local_err);
3247 if (local_err) {
3248 error_propagate(errp, local_err);
3249 return;
3250 }
3251
3252 if (value) {
3253 *fp->ptr |= fp->mask;
3254 } else {
3255 *fp->ptr &= ~fp->mask;
3256 }
3257 }
3258
3259 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3260 void *opaque)
3261 {
3262 BitProperty *prop = opaque;
3263 g_free(prop);
3264 }
3265
3266 /* Register a boolean property to get/set a single bit in a uint32_t field.
3267 *
3268 * The same property name can be registered multiple times to make it affect
3269 * multiple bits in the same FeatureWord. In that case, the getter will return
3270 * true only if all bits are set.
3271 */
3272 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3273 const char *prop_name,
3274 uint32_t *field,
3275 int bitnr)
3276 {
3277 BitProperty *fp;
3278 ObjectProperty *op;
3279 uint32_t mask = (1UL << bitnr);
3280
3281 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3282 if (op) {
3283 fp = op->opaque;
3284 assert(fp->ptr == field);
3285 fp->mask |= mask;
3286 } else {
3287 fp = g_new0(BitProperty, 1);
3288 fp->ptr = field;
3289 fp->mask = mask;
3290 object_property_add(OBJECT(cpu), prop_name, "bool",
3291 x86_cpu_get_bit_prop,
3292 x86_cpu_set_bit_prop,
3293 x86_cpu_release_bit_prop, fp, &error_abort);
3294 }
3295 }
3296
3297 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3298 FeatureWord w,
3299 int bitnr)
3300 {
3301 Object *obj = OBJECT(cpu);
3302 int i;
3303 char **names;
3304 FeatureWordInfo *fi = &feature_word_info[w];
3305
3306 if (!fi->feat_names[bitnr]) {
3307 return;
3308 }
3309
3310 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3311
3312 feat2prop(names[0]);
3313 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3314
3315 for (i = 1; names[i]; i++) {
3316 feat2prop(names[i]);
3317 object_property_add_alias(obj, names[i], obj, names[0],
3318 &error_abort);
3319 }
3320
3321 g_strfreev(names);
3322 }
3323
/*
 * Instance init for TYPE_X86_CPU: register the per-CPU QOM properties
 * (version fields, vendor/model-id strings, feature-word introspection,
 * and one boolean property per named feature bit), then load the model
 * definition chosen by the concrete subclass.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* CPUID version fields exposed as integer properties. */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only introspection of enabled and filtered feature words. */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Apply the CPU model definition attached to the concrete class. */
    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3371
3372 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3373 {
3374 X86CPU *cpu = X86_CPU(cs);
3375
3376 return cpu->apic_id;
3377 }
3378
3379 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3380 {
3381 X86CPU *cpu = X86_CPU(cs);
3382
3383 return cpu->env.cr[0] & CR0_PG_MASK;
3384 }
3385
3386 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3387 {
3388 X86CPU *cpu = X86_CPU(cs);
3389
3390 cpu->env.eip = value;
3391 }
3392
3393 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3394 {
3395 X86CPU *cpu = X86_CPU(cs);
3396
3397 cpu->env.eip = tb->pc - tb->cs_base;
3398 }
3399
3400 static bool x86_cpu_has_work(CPUState *cs)
3401 {
3402 X86CPU *cpu = X86_CPU(cs);
3403 CPUX86State *env = &cpu->env;
3404
3405 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3406 CPU_INTERRUPT_POLL)) &&
3407 (env->eflags & IF_MASK)) ||
3408 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3409 CPU_INTERRUPT_INIT |
3410 CPU_INTERRUPT_SIPI |
3411 CPU_INTERRUPT_MCE)) ||
3412 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3413 !(env->hflags & HF_SMM_MASK));
3414 }
3415
/* qdev properties common to all x86 CPU models. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* System emulation: topology IDs must be assigned before realize. */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments (KVM guests running Windows). */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    /* Physical address width control (see x86_cpu_realizefn()). */
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* CPUID level overrides; UINT32_MAX means "derive from min-*level". */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_END_OF_LIST()
};
3459
/*
 * Class init for TYPE_X86_CPU: hook the x86-specific implementations
 * into the DeviceClass and CPUClass vtables, keeping the parent
 * realize/reset handlers so they can be chained.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Save parent handlers before overriding, so ours can chain to them. */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    dc->cannot_instantiate_with_device_add_yet = false;
    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
3512
/* Abstract base QOM type; concrete CPU models are registered below. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,  /* instantiate only named model subtypes */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3522
3523 static void x86_cpu_register_types(void)
3524 {
3525 int i;
3526
3527 type_register_static(&x86_cpu_type_info);
3528 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3529 x86_register_cpudef_type(&builtin_x86_defs[i]);
3530 }
3531 #ifdef CONFIG_KVM
3532 type_register_static(&host_x86_cpu_type_info);
3533 #endif
3534 }
3535
3536 type_init(x86_cpu_register_types)