]> git.proxmox.com Git - mirror_qemu.git/blob - target-i386/cpu.c
target-i386: xsave: Simplify CPUID[0xD,0].{EAX,EDX} calculation
[mirror_qemu.git] / target-i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
53 /* Cache topology CPUID constants: */
54
55 /* CPUID Leaf 2 Descriptors */
56
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
61
62
63 /* CPUID Leaf 4 constants: */
64
65 /* EAX: */
66 #define CPUID_4_TYPE_DCACHE 1
67 #define CPUID_4_TYPE_ICACHE 2
68 #define CPUID_4_TYPE_UNIFIED 3
69
70 #define CPUID_4_LEVEL(l) ((l) << 5)
71
72 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
73 #define CPUID_4_FULLY_ASSOC (1 << 9)
74
75 /* EDX: */
76 #define CPUID_4_NO_INVD_SHARING (1 << 0)
77 #define CPUID_4_INCLUSIVE (1 << 1)
78 #define CPUID_4_COMPLEX_IDX (1 << 2)
79
/* Sentinel way-count meaning "fully associative" */
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006: */
/* Maps a raw way count to the 4-bit encoded field; way counts with no
 * defined encoding map to 0 ("invalid").
 * NOTE(review): the argument is evaluated many times — only pass
 * side-effect-free expressions.
 */
#define AMD_ENC_ASSOC(a) (a <=   1 ? a   : \
                          a ==   2 ? 0x2 : \
                          a ==   4 ? 0x4 : \
                          a ==   8 ? 0x6 : \
                          a ==  16 ? 0x8 : \
                          a ==  32 ? 0xA : \
                          a ==  48 ? 0xB : \
                          a ==  64 ? 0xC : \
                          a ==  96 ? 0xD : \
                          a == 128 ? 0xE : \
                          a == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
95
96
97 /* Definitions of the hardcoded cache entries we expose: */
98
99 /* L1 data cache: */
100 #define L1D_LINE_SIZE 64
101 #define L1D_ASSOCIATIVITY 8
102 #define L1D_SETS 64
103 #define L1D_PARTITIONS 1
104 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
105 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
106 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
107 #define L1D_LINES_PER_TAG 1
108 #define L1D_SIZE_KB_AMD 64
109 #define L1D_ASSOCIATIVITY_AMD 2
110
111 /* L1 instruction cache: */
112 #define L1I_LINE_SIZE 64
113 #define L1I_ASSOCIATIVITY 8
114 #define L1I_SETS 64
115 #define L1I_PARTITIONS 1
116 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
117 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
118 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
119 #define L1I_LINES_PER_TAG 1
120 #define L1I_SIZE_KB_AMD 64
121 #define L1I_ASSOCIATIVITY_AMD 2
122
123 /* Level 2 unified cache: */
124 #define L2_LINE_SIZE 64
125 #define L2_ASSOCIATIVITY 16
126 #define L2_SETS 4096
127 #define L2_PARTITIONS 1
128 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
129 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
130 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
131 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
132 #define L2_LINES_PER_TAG 1
133 #define L2_SIZE_KB_AMD 512
134
135 /* Level 3 unified cache: */
136 #define L3_SIZE_KB 0 /* disabled */
137 #define L3_ASSOCIATIVITY 0 /* disabled */
138 #define L3_LINES_PER_TAG 0 /* disabled */
139 #define L3_LINE_SIZE 0 /* disabled */
140 #define L3_N_LINE_SIZE 64
141 #define L3_N_ASSOCIATIVITY 16
142 #define L3_N_SETS 16384
143 #define L3_N_PARTITIONS 1
144 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
145 #define L3_N_LINES_PER_TAG 1
146 #define L3_N_SIZE_KB_AMD 16384
147
148 /* TLB definitions: */
149
150 #define L1_DTLB_2M_ASSOC 1
151 #define L1_DTLB_2M_ENTRIES 255
152 #define L1_DTLB_4K_ASSOC 1
153 #define L1_DTLB_4K_ENTRIES 255
154
155 #define L1_ITLB_2M_ASSOC 1
156 #define L1_ITLB_2M_ENTRIES 255
157 #define L1_ITLB_4K_ASSOC 1
158 #define L1_ITLB_4K_ENTRIES 255
159
160 #define L2_DTLB_2M_ASSOC 0 /* disabled */
161 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
162 #define L2_DTLB_4K_ASSOC 4
163 #define L2_DTLB_4K_ENTRIES 512
164
165 #define L2_ITLB_2M_ASSOC 0 /* disabled */
166 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
167 #define L2_ITLB_4K_ASSOC 4
168 #define L2_ITLB_4K_ENTRIES 512
169
170
171
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
174 {
175 int i;
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
180 }
181 dst[CPUID_VENDOR_SZ] = '\0';
182 }
183
184 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
185 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
186 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
187 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
188 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
189 CPUID_PSE36 | CPUID_FXSR)
190 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
191 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
192 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
193 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
194 CPUID_PAE | CPUID_SEP | CPUID_APIC)
195
196 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
197 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
198 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
199 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
200 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
201 /* partly implemented:
202 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
203 /* missing:
204 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
205 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
206 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
207 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
208 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
209 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
210 /* missing:
211 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
212 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
213 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
214 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
215 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
216
217 #ifdef TARGET_X86_64
218 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
219 #else
220 #define TCG_EXT2_X86_64_FEATURES 0
221 #endif
222
223 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
224 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
225 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
226 TCG_EXT2_X86_64_FEATURES)
227 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
228 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
229 #define TCG_EXT4_FEATURES 0
230 #define TCG_SVM_FEATURES 0
231 #define TCG_KVM_FEATURES 0
232 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
233 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
234 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
235 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
236 CPUID_7_0_EBX_ERMS)
237 /* missing:
238 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
239 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
240 CPUID_7_0_EBX_RDSEED */
241 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
242 #define TCG_APM_FEATURES 0
243 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
244 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
245 /* missing:
246 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
247
/* Per-feature-word metadata: where the word comes from (CPUID leaf and
 * output register), the name of each of its 32 bits, and QEMU's support
 * status for them.  A NULL entry in feat_names means the bit has no
 * recognized name (it cannot be matched by lookup_feature() and is
 * treated as unsupported by x86_cpu_get_migratable_flags()).
 */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
262
/* Master table describing every FeatureWord QEMU knows about.
 * Indexed by the FEAT_* enum; each entry lists bit names in ascending
 * bit order (4 per line, bits 0..31).
 */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        /* CPUID[1].EDX: basic feature flags */
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        /* CPUID[1].ECX: extended feature flags.  '|' separates aliases
         * accepted by the command-line parser (see altcmp()). */
        .feat_names = {
            "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
            "ds_cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1|sse4_1",
            "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx|xd", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb", "rdtscp",
            NULL, "lm|i64", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        /* CPUID[0x80000001].ECX: AMD extended feature flags */
        .feat_names = {
            "lahf_lm", "cmp_legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid_msr",
            NULL, "tbm", "topoext", "perfctr_core",
            "perfctr_nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        /* CPUID[0xC0000001].EDX: VIA/Centaur features */
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        /* KVM paravirt features (CPUID[KVM_CPUID_FEATURES].EAX).
         * NOTE(review): "kvmclock" intentionally appears at both bit 0
         * and bit 3 — presumably the CLOCKSOURCE and CLOCKSOURCE2
         * features share one user-visible name; lookup_feature() sets
         * every bit whose name matches. */
        .feat_names = {
            "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
            "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_HYPERV_EAX] = {
        /* Hyper-V enlightenments; no names registered yet, the
         * commented identifiers document each bit's meaning. */
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        /* CPUID[0x8000000A].EDX: SVM features */
        .feat_names = {
            "npt", "lbrv", "svm_lock", "nrip_save",
            "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause_filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        /* CPUID[EAX=7,ECX=0].EBX: structured extended features */
        .feat_names = {
            "fsgsbase", "tsc_adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", NULL, "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        /* CPUID[EAX=7,ECX=0].ECX */
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        /* CPUID[0x80000007].EDX: APM features; invtsc is never
         * migration-safe, hence unmigratable_flags below. */
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        /* CPUID[EAX=0xD,ECX=1].EAX: XSAVE instruction variants */
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        /* CPUID[6].EAX: thermal/power management */
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
};
493
/* Maps a 32-bit register index (R_* constant) to its printable name and
 * the corresponding QAPI enum value. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
500
/* Expands to a designated initializer tying R_<reg> to its name string
 * and QAPI enum constant. */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* Lookup table for the eight 32-bit GPRs, indexed by R_* constant */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
514
/* Describes one XSAVE extended state component: the feature word/bits
 * that advertise it, and its offset and size within X86XSaveArea. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;
519
/* XSAVE state components, indexed by XSTATE_*_BIT.  Offsets/sizes are
 * derived from the X86XSaveArea layout (standard, non-compacted form). */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
550
551 const char *get_register_name_32(unsigned int reg)
552 {
553 if (reg >= CPU_NB_REGS32) {
554 return NULL;
555 }
556 return x86_reg_info_32[reg].name;
557 }
558
559 /*
560 * Returns the set of feature flags that are supported and migratable by
561 * QEMU, for a given FeatureWord.
562 */
563 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
564 {
565 FeatureWordInfo *wi = &feature_word_info[w];
566 uint32_t r = 0;
567 int i;
568
569 for (i = 0; i < 32; i++) {
570 uint32_t f = 1U << i;
571 /* If the feature name is unknown, it is not supported by QEMU yet */
572 if (!wi->feat_names[i]) {
573 continue;
574 }
575 /* Skip features known to QEMU, but explicitly marked as unmigratable */
576 if (wi->unmigratable_flags & f) {
577 continue;
578 }
579 r |= f;
580 }
581 return r;
582 }
583
584 void host_cpuid(uint32_t function, uint32_t count,
585 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
586 {
587 uint32_t vec[4];
588
589 #ifdef __x86_64__
590 asm volatile("cpuid"
591 : "=a"(vec[0]), "=b"(vec[1]),
592 "=c"(vec[2]), "=d"(vec[3])
593 : "0"(function), "c"(count) : "cc");
594 #elif defined(__i386__)
595 asm volatile("pusha \n\t"
596 "cpuid \n\t"
597 "mov %%eax, 0(%2) \n\t"
598 "mov %%ebx, 4(%2) \n\t"
599 "mov %%ecx, 8(%2) \n\t"
600 "mov %%edx, 12(%2) \n\t"
601 "popa"
602 : : "a"(function), "c"(count), "S"(vec)
603 : "memory", "cc");
604 #else
605 abort();
606 #endif
607
608 if (eax)
609 *eax = vec[0];
610 if (ebx)
611 *ebx = vec[1];
612 if (ecx)
613 *ecx = vec[2];
614 if (edx)
615 *edx = vec[3];
616 }
617
/* Despite the name, true for any non-NUL character OUTSIDE the printable
 * ASCII range '!'..'~' — i.e. space, control chars and bytes > 0x7E.
 * False for NUL and for printable characters. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
619
/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    while (1) {
        /* mismatch or NUL in either operand decides immediately */
        if (!*s1 || !*s2 || *s1 != *s2) {
            return *s1 - *s2;
        }
        s1++;
        s2++;
        /* both bounded substrings exhausted together: equal */
        if (s1 == e1 && s2 == e2) {
            return 0;
        }
        /* only one side exhausted: the other side's next char orders it */
        if (s1 == e1) {
            return *s2;
        }
        if (s2 == e2) {
            return *s1;
        }
    }
}
640
/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right. Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    /* q marks the start of the current alternative, p scans to its end */
    for (q = p = altstr; ; ) {
        while (*p && *p != '|')
            ++p;
        /* empty alternative (q == p) matches only an empty s;
         * otherwise fall back to bounded substring compare */
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
            return (0);
        if (!*p)
            return (1);      /* no more alternatives: no match */
        else
            q = ++p;         /* skip the '|' and try the next one */
    }
}
661
/* search featureset for flag *[s..e), if found set corresponding bit in
 * *pval and return true, otherwise return false
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    bool found = false;
    int bit;

    /* A name may match several entries (aliases/duplicates): set every
     * matching bit rather than stopping at the first hit. */
    for (bit = 0; bit < 32; bit++) {
        const char *name = featureset[bit];

        if (name && !altcmp(s, e, name)) {
            *pval |= 1U << bit;
            found = true;
        }
    }
    return found;
}
680
/* Set the bit(s) named @flagname in the feature-word array @words.
 * Words are searched in FEAT_* order and the first word containing a
 * matching name wins (lookup_feature() sets the bit(s) in that word).
 * If no word knows the name, an error is reported through @errp.
 */
static void add_flagname_to_bitmaps(const char *flagname,
                                    FeatureWordArray words,
                                    Error **errp)
{
    FeatureWord w;
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        if (lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
            break;
        }
    }
    /* w only equals FEATURE_WORDS when the loop found no match */
    if (w == FEATURE_WORDS) {
        error_setg(errp, "CPU feature %s not found", flagname);
    }
}
696
697 /* CPU class name definitions: */
698
699 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
700 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
701
/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    /* "<model>" + X86_CPU_TYPE_SUFFIX, e.g. "qemu64-<TYPE_X86_CPU>" */
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
709
710 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
711 {
712 ObjectClass *oc;
713 char *typename;
714
715 if (cpu_model == NULL) {
716 return NULL;
717 }
718
719 typename = x86_cpu_type_name(cpu_model);
720 oc = object_class_by_name(typename);
721 g_free(typename);
722 return oc;
723 }
724
/* Recover the bare CPU model name by stripping X86_CPU_TYPE_SUFFIX from
 * the QOM class name (asserted to be present).
 * Caller must g_free() the returned string.
 */
static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
{
    const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
    assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
    return g_strndup(class_name,
                     strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
}
732
/* Static definition of a built-in CPU model (see builtin_x86_defs) */
struct X86CPUDefinition {
    const char *name;       /* model name, e.g. "qemu64" */
    uint32_t level;         /* max basic CPUID leaf (CPUID[0].EAX) */
    uint32_t xlevel;        /* max extended CPUID leaf, e.g. 0x80000008 */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;  /* initial feature bits, per FeatureWord */
    char model_id[48];          /* human-readable model ID string */
};
745
746 static X86CPUDefinition builtin_x86_defs[] = {
747 {
748 .name = "qemu64",
749 .level = 0xd,
750 .vendor = CPUID_VENDOR_AMD,
751 .family = 6,
752 .model = 6,
753 .stepping = 3,
754 .features[FEAT_1_EDX] =
755 PPRO_FEATURES |
756 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
757 CPUID_PSE36,
758 .features[FEAT_1_ECX] =
759 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
760 .features[FEAT_8000_0001_EDX] =
761 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
762 .features[FEAT_8000_0001_ECX] =
763 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
764 .xlevel = 0x8000000A,
765 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
766 },
767 {
768 .name = "phenom",
769 .level = 5,
770 .vendor = CPUID_VENDOR_AMD,
771 .family = 16,
772 .model = 2,
773 .stepping = 3,
774 /* Missing: CPUID_HT */
775 .features[FEAT_1_EDX] =
776 PPRO_FEATURES |
777 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
778 CPUID_PSE36 | CPUID_VME,
779 .features[FEAT_1_ECX] =
780 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
781 CPUID_EXT_POPCNT,
782 .features[FEAT_8000_0001_EDX] =
783 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
784 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
785 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
786 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
787 CPUID_EXT3_CR8LEG,
788 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
789 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
790 .features[FEAT_8000_0001_ECX] =
791 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
792 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
793 /* Missing: CPUID_SVM_LBRV */
794 .features[FEAT_SVM] =
795 CPUID_SVM_NPT,
796 .xlevel = 0x8000001A,
797 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
798 },
799 {
800 .name = "core2duo",
801 .level = 10,
802 .vendor = CPUID_VENDOR_INTEL,
803 .family = 6,
804 .model = 15,
805 .stepping = 11,
806 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
807 .features[FEAT_1_EDX] =
808 PPRO_FEATURES |
809 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
810 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
811 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
812 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
813 .features[FEAT_1_ECX] =
814 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
815 CPUID_EXT_CX16,
816 .features[FEAT_8000_0001_EDX] =
817 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
818 .features[FEAT_8000_0001_ECX] =
819 CPUID_EXT3_LAHF_LM,
820 .xlevel = 0x80000008,
821 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
822 },
823 {
824 .name = "kvm64",
825 .level = 0xd,
826 .vendor = CPUID_VENDOR_INTEL,
827 .family = 15,
828 .model = 6,
829 .stepping = 1,
830 /* Missing: CPUID_HT */
831 .features[FEAT_1_EDX] =
832 PPRO_FEATURES | CPUID_VME |
833 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
834 CPUID_PSE36,
835 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
836 .features[FEAT_1_ECX] =
837 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
838 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
839 .features[FEAT_8000_0001_EDX] =
840 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
841 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
842 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
843 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
844 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
845 .features[FEAT_8000_0001_ECX] =
846 0,
847 .xlevel = 0x80000008,
848 .model_id = "Common KVM processor"
849 },
850 {
851 .name = "qemu32",
852 .level = 4,
853 .vendor = CPUID_VENDOR_INTEL,
854 .family = 6,
855 .model = 6,
856 .stepping = 3,
857 .features[FEAT_1_EDX] =
858 PPRO_FEATURES,
859 .features[FEAT_1_ECX] =
860 CPUID_EXT_SSE3,
861 .xlevel = 0x80000004,
862 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
863 },
864 {
865 .name = "kvm32",
866 .level = 5,
867 .vendor = CPUID_VENDOR_INTEL,
868 .family = 15,
869 .model = 6,
870 .stepping = 1,
871 .features[FEAT_1_EDX] =
872 PPRO_FEATURES | CPUID_VME |
873 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
874 .features[FEAT_1_ECX] =
875 CPUID_EXT_SSE3,
876 .features[FEAT_8000_0001_ECX] =
877 0,
878 .xlevel = 0x80000008,
879 .model_id = "Common 32-bit KVM processor"
880 },
881 {
882 .name = "coreduo",
883 .level = 10,
884 .vendor = CPUID_VENDOR_INTEL,
885 .family = 6,
886 .model = 14,
887 .stepping = 8,
888 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
889 .features[FEAT_1_EDX] =
890 PPRO_FEATURES | CPUID_VME |
891 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
892 CPUID_SS,
893 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
894 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
895 .features[FEAT_1_ECX] =
896 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
897 .features[FEAT_8000_0001_EDX] =
898 CPUID_EXT2_NX,
899 .xlevel = 0x80000008,
900 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
901 },
902 {
903 .name = "486",
904 .level = 1,
905 .vendor = CPUID_VENDOR_INTEL,
906 .family = 4,
907 .model = 8,
908 .stepping = 0,
909 .features[FEAT_1_EDX] =
910 I486_FEATURES,
911 .xlevel = 0,
912 },
913 {
914 .name = "pentium",
915 .level = 1,
916 .vendor = CPUID_VENDOR_INTEL,
917 .family = 5,
918 .model = 4,
919 .stepping = 3,
920 .features[FEAT_1_EDX] =
921 PENTIUM_FEATURES,
922 .xlevel = 0,
923 },
924 {
925 .name = "pentium2",
926 .level = 2,
927 .vendor = CPUID_VENDOR_INTEL,
928 .family = 6,
929 .model = 5,
930 .stepping = 2,
931 .features[FEAT_1_EDX] =
932 PENTIUM2_FEATURES,
933 .xlevel = 0,
934 },
935 {
936 .name = "pentium3",
937 .level = 3,
938 .vendor = CPUID_VENDOR_INTEL,
939 .family = 6,
940 .model = 7,
941 .stepping = 3,
942 .features[FEAT_1_EDX] =
943 PENTIUM3_FEATURES,
944 .xlevel = 0,
945 },
946 {
947 .name = "athlon",
948 .level = 2,
949 .vendor = CPUID_VENDOR_AMD,
950 .family = 6,
951 .model = 2,
952 .stepping = 3,
953 .features[FEAT_1_EDX] =
954 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
955 CPUID_MCA,
956 .features[FEAT_8000_0001_EDX] =
957 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
958 .xlevel = 0x80000008,
959 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
960 },
961 {
962 .name = "n270",
963 .level = 10,
964 .vendor = CPUID_VENDOR_INTEL,
965 .family = 6,
966 .model = 28,
967 .stepping = 2,
968 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
969 .features[FEAT_1_EDX] =
970 PPRO_FEATURES |
971 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
972 CPUID_ACPI | CPUID_SS,
973 /* Some CPUs got no CPUID_SEP */
974 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
975 * CPUID_EXT_XTPR */
976 .features[FEAT_1_ECX] =
977 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
978 CPUID_EXT_MOVBE,
979 .features[FEAT_8000_0001_EDX] =
980 CPUID_EXT2_NX,
981 .features[FEAT_8000_0001_ECX] =
982 CPUID_EXT3_LAHF_LM,
983 .xlevel = 0x80000008,
984 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
985 },
986 {
987 .name = "Conroe",
988 .level = 10,
989 .vendor = CPUID_VENDOR_INTEL,
990 .family = 6,
991 .model = 15,
992 .stepping = 3,
993 .features[FEAT_1_EDX] =
994 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
995 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
996 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
997 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
998 CPUID_DE | CPUID_FP87,
999 .features[FEAT_1_ECX] =
1000 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1001 .features[FEAT_8000_0001_EDX] =
1002 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1003 .features[FEAT_8000_0001_ECX] =
1004 CPUID_EXT3_LAHF_LM,
1005 .xlevel = 0x80000008,
1006 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1007 },
1008 {
1009 .name = "Penryn",
1010 .level = 10,
1011 .vendor = CPUID_VENDOR_INTEL,
1012 .family = 6,
1013 .model = 23,
1014 .stepping = 3,
1015 .features[FEAT_1_EDX] =
1016 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1017 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1018 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1019 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1020 CPUID_DE | CPUID_FP87,
1021 .features[FEAT_1_ECX] =
1022 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1023 CPUID_EXT_SSE3,
1024 .features[FEAT_8000_0001_EDX] =
1025 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1026 .features[FEAT_8000_0001_ECX] =
1027 CPUID_EXT3_LAHF_LM,
1028 .xlevel = 0x80000008,
1029 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1030 },
1031 {
1032 .name = "Nehalem",
1033 .level = 11,
1034 .vendor = CPUID_VENDOR_INTEL,
1035 .family = 6,
1036 .model = 26,
1037 .stepping = 3,
1038 .features[FEAT_1_EDX] =
1039 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1040 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1041 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1042 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1043 CPUID_DE | CPUID_FP87,
1044 .features[FEAT_1_ECX] =
1045 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1046 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1047 .features[FEAT_8000_0001_EDX] =
1048 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1049 .features[FEAT_8000_0001_ECX] =
1050 CPUID_EXT3_LAHF_LM,
1051 .xlevel = 0x80000008,
1052 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1053 },
1054 {
1055 .name = "Westmere",
1056 .level = 11,
1057 .vendor = CPUID_VENDOR_INTEL,
1058 .family = 6,
1059 .model = 44,
1060 .stepping = 1,
1061 .features[FEAT_1_EDX] =
1062 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1063 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1064 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1065 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1066 CPUID_DE | CPUID_FP87,
1067 .features[FEAT_1_ECX] =
1068 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1069 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1070 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1071 .features[FEAT_8000_0001_EDX] =
1072 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1073 .features[FEAT_8000_0001_ECX] =
1074 CPUID_EXT3_LAHF_LM,
1075 .features[FEAT_6_EAX] =
1076 CPUID_6_EAX_ARAT,
1077 .xlevel = 0x80000008,
1078 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1079 },
1080 {
1081 .name = "SandyBridge",
1082 .level = 0xd,
1083 .vendor = CPUID_VENDOR_INTEL,
1084 .family = 6,
1085 .model = 42,
1086 .stepping = 1,
1087 .features[FEAT_1_EDX] =
1088 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1089 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1090 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1091 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1092 CPUID_DE | CPUID_FP87,
1093 .features[FEAT_1_ECX] =
1094 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1095 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1096 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1097 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1098 CPUID_EXT_SSE3,
1099 .features[FEAT_8000_0001_EDX] =
1100 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1101 CPUID_EXT2_SYSCALL,
1102 .features[FEAT_8000_0001_ECX] =
1103 CPUID_EXT3_LAHF_LM,
1104 .features[FEAT_XSAVE] =
1105 CPUID_XSAVE_XSAVEOPT,
1106 .features[FEAT_6_EAX] =
1107 CPUID_6_EAX_ARAT,
1108 .xlevel = 0x80000008,
1109 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1110 },
1111 {
1112 .name = "IvyBridge",
1113 .level = 0xd,
1114 .vendor = CPUID_VENDOR_INTEL,
1115 .family = 6,
1116 .model = 58,
1117 .stepping = 9,
1118 .features[FEAT_1_EDX] =
1119 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1120 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1121 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1122 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1123 CPUID_DE | CPUID_FP87,
1124 .features[FEAT_1_ECX] =
1125 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1126 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1127 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1128 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1129 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1130 .features[FEAT_7_0_EBX] =
1131 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1132 CPUID_7_0_EBX_ERMS,
1133 .features[FEAT_8000_0001_EDX] =
1134 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1135 CPUID_EXT2_SYSCALL,
1136 .features[FEAT_8000_0001_ECX] =
1137 CPUID_EXT3_LAHF_LM,
1138 .features[FEAT_XSAVE] =
1139 CPUID_XSAVE_XSAVEOPT,
1140 .features[FEAT_6_EAX] =
1141 CPUID_6_EAX_ARAT,
1142 .xlevel = 0x80000008,
1143 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1144 },
1145 {
1146 .name = "Haswell-noTSX",
1147 .level = 0xd,
1148 .vendor = CPUID_VENDOR_INTEL,
1149 .family = 6,
1150 .model = 60,
1151 .stepping = 1,
1152 .features[FEAT_1_EDX] =
1153 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1154 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1155 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1156 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1157 CPUID_DE | CPUID_FP87,
1158 .features[FEAT_1_ECX] =
1159 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1160 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1161 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1162 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1163 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1164 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1165 .features[FEAT_8000_0001_EDX] =
1166 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1167 CPUID_EXT2_SYSCALL,
1168 .features[FEAT_8000_0001_ECX] =
1169 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1170 .features[FEAT_7_0_EBX] =
1171 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1172 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1173 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1174 .features[FEAT_XSAVE] =
1175 CPUID_XSAVE_XSAVEOPT,
1176 .features[FEAT_6_EAX] =
1177 CPUID_6_EAX_ARAT,
1178 .xlevel = 0x80000008,
1179 .model_id = "Intel Core Processor (Haswell, no TSX)",
1180 }, {
1181 .name = "Haswell",
1182 .level = 0xd,
1183 .vendor = CPUID_VENDOR_INTEL,
1184 .family = 6,
1185 .model = 60,
1186 .stepping = 1,
1187 .features[FEAT_1_EDX] =
1188 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1189 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1190 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1191 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1192 CPUID_DE | CPUID_FP87,
1193 .features[FEAT_1_ECX] =
1194 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1195 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1196 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1197 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1198 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1199 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1200 .features[FEAT_8000_0001_EDX] =
1201 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1202 CPUID_EXT2_SYSCALL,
1203 .features[FEAT_8000_0001_ECX] =
1204 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1205 .features[FEAT_7_0_EBX] =
1206 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1207 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1208 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1209 CPUID_7_0_EBX_RTM,
1210 .features[FEAT_XSAVE] =
1211 CPUID_XSAVE_XSAVEOPT,
1212 .features[FEAT_6_EAX] =
1213 CPUID_6_EAX_ARAT,
1214 .xlevel = 0x80000008,
1215 .model_id = "Intel Core Processor (Haswell)",
1216 },
1217 {
1218 .name = "Broadwell-noTSX",
1219 .level = 0xd,
1220 .vendor = CPUID_VENDOR_INTEL,
1221 .family = 6,
1222 .model = 61,
1223 .stepping = 2,
1224 .features[FEAT_1_EDX] =
1225 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1226 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1227 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1228 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1229 CPUID_DE | CPUID_FP87,
1230 .features[FEAT_1_ECX] =
1231 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1232 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1233 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1234 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1235 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1236 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1237 .features[FEAT_8000_0001_EDX] =
1238 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1239 CPUID_EXT2_SYSCALL,
1240 .features[FEAT_8000_0001_ECX] =
1241 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1242 .features[FEAT_7_0_EBX] =
1243 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1244 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1245 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1246 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1247 CPUID_7_0_EBX_SMAP,
1248 .features[FEAT_XSAVE] =
1249 CPUID_XSAVE_XSAVEOPT,
1250 .features[FEAT_6_EAX] =
1251 CPUID_6_EAX_ARAT,
1252 .xlevel = 0x80000008,
1253 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1254 },
1255 {
1256 .name = "Broadwell",
1257 .level = 0xd,
1258 .vendor = CPUID_VENDOR_INTEL,
1259 .family = 6,
1260 .model = 61,
1261 .stepping = 2,
1262 .features[FEAT_1_EDX] =
1263 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1264 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1265 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1266 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1267 CPUID_DE | CPUID_FP87,
1268 .features[FEAT_1_ECX] =
1269 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1270 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1271 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1272 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1273 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1274 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1275 .features[FEAT_8000_0001_EDX] =
1276 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1277 CPUID_EXT2_SYSCALL,
1278 .features[FEAT_8000_0001_ECX] =
1279 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1280 .features[FEAT_7_0_EBX] =
1281 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1282 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1283 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1284 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1285 CPUID_7_0_EBX_SMAP,
1286 .features[FEAT_XSAVE] =
1287 CPUID_XSAVE_XSAVEOPT,
1288 .features[FEAT_6_EAX] =
1289 CPUID_6_EAX_ARAT,
1290 .xlevel = 0x80000008,
1291 .model_id = "Intel Core Processor (Broadwell)",
1292 },
1293 {
1294 .name = "Skylake-Client",
1295 .level = 0xd,
1296 .vendor = CPUID_VENDOR_INTEL,
1297 .family = 6,
1298 .model = 94,
1299 .stepping = 3,
1300 .features[FEAT_1_EDX] =
1301 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1302 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1303 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1304 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1305 CPUID_DE | CPUID_FP87,
1306 .features[FEAT_1_ECX] =
1307 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1308 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1309 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1310 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1311 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1312 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1313 .features[FEAT_8000_0001_EDX] =
1314 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1315 CPUID_EXT2_SYSCALL,
1316 .features[FEAT_8000_0001_ECX] =
1317 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1318 .features[FEAT_7_0_EBX] =
1319 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1320 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1321 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1322 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1323 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1324 /* Missing: XSAVES (not supported by some Linux versions,
1325 * including v4.1 to v4.6).
1326 * KVM doesn't yet expose any XSAVES state save component,
1327 * and the only one defined in Skylake (processor tracing)
1328 * probably will block migration anyway.
1329 */
1330 .features[FEAT_XSAVE] =
1331 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1332 CPUID_XSAVE_XGETBV1,
1333 .features[FEAT_6_EAX] =
1334 CPUID_6_EAX_ARAT,
1335 .xlevel = 0x80000008,
1336 .model_id = "Intel Core Processor (Skylake)",
1337 },
1338 {
1339 .name = "Opteron_G1",
1340 .level = 5,
1341 .vendor = CPUID_VENDOR_AMD,
1342 .family = 15,
1343 .model = 6,
1344 .stepping = 1,
1345 .features[FEAT_1_EDX] =
1346 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1347 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1348 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1349 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1350 CPUID_DE | CPUID_FP87,
1351 .features[FEAT_1_ECX] =
1352 CPUID_EXT_SSE3,
1353 .features[FEAT_8000_0001_EDX] =
1354 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1355 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1356 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1357 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1358 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1359 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1360 .xlevel = 0x80000008,
1361 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1362 },
1363 {
1364 .name = "Opteron_G2",
1365 .level = 5,
1366 .vendor = CPUID_VENDOR_AMD,
1367 .family = 15,
1368 .model = 6,
1369 .stepping = 1,
1370 .features[FEAT_1_EDX] =
1371 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1372 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1373 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1374 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1375 CPUID_DE | CPUID_FP87,
1376 .features[FEAT_1_ECX] =
1377 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1378 /* Missing: CPUID_EXT2_RDTSCP */
1379 .features[FEAT_8000_0001_EDX] =
1380 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1381 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1382 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1383 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1384 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1385 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1386 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1387 .features[FEAT_8000_0001_ECX] =
1388 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1389 .xlevel = 0x80000008,
1390 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1391 },
1392 {
1393 .name = "Opteron_G3",
1394 .level = 5,
1395 .vendor = CPUID_VENDOR_AMD,
1396 .family = 15,
1397 .model = 6,
1398 .stepping = 1,
1399 .features[FEAT_1_EDX] =
1400 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1401 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1402 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1403 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1404 CPUID_DE | CPUID_FP87,
1405 .features[FEAT_1_ECX] =
1406 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1407 CPUID_EXT_SSE3,
1408 /* Missing: CPUID_EXT2_RDTSCP */
1409 .features[FEAT_8000_0001_EDX] =
1410 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1411 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1412 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1413 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1414 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1415 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1416 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1417 .features[FEAT_8000_0001_ECX] =
1418 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1419 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1420 .xlevel = 0x80000008,
1421 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1422 },
1423 {
1424 .name = "Opteron_G4",
1425 .level = 0xd,
1426 .vendor = CPUID_VENDOR_AMD,
1427 .family = 21,
1428 .model = 1,
1429 .stepping = 2,
1430 .features[FEAT_1_EDX] =
1431 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1432 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1433 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1434 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1435 CPUID_DE | CPUID_FP87,
1436 .features[FEAT_1_ECX] =
1437 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1438 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1439 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1440 CPUID_EXT_SSE3,
1441 /* Missing: CPUID_EXT2_RDTSCP */
1442 .features[FEAT_8000_0001_EDX] =
1443 CPUID_EXT2_LM |
1444 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1445 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1446 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1447 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1448 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1449 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1450 .features[FEAT_8000_0001_ECX] =
1451 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1452 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1453 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1454 CPUID_EXT3_LAHF_LM,
1455 /* no xsaveopt! */
1456 .xlevel = 0x8000001A,
1457 .model_id = "AMD Opteron 62xx class CPU",
1458 },
1459 {
1460 .name = "Opteron_G5",
1461 .level = 0xd,
1462 .vendor = CPUID_VENDOR_AMD,
1463 .family = 21,
1464 .model = 2,
1465 .stepping = 0,
1466 .features[FEAT_1_EDX] =
1467 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1468 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1469 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1470 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1471 CPUID_DE | CPUID_FP87,
1472 .features[FEAT_1_ECX] =
1473 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1474 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1475 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1476 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1477 /* Missing: CPUID_EXT2_RDTSCP */
1478 .features[FEAT_8000_0001_EDX] =
1479 CPUID_EXT2_LM |
1480 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1481 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1482 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1483 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1484 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1485 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1486 .features[FEAT_8000_0001_ECX] =
1487 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1488 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1489 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1490 CPUID_EXT3_LAHF_LM,
1491 /* no xsaveopt! */
1492 .xlevel = 0x8000001A,
1493 .model_id = "AMD Opteron 63xx class CPU",
1494 },
1495 };
1496
/* A (property name, property value) pair, used for tables of property
 * defaults such as kvm_default_props below.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1500
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },  /* end-of-table marker; iterators stop on prop == NULL */
};
1517
1518 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1519 {
1520 PropValue *pv;
1521 for (pv = kvm_default_props; pv->prop; pv++) {
1522 if (!strcmp(pv->prop, prop)) {
1523 pv->value = value;
1524 break;
1525 }
1526 }
1527
1528 /* It is valid to call this function only for properties that
1529 * are already present in the kvm_default_props table.
1530 */
1531 assert(pv->prop);
1532 }
1533
1534 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1535 bool migratable_only);
1536
1537 #ifdef CONFIG_KVM
1538
1539 static bool lmce_supported(void)
1540 {
1541 uint64_t mce_cap;
1542
1543 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1544 return false;
1545 }
1546
1547 return !!(mce_cap & MCG_LMCE_P);
1548 }
1549
/* Fill 'str' (at least 48 bytes) with the host CPU model-id string,
 * assembled from CPUID leaves 0x80000002..0x80000004 (16 bytes each,
 * EAX/EBX/ECX/EDX in order).  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        uint32_t regs[4] = { 0, 0, 0, 0 };
        char *dst = str + leaf * 16;

        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(dst + 0, &regs[0], 4);
        memcpy(dst + 4, &regs[1], 4);
        memcpy(dst + 8, &regs[2], 4);
        memcpy(dst + 12, &regs[3], 4);
    }
    return 0;
}
1564
1565 static X86CPUDefinition host_cpudef;
1566
/* Properties specific to the "host" CPU model.
 * Defaults: migratable=true, host-cache-info=false.
 */
static Property host_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1572
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    /* The "host" model only makes sense with KVM */
    xcc->kvm_required = true;

    /* Leaf 0: vendor string lives in EBX/EDX/ECX (in that order) */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* Leaf 1, EAX: decode family/model/stepping.
     * NOTE(review): extended family/model bits are folded in
     * unconditionally, not only when the base family is 0xF as the
     * Intel SDM describes — confirm this matches the intended
     * cpuid_version re-encoding elsewhere in this file.
     */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
1605
1606 static void host_x86_cpu_initfn(Object *obj)
1607 {
1608 X86CPU *cpu = X86_CPU(obj);
1609 CPUX86State *env = &cpu->env;
1610 KVMState *s = kvm_state;
1611
1612 /* We can't fill the features array here because we don't know yet if
1613 * "migratable" is true or false.
1614 */
1615 cpu->host_features = true;
1616
1617 /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
1618 if (kvm_enabled()) {
1619 env->cpuid_min_level =
1620 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1621 env->cpuid_min_xlevel =
1622 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1623 env->cpuid_min_xlevel2 =
1624 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1625
1626 if (lmce_supported()) {
1627 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
1628 }
1629 }
1630
1631 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1632 }
1633
/* QOM type registration record for the "host" CPU model */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1640
1641 #endif
1642
1643 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1644 {
1645 FeatureWordInfo *f = &feature_word_info[w];
1646 int i;
1647
1648 for (i = 0; i < 32; ++i) {
1649 if ((1UL << i) & mask) {
1650 const char *reg = get_register_name_32(f->cpuid_reg);
1651 assert(reg);
1652 fprintf(stderr, "warning: %s doesn't support requested feature: "
1653 "CPUID.%02XH:%s%s%s [bit %d]\n",
1654 kvm_enabled() ? "host" : "TCG",
1655 f->cpuid_eax, reg,
1656 f->feat_names[i] ? "." : "",
1657 f->feat_names[i] ? f->feat_names[i] : "", i);
1658 }
1659 }
1660 }
1661
1662 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1663 const char *name, void *opaque,
1664 Error **errp)
1665 {
1666 X86CPU *cpu = X86_CPU(obj);
1667 CPUX86State *env = &cpu->env;
1668 int64_t value;
1669
1670 value = (env->cpuid_version >> 8) & 0xf;
1671 if (value == 0xf) {
1672 value += (env->cpuid_version >> 20) & 0xff;
1673 }
1674 visit_type_int(v, name, &value, errp);
1675 }
1676
1677 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
1678 const char *name, void *opaque,
1679 Error **errp)
1680 {
1681 X86CPU *cpu = X86_CPU(obj);
1682 CPUX86State *env = &cpu->env;
1683 const int64_t min = 0;
1684 const int64_t max = 0xff + 0xf;
1685 Error *local_err = NULL;
1686 int64_t value;
1687
1688 visit_type_int(v, name, &value, &local_err);
1689 if (local_err) {
1690 error_propagate(errp, local_err);
1691 return;
1692 }
1693 if (value < min || value > max) {
1694 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1695 name ? name : "null", value, min, max);
1696 return;
1697 }
1698
1699 env->cpuid_version &= ~0xff00f00;
1700 if (value > 0x0f) {
1701 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1702 } else {
1703 env->cpuid_version |= value << 8;
1704 }
1705 }
1706
1707 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1708 const char *name, void *opaque,
1709 Error **errp)
1710 {
1711 X86CPU *cpu = X86_CPU(obj);
1712 CPUX86State *env = &cpu->env;
1713 int64_t value;
1714
1715 value = (env->cpuid_version >> 4) & 0xf;
1716 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1717 visit_type_int(v, name, &value, errp);
1718 }
1719
1720 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
1721 const char *name, void *opaque,
1722 Error **errp)
1723 {
1724 X86CPU *cpu = X86_CPU(obj);
1725 CPUX86State *env = &cpu->env;
1726 const int64_t min = 0;
1727 const int64_t max = 0xff;
1728 Error *local_err = NULL;
1729 int64_t value;
1730
1731 visit_type_int(v, name, &value, &local_err);
1732 if (local_err) {
1733 error_propagate(errp, local_err);
1734 return;
1735 }
1736 if (value < min || value > max) {
1737 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1738 name ? name : "null", value, min, max);
1739 return;
1740 }
1741
1742 env->cpuid_version &= ~0xf00f0;
1743 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1744 }
1745
1746 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1747 const char *name, void *opaque,
1748 Error **errp)
1749 {
1750 X86CPU *cpu = X86_CPU(obj);
1751 CPUX86State *env = &cpu->env;
1752 int64_t value;
1753
1754 value = env->cpuid_version & 0xf;
1755 visit_type_int(v, name, &value, errp);
1756 }
1757
1758 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1759 const char *name, void *opaque,
1760 Error **errp)
1761 {
1762 X86CPU *cpu = X86_CPU(obj);
1763 CPUX86State *env = &cpu->env;
1764 const int64_t min = 0;
1765 const int64_t max = 0xf;
1766 Error *local_err = NULL;
1767 int64_t value;
1768
1769 visit_type_int(v, name, &value, &local_err);
1770 if (local_err) {
1771 error_propagate(errp, local_err);
1772 return;
1773 }
1774 if (value < min || value > max) {
1775 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1776 name ? name : "null", value, min, max);
1777 return;
1778 }
1779
1780 env->cpuid_version &= ~0xf;
1781 env->cpuid_version |= value & 0xf;
1782 }
1783
1784 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1785 {
1786 X86CPU *cpu = X86_CPU(obj);
1787 CPUX86State *env = &cpu->env;
1788 char *value;
1789
1790 value = g_malloc(CPUID_VENDOR_SZ + 1);
1791 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1792 env->cpuid_vendor3);
1793 return value;
1794 }
1795
1796 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1797 Error **errp)
1798 {
1799 X86CPU *cpu = X86_CPU(obj);
1800 CPUX86State *env = &cpu->env;
1801 int i;
1802
1803 if (strlen(value) != CPUID_VENDOR_SZ) {
1804 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1805 return;
1806 }
1807
1808 env->cpuid_vendor1 = 0;
1809 env->cpuid_vendor2 = 0;
1810 env->cpuid_vendor3 = 0;
1811 for (i = 0; i < 4; i++) {
1812 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1813 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1814 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1815 }
1816 }
1817
1818 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1819 {
1820 X86CPU *cpu = X86_CPU(obj);
1821 CPUX86State *env = &cpu->env;
1822 char *value;
1823 int i;
1824
1825 value = g_malloc(48 + 1);
1826 for (i = 0; i < 48; i++) {
1827 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1828 }
1829 value[48] = '\0';
1830 return value;
1831 }
1832
1833 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1834 Error **errp)
1835 {
1836 X86CPU *cpu = X86_CPU(obj);
1837 CPUX86State *env = &cpu->env;
1838 int c, len, i;
1839
1840 if (model_id == NULL) {
1841 model_id = "";
1842 }
1843 len = strlen(model_id);
1844 memset(env->cpuid_model, 0, 48);
1845 for (i = 0; i < 48; i++) {
1846 if (i >= len) {
1847 c = '\0';
1848 } else {
1849 c = (uint8_t)model_id[i];
1850 }
1851 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1852 }
1853 }
1854
1855 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1856 void *opaque, Error **errp)
1857 {
1858 X86CPU *cpu = X86_CPU(obj);
1859 int64_t value;
1860
1861 value = cpu->env.tsc_khz * 1000;
1862 visit_type_int(v, name, &value, errp);
1863 }
1864
1865 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
1866 void *opaque, Error **errp)
1867 {
1868 X86CPU *cpu = X86_CPU(obj);
1869 const int64_t min = 0;
1870 const int64_t max = INT64_MAX;
1871 Error *local_err = NULL;
1872 int64_t value;
1873
1874 visit_type_int(v, name, &value, &local_err);
1875 if (local_err) {
1876 error_propagate(errp, local_err);
1877 return;
1878 }
1879 if (value < min || value > max) {
1880 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1881 name ? name : "null", value, min, max);
1882 return;
1883 }
1884
1885 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
1886 }
1887
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* 'opaque' points at an array of FEATURE_WORDS uint32_t feature-word
     * values (set up by whoever registered the property). */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    /* Both arrays are stack-allocated; the visitor consumes the list
     * before this function returns, so no heap allocation is needed. */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        /* Describe where this word lives in CPUID space... */
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        /* ...and its current value. */
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
1916
1917 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1918 void *opaque, Error **errp)
1919 {
1920 X86CPU *cpu = X86_CPU(obj);
1921 int64_t value = cpu->hyperv_spinlock_attempts;
1922
1923 visit_type_int(v, name, &value, errp);
1924 }
1925
1926 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1927 void *opaque, Error **errp)
1928 {
1929 const int64_t min = 0xFFF;
1930 const int64_t max = UINT_MAX;
1931 X86CPU *cpu = X86_CPU(obj);
1932 Error *err = NULL;
1933 int64_t value;
1934
1935 visit_type_int(v, name, &value, &err);
1936 if (err) {
1937 error_propagate(errp, err);
1938 return;
1939 }
1940
1941 if (value < min || value > max) {
1942 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1943 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1944 object_get_typename(obj), name ? name : "null",
1945 value, min, max);
1946 return;
1947 }
1948 cpu->hyperv_spinlock_attempts = value;
1949 }
1950
/* qdev property type backing "hv-spinlocks": plain int with range-checked
 * getter/setter defined above.
 */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1956
/* Convert all '_' in a feature string option name to '-', in place, to make
 * the feature name conform to the QOM property naming rule, which uses '-'
 * instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = strchr(s, '_'); p != NULL; p = strchr(p, '_')) {
        *p = '-';
    }
}
1966
/* Compatibility hack to maintain the legacy +feat/-feat semantics, where
 * +feat/-feat overwrites any feature set by feat=on|off even if the latter
 * is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static FeatureWordArray plus_features = { 0 };
static FeatureWordArray minus_features = { 0 };
1974
/* Parse a "+feature,-feature,feature=foo" CPU feature string.
 *
 * Each parsed key=value pair is registered as a global property on
 * 'typename', so it applies to every CPU of that type created later.
 * '+feat'/'-feat' entries instead go into the plus_features/minus_features
 * bitmaps (legacy semantics, see comment above).  This runs at most once
 * per process: the cpu_globals_initialized guard makes later calls no-ops.
 * NOTE: uses strtok(), so it mutates 'features' and is not reentrant.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value' string being parsed */
    Error *local_err = NULL;
    static bool cpu_globals_initialized;

    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    for (featurestr = strtok(features, ",");
         featurestr && !local_err;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
            continue;
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
            continue;
        }

        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            /* Bare "feat" means "feat=on" */
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Special case: "tsc-freq" accepts a size suffix (k/M/G, base 1000)
         * and is translated into the "tsc-frequency" property in Hz.
         */
        if (!strcmp(name, "tsc-freq")) {
            int64_t tsc_freq;
            char *err;

            tsc_freq = qemu_strtosz_suffix_unit(val, &err,
                                           QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
            if (tsc_freq < 0 || *err) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        /* Registered globals are never freed; errors there are fatal. */
        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (local_err) {
        error_propagate(errp, local_err);
    }
}
2050
2051 /* Print all cpuid feature names in featureset
2052 */
2053 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2054 {
2055 int bit;
2056 bool first = true;
2057
2058 for (bit = 0; bit < 32; bit++) {
2059 if (featureset[bit]) {
2060 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2061 first = false;
2062 }
2063 }
2064 }
2065
/* Generate CPU information: print every built-in CPU model (plus "host"
 * when built with KVM support) followed by all recognized CPUID flag
 * names, one feature word per line.
 */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    X86CPUDefinition *def;
    char buf[256];
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        def = &builtin_x86_defs[i];
        snprintf(buf, sizeof(buf), "%s", def->name);
        (*cpu_fprintf)(f, "x86 %16s  %-48s\n", buf, def->model_id);
    }
#ifdef CONFIG_KVM
    (*cpu_fprintf)(f, "x86 %16s  %-48s\n", "host",
                   "KVM processor with all supported host features "
                   "(only available in KVM mode)");
#endif

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        (*cpu_fprintf)(f, "  ");
        listflags(f, cpu_fprintf, fw->feat_names);
        (*cpu_fprintf)(f, "\n");
    }
}
2093
2094 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2095 {
2096 CpuDefinitionInfoList *cpu_list = NULL;
2097 X86CPUDefinition *def;
2098 int i;
2099
2100 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2101 CpuDefinitionInfoList *entry;
2102 CpuDefinitionInfo *info;
2103
2104 def = &builtin_x86_defs[i];
2105 info = g_malloc0(sizeof(*info));
2106 info->name = g_strdup(def->name);
2107
2108 entry = g_malloc0(sizeof(*entry));
2109 entry->value = info;
2110 entry->next = cpu_list;
2111 cpu_list = entry;
2112 }
2113
2114 return cpu_list;
2115 }
2116
/* Return the feature bits of word 'w' that the current accelerator can
 * actually provide: KVM asks the kernel, TCG uses the static tcg_features
 * mask, and any other accelerator reports "everything" (~0).  When
 * 'migratable_only' is set, bits without known migration support are
 * masked out.
 */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
2137
/*
 * Filters CPU feature words based on host availability of each feature.
 *
 * Unsupported bits are cleared from env->features[] and recorded in
 * cpu->filtered_features[] so they can be reported later; with -cpu
 * check/enforce the missing features are also warned about immediately.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, cpu->migratable);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            if (cpu->check_cpuid || cpu->enforce_cpuid) {
                report_unavailable_features(w, cpu->filtered_features[w]);
            }
            rv = 1;
        }
    }

    return rv;
}
2165
2166 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2167 {
2168 PropValue *pv;
2169 for (pv = props; pv->prop; pv++) {
2170 if (!pv->value) {
2171 continue;
2172 }
2173 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2174 &error_abort);
2175 }
2176 }
2177
/* Load data from an X86CPUDefinition into a freshly created X86CPU.
 *
 * Sets level/xlevel minimums, version fields, model-id and all feature
 * words from 'def', then applies the accelerator-specific tweaks (KVM
 * defaults, the hypervisor CPUID bit, and the vendor string).
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        /* x2apic needs the in-kernel irqchip; disable it otherwise */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    /* Always advertise that we are running under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2228
/* Create and realize an X86CPU from a "-cpu"-style model string.
 * Returns NULL on failure (errors are reported by cpu_generic_init).
 */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
2233
/* class_init for the per-model CPU subclasses registered below: stash the
 * X86CPUDefinition (passed as class_data) in the class for later use.
 */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
}
2241
2242 static void x86_register_cpudef_type(X86CPUDefinition *def)
2243 {
2244 char *typename = x86_cpu_type_name(def->name);
2245 TypeInfo ti = {
2246 .name = typename,
2247 .parent = TYPE_X86_CPU,
2248 .class_init = x86_cpu_cpudef_class_init,
2249 .class_data = def,
2250 };
2251
2252 type_register(&ti);
2253 g_free(typename);
2254 }
2255
2256 #if !defined(CONFIG_USER_ONLY)
2257
/* Hide the APIC from the guest by clearing the CPUID[1].EDX APIC bit. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2262
2263 #endif /* !CONFIG_USER_ONLY */
2264
/* Compute the guest-visible CPUID result for leaf 'index', subleaf 'count',
 * writing the four result registers through eax/ebx/ecx/edx.
 *
 * Out-of-range leaves are first clamped: standard leaves to cpuid_level,
 * extended leaves to cpuid_xlevel, and (when xlevel2 is set) Centaur
 * 0xC000xxxx leaves to cpuid_xlevel2.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum standard leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, APIC id, and basic feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects guest CR4, so it is computed dynamically */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26.  */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE tracks guest CR4.PKE, so compute it dynamically */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        uint64_t ena_mask;
        int i;

        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        /* Build the mask of xsave components the guest may enable:
         * FP and SSE are always there; each other component requires
         * its controlling feature bit.
         */
        ena_mask = (XSTATE_FP_MASK | XSTATE_SSE_MASK);
        for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
            const ExtSaveArea *esa = &x86_ext_save_areas[i];
            if (env->features[esa->feature] & esa->bits) {
                ena_mask |= (1ULL << i);
            }
        }

        if (kvm_enabled()) {
            /* Further restrict to what the host kernel supports */
            KVMState *s = cs->kvm_state;
            uint64_t kvm_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
            kvm_mask <<= 32;
            kvm_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
            ena_mask &= kvm_mask;
        }

        if (count == 0) {
            /* ECX = size of the area needed for all enabled components
             * (0x240 covers the legacy FXSAVE region plus the header);
             * EBX mirrors it, EDX:EAX is the supported-component bitmap.
             */
            *ecx = 0x240;
            for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
                const ExtSaveArea *esa = &x86_ext_save_areas[i];
                if ((ena_mask >> i) & 1) {
                    *ecx = MAX(*ecx, esa->offset + esa->size);
                }
            }
            *eax = ena_mask;
            *edx = ena_mask >> 32;
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Per-component sub-leaves: size and offset of each area */
            const ExtSaveArea *esa = &x86_ext_save_areas[count];
            if ((ena_mask >> count) & 1) {
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        /* Maximum extended leaf and vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        /* Advanced Power Management Information */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor, 48 bits virtual, configurable
             * physical bits.
             */
            *eax = 0x00003000 + cpu->phys_bits;
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        /* Centaur: maximum 0xC000xxxx leaf */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2684
/* CPUClass::reset()
 *
 * Bring the CPU to its architectural power-on state: clear CPUX86State up
 * to end_reset_fields, flush the TLB, and program the documented reset
 * values for segments, EIP, EFLAGS, FPU/SSE state, MSRs, debug registers,
 * XCR0/CR4 and MTRRs.  In system mode the first CPU becomes the BSP and
 * all others start halted.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Fields past end_reset_fields are configuration and survive reset */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS starts at the top of the address space so the first fetch hits
     * the reset vector at 0xfffffff0 (base 0xffff0000 + EIP 0xfff0).
     */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode.  */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2808
2809 #ifndef CONFIG_USER_ONLY
/* Report whether this CPU is the bootstrap processor, as indicated by the
 * BSP flag in its APIC base MSR.
 */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
2814
/* Machine-reset hook that forwards to cpu_reset() for this CPU.
 * TODO: remove me, when reset over QOM tree is implemented
 */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
2821 #endif
2822
/* Initialize machine-check state: advertised only on family >= 6 CPUs that
 * expose both the MCE and MCA CPUID bits.  Enables the default bank count
 * (plus LMCE when requested) and marks every bank control register as
 * fully enabled.
 */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
2839
2840 #ifndef CONFIG_USER_ONLY
/* Create the local APIC device for this CPU as a QOM child ("lapic"),
 * picking the implementation that matches the accelerator (kvm-apic,
 * xen-apic, or the emulated "apic").  The device is created but not yet
 * realized; see x86_cpu_apic_realize().
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = DEVICE(object_new(apic_type));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* The child property now holds the only long-lived reference */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
2864
/* Realize the CPU's APIC device (no-op when no APIC was created) and map
 * the APIC MMIO window into system memory.  The mapping is shared by all
 * CPUs, so it is performed only once, guarded by apic_mmio_map_once.
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
2887
/* machine-done notifier: if the machine provides a "/machine/smram" region,
 * alias it into this CPU's address space (initially disabled) so SMRAM can
 * be switched in when the CPU enters SMM.
 */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
2902 #else
/* User-mode build: no APIC device exists, so realizing it is a no-op. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
2906 #endif
2907
2908 /* Note: Only safe for use on x86(-64) hosts */
2909 static uint32_t x86_host_phys_bits(void)
2910 {
2911 uint32_t eax;
2912 uint32_t host_phys_bits;
2913
2914 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
2915 if (eax >= 0x80000008) {
2916 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
2917 /* Note: According to AMD doc 25481 rev 2.34 they have a field
2918 * at 23:16 that can specify a maximum physical address bits for
2919 * the guest that can override this value; but I've not seen
2920 * anything with that set.
2921 */
2922 host_phys_bits = eax & 0xff;
2923 } else {
2924 /* It's an odd 64 bit machine that doesn't have the leaf for
2925 * physical address bits; fall back to 36 that's most older
2926 * Intel.
2927 */
2928 host_phys_bits = 36;
2929 }
2930
2931 return host_phys_bits;
2932 }
2933
2934 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
2935 {
2936 if (*min < value) {
2937 *min = value;
2938 }
2939 }
2940
2941 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
2942 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
2943 {
2944 CPUX86State *env = &cpu->env;
2945 FeatureWordInfo *fi = &feature_word_info[w];
2946 uint32_t eax = fi->cpuid_eax;
2947 uint32_t region = eax & 0xF0000000;
2948
2949 if (!env->features[w]) {
2950 return;
2951 }
2952
2953 switch (region) {
2954 case 0x00000000:
2955 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
2956 break;
2957 case 0x80000000:
2958 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
2959 break;
2960 case 0xC0000000:
2961 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
2962 break;
2963 }
2964 }
2965
/* True iff the guest's CPUID vendor words match Intel's vendor id. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
/* True iff the guest's CPUID vendor words match AMD's vendor id. */
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2972 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2973 {
2974 CPUState *cs = CPU(dev);
2975 X86CPU *cpu = X86_CPU(dev);
2976 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2977 CPUX86State *env = &cpu->env;
2978 Error *local_err = NULL;
2979 static bool ht_warned;
2980 FeatureWord w;
2981
2982 if (xcc->kvm_required && !kvm_enabled()) {
2983 char *name = x86_cpu_class_get_model_name(xcc);
2984 error_setg(&local_err, "CPU model '%s' requires KVM", name);
2985 g_free(name);
2986 goto out;
2987 }
2988
2989 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
2990 error_setg(errp, "apic-id property was not initialized properly");
2991 return;
2992 }
2993
2994 /*TODO: cpu->host_features incorrectly overwrites features
2995 * set using "feat=on|off". Once we fix this, we can convert
2996 * plus_features & minus_features to global properties
2997 * inside x86_cpu_parse_featurestr() too.
2998 */
2999 if (cpu->host_features) {
3000 for (w = 0; w < FEATURE_WORDS; w++) {
3001 env->features[w] =
3002 x86_cpu_get_supported_feature_word(w, cpu->migratable);
3003 }
3004 }
3005
3006 for (w = 0; w < FEATURE_WORDS; w++) {
3007 cpu->env.features[w] |= plus_features[w];
3008 cpu->env.features[w] &= ~minus_features[w];
3009 }
3010
3011
3012 /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
3013 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
3014 if (cpu->full_cpuid_auto_level) {
3015 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
3016 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
3017 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
3018 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
3019 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
3020 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
3021 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
3022 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
3023 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
3024 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
3025 /* SVM requires CPUID[0x8000000A] */
3026 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3027 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
3028 }
3029 }
3030
3031 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
3032 if (env->cpuid_level == UINT32_MAX) {
3033 env->cpuid_level = env->cpuid_min_level;
3034 }
3035 if (env->cpuid_xlevel == UINT32_MAX) {
3036 env->cpuid_xlevel = env->cpuid_min_xlevel;
3037 }
3038 if (env->cpuid_xlevel2 == UINT32_MAX) {
3039 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
3040 }
3041
3042 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
3043 error_setg(&local_err,
3044 kvm_enabled() ?
3045 "Host doesn't support requested features" :
3046 "TCG doesn't support requested features");
3047 goto out;
3048 }
3049
3050 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
3051 * CPUID[1].EDX.
3052 */
3053 if (IS_AMD_CPU(env)) {
3054 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
3055 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
3056 & CPUID_EXT2_AMD_ALIASES);
3057 }
3058
3059 /* For 64bit systems think about the number of physical bits to present.
3060 * ideally this should be the same as the host; anything other than matching
3061 * the host can cause incorrect guest behaviour.
3062 * QEMU used to pick the magic value of 40 bits that corresponds to
3063 * consumer AMD devices but nothing else.
3064 */
3065 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3066 if (kvm_enabled()) {
3067 uint32_t host_phys_bits = x86_host_phys_bits();
3068 static bool warned;
3069
3070 if (cpu->host_phys_bits) {
3071 /* The user asked for us to use the host physical bits */
3072 cpu->phys_bits = host_phys_bits;
3073 }
3074
3075 /* Print a warning if the user set it to a value that's not the
3076 * host value.
3077 */
3078 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
3079 !warned) {
3080 error_report("Warning: Host physical bits (%u)"
3081 " does not match phys-bits property (%u)",
3082 host_phys_bits, cpu->phys_bits);
3083 warned = true;
3084 }
3085
3086 if (cpu->phys_bits &&
3087 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
3088 cpu->phys_bits < 32)) {
3089 error_setg(errp, "phys-bits should be between 32 and %u "
3090 " (but is %u)",
3091 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
3092 return;
3093 }
3094 } else {
3095 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
3096 error_setg(errp, "TCG only supports phys-bits=%u",
3097 TCG_PHYS_ADDR_BITS);
3098 return;
3099 }
3100 }
3101 /* 0 means it was not explicitly set by the user (or by machine
3102 * compat_props or by the host code above). In this case, the default
3103 * is the value used by TCG (40).
3104 */
3105 if (cpu->phys_bits == 0) {
3106 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
3107 }
3108 } else {
3109 /* For 32 bit systems don't use the user set value, but keep
3110 * phys_bits consistent with what we tell the guest.
3111 */
3112 if (cpu->phys_bits != 0) {
3113 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
3114 return;
3115 }
3116
3117 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
3118 cpu->phys_bits = 36;
3119 } else {
3120 cpu->phys_bits = 32;
3121 }
3122 }
3123 cpu_exec_init(cs, &error_abort);
3124
3125 if (tcg_enabled()) {
3126 tcg_x86_init();
3127 }
3128
3129 #ifndef CONFIG_USER_ONLY
3130 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
3131
3132 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
3133 x86_cpu_apic_create(cpu, &local_err);
3134 if (local_err != NULL) {
3135 goto out;
3136 }
3137 }
3138 #endif
3139
3140 mce_init(cpu);
3141
3142 #ifndef CONFIG_USER_ONLY
3143 if (tcg_enabled()) {
3144 AddressSpace *newas = g_new(AddressSpace, 1);
3145
3146 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
3147 cpu->cpu_as_root = g_new(MemoryRegion, 1);
3148
3149 /* Outer container... */
3150 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
3151 memory_region_set_enabled(cpu->cpu_as_root, true);
3152
3153 /* ... with two regions inside: normal system memory with low
3154 * priority, and...
3155 */
3156 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
3157 get_system_memory(), 0, ~0ull);
3158 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
3159 memory_region_set_enabled(cpu->cpu_as_mem, true);
3160 address_space_init(newas, cpu->cpu_as_root, "CPU");
3161 cs->num_ases = 1;
3162 cpu_address_space_init(cs, newas, 0);
3163
3164 /* ... SMRAM with higher priority, linked from /machine/smram. */
3165 cpu->machine_done.notify = x86_cpu_machine_done;
3166 qemu_add_machine_init_done_notifier(&cpu->machine_done);
3167 }
3168 #endif
3169
3170 qemu_init_vcpu(cs);
3171
3172 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
3173 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
3174 * based on inputs (sockets,cores,threads), it is still better to gives
3175 * users a warning.
3176 *
3177 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
3178 * cs->nr_threads hasn't be populated yet and the checking is incorrect.
3179 */
3180 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
3181 error_report("AMD CPU doesn't support hyperthreading. Please configure"
3182 " -smp options properly.");
3183 ht_warned = true;
3184 }
3185
3186 x86_cpu_apic_realize(cpu, &local_err);
3187 if (local_err != NULL) {
3188 goto out;
3189 }
3190 cpu_reset(cs);
3191
3192 xcc->parent_realize(dev, &local_err);
3193
3194 out:
3195 if (local_err != NULL) {
3196 error_propagate(errp, local_err);
3197 return;
3198 }
3199 }
3200
3201 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3202 {
3203 X86CPU *cpu = X86_CPU(dev);
3204
3205 #ifndef CONFIG_USER_ONLY
3206 cpu_remove_sync(CPU(dev));
3207 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3208 #endif
3209
3210 if (cpu->apic_state) {
3211 object_unparent(OBJECT(cpu->apic_state));
3212 cpu->apic_state = NULL;
3213 }
3214 }
3215
/* Backing state for a boolean "bit property": a QOM property that reads and
 * writes one or more bits inside a uint32_t feature-word field.
 */
typedef struct BitProperty {
    uint32_t *ptr;  /* feature word this property operates on */
    uint32_t mask;  /* bit(s) controlled by the property */
} BitProperty;
3220
3221 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3222 void *opaque, Error **errp)
3223 {
3224 BitProperty *fp = opaque;
3225 bool value = (*fp->ptr & fp->mask) == fp->mask;
3226 visit_type_bool(v, name, &value, errp);
3227 }
3228
3229 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3230 void *opaque, Error **errp)
3231 {
3232 DeviceState *dev = DEVICE(obj);
3233 BitProperty *fp = opaque;
3234 Error *local_err = NULL;
3235 bool value;
3236
3237 if (dev->realized) {
3238 qdev_prop_set_after_realize(dev, name, errp);
3239 return;
3240 }
3241
3242 visit_type_bool(v, name, &value, &local_err);
3243 if (local_err) {
3244 error_propagate(errp, local_err);
3245 return;
3246 }
3247
3248 if (value) {
3249 *fp->ptr |= fp->mask;
3250 } else {
3251 *fp->ptr &= ~fp->mask;
3252 }
3253 }
3254
3255 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3256 void *opaque)
3257 {
3258 BitProperty *prop = opaque;
3259 g_free(prop);
3260 }
3261
3262 /* Register a boolean property to get/set a single bit in a uint32_t field.
3263 *
3264 * The same property name can be registered multiple times to make it affect
3265 * multiple bits in the same FeatureWord. In that case, the getter will return
3266 * true only if all bits are set.
3267 */
3268 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3269 const char *prop_name,
3270 uint32_t *field,
3271 int bitnr)
3272 {
3273 BitProperty *fp;
3274 ObjectProperty *op;
3275 uint32_t mask = (1UL << bitnr);
3276
3277 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3278 if (op) {
3279 fp = op->opaque;
3280 assert(fp->ptr == field);
3281 fp->mask |= mask;
3282 } else {
3283 fp = g_new0(BitProperty, 1);
3284 fp->ptr = field;
3285 fp->mask = mask;
3286 object_property_add(OBJECT(cpu), prop_name, "bool",
3287 x86_cpu_get_bit_prop,
3288 x86_cpu_set_bit_prop,
3289 x86_cpu_release_bit_prop, fp, &error_abort);
3290 }
3291 }
3292
3293 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3294 FeatureWord w,
3295 int bitnr)
3296 {
3297 Object *obj = OBJECT(cpu);
3298 int i;
3299 char **names;
3300 FeatureWordInfo *fi = &feature_word_info[w];
3301
3302 if (!fi->feat_names[bitnr]) {
3303 return;
3304 }
3305
3306 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3307
3308 feat2prop(names[0]);
3309 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3310
3311 for (i = 1; names[i]; i++) {
3312 feat2prop(names[i]);
3313 object_property_add_alias(obj, names[i], obj, names[0],
3314 &error_abort);
3315 }
3316
3317 g_strfreev(names);
3318 }
3319
/* QOM instance_init for TYPE_X86_CPU: register per-instance properties and
 * load the defaults of the concrete CPU model class.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    /* CPUID version fields and identification strings: */
    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only views of the requested vs. host-filtered feature words: */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* Register one boolean property per named feature bit ("sse4.2", ...). */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Apply the model defaults last, after all properties exist. */
    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3367
3368 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3369 {
3370 X86CPU *cpu = X86_CPU(cs);
3371
3372 return cpu->apic_id;
3373 }
3374
3375 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3376 {
3377 X86CPU *cpu = X86_CPU(cs);
3378
3379 return cpu->env.cr[0] & CR0_PG_MASK;
3380 }
3381
3382 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3383 {
3384 X86CPU *cpu = X86_CPU(cs);
3385
3386 cpu->env.eip = value;
3387 }
3388
3389 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3390 {
3391 X86CPU *cpu = X86_CPU(cs);
3392
3393 cpu->env.eip = tb->pc - tb->cs_base;
3394 }
3395
3396 static bool x86_cpu_has_work(CPUState *cs)
3397 {
3398 X86CPU *cpu = X86_CPU(cs);
3399 CPUX86State *env = &cpu->env;
3400
3401 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3402 CPU_INTERRUPT_POLL)) &&
3403 (env->eflags & IF_MASK)) ||
3404 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3405 CPU_INTERRUPT_INIT |
3406 CPU_INTERRUPT_SIPI |
3407 CPU_INTERRUPT_MCE)) ||
3408 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3409 !(env->hflags & HF_SMM_MASK));
3410 }
3411
/* qdev properties common to all X86CPU subclasses. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* System emulation: topology IDs start unassigned and must be set. */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments (KVM guests running Windows): */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* CPUID feature checking/enforcement policy: */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* CPUID level limits; UINT32_MAX means "derive from min-*level". */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_END_OF_LIST()
};
3455
/* class_init for the abstract TYPE_X86_CPU: wire up DeviceClass/CPUClass
 * hooks shared by every x86 CPU model.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain up: keep the parent hooks so realize/reset can call them. */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System-emulation-only hooks: memory introspection, dumps, migration. */
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    dc->cannot_instantiate_with_device_add_yet = false;
    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
3508
/* Abstract base QOM type for all x86 CPU models. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,  /* only concrete model subclasses are instantiable */
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3518
3519 static void x86_cpu_register_types(void)
3520 {
3521 int i;
3522
3523 type_register_static(&x86_cpu_type_info);
3524 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3525 x86_register_cpudef_type(&builtin_x86_defs[i]);
3526 }
3527 #ifdef CONFIG_KVM
3528 type_register_static(&host_x86_cpu_type_info);
3529 #endif
3530 }
3531
3532 type_init(x86_cpu_register_types)