]> git.proxmox.com Git - mirror_qemu.git/blob - target-i386/cpu.c
target-i386: Make plus_features/minus_features QOM-based
[mirror_qemu.git] / target-i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
/* Cache topology CPUID constants: */

/* CPUID Leaf 2 Descriptors
 *
 * One-byte descriptor values returned by CPUID[EAX=2]; each byte encodes
 * a fixed cache geometry from Intel's descriptor table.
 */

#define CPUID_2_L1D_32KB_8WAY_64B 0x2c
#define CPUID_2_L1I_32KB_8WAY_64B 0x30
#define CPUID_2_L2_2MB_8WAY_64B 0x7d
#define CPUID_2_L3_16MB_16WAY_64B 0x4d


/* CPUID Leaf 4 constants: */

/* EAX: */
/* Cache type field values: */
#define CPUID_4_TYPE_DCACHE 1
#define CPUID_4_TYPE_ICACHE 2
#define CPUID_4_TYPE_UNIFIED 3

/* Cache level is reported shifted into bits 7:5 of EAX */
#define CPUID_4_LEVEL(l) ((l) << 5)

#define CPUID_4_SELF_INIT_LEVEL (1 << 8)
#define CPUID_4_FULLY_ASSOC (1 << 9)

/* EDX: */
#define CPUID_4_NO_INVD_SHARING (1 << 0)
#define CPUID_4_INCLUSIVE (1 << 1)
#define CPUID_4_COMPLEX_IDX (1 << 2)
79
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps an associativity count to the 4-bit field value defined by AMD's
 * CPUID Specification; counts not in the table encode as 0 (invalid).
 * Every use of the argument is parenthesized so that expression arguments
 * (e.g. conditionals) group correctly; note the argument is still
 * evaluated multiple times, so pass only side-effect-free expressions.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)  : \
                          (a) ==   2 ? 0x2 : \
                          (a) ==   4 ? 0x4 : \
                          (a) ==   8 ? 0x6 : \
                          (a) ==  16 ? 0x8 : \
                          (a) ==  32 ? 0xA : \
                          (a) ==  48 ? 0xB : \
                          (a) ==  64 ? 0xC : \
                          (a) ==  96 ? 0xD : \
                          (a) == 128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
95
96
/* Definitions of the hardcoded cache entries we expose:
 * (Intel-style geometry goes out via CPUID leaves 2 and 4; the *_AMD
 * variants feed leaves 0x80000005/0x80000006 — see the FIXMEs about the
 * two encodings being mutually inconsistent.)
 */

/* L1 data cache: */
#define L1D_LINE_SIZE 64
#define L1D_ASSOCIATIVITY 8
#define L1D_SETS 64
#define L1D_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1D_LINES_PER_TAG 1
#define L1D_SIZE_KB_AMD 64
#define L1D_ASSOCIATIVITY_AMD 2

/* L1 instruction cache: */
#define L1I_LINE_SIZE 64
#define L1I_ASSOCIATIVITY 8
#define L1I_SETS 64
#define L1I_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
#define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
#define L1I_LINES_PER_TAG 1
#define L1I_SIZE_KB_AMD 64
#define L1I_ASSOCIATIVITY_AMD 2

/* Level 2 unified cache: */
#define L2_LINE_SIZE 64
#define L2_ASSOCIATIVITY 16
#define L2_SETS 4096
#define L2_PARTITIONS 1
/* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
#define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
#define L2_LINES_PER_TAG 1
#define L2_SIZE_KB_AMD 512

/* Level 3 unified cache: */
/* The plain L3_* values describe a disabled L3; the L3_N_* values describe
 * a 16MiB 16-way L3 — presumably used by models that do enable an L3;
 * confirm against the users of L3_N_*. */
#define L3_SIZE_KB 0 /* disabled */
#define L3_ASSOCIATIVITY 0 /* disabled */
#define L3_LINES_PER_TAG 0 /* disabled */
#define L3_LINE_SIZE 0 /* disabled */
#define L3_N_LINE_SIZE 64
#define L3_N_ASSOCIATIVITY 16
#define L3_N_SETS 16384
#define L3_N_PARTITIONS 1
#define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
#define L3_N_LINES_PER_TAG 1
#define L3_N_SIZE_KB_AMD 16384

/* TLB definitions: */

#define L1_DTLB_2M_ASSOC 1
#define L1_DTLB_2M_ENTRIES 255
#define L1_DTLB_4K_ASSOC 1
#define L1_DTLB_4K_ENTRIES 255

#define L1_ITLB_2M_ASSOC 1
#define L1_ITLB_2M_ENTRIES 255
#define L1_ITLB_4K_ASSOC 1
#define L1_ITLB_4K_ENTRIES 255

#define L2_DTLB_2M_ASSOC 0 /* disabled */
#define L2_DTLB_2M_ENTRIES 0 /* disabled */
#define L2_DTLB_4K_ASSOC 4
#define L2_DTLB_4K_ENTRIES 512

#define L2_ITLB_2M_ASSOC 0 /* disabled */
#define L2_ITLB_2M_ENTRIES 0 /* disabled */
#define L2_ITLB_4K_ASSOC 4
#define L2_ITLB_4K_ENTRIES 512
169
170
171
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
174 {
175 int i;
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
180 }
181 dst[CPUID_VENDOR_SZ] = '\0';
182 }
183
/* Feature-set building blocks for the built-in CPU models, plus the
 * TCG_*_FEATURES masks describing which bits of each feature word the TCG
 * backend can emulate (bits missing or only partly implemented are listed
 * in the adjacent comments).
 */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
247
/* Describes one 32-bit CPUID feature word: where it comes from in CPUID
 * space, the name of each bit, and QEMU's knowledge about TCG support and
 * migratability of the bits.
 */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];  /* indexed by bit position; NULL = unnamed */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
} FeatureWordInfo;
263
/* Master table of every feature word, indexed by FeatureWord.  A named bit
 * is user-settable and (per x86_cpu_get_migratable_flags) implicitly
 * migratable unless listed in unmigratable_flags.
 */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
            "ds_cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1|sse4_1",
            "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx|xd", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb", "rdtscp",
            NULL, "lm|i64", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf_lm", "cmp_legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid_msr",
            NULL, "tbm", "topoext", "perfctr_core",
            "perfctr_nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    /* VIA/Centaur padlock features: */
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = {
            /* NOTE: bits 0 and 3 intentionally share the "kvmclock" name --
             * verify against the kernel's KVM_FEATURE_* definitions. */
            "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
            "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    /* Hyper-V enlightenment words: bits are tracked but have no
     * user-settable names (all feat_names entries are NULL). */
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm_lock", "nrip_save",
            "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause_filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc_adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", NULL, "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    /* XSAVE component bitmap (CPUID[EAX=0xD,ECX=0].EAX:EDX): no names,
     * managed internally; only the listed components are migratable. */
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};
510
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Map each R_* register index to its printable name and QAPI enum value.
 * The table order follows the R_* index values, not alphabetical order.
 */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
531
/* One extended XSAVE state component. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;  /* CPUID feature word and bits enabling it */
    uint32_t offset, size;   /* location and length within X86XSaveArea */
} ExtSaveArea;

/* XSAVE state components, indexed by XSTATE_*_BIT.  Components 0 (x87)
 * and 1 (SSE) have no entries here: they live in the legacy save area
 * (see xsave_area_size).
 */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
567
568 static uint32_t xsave_area_size(uint64_t mask)
569 {
570 int i;
571 uint64_t ret = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader);
572
573 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
574 const ExtSaveArea *esa = &x86_ext_save_areas[i];
575 if ((mask >> i) & 1) {
576 ret = MAX(ret, esa->offset + esa->size);
577 }
578 }
579 return ret;
580 }
581
582 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
583 {
584 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
585 cpu->env.features[FEAT_XSAVE_COMP_LO];
586 }
587
588 const char *get_register_name_32(unsigned int reg)
589 {
590 if (reg >= CPU_NB_REGS32) {
591 return NULL;
592 }
593 return x86_reg_info_32[reg].name;
594 }
595
596 /*
597 * Returns the set of feature flags that are supported and migratable by
598 * QEMU, for a given FeatureWord.
599 */
600 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
601 {
602 FeatureWordInfo *wi = &feature_word_info[w];
603 uint32_t r = 0;
604 int i;
605
606 for (i = 0; i < 32; i++) {
607 uint32_t f = 1U << i;
608
609 /* If the feature name is known, it is implicitly considered migratable,
610 * unless it is explicitly set in unmigratable_flags */
611 if ((wi->migratable_flags & f) ||
612 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
613 r |= f;
614 }
615 }
616 return r;
617 }
618
/* Execute the CPUID instruction on the host CPU and return its results.
 *
 * @function: value loaded into EAX (the CPUID leaf)
 * @count:    value loaded into ECX (the sub-leaf, for leaves that use one)
 * @eax/@ebx/@ecx/@edx: output pointers; any may be NULL if the caller
 *                      does not need that register.
 *
 * Only meaningful on x86 hosts; aborts on any other architecture.
 * Uses __asm__ (not the bare "asm" keyword) so the file also builds in
 * strict ISO C modes.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    __asm__ volatile("cpuid"
                     : "=a"(vec[0]), "=b"(vec[1]),
                       "=c"(vec[2]), "=d"(vec[3])
                     : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* Save/restore all registers with pusha/popa and store the outputs
     * through %esi instead of using register output constraints —
     * presumably because EBX may be reserved (PIC register) on i386;
     * confirm before simplifying. */
    __asm__ volatile("pusha \n\t"
                     "cpuid \n\t"
                     "mov %%eax, 0(%2) \n\t"
                     "mov %%ebx, 4(%2) \n\t"
                     "mov %%ecx, 8(%2) \n\t"
                     "mov %%edx, 12(%2) \n\t"
                     "popa"
                     : : "a"(function), "c"(count), "S"(vec)
                     : "memory", "cc");
#else
    abort();
#endif

    if (eax) {
        *eax = vec[0];
    }
    if (ebx) {
        *ebx = vec[1];
    }
    if (ecx) {
        *ecx = vec[2];
    }
    if (edx) {
        *edx = vec[3];
    }
}
652
/* CPU class name definitions: */

/* A CPU model "foo" is registered as QOM type "foo-" TYPE_X86_CPU. */
#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    /* e.g. "qemu64" -> "qemu64" X86_CPU_TYPE_SUFFIX */
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
665
666 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
667 {
668 ObjectClass *oc;
669 char *typename;
670
671 if (cpu_model == NULL) {
672 return NULL;
673 }
674
675 typename = x86_cpu_type_name(cpu_model);
676 oc = object_class_by_name(typename);
677 g_free(typename);
678 return oc;
679 }
680
681 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
682 {
683 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
684 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
685 return g_strndup(class_name,
686 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
687 }
688
/* Static definition of a built-in CPU model: CPUID identification values
 * plus the default contents of every feature word for the model. */
struct X86CPUDefinition {
    const char *name;   /* model name, e.g. "qemu64" */
    uint32_t level;     /* highest basic CPUID leaf advertised */
    uint32_t xlevel;    /* highest extended (0x8000xxxx) CPUID leaf advertised */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;  /* default feature bits, per feature word */
    char model_id[48];  /* human-readable model string */
};
701
702 static X86CPUDefinition builtin_x86_defs[] = {
703 {
704 .name = "qemu64",
705 .level = 0xd,
706 .vendor = CPUID_VENDOR_AMD,
707 .family = 6,
708 .model = 6,
709 .stepping = 3,
710 .features[FEAT_1_EDX] =
711 PPRO_FEATURES |
712 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
713 CPUID_PSE36,
714 .features[FEAT_1_ECX] =
715 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
716 .features[FEAT_8000_0001_EDX] =
717 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
718 .features[FEAT_8000_0001_ECX] =
719 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
720 .xlevel = 0x8000000A,
721 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
722 },
723 {
724 .name = "phenom",
725 .level = 5,
726 .vendor = CPUID_VENDOR_AMD,
727 .family = 16,
728 .model = 2,
729 .stepping = 3,
730 /* Missing: CPUID_HT */
731 .features[FEAT_1_EDX] =
732 PPRO_FEATURES |
733 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
734 CPUID_PSE36 | CPUID_VME,
735 .features[FEAT_1_ECX] =
736 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
737 CPUID_EXT_POPCNT,
738 .features[FEAT_8000_0001_EDX] =
739 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
740 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
741 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
742 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
743 CPUID_EXT3_CR8LEG,
744 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
745 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
746 .features[FEAT_8000_0001_ECX] =
747 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
748 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
749 /* Missing: CPUID_SVM_LBRV */
750 .features[FEAT_SVM] =
751 CPUID_SVM_NPT,
752 .xlevel = 0x8000001A,
753 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
754 },
755 {
756 .name = "core2duo",
757 .level = 10,
758 .vendor = CPUID_VENDOR_INTEL,
759 .family = 6,
760 .model = 15,
761 .stepping = 11,
762 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
763 .features[FEAT_1_EDX] =
764 PPRO_FEATURES |
765 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
766 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
767 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
768 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
769 .features[FEAT_1_ECX] =
770 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
771 CPUID_EXT_CX16,
772 .features[FEAT_8000_0001_EDX] =
773 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
774 .features[FEAT_8000_0001_ECX] =
775 CPUID_EXT3_LAHF_LM,
776 .xlevel = 0x80000008,
777 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
778 },
779 {
780 .name = "kvm64",
781 .level = 0xd,
782 .vendor = CPUID_VENDOR_INTEL,
783 .family = 15,
784 .model = 6,
785 .stepping = 1,
786 /* Missing: CPUID_HT */
787 .features[FEAT_1_EDX] =
788 PPRO_FEATURES | CPUID_VME |
789 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
790 CPUID_PSE36,
791 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
792 .features[FEAT_1_ECX] =
793 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
794 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
795 .features[FEAT_8000_0001_EDX] =
796 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
797 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
798 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
799 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
800 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
801 .features[FEAT_8000_0001_ECX] =
802 0,
803 .xlevel = 0x80000008,
804 .model_id = "Common KVM processor"
805 },
806 {
807 .name = "qemu32",
808 .level = 4,
809 .vendor = CPUID_VENDOR_INTEL,
810 .family = 6,
811 .model = 6,
812 .stepping = 3,
813 .features[FEAT_1_EDX] =
814 PPRO_FEATURES,
815 .features[FEAT_1_ECX] =
816 CPUID_EXT_SSE3,
817 .xlevel = 0x80000004,
818 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
819 },
820 {
821 .name = "kvm32",
822 .level = 5,
823 .vendor = CPUID_VENDOR_INTEL,
824 .family = 15,
825 .model = 6,
826 .stepping = 1,
827 .features[FEAT_1_EDX] =
828 PPRO_FEATURES | CPUID_VME |
829 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
830 .features[FEAT_1_ECX] =
831 CPUID_EXT_SSE3,
832 .features[FEAT_8000_0001_ECX] =
833 0,
834 .xlevel = 0x80000008,
835 .model_id = "Common 32-bit KVM processor"
836 },
837 {
838 .name = "coreduo",
839 .level = 10,
840 .vendor = CPUID_VENDOR_INTEL,
841 .family = 6,
842 .model = 14,
843 .stepping = 8,
844 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
845 .features[FEAT_1_EDX] =
846 PPRO_FEATURES | CPUID_VME |
847 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
848 CPUID_SS,
849 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
850 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
851 .features[FEAT_1_ECX] =
852 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
853 .features[FEAT_8000_0001_EDX] =
854 CPUID_EXT2_NX,
855 .xlevel = 0x80000008,
856 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
857 },
858 {
859 .name = "486",
860 .level = 1,
861 .vendor = CPUID_VENDOR_INTEL,
862 .family = 4,
863 .model = 8,
864 .stepping = 0,
865 .features[FEAT_1_EDX] =
866 I486_FEATURES,
867 .xlevel = 0,
868 },
869 {
870 .name = "pentium",
871 .level = 1,
872 .vendor = CPUID_VENDOR_INTEL,
873 .family = 5,
874 .model = 4,
875 .stepping = 3,
876 .features[FEAT_1_EDX] =
877 PENTIUM_FEATURES,
878 .xlevel = 0,
879 },
880 {
881 .name = "pentium2",
882 .level = 2,
883 .vendor = CPUID_VENDOR_INTEL,
884 .family = 6,
885 .model = 5,
886 .stepping = 2,
887 .features[FEAT_1_EDX] =
888 PENTIUM2_FEATURES,
889 .xlevel = 0,
890 },
891 {
892 .name = "pentium3",
893 .level = 3,
894 .vendor = CPUID_VENDOR_INTEL,
895 .family = 6,
896 .model = 7,
897 .stepping = 3,
898 .features[FEAT_1_EDX] =
899 PENTIUM3_FEATURES,
900 .xlevel = 0,
901 },
902 {
903 .name = "athlon",
904 .level = 2,
905 .vendor = CPUID_VENDOR_AMD,
906 .family = 6,
907 .model = 2,
908 .stepping = 3,
909 .features[FEAT_1_EDX] =
910 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
911 CPUID_MCA,
912 .features[FEAT_8000_0001_EDX] =
913 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
914 .xlevel = 0x80000008,
915 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
916 },
917 {
918 .name = "n270",
919 .level = 10,
920 .vendor = CPUID_VENDOR_INTEL,
921 .family = 6,
922 .model = 28,
923 .stepping = 2,
924 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
925 .features[FEAT_1_EDX] =
926 PPRO_FEATURES |
927 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
928 CPUID_ACPI | CPUID_SS,
929 /* Some CPUs got no CPUID_SEP */
930 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
931 * CPUID_EXT_XTPR */
932 .features[FEAT_1_ECX] =
933 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
934 CPUID_EXT_MOVBE,
935 .features[FEAT_8000_0001_EDX] =
936 CPUID_EXT2_NX,
937 .features[FEAT_8000_0001_ECX] =
938 CPUID_EXT3_LAHF_LM,
939 .xlevel = 0x80000008,
940 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
941 },
942 {
943 .name = "Conroe",
944 .level = 10,
945 .vendor = CPUID_VENDOR_INTEL,
946 .family = 6,
947 .model = 15,
948 .stepping = 3,
949 .features[FEAT_1_EDX] =
950 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
951 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
952 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
953 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
954 CPUID_DE | CPUID_FP87,
955 .features[FEAT_1_ECX] =
956 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
957 .features[FEAT_8000_0001_EDX] =
958 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
959 .features[FEAT_8000_0001_ECX] =
960 CPUID_EXT3_LAHF_LM,
961 .xlevel = 0x80000008,
962 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
963 },
964 {
965 .name = "Penryn",
966 .level = 10,
967 .vendor = CPUID_VENDOR_INTEL,
968 .family = 6,
969 .model = 23,
970 .stepping = 3,
971 .features[FEAT_1_EDX] =
972 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
973 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
974 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
975 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
976 CPUID_DE | CPUID_FP87,
977 .features[FEAT_1_ECX] =
978 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
979 CPUID_EXT_SSE3,
980 .features[FEAT_8000_0001_EDX] =
981 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
982 .features[FEAT_8000_0001_ECX] =
983 CPUID_EXT3_LAHF_LM,
984 .xlevel = 0x80000008,
985 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
986 },
987 {
988 .name = "Nehalem",
989 .level = 11,
990 .vendor = CPUID_VENDOR_INTEL,
991 .family = 6,
992 .model = 26,
993 .stepping = 3,
994 .features[FEAT_1_EDX] =
995 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
996 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
997 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
998 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
999 CPUID_DE | CPUID_FP87,
1000 .features[FEAT_1_ECX] =
1001 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1002 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1003 .features[FEAT_8000_0001_EDX] =
1004 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1005 .features[FEAT_8000_0001_ECX] =
1006 CPUID_EXT3_LAHF_LM,
1007 .xlevel = 0x80000008,
1008 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1009 },
1010 {
1011 .name = "Westmere",
1012 .level = 11,
1013 .vendor = CPUID_VENDOR_INTEL,
1014 .family = 6,
1015 .model = 44,
1016 .stepping = 1,
1017 .features[FEAT_1_EDX] =
1018 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1019 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1020 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1021 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1022 CPUID_DE | CPUID_FP87,
1023 .features[FEAT_1_ECX] =
1024 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1025 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1026 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1027 .features[FEAT_8000_0001_EDX] =
1028 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1029 .features[FEAT_8000_0001_ECX] =
1030 CPUID_EXT3_LAHF_LM,
1031 .features[FEAT_6_EAX] =
1032 CPUID_6_EAX_ARAT,
1033 .xlevel = 0x80000008,
1034 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1035 },
1036 {
1037 .name = "SandyBridge",
1038 .level = 0xd,
1039 .vendor = CPUID_VENDOR_INTEL,
1040 .family = 6,
1041 .model = 42,
1042 .stepping = 1,
1043 .features[FEAT_1_EDX] =
1044 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1045 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1046 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1047 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1048 CPUID_DE | CPUID_FP87,
1049 .features[FEAT_1_ECX] =
1050 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1051 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1052 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1053 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1054 CPUID_EXT_SSE3,
1055 .features[FEAT_8000_0001_EDX] =
1056 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1057 CPUID_EXT2_SYSCALL,
1058 .features[FEAT_8000_0001_ECX] =
1059 CPUID_EXT3_LAHF_LM,
1060 .features[FEAT_XSAVE] =
1061 CPUID_XSAVE_XSAVEOPT,
1062 .features[FEAT_6_EAX] =
1063 CPUID_6_EAX_ARAT,
1064 .xlevel = 0x80000008,
1065 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1066 },
1067 {
1068 .name = "IvyBridge",
1069 .level = 0xd,
1070 .vendor = CPUID_VENDOR_INTEL,
1071 .family = 6,
1072 .model = 58,
1073 .stepping = 9,
1074 .features[FEAT_1_EDX] =
1075 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1076 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1077 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1078 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1079 CPUID_DE | CPUID_FP87,
1080 .features[FEAT_1_ECX] =
1081 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1082 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1083 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1084 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1085 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1086 .features[FEAT_7_0_EBX] =
1087 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1088 CPUID_7_0_EBX_ERMS,
1089 .features[FEAT_8000_0001_EDX] =
1090 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1091 CPUID_EXT2_SYSCALL,
1092 .features[FEAT_8000_0001_ECX] =
1093 CPUID_EXT3_LAHF_LM,
1094 .features[FEAT_XSAVE] =
1095 CPUID_XSAVE_XSAVEOPT,
1096 .features[FEAT_6_EAX] =
1097 CPUID_6_EAX_ARAT,
1098 .xlevel = 0x80000008,
1099 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1100 },
1101 {
1102 .name = "Haswell-noTSX",
1103 .level = 0xd,
1104 .vendor = CPUID_VENDOR_INTEL,
1105 .family = 6,
1106 .model = 60,
1107 .stepping = 1,
1108 .features[FEAT_1_EDX] =
1109 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1110 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1111 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1112 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1113 CPUID_DE | CPUID_FP87,
1114 .features[FEAT_1_ECX] =
1115 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1116 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1117 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1118 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1119 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1120 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1121 .features[FEAT_8000_0001_EDX] =
1122 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1123 CPUID_EXT2_SYSCALL,
1124 .features[FEAT_8000_0001_ECX] =
1125 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1126 .features[FEAT_7_0_EBX] =
1127 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1128 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1129 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1130 .features[FEAT_XSAVE] =
1131 CPUID_XSAVE_XSAVEOPT,
1132 .features[FEAT_6_EAX] =
1133 CPUID_6_EAX_ARAT,
1134 .xlevel = 0x80000008,
1135 .model_id = "Intel Core Processor (Haswell, no TSX)",
1136 }, {
1137 .name = "Haswell",
1138 .level = 0xd,
1139 .vendor = CPUID_VENDOR_INTEL,
1140 .family = 6,
1141 .model = 60,
1142 .stepping = 1,
1143 .features[FEAT_1_EDX] =
1144 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1145 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1146 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1147 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1148 CPUID_DE | CPUID_FP87,
1149 .features[FEAT_1_ECX] =
1150 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1151 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1152 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1153 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1154 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1155 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1156 .features[FEAT_8000_0001_EDX] =
1157 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1158 CPUID_EXT2_SYSCALL,
1159 .features[FEAT_8000_0001_ECX] =
1160 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1161 .features[FEAT_7_0_EBX] =
1162 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1163 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1164 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1165 CPUID_7_0_EBX_RTM,
1166 .features[FEAT_XSAVE] =
1167 CPUID_XSAVE_XSAVEOPT,
1168 .features[FEAT_6_EAX] =
1169 CPUID_6_EAX_ARAT,
1170 .xlevel = 0x80000008,
1171 .model_id = "Intel Core Processor (Haswell)",
1172 },
1173 {
1174 .name = "Broadwell-noTSX",
1175 .level = 0xd,
1176 .vendor = CPUID_VENDOR_INTEL,
1177 .family = 6,
1178 .model = 61,
1179 .stepping = 2,
1180 .features[FEAT_1_EDX] =
1181 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1182 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1183 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1184 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1185 CPUID_DE | CPUID_FP87,
1186 .features[FEAT_1_ECX] =
1187 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1188 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1189 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1190 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1191 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1192 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1193 .features[FEAT_8000_0001_EDX] =
1194 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1195 CPUID_EXT2_SYSCALL,
1196 .features[FEAT_8000_0001_ECX] =
1197 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1198 .features[FEAT_7_0_EBX] =
1199 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1200 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1201 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1202 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1203 CPUID_7_0_EBX_SMAP,
1204 .features[FEAT_XSAVE] =
1205 CPUID_XSAVE_XSAVEOPT,
1206 .features[FEAT_6_EAX] =
1207 CPUID_6_EAX_ARAT,
1208 .xlevel = 0x80000008,
1209 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1210 },
1211 {
1212 .name = "Broadwell",
1213 .level = 0xd,
1214 .vendor = CPUID_VENDOR_INTEL,
1215 .family = 6,
1216 .model = 61,
1217 .stepping = 2,
1218 .features[FEAT_1_EDX] =
1219 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1220 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1221 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1222 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1223 CPUID_DE | CPUID_FP87,
1224 .features[FEAT_1_ECX] =
1225 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1226 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1227 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1228 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1229 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1230 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1231 .features[FEAT_8000_0001_EDX] =
1232 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1233 CPUID_EXT2_SYSCALL,
1234 .features[FEAT_8000_0001_ECX] =
1235 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1236 .features[FEAT_7_0_EBX] =
1237 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1238 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1239 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1240 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1241 CPUID_7_0_EBX_SMAP,
1242 .features[FEAT_XSAVE] =
1243 CPUID_XSAVE_XSAVEOPT,
1244 .features[FEAT_6_EAX] =
1245 CPUID_6_EAX_ARAT,
1246 .xlevel = 0x80000008,
1247 .model_id = "Intel Core Processor (Broadwell)",
1248 },
1249 {
1250 .name = "Skylake-Client",
1251 .level = 0xd,
1252 .vendor = CPUID_VENDOR_INTEL,
1253 .family = 6,
1254 .model = 94,
1255 .stepping = 3,
1256 .features[FEAT_1_EDX] =
1257 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1258 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1259 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1260 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1261 CPUID_DE | CPUID_FP87,
1262 .features[FEAT_1_ECX] =
1263 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1264 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1265 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1266 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1267 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1268 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1269 .features[FEAT_8000_0001_EDX] =
1270 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1271 CPUID_EXT2_SYSCALL,
1272 .features[FEAT_8000_0001_ECX] =
1273 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1274 .features[FEAT_7_0_EBX] =
1275 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1276 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1277 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1278 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1279 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1280 /* Missing: XSAVES (not supported by some Linux versions,
1281 * including v4.1 to v4.6).
1282 * KVM doesn't yet expose any XSAVES state save component,
1283 * and the only one defined in Skylake (processor tracing)
1284 * probably will block migration anyway.
1285 */
1286 .features[FEAT_XSAVE] =
1287 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1288 CPUID_XSAVE_XGETBV1,
1289 .features[FEAT_6_EAX] =
1290 CPUID_6_EAX_ARAT,
1291 .xlevel = 0x80000008,
1292 .model_id = "Intel Core Processor (Skylake)",
1293 },
1294 {
1295 .name = "Opteron_G1",
1296 .level = 5,
1297 .vendor = CPUID_VENDOR_AMD,
1298 .family = 15,
1299 .model = 6,
1300 .stepping = 1,
1301 .features[FEAT_1_EDX] =
1302 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1303 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1304 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1305 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1306 CPUID_DE | CPUID_FP87,
1307 .features[FEAT_1_ECX] =
1308 CPUID_EXT_SSE3,
1309 .features[FEAT_8000_0001_EDX] =
1310 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1311 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1312 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1313 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1314 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1315 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1316 .xlevel = 0x80000008,
1317 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1318 },
1319 {
1320 .name = "Opteron_G2",
1321 .level = 5,
1322 .vendor = CPUID_VENDOR_AMD,
1323 .family = 15,
1324 .model = 6,
1325 .stepping = 1,
1326 .features[FEAT_1_EDX] =
1327 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1328 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1329 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1330 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1331 CPUID_DE | CPUID_FP87,
1332 .features[FEAT_1_ECX] =
1333 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1334 /* Missing: CPUID_EXT2_RDTSCP */
1335 .features[FEAT_8000_0001_EDX] =
1336 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1337 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1338 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1339 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1340 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1341 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1342 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1343 .features[FEAT_8000_0001_ECX] =
1344 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1345 .xlevel = 0x80000008,
1346 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1347 },
1348 {
1349 .name = "Opteron_G3",
1350 .level = 5,
1351 .vendor = CPUID_VENDOR_AMD,
1352 .family = 16,
1353 .model = 2,
1354 .stepping = 3,
1355 .features[FEAT_1_EDX] =
1356 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1357 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1358 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1359 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1360 CPUID_DE | CPUID_FP87,
1361 .features[FEAT_1_ECX] =
1362 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1363 CPUID_EXT_SSE3,
1364 /* Missing: CPUID_EXT2_RDTSCP */
1365 .features[FEAT_8000_0001_EDX] =
1366 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1367 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1368 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1369 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1370 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1371 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1372 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1373 .features[FEAT_8000_0001_ECX] =
1374 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1375 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1376 .xlevel = 0x80000008,
1377 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1378 },
1379 {
1380 .name = "Opteron_G4",
1381 .level = 0xd,
1382 .vendor = CPUID_VENDOR_AMD,
1383 .family = 21,
1384 .model = 1,
1385 .stepping = 2,
1386 .features[FEAT_1_EDX] =
1387 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1388 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1389 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1390 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1391 CPUID_DE | CPUID_FP87,
1392 .features[FEAT_1_ECX] =
1393 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1394 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1395 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1396 CPUID_EXT_SSE3,
1397 /* Missing: CPUID_EXT2_RDTSCP */
1398 .features[FEAT_8000_0001_EDX] =
1399 CPUID_EXT2_LM |
1400 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1401 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1402 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1403 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1404 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1405 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1406 .features[FEAT_8000_0001_ECX] =
1407 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1408 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1409 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1410 CPUID_EXT3_LAHF_LM,
1411 /* no xsaveopt! */
1412 .xlevel = 0x8000001A,
1413 .model_id = "AMD Opteron 62xx class CPU",
1414 },
1415 {
1416 .name = "Opteron_G5",
1417 .level = 0xd,
1418 .vendor = CPUID_VENDOR_AMD,
1419 .family = 21,
1420 .model = 2,
1421 .stepping = 0,
1422 .features[FEAT_1_EDX] =
1423 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1424 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1425 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1426 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1427 CPUID_DE | CPUID_FP87,
1428 .features[FEAT_1_ECX] =
1429 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1430 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1431 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1432 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1433 /* Missing: CPUID_EXT2_RDTSCP */
1434 .features[FEAT_8000_0001_EDX] =
1435 CPUID_EXT2_LM |
1436 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1437 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1438 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1439 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1440 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1441 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1442 .features[FEAT_8000_0001_ECX] =
1443 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1444 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1445 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1446 CPUID_EXT3_LAHF_LM,
1447 /* no xsaveopt! */
1448 .xlevel = 0x8000001A,
1449 .model_id = "AMD Opteron 63xx class CPU",
1450 },
1451 };
1452
/* A (property name, property value) pair; used for the NULL-terminated
 * default-property tables below.
 */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1456
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 *
 * NULL-terminated table of (property, value) pairs.  Entries can be
 * overridden before CPU creation via x86_cpu_change_kvm_default().
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },
};
1473
/* TCG-specific defaults that override all CPU models when using TCG
 * (NULL-terminated; same format as kvm_default_props).
 */
static PropValue tcg_default_props[] = {
    { "vme", "off" },  /* presumably because TCG lacks VME emulation — confirm */
    { NULL, NULL },
};
1480
1481
1482 void x86_cpu_change_kvm_default(const char *prop, const char *value)
1483 {
1484 PropValue *pv;
1485 for (pv = kvm_default_props; pv->prop; pv++) {
1486 if (!strcmp(pv->prop, prop)) {
1487 pv->value = value;
1488 break;
1489 }
1490 }
1491
1492 /* It is valid to call this function only for properties that
1493 * are already present in the kvm_default_props table.
1494 */
1495 assert(pv->prop);
1496 }
1497
/* Forward declaration; defined later in this file */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only);
1500
1501 #ifdef CONFIG_KVM
1502
1503 static bool lmce_supported(void)
1504 {
1505 uint64_t mce_cap;
1506
1507 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1508 return false;
1509 }
1510
1511 return !!(mce_cap & MCG_LMCE_P);
1512 }
1513
/* Fill @str (at least 48 bytes) with the host CPU model string.
 *
 * CPUID leaves 0x80000002..0x80000004 each return 16 bytes of the
 * string in EAX, EBX, ECX, EDX.  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        /* The four registers are laid out contiguously in memory order */
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
1528
/* Template definition for the "host" CPU model; vendor/family/model/
 * stepping are filled in by host_x86_cpu_class_init().
 */
static X86CPUDefinition host_cpudef;

static Property host_x86_cpu_properties[] = {
    /* "host" CPUs are migratable by default; cache passthrough is opt-in */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1536
/* class_init for the "host" CPU model
 *
 * Populates host_cpudef from the host's CPUID values.
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    /* "host" is meaningless without KVM */
    xcc->kvm_required = true;

    /* CPUID leaf 0: vendor string is returned in EBX, EDX, ECX order */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* CPUID leaf 1 EAX: decode family (incl. extended-family bits 20-27),
     * model (incl. extended-model bits 16-19) and stepping.
     */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;
    xcc->model_description =
        "KVM processor with all supported host features "
        "(only available in KVM mode)";

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
1572
/* instance_init for the "host" CPU model: copy KVM's supported CPUID
 * level limits and enable host-dependent optional features.
 */
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
    if (kvm_enabled()) {
        /* Highest CPUID leaves KVM supports in the basic (0x0),
         * extended (0x80000000) and 0xC0000000 ranges.
         */
        env->cpuid_min_level =
            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_min_xlevel =
            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_min_xlevel2 =
            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        /* Enable local machine-check if the host kernel supports it */
        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    }

    /* PMU is enabled by default for the "host" model */
    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1600
/* QOM registration for the KVM-only "host" CPU model */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1607
1608 #endif
1609
1610 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1611 {
1612 FeatureWordInfo *f = &feature_word_info[w];
1613 int i;
1614
1615 for (i = 0; i < 32; ++i) {
1616 if ((1UL << i) & mask) {
1617 const char *reg = get_register_name_32(f->cpuid_reg);
1618 assert(reg);
1619 fprintf(stderr, "warning: %s doesn't support requested feature: "
1620 "CPUID.%02XH:%s%s%s [bit %d]\n",
1621 kvm_enabled() ? "host" : "TCG",
1622 f->cpuid_eax, reg,
1623 f->feat_names[i] ? "." : "",
1624 f->feat_names[i] ? f->feat_names[i] : "", i);
1625 }
1626 }
1627 }
1628
1629 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1630 const char *name, void *opaque,
1631 Error **errp)
1632 {
1633 X86CPU *cpu = X86_CPU(obj);
1634 CPUX86State *env = &cpu->env;
1635 int64_t value;
1636
1637 value = (env->cpuid_version >> 8) & 0xf;
1638 if (value == 0xf) {
1639 value += (env->cpuid_version >> 20) & 0xff;
1640 }
1641 visit_type_int(v, name, &value, errp);
1642 }
1643
/* QOM setter for "family": encode the value into the family and
 * extended-family fields of CPUID[1].EAX.
 */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    /* Maximum is base family (0xf) plus extended family (0xff) */
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Clear family (bits 8-11) and extended family (bits 20-27) */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        /* Base family saturates at 0xf; the rest goes in extended family */
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
1673
1674 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1675 const char *name, void *opaque,
1676 Error **errp)
1677 {
1678 X86CPU *cpu = X86_CPU(obj);
1679 CPUX86State *env = &cpu->env;
1680 int64_t value;
1681
1682 value = (env->cpuid_version >> 4) & 0xf;
1683 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1684 visit_type_int(v, name, &value, errp);
1685 }
1686
/* QOM setter for "model": encode the value into the model and
 * extended-model fields of CPUID[1].EAX.
 */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    /* 4-bit model plus 4-bit extended model */
    const int64_t min = 0;
    const int64_t max = 0xff;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* Low nibble -> bits 4-7, high nibble -> extended model (bits 16-19) */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
1712
1713 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1714 const char *name, void *opaque,
1715 Error **errp)
1716 {
1717 X86CPU *cpu = X86_CPU(obj);
1718 CPUX86State *env = &cpu->env;
1719 int64_t value;
1720
1721 value = env->cpuid_version & 0xf;
1722 visit_type_int(v, name, &value, errp);
1723 }
1724
/* QOM setter for "stepping": store the value in the low nibble of
 * CPUID[1].EAX.
 */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    /* Stepping is a 4-bit field */
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
1750
1751 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1752 {
1753 X86CPU *cpu = X86_CPU(obj);
1754 CPUX86State *env = &cpu->env;
1755 char *value;
1756
1757 value = g_malloc(CPUID_VENDOR_SZ + 1);
1758 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1759 env->cpuid_vendor3);
1760 return value;
1761 }
1762
1763 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1764 Error **errp)
1765 {
1766 X86CPU *cpu = X86_CPU(obj);
1767 CPUX86State *env = &cpu->env;
1768 int i;
1769
1770 if (strlen(value) != CPUID_VENDOR_SZ) {
1771 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
1772 return;
1773 }
1774
1775 env->cpuid_vendor1 = 0;
1776 env->cpuid_vendor2 = 0;
1777 env->cpuid_vendor3 = 0;
1778 for (i = 0; i < 4; i++) {
1779 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1780 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1781 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1782 }
1783 }
1784
1785 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1786 {
1787 X86CPU *cpu = X86_CPU(obj);
1788 CPUX86State *env = &cpu->env;
1789 char *value;
1790 int i;
1791
1792 value = g_malloc(48 + 1);
1793 for (i = 0; i < 48; i++) {
1794 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1795 }
1796 value[48] = '\0';
1797 return value;
1798 }
1799
1800 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1801 Error **errp)
1802 {
1803 X86CPU *cpu = X86_CPU(obj);
1804 CPUX86State *env = &cpu->env;
1805 int c, len, i;
1806
1807 if (model_id == NULL) {
1808 model_id = "";
1809 }
1810 len = strlen(model_id);
1811 memset(env->cpuid_model, 0, 48);
1812 for (i = 0; i < 48; i++) {
1813 if (i >= len) {
1814 c = '\0';
1815 } else {
1816 c = (uint8_t)model_id[i];
1817 }
1818 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1819 }
1820 }
1821
1822 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1823 void *opaque, Error **errp)
1824 {
1825 X86CPU *cpu = X86_CPU(obj);
1826 int64_t value;
1827
1828 value = cpu->env.tsc_khz * 1000;
1829 visit_type_int(v, name, &value, errp);
1830 }
1831
/* QOM setter for "tsc-frequency" (in Hz).  Stores the value in kHz;
 * sub-kHz precision is silently truncated.
 */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* user_tsc_khz presumably records that the frequency was set
     * explicitly by the user — confirm against its consumers.
     */
    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
1854
/* Generic getter for "feature-words" and "filtered-features" properties
 *
 * @opaque points at the uint32_t feature-word array to report.  The
 * QAPI list nodes are stack-allocated and threaded together, so the
 * whole structure is only valid for the duration of the visit.
 */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        /* Describe which CPUID leaf/register this word comes from */
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
1883
1884 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1885 void *opaque, Error **errp)
1886 {
1887 X86CPU *cpu = X86_CPU(obj);
1888 int64_t value = cpu->hyperv_spinlock_attempts;
1889
1890 visit_type_int(v, name, &value, errp);
1891 }
1892
/* QOM setter for "hv-spinlocks".
 *
 * NOTE(review): the 0xFFF minimum looks like the smallest retry count
 * the Hyper-V spec allows — confirm against the TLFS.
 */
static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
                                 void *opaque, Error **errp)
{
    const int64_t min = 0xFFF;
    const int64_t max = UINT_MAX;
    X86CPU *cpu = X86_CPU(obj);
    Error *err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
                   object_get_typename(obj), name ? name : "null",
                   value, min, max);
        return;
    }
    cpu->hyperv_spinlock_attempts = value;
}
1917
/* qdev property type for "hv-spinlocks": an int whose valid range is
 * enforced by x86_set_hv_spinlocks().
 */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1923
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 * The string is modified in place.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = s; *p != '\0'; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
1933
/* Compatibility hack to maintain the legacy +feat/-feat semantics, where
 * +feat/-feat overwrites any feature set by feat=on|feat, even if the
 * latter is parsed after +feat/-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled).
 * The queued names are applied during realize, after all other properties.
 */
static GList *plus_features, *minus_features;
1940
1941 /* Parse "+feature,-feature,feature=foo" CPU feature string
1942 */
1943 static void x86_cpu_parse_featurestr(const char *typename, char *features,
1944 Error **errp)
1945 {
1946 char *featurestr; /* Single 'key=value" string being parsed */
1947 Error *local_err = NULL;
1948 static bool cpu_globals_initialized;
1949
1950 if (cpu_globals_initialized) {
1951 return;
1952 }
1953 cpu_globals_initialized = true;
1954
1955 if (!features) {
1956 return;
1957 }
1958
1959 for (featurestr = strtok(features, ",");
1960 featurestr && !local_err;
1961 featurestr = strtok(NULL, ",")) {
1962 const char *name;
1963 const char *val = NULL;
1964 char *eq = NULL;
1965 char num[32];
1966 GlobalProperty *prop;
1967
1968 /* Compatibility syntax: */
1969 if (featurestr[0] == '+') {
1970 plus_features = g_list_append(plus_features,
1971 g_strdup(featurestr + 1));
1972 continue;
1973 } else if (featurestr[0] == '-') {
1974 minus_features = g_list_append(minus_features,
1975 g_strdup(featurestr + 1));
1976 continue;
1977 }
1978
1979 eq = strchr(featurestr, '=');
1980 if (eq) {
1981 *eq++ = 0;
1982 val = eq;
1983 } else {
1984 val = "on";
1985 }
1986
1987 feat2prop(featurestr);
1988 name = featurestr;
1989
1990 /* Special case: */
1991 if (!strcmp(name, "tsc-freq")) {
1992 int64_t tsc_freq;
1993 char *err;
1994
1995 tsc_freq = qemu_strtosz_suffix_unit(val, &err,
1996 QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
1997 if (tsc_freq < 0 || *err) {
1998 error_setg(errp, "bad numerical value %s", val);
1999 return;
2000 }
2001 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
2002 val = num;
2003 name = "tsc-frequency";
2004 }
2005
2006 prop = g_new0(typeof(*prop), 1);
2007 prop->driver = typename;
2008 prop->property = g_strdup(name);
2009 prop->value = g_strdup(val);
2010 prop->errp = &error_fatal;
2011 qdev_prop_register_global(prop);
2012 }
2013
2014 if (local_err) {
2015 error_propagate(errp, local_err);
2016 }
2017 }
2018
2019 /* Print all cpuid feature names in featureset
2020 */
2021 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2022 {
2023 int bit;
2024 bool first = true;
2025
2026 for (bit = 0; bit < 32; bit++) {
2027 if (featureset[bit]) {
2028 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2029 first = false;
2030 }
2031 }
2032 }
2033
2034 /* Sort alphabetically by type name, listing kvm_required models last. */
2035 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
2036 {
2037 ObjectClass *class_a = (ObjectClass *)a;
2038 ObjectClass *class_b = (ObjectClass *)b;
2039 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
2040 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
2041 const char *name_a, *name_b;
2042
2043 if (cc_a->kvm_required != cc_b->kvm_required) {
2044 /* kvm_required items go last */
2045 return cc_a->kvm_required ? 1 : -1;
2046 } else {
2047 name_a = object_class_get_name(class_a);
2048 name_b = object_class_get_name(class_b);
2049 return strcmp(name_a, name_b);
2050 }
2051 }
2052
2053 static GSList *get_sorted_cpu_model_list(void)
2054 {
2055 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
2056 list = g_slist_sort(list, x86_cpu_list_compare);
2057 return list;
2058 }
2059
2060 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
2061 {
2062 ObjectClass *oc = data;
2063 X86CPUClass *cc = X86_CPU_CLASS(oc);
2064 CPUListState *s = user_data;
2065 char *name = x86_cpu_class_get_model_name(cc);
2066 const char *desc = cc->model_description;
2067 if (!desc) {
2068 desc = cc->cpu_def->model_id;
2069 }
2070
2071 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
2072 name, desc);
2073 g_free(name);
2074 }
2075
2076 /* list available CPU models and flags */
2077 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
2078 {
2079 int i;
2080 CPUListState s = {
2081 .file = f,
2082 .cpu_fprintf = cpu_fprintf,
2083 };
2084 GSList *list;
2085
2086 (*cpu_fprintf)(f, "Available CPUs:\n");
2087 list = get_sorted_cpu_model_list();
2088 g_slist_foreach(list, x86_cpu_list_entry, &s);
2089 g_slist_free(list);
2090
2091 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
2092 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
2093 FeatureWordInfo *fw = &feature_word_info[i];
2094
2095 (*cpu_fprintf)(f, " ");
2096 listflags(f, cpu_fprintf, fw->feat_names);
2097 (*cpu_fprintf)(f, "\n");
2098 }
2099 }
2100
2101 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
2102 {
2103 ObjectClass *oc = data;
2104 X86CPUClass *cc = X86_CPU_CLASS(oc);
2105 CpuDefinitionInfoList **cpu_list = user_data;
2106 CpuDefinitionInfoList *entry;
2107 CpuDefinitionInfo *info;
2108
2109 info = g_malloc0(sizeof(*info));
2110 info->name = x86_cpu_class_get_model_name(cc);
2111
2112 entry = g_malloc0(sizeof(*entry));
2113 entry->value = info;
2114 entry->next = *cpu_list;
2115 *cpu_list = entry;
2116 }
2117
2118 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2119 {
2120 CpuDefinitionInfoList *cpu_list = NULL;
2121 GSList *list = get_sorted_cpu_model_list();
2122 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
2123 g_slist_free(list);
2124 return cpu_list;
2125 }
2126
2127 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2128 bool migratable_only)
2129 {
2130 FeatureWordInfo *wi = &feature_word_info[w];
2131 uint32_t r;
2132
2133 if (kvm_enabled()) {
2134 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2135 wi->cpuid_ecx,
2136 wi->cpuid_reg);
2137 } else if (tcg_enabled()) {
2138 r = wi->tcg_features;
2139 } else {
2140 return ~0;
2141 }
2142 if (migratable_only) {
2143 r &= x86_cpu_get_migratable_flags(w);
2144 }
2145 return r;
2146 }
2147
2148 /*
2149 * Filters CPU feature words based on host availability of each feature.
2150 *
2151 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2152 */
2153 static int x86_cpu_filter_features(X86CPU *cpu)
2154 {
2155 CPUX86State *env = &cpu->env;
2156 FeatureWord w;
2157 int rv = 0;
2158
2159 for (w = 0; w < FEATURE_WORDS; w++) {
2160 uint32_t host_feat =
2161 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2162 uint32_t requested_features = env->features[w];
2163 env->features[w] &= host_feat;
2164 cpu->filtered_features[w] = requested_features & ~env->features[w];
2165 if (cpu->filtered_features[w]) {
2166 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2167 report_unavailable_features(w, cpu->filtered_features[w]);
2168 }
2169 rv = 1;
2170 }
2171 }
2172
2173 return rv;
2174 }
2175
2176 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2177 {
2178 PropValue *pv;
2179 for (pv = props; pv->prop; pv++) {
2180 if (!pv->value) {
2181 continue;
2182 }
2183 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2184 &error_abort);
2185 }
2186 }
2187
/* Load data from X86CPUDefinition into a freshly created X86CPU object.
 *
 * Copies the model's CPUID levels, version fields, feature words and
 * vendor string onto @cpu via QOM properties (so user overrides applied
 * later still win), then applies accelerator-specific default tweaks.
 * Errors from the individual property sets accumulate in @errp.
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Feature words are copied directly; +feat/-feat and feat=on|off
     * overrides are applied later, during realize. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        /* Without an in-kernel irqchip, KVM cannot provide x2apic */
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise that we are running under a hypervisor */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        /* Under KVM, default to the host's vendor string (see above) */
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2240
2241 X86CPU *cpu_x86_init(const char *cpu_model)
2242 {
2243 return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
2244 }
2245
2246 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2247 {
2248 X86CPUDefinition *cpudef = data;
2249 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2250
2251 xcc->cpu_def = cpudef;
2252 }
2253
2254 static void x86_register_cpudef_type(X86CPUDefinition *def)
2255 {
2256 char *typename = x86_cpu_type_name(def->name);
2257 TypeInfo ti = {
2258 .name = typename,
2259 .parent = TYPE_X86_CPU,
2260 .class_init = x86_cpu_cpudef_class_init,
2261 .class_data = def,
2262 };
2263
2264 type_register(&ti);
2265 g_free(typename);
2266 }
2267
2268 #if !defined(CONFIG_USER_ONLY)
2269
/* Hide the APIC from the guest by clearing the CPUID[1].EDX APIC bit. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2274
2275 #endif /* !CONFIG_USER_ONLY */
2276
/* Compute the CPUID result for leaf @index / sub-leaf @count.
 *
 * Out-of-range leaves are first clamped to cpuid_level (basic leaves),
 * cpuid_xlevel (0x8000_xxxx) or cpuid_xlevel2 (0xC000_xxxx, Centaur),
 * mirroring hardware behavior, then dispatched by leaf number.
 * Results are returned through *eax..*edx.
 */
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                   uint32_t *eax, uint32_t *ebx,
                   uint32_t *ecx, uint32_t *edx)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    uint32_t pkg_offset;

    /* test if maximum index reached */
    if (index & 0x80000000) {
        if (index > env->cpuid_xlevel) {
            if (env->cpuid_xlevel2 > 0) {
                /* Handle the Centaur's CPUID instruction. */
                if (index > env->cpuid_xlevel2) {
                    index = env->cpuid_xlevel2;
                } else if (index < 0xC0000000) {
                    index = env->cpuid_xlevel;
                }
            } else {
                /* Intel documentation states that invalid EAX input will
                 * return the same information as EAX=cpuid_level
                 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
                 */
                index = env->cpuid_level;
            }
        }
    } else {
        if (index > env->cpuid_level)
            index = env->cpuid_level;
    }

    switch(index) {
    case 0:
        /* Vendor string and maximum basic leaf */
        *eax = env->cpuid_level;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 1:
        /* Version, APIC id, and basic feature flags */
        *eax = env->cpuid_version;
        *ebx = (cpu->apic_id << 24) |
               8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
        *ecx = env->features[FEAT_1_ECX];
        /* OSXSAVE reflects CR4.OSXSAVE, not a static feature bit */
        if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
            *ecx |= CPUID_EXT_OSXSAVE;
        }
        *edx = env->features[FEAT_1_EDX];
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
            *edx |= CPUID_HT;
        }
        break;
    case 2:
        /* cache info: needed for Pentium Pro compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = 1; /* Number of CPUID[EAX=2] calls required */
        *ebx = 0;
        if (!cpu->enable_l3_cache) {
            *ecx = 0;
        } else {
            *ecx = L3_N_DESCRIPTOR;
        }
        *edx = (L1D_DESCRIPTOR << 16) | \
               (L1I_DESCRIPTOR <<  8) | \
               (L2_DESCRIPTOR);
        break;
    case 4:
        /* cache info: needed for Core compatibility */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, count, eax, ebx, ecx, edx);
            *eax &= ~0xFC000000;
        } else {
            *eax = 0;
            switch (count) {
            case 0: /* L1 dcache info */
                *eax |= CPUID_4_TYPE_DCACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1D_LINE_SIZE - 1) | \
                       ((L1D_PARTITIONS - 1) << 12) | \
                       ((L1D_ASSOCIATIVITY - 1) << 22);
                *ecx = L1D_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 1: /* L1 icache info */
                *eax |= CPUID_4_TYPE_ICACHE | \
                        CPUID_4_LEVEL(1) | \
                        CPUID_4_SELF_INIT_LEVEL;
                *ebx = (L1I_LINE_SIZE - 1) | \
                       ((L1I_PARTITIONS - 1) << 12) | \
                       ((L1I_ASSOCIATIVITY - 1) << 22);
                *ecx = L1I_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 2: /* L2 cache info */
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(2) | \
                        CPUID_4_SELF_INIT_LEVEL;
                if (cs->nr_threads > 1) {
                    *eax |= (cs->nr_threads - 1) << 14;
                }
                *ebx = (L2_LINE_SIZE - 1) | \
                       ((L2_PARTITIONS - 1) << 12) | \
                       ((L2_ASSOCIATIVITY - 1) << 22);
                *ecx = L2_SETS - 1;
                *edx = CPUID_4_NO_INVD_SHARING;
                break;
            case 3: /* L3 cache info */
                if (!cpu->enable_l3_cache) {
                    *eax = 0;
                    *ebx = 0;
                    *ecx = 0;
                    *edx = 0;
                    break;
                }
                *eax |= CPUID_4_TYPE_UNIFIED | \
                        CPUID_4_LEVEL(3) | \
                        CPUID_4_SELF_INIT_LEVEL;
                pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                *eax |= ((1 << pkg_offset) - 1) << 14;
                *ebx = (L3_N_LINE_SIZE - 1) | \
                       ((L3_N_PARTITIONS - 1) << 12) | \
                       ((L3_N_ASSOCIATIVITY - 1) << 22);
                *ecx = L3_N_SETS - 1;
                *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
                break;
            default: /* end of info */
                *eax = 0;
                *ebx = 0;
                *ecx = 0;
                *edx = 0;
                break;
            }
        }

        /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
        if ((*eax & 31) && cs->nr_cores > 1) {
            *eax |= (cs->nr_cores - 1) << 26;
        }
        break;
    case 5:
        /* mwait info: needed for Core compatibility */
        *eax = 0; /* Smallest monitor-line size in bytes */
        *ebx = 0; /* Largest monitor-line size in bytes */
        *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
        *edx = 0;
        break;
    case 6:
        /* Thermal and Power Leaf */
        *eax = env->features[FEAT_6_EAX];
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 7:
        /* Structured Extended Feature Flags Enumeration Leaf */
        if (count == 0) {
            *eax = 0; /* Maximum ECX value for sub-leaves */
            *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
            *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
            /* OSPKE reflects CR4.PKE, like OSXSAVE in leaf 1 */
            if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
                *ecx |= CPUID_7_0_ECX_OSPKE;
            }
            *edx = 0; /* Reserved */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 9:
        /* Direct Cache Access Information Leaf */
        *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xA:
        /* Architectural Performance Monitoring Leaf */
        if (kvm_enabled() && cpu->enable_pmu) {
            KVMState *s = cs->kvm_state;

            *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
            *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
            *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
            *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xB:
        /* Extended Topology Enumeration Leaf */
        if (!cpu->enable_cpuid_0xb) {
                *eax = *ebx = *ecx = *edx = 0;
                break;
        }

        *ecx = count & 0xff;
        *edx = cpu->apic_id;

        switch (count) {
        case 0:
            *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
            break;
        case 1:
            *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
            *ebx = cs->nr_cores * cs->nr_threads;
            *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
            break;
        default:
            *eax = 0;
            *ebx = 0;
            *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
        }

        assert(!(*eax & ~0x1f));
        *ebx &= 0xffff; /* The count doesn't need to be reliable. */
        break;
    case 0xD: {
        /* Processor Extended State */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
            break;
        }

        if (count == 0) {
            /* Sub-leaf 0: supported XCR0 bits and total save-area size */
            *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
            *eax = env->features[FEAT_XSAVE_COMP_LO];
            *edx = env->features[FEAT_XSAVE_COMP_HI];
            *ebx = *ecx;
        } else if (count == 1) {
            *eax = env->features[FEAT_XSAVE];
        } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
            /* Sub-leaves >= 2: size/offset of each enabled component */
            if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
                const ExtSaveArea *esa = &x86_ext_save_areas[count];
                *eax = esa->size;
                *ebx = esa->offset;
            }
        }
        break;
    }
    case 0x80000000:
        /* Maximum extended leaf and vendor string */
        *eax = env->cpuid_xlevel;
        *ebx = env->cpuid_vendor1;
        *edx = env->cpuid_vendor2;
        *ecx = env->cpuid_vendor3;
        break;
    case 0x80000001:
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = env->features[FEAT_8000_0001_ECX];
        *edx = env->features[FEAT_8000_0001_EDX];

        /* The Linux kernel checks for the CMPLegacy bit and
         * discards multiple thread information if it is set.
         * So don't set it here for Intel to make Linux guests happy.
         */
        if (cs->nr_cores * cs->nr_threads > 1) {
            if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
                env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
                env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
                *ecx |= 1 << 1;    /* CmpLegacy bit */
            }
        }
        break;
    case 0x80000002:
    case 0x80000003:
    case 0x80000004:
        /* Processor brand string, 16 bytes per leaf */
        *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
        *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
        *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
        *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
        break;
    case 0x80000005:
        /* cache info (L1 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
               (L1_ITLB_2M_ASSOC <<  8) | (L1_ITLB_2M_ENTRIES);
        *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
               (L1_ITLB_4K_ASSOC <<  8) | (L1_ITLB_4K_ENTRIES);
        *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
               (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
        *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
               (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
        break;
    case 0x80000006:
        /* cache info (L2 cache) */
        if (cpu->cache_info_passthrough) {
            host_cpuid(index, 0, eax, ebx, ecx, edx);
            break;
        }
        *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
               (L2_DTLB_2M_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
               (L2_ITLB_2M_ENTRIES);
        *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
               (L2_DTLB_4K_ENTRIES << 16) | \
               (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
               (L2_ITLB_4K_ENTRIES);
        *ecx = (L2_SIZE_KB_AMD << 16) | \
               (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
               (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
        if (!cpu->enable_l3_cache) {
            *edx = ((L3_SIZE_KB / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
                   (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
        } else {
            *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
                   (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
                   (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
        }
        break;
    case 0x80000007:
        /* Advanced Power Management */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_8000_0007_EDX];
        break;
    case 0x80000008:
        /* virtual & phys address size in low 2 bytes. */
        if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
            /* 64 bit processor, 48 bits virtual, configurable
             * physical bits.
             */
            *eax = 0x00003000 + cpu->phys_bits;
        } else {
            *eax = cpu->phys_bits;
        }
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        if (cs->nr_cores * cs->nr_threads > 1) {
            *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
        }
        break;
    case 0x8000000A:
        /* SVM leaf: only meaningful when SVM is advertised */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            *eax = 0x00000001; /* SVM Revision */
            *ebx = 0x00000010; /* nr of ASIDs */
            *ecx = 0;
            *edx = env->features[FEAT_SVM]; /* optional features */
        } else {
            *eax = 0;
            *ebx = 0;
            *ecx = 0;
            *edx = 0;
        }
        break;
    case 0xC0000000:
        /* Maximum Centaur extended leaf */
        *eax = env->cpuid_xlevel2;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    case 0xC0000001:
        /* Support for VIA CPU's CPUID instruction */
        *eax = env->cpuid_version;
        *ebx = 0;
        *ecx = 0;
        *edx = env->features[FEAT_C000_0001_EDX];
        break;
    case 0xC0000002:
    case 0xC0000003:
    case 0xC0000004:
        /* Reserved for the future, and now filled with zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    default:
        /* reserved values: zero */
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
2671
/* CPUClass::reset()
 *
 * Put the CPU into its architectural power-on/RESET state: real mode,
 * CS:IP = F000:FFF0, FPU/SSE/MTRR/DR registers at documented defaults.
 * Fields placed after CPUX86State.end_reset_fields survive the memset.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Zero everything up to end_reset_fields; the rest is preserved */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 reset value: ET set, CD/NW set, paging/protection off */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS base 0xffff0000 so CS:IP hits the reset
     * vector at 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for INIT/SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2795
2796 #ifndef CONFIG_USER_ONLY
2797 bool cpu_is_bsp(X86CPU *cpu)
2798 {
2799 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2800 }
2801
/* Machine-reset callback: reset the CPU passed as @opaque.
 * TODO: remove me, when reset over QOM tree is implemented
 */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    cpu_reset(CPU(opaque));
}
2808 #endif
2809
2810 static void mce_init(X86CPU *cpu)
2811 {
2812 CPUX86State *cenv = &cpu->env;
2813 unsigned int bank;
2814
2815 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2816 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2817 (CPUID_MCE | CPUID_MCA)) {
2818 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
2819 (cpu->enable_lmce ? MCG_LMCE_P : 0);
2820 cenv->mcg_ctl = ~(uint64_t)0;
2821 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2822 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2823 }
2824 }
2825 }
2826
2827 #ifndef CONFIG_USER_ONLY
/* Create the local APIC device for @cpu as a QOM child, choosing the
 * APIC implementation that matches the accelerator in use.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = DEVICE(object_new(apic_type));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* Drop the reference from object_new(); the child property above
     * now holds the only one. */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
2851
/* Realize the CPU's APIC device (if one was created) and map the APIC
 * MMIO window into the system address space.  The window is mapped only
 * once, by the first CPU to get here (static apic_mmio_map_once flag).
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
     }
}
2874
2875 static void x86_cpu_machine_done(Notifier *n, void *unused)
2876 {
2877 X86CPU *cpu = container_of(n, X86CPU, machine_done);
2878 MemoryRegion *smram =
2879 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
2880
2881 if (smram) {
2882 cpu->smram = g_new(MemoryRegion, 1);
2883 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
2884 smram, 0, 1ull << 32);
2885 memory_region_set_enabled(cpu->smram, false);
2886 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
2887 }
2888 }
2889 #else
/* User-mode emulation has no APIC device: nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
2893 #endif
2894
2895 /* Note: Only safe for use on x86(-64) hosts */
2896 static uint32_t x86_host_phys_bits(void)
2897 {
2898 uint32_t eax;
2899 uint32_t host_phys_bits;
2900
2901 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
2902 if (eax >= 0x80000008) {
2903 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
2904 /* Note: According to AMD doc 25481 rev 2.34 they have a field
2905 * at 23:16 that can specify a maximum physical address bits for
2906 * the guest that can override this value; but I've not seen
2907 * anything with that set.
2908 */
2909 host_phys_bits = eax & 0xff;
2910 } else {
2911 /* It's an odd 64 bit machine that doesn't have the leaf for
2912 * physical address bits; fall back to 36 that's most older
2913 * Intel.
2914 */
2915 host_phys_bits = 36;
2916 }
2917
2918 return host_phys_bits;
2919 }
2920
2921 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
2922 {
2923 if (*min < value) {
2924 *min = value;
2925 }
2926 }
2927
2928 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
2929 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
2930 {
2931 CPUX86State *env = &cpu->env;
2932 FeatureWordInfo *fi = &feature_word_info[w];
2933 uint32_t eax = fi->cpuid_eax;
2934 uint32_t region = eax & 0xF0000000;
2935
2936 if (!env->features[w]) {
2937 return;
2938 }
2939
2940 switch (region) {
2941 case 0x00000000:
2942 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
2943 break;
2944 case 0x80000000:
2945 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
2946 break;
2947 case 0xC0000000:
2948 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
2949 break;
2950 }
2951 }
2952
2953 /* Calculate XSAVE components based on the configured CPU feature flags */
2954 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
2955 {
2956 CPUX86State *env = &cpu->env;
2957 int i;
2958 uint64_t mask;
2959
2960 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2961 return;
2962 }
2963
2964 mask = (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2965 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2966 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2967 if (env->features[esa->feature] & esa->bits) {
2968 mask |= (1ULL << i);
2969 }
2970 }
2971
2972 env->features[FEAT_XSAVE_COMP_LO] = mask;
2973 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
2974 }
2975
/* Vendor checks: compare all three CPUID[0] vendor-string words
 * (EBX/EDX/ECX, stored as cpuid_vendor1..3) against Intel/AMD. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/*
 * Realize an X86CPU: apply the "+feature"/"-feature" command-line
 * overrides, finalize the CPUID feature words and CPUID levels, check
 * the result against what the accelerator supports, validate/derive
 * phys-bits, create the APIC and (for TCG) the per-CPU address space,
 * and finally start the vCPU.
 *
 * Errors are reported through @errp; on failure the CPU stays
 * unrealized.  NOTE: the steps below are order-sensitive (e.g. the
 * hyperthreading warning must run after qemu_init_vcpu()).
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;  /* warn about AMD+SMT only once per process */
    FeatureWord w;
    GList *l;

    /* Some models (e.g. "host") only make sense under KVM. */
    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /*TODO: cpu->host_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->host_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Apply "+feature" overrides through the per-bit QOM properties. */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* ... and "-feature" overrides likewise. */
    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* Clear the KVM paravirt feature word unless KVM is active and the
     * user left "kvm" (expose_kvm) enabled.
     */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        /* Raise cpuid_min_*level so every enabled feature word is
         * actually reachable through CPUID.
         */
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

    /* Drop features the accelerator can't provide; with "enforce" that
     * is a hard error instead of a silent downgrade.
     */
    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                                 " does not match phys-bits property (%u)",
                                 host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_init(cs, &error_abort);

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed when the CPUID APIC bit is set or with SMP. */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3227
3228 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3229 {
3230 X86CPU *cpu = X86_CPU(dev);
3231
3232 #ifndef CONFIG_USER_ONLY
3233 cpu_remove_sync(CPU(dev));
3234 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3235 #endif
3236
3237 if (cpu->apic_state) {
3238 object_unparent(OBJECT(cpu->apic_state));
3239 cpu->apic_state = NULL;
3240 }
3241 }
3242
/* Opaque state for a per-feature-bit boolean QOM property:
 * @ptr points at the uint32_t feature word being controlled and
 * @mask selects the bit(s) within it (one name may cover several bits).
 */
typedef struct BitProperty {
    uint32_t *ptr;
    uint32_t mask;
} BitProperty;
3247
3248 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3249 void *opaque, Error **errp)
3250 {
3251 BitProperty *fp = opaque;
3252 bool value = (*fp->ptr & fp->mask) == fp->mask;
3253 visit_type_bool(v, name, &value, errp);
3254 }
3255
3256 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3257 void *opaque, Error **errp)
3258 {
3259 DeviceState *dev = DEVICE(obj);
3260 BitProperty *fp = opaque;
3261 Error *local_err = NULL;
3262 bool value;
3263
3264 if (dev->realized) {
3265 qdev_prop_set_after_realize(dev, name, errp);
3266 return;
3267 }
3268
3269 visit_type_bool(v, name, &value, &local_err);
3270 if (local_err) {
3271 error_propagate(errp, local_err);
3272 return;
3273 }
3274
3275 if (value) {
3276 *fp->ptr |= fp->mask;
3277 } else {
3278 *fp->ptr &= ~fp->mask;
3279 }
3280 }
3281
3282 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3283 void *opaque)
3284 {
3285 BitProperty *prop = opaque;
3286 g_free(prop);
3287 }
3288
3289 /* Register a boolean property to get/set a single bit in a uint32_t field.
3290 *
3291 * The same property name can be registered multiple times to make it affect
3292 * multiple bits in the same FeatureWord. In that case, the getter will return
3293 * true only if all bits are set.
3294 */
3295 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3296 const char *prop_name,
3297 uint32_t *field,
3298 int bitnr)
3299 {
3300 BitProperty *fp;
3301 ObjectProperty *op;
3302 uint32_t mask = (1UL << bitnr);
3303
3304 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3305 if (op) {
3306 fp = op->opaque;
3307 assert(fp->ptr == field);
3308 fp->mask |= mask;
3309 } else {
3310 fp = g_new0(BitProperty, 1);
3311 fp->ptr = field;
3312 fp->mask = mask;
3313 object_property_add(OBJECT(cpu), prop_name, "bool",
3314 x86_cpu_get_bit_prop,
3315 x86_cpu_set_bit_prop,
3316 x86_cpu_release_bit_prop, fp, &error_abort);
3317 }
3318 }
3319
3320 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3321 FeatureWord w,
3322 int bitnr)
3323 {
3324 Object *obj = OBJECT(cpu);
3325 int i;
3326 char **names;
3327 FeatureWordInfo *fi = &feature_word_info[w];
3328
3329 if (!fi->feat_names[bitnr]) {
3330 return;
3331 }
3332
3333 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3334
3335 feat2prop(names[0]);
3336 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3337
3338 for (i = 1; names[i]; i++) {
3339 feat2prop(names[i]);
3340 object_property_add_alias(obj, names[i], obj, names[0],
3341 &error_abort);
3342 }
3343
3344 g_strfreev(names);
3345 }
3346
3347 static void x86_cpu_initfn(Object *obj)
3348 {
3349 CPUState *cs = CPU(obj);
3350 X86CPU *cpu = X86_CPU(obj);
3351 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
3352 CPUX86State *env = &cpu->env;
3353 FeatureWord w;
3354
3355 cs->env_ptr = env;
3356
3357 object_property_add(obj, "family", "int",
3358 x86_cpuid_version_get_family,
3359 x86_cpuid_version_set_family, NULL, NULL, NULL);
3360 object_property_add(obj, "model", "int",
3361 x86_cpuid_version_get_model,
3362 x86_cpuid_version_set_model, NULL, NULL, NULL);
3363 object_property_add(obj, "stepping", "int",
3364 x86_cpuid_version_get_stepping,
3365 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
3366 object_property_add_str(obj, "vendor",
3367 x86_cpuid_get_vendor,
3368 x86_cpuid_set_vendor, NULL);
3369 object_property_add_str(obj, "model-id",
3370 x86_cpuid_get_model_id,
3371 x86_cpuid_set_model_id, NULL);
3372 object_property_add(obj, "tsc-frequency", "int",
3373 x86_cpuid_get_tsc_freq,
3374 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
3375 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
3376 x86_cpu_get_feature_words,
3377 NULL, NULL, (void *)env->features, NULL);
3378 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
3379 x86_cpu_get_feature_words,
3380 NULL, NULL, (void *)cpu->filtered_features, NULL);
3381
3382 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
3383
3384 for (w = 0; w < FEATURE_WORDS; w++) {
3385 int bitnr;
3386
3387 for (bitnr = 0; bitnr < 32; bitnr++) {
3388 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
3389 }
3390 }
3391
3392 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
3393 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
3394 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
3395 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
3396 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
3397 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
3398 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
3399 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
3400 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
3401 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
3402 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
3403 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
3404 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
3405 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
3406 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
3407 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
3408 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
3409 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
3410 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
3411 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
3412 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
3413
3414 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
3415 }
3416
3417 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3418 {
3419 X86CPU *cpu = X86_CPU(cs);
3420
3421 return cpu->apic_id;
3422 }
3423
3424 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3425 {
3426 X86CPU *cpu = X86_CPU(cs);
3427
3428 return cpu->env.cr[0] & CR0_PG_MASK;
3429 }
3430
3431 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3432 {
3433 X86CPU *cpu = X86_CPU(cs);
3434
3435 cpu->env.eip = value;
3436 }
3437
3438 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3439 {
3440 X86CPU *cpu = X86_CPU(cs);
3441
3442 cpu->env.eip = tb->pc - tb->cs_base;
3443 }
3444
3445 static bool x86_cpu_has_work(CPUState *cs)
3446 {
3447 X86CPU *cpu = X86_CPU(cs);
3448 CPUX86State *env = &cpu->env;
3449
3450 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3451 CPU_INTERRUPT_POLL)) &&
3452 (env->eflags & IF_MASK)) ||
3453 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3454 CPU_INTERRUPT_INIT |
3455 CPU_INTERRUPT_SIPI |
3456 CPU_INTERRUPT_MCE)) ||
3457 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3458 !(env->hflags & HF_SMM_MASK));
3459 }
3460
/* qdev properties shared by every X86CPU model. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* For system emulation the topology IDs start unassigned;
     * x86_cpu_realizefn() rejects an UNASSIGNED_APIC_ID.
     */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenment switches: */
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    /* Feature checking/enforcement and KVM paravirt exposure: */
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* CPUID level overrides; UINT32_MAX means "derive from min-*level"
     * in x86_cpu_realizefn().
     */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_END_OF_LIST()
};
3504
/* Class init for the abstract TYPE_X86_CPU: wires the device realize/
 * unrealize handlers and all the CPUClass hooks defined in this file.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain up: remember the parent's realize before overriding it. */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System-emulation-only hooks: memory inspection, ELF core notes
     * for dump support, and migration state.
     */
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    dc->cannot_instantiate_with_device_add_yet = false;
    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
3557
/* Abstract base QOM type for all x86 CPU models; concrete model types
 * are registered from builtin_x86_defs in x86_cpu_register_types().
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3567
3568 static void x86_cpu_register_types(void)
3569 {
3570 int i;
3571
3572 type_register_static(&x86_cpu_type_info);
3573 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3574 x86_register_cpudef_type(&builtin_x86_defs[i]);
3575 }
3576 #ifdef CONFIG_KVM
3577 type_register_static(&host_x86_cpu_type_info);
3578 #endif
3579 }
3580
3581 type_init(x86_cpu_register_types)