/*
 * QEMU target-i386/cpu.c (mirror snapshot at commit
 * "target-i386: xsave: Helper function to calculate xsave area size")
 */
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include "qemu/osdep.h"
20 #include "qemu/cutils.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "sysemu/kvm.h"
25 #include "sysemu/cpus.h"
26 #include "kvm_i386.h"
27
28 #include "qemu/error-report.h"
29 #include "qemu/option.h"
30 #include "qemu/config-file.h"
31 #include "qapi/qmp/qerror.h"
32
33 #include "qapi-types.h"
34 #include "qapi-visit.h"
35 #include "qapi/visitor.h"
36 #include "sysemu/arch_init.h"
37
38 #if defined(CONFIG_KVM)
39 #include <linux/kvm_para.h>
40 #endif
41
42 #include "sysemu/sysemu.h"
43 #include "hw/qdev-properties.h"
44 #include "hw/i386/topology.h"
45 #ifndef CONFIG_USER_ONLY
46 #include "exec/address-spaces.h"
47 #include "hw/hw.h"
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
53 /* Cache topology CPUID constants: */
54
55 /* CPUID Leaf 2 Descriptors */
56
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60 #define CPUID_2_L3_16MB_16WAY_64B 0x4d
61
62
63 /* CPUID Leaf 4 constants: */
64
65 /* EAX: */
66 #define CPUID_4_TYPE_DCACHE 1
67 #define CPUID_4_TYPE_ICACHE 2
68 #define CPUID_4_TYPE_UNIFIED 3
69
70 #define CPUID_4_LEVEL(l) ((l) << 5)
71
72 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
73 #define CPUID_4_FULLY_ASSOC (1 << 9)
74
75 /* EDX: */
76 #define CPUID_4_NO_INVD_SHARING (1 << 0)
77 #define CPUID_4_INCLUSIVE (1 << 1)
78 #define CPUID_4_COMPLEX_IDX (1 << 2)
79
80 #define ASSOC_FULL 0xFF
81
82 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
83 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
84 a == 2 ? 0x2 : \
85 a == 4 ? 0x4 : \
86 a == 8 ? 0x6 : \
87 a == 16 ? 0x8 : \
88 a == 32 ? 0xA : \
89 a == 48 ? 0xB : \
90 a == 64 ? 0xC : \
91 a == 96 ? 0xD : \
92 a == 128 ? 0xE : \
93 a == ASSOC_FULL ? 0xF : \
94 0 /* invalid value */)
95
96
97 /* Definitions of the hardcoded cache entries we expose: */
98
99 /* L1 data cache: */
100 #define L1D_LINE_SIZE 64
101 #define L1D_ASSOCIATIVITY 8
102 #define L1D_SETS 64
103 #define L1D_PARTITIONS 1
104 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
105 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
106 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
107 #define L1D_LINES_PER_TAG 1
108 #define L1D_SIZE_KB_AMD 64
109 #define L1D_ASSOCIATIVITY_AMD 2
110
111 /* L1 instruction cache: */
112 #define L1I_LINE_SIZE 64
113 #define L1I_ASSOCIATIVITY 8
114 #define L1I_SETS 64
115 #define L1I_PARTITIONS 1
116 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
117 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
118 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
119 #define L1I_LINES_PER_TAG 1
120 #define L1I_SIZE_KB_AMD 64
121 #define L1I_ASSOCIATIVITY_AMD 2
122
123 /* Level 2 unified cache: */
124 #define L2_LINE_SIZE 64
125 #define L2_ASSOCIATIVITY 16
126 #define L2_SETS 4096
127 #define L2_PARTITIONS 1
128 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
129 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
130 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
131 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
132 #define L2_LINES_PER_TAG 1
133 #define L2_SIZE_KB_AMD 512
134
135 /* Level 3 unified cache: */
136 #define L3_SIZE_KB 0 /* disabled */
137 #define L3_ASSOCIATIVITY 0 /* disabled */
138 #define L3_LINES_PER_TAG 0 /* disabled */
139 #define L3_LINE_SIZE 0 /* disabled */
140 #define L3_N_LINE_SIZE 64
141 #define L3_N_ASSOCIATIVITY 16
142 #define L3_N_SETS 16384
143 #define L3_N_PARTITIONS 1
144 #define L3_N_DESCRIPTOR CPUID_2_L3_16MB_16WAY_64B
145 #define L3_N_LINES_PER_TAG 1
146 #define L3_N_SIZE_KB_AMD 16384
147
148 /* TLB definitions: */
149
150 #define L1_DTLB_2M_ASSOC 1
151 #define L1_DTLB_2M_ENTRIES 255
152 #define L1_DTLB_4K_ASSOC 1
153 #define L1_DTLB_4K_ENTRIES 255
154
155 #define L1_ITLB_2M_ASSOC 1
156 #define L1_ITLB_2M_ENTRIES 255
157 #define L1_ITLB_4K_ASSOC 1
158 #define L1_ITLB_4K_ENTRIES 255
159
160 #define L2_DTLB_2M_ASSOC 0 /* disabled */
161 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
162 #define L2_DTLB_4K_ASSOC 4
163 #define L2_DTLB_4K_ENTRIES 512
164
165 #define L2_ITLB_2M_ASSOC 0 /* disabled */
166 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
167 #define L2_ITLB_4K_ASSOC 4
168 #define L2_ITLB_4K_ENTRIES 512
169
170
171
172 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
173 uint32_t vendor2, uint32_t vendor3)
174 {
175 int i;
176 for (i = 0; i < 4; i++) {
177 dst[i] = vendor1 >> (8 * i);
178 dst[i + 4] = vendor2 >> (8 * i);
179 dst[i + 8] = vendor3 >> (8 * i);
180 }
181 dst[CPUID_VENDOR_SZ] = '\0';
182 }
183
184 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
185 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
186 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
187 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
188 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
189 CPUID_PSE36 | CPUID_FXSR)
190 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
191 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
192 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
193 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
194 CPUID_PAE | CPUID_SEP | CPUID_APIC)
195
196 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
197 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
198 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
199 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
200 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
201 /* partly implemented:
202 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
203 /* missing:
204 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
205 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
206 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
207 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
208 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
209 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
210 /* missing:
211 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
212 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
213 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
214 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
215 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
216
217 #ifdef TARGET_X86_64
218 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
219 #else
220 #define TCG_EXT2_X86_64_FEATURES 0
221 #endif
222
223 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
224 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
225 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
226 TCG_EXT2_X86_64_FEATURES)
227 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
228 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
229 #define TCG_EXT4_FEATURES 0
230 #define TCG_SVM_FEATURES 0
231 #define TCG_KVM_FEATURES 0
232 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
233 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
234 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
235 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
236 CPUID_7_0_EBX_ERMS)
237 /* missing:
238 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
239 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
240 CPUID_7_0_EBX_RDSEED */
241 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE)
242 #define TCG_APM_FEATURES 0
243 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
244 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
245 /* missing:
246 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
247
/* Per-feature-word metadata: bit names, the CPUID leaf/register that
 * reports the word, TCG support and migration blockers. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32]; /* one name per bit; NULL = not known to QEMU */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;
262
/* Metadata for every feature word QEMU knows about, indexed by FeatureWord.
 * Names listed here become user-visible "-cpu ...,+flag" property names;
 * "a|b" entries are accepted aliases. */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
            "ds_cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1|sse4_1",
            "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx|xd", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb", "rdtscp",
            NULL, "lm|i64", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf_lm", "cmp_legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid_msr",
            NULL, "tbm", "topoext", "perfctr_core",
            "perfctr_nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = {
            /* NOTE(review): "kvmclock" appears at both bit 0 and bit 3 —
             * presumably the two KVM clocksource interfaces; confirm
             * against linux/kvm_para.h before changing either name. */
            "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
            "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    /* The three Hyper-V words below have no user-settable flag names yet;
     * the commented NULLs record which bit is which. */
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm_lock", "nrip_save",
            "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
            NULL, NULL, "pause_filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc_adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", NULL, "avx512pf", "avx512er",
            "avx512cd", NULL, "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        /* invtsc advertises a fixed-rate TSC, which migration can break */
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
};
493
/* Mapping between a 32-bit x86 register and its QAPI representation. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
500
/* Table of the 32-bit general-purpose registers, indexed by the R_*
 * register constants; REGISTER() keeps name and QAPI enum in sync. */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
514
/* Describes one XSAVE state component: the feature word/bit that
 * advertises it, and its byte offset/size within the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;   /* FeatureWord index and bitmask within it */
    uint32_t offset, size;    /* location inside X86XSaveArea */
} ExtSaveArea;
519
/* XSAVE state components beyond the legacy x87/SSE region, indexed by
 * XSTATE_*_BIT.  Components 0 (x87) and 1 (SSE) live in the legacy
 * area and are not listed here (see xsave_area_size, which starts its
 * scan at component 2). */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
550
551 static uint32_t xsave_area_size(uint64_t mask)
552 {
553 int i;
554 uint64_t ret = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader);
555
556 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
557 const ExtSaveArea *esa = &x86_ext_save_areas[i];
558 if ((mask >> i) & 1) {
559 ret = MAX(ret, esa->offset + esa->size);
560 }
561 }
562 return ret;
563 }
564
565 const char *get_register_name_32(unsigned int reg)
566 {
567 if (reg >= CPU_NB_REGS32) {
568 return NULL;
569 }
570 return x86_reg_info_32[reg].name;
571 }
572
573 /*
574 * Returns the set of feature flags that are supported and migratable by
575 * QEMU, for a given FeatureWord.
576 */
577 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
578 {
579 FeatureWordInfo *wi = &feature_word_info[w];
580 uint32_t r = 0;
581 int i;
582
583 for (i = 0; i < 32; i++) {
584 uint32_t f = 1U << i;
585 /* If the feature name is unknown, it is not supported by QEMU yet */
586 if (!wi->feat_names[i]) {
587 continue;
588 }
589 /* Skip features known to QEMU, but explicitly marked as unmigratable */
590 if (wi->unmigratable_flags & f) {
591 continue;
592 }
593 r |= f;
594 }
595 return r;
596 }
597
/*
 * Execute the CPUID instruction on the host and return its results.
 *
 * @function: input EAX (CPUID leaf)
 * @count:    input ECX (sub-leaf)
 * @eax/@ebx/@ecx/@edx: output pointers; each may be NULL if the caller
 *                      does not need that register.
 *
 * Aborts at runtime on non-x86 hosts.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* PUSHA/POPA save and restore all GPRs around CPUID — presumably so
     * EBX (the 32-bit PIC register) is not clobbered; results are stored
     * through %2 (vec) before POPA restores the registers. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
631
/* True for any non-NUL character outside printable ASCII, i.e. c <= ' '
 * (space and control characters) or c > '~'; false for NUL itself. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
633
634 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
635 * a substring. ex if !NULL points to the first char after a substring,
636 * otherwise the string is assumed to sized by a terminating nul.
637 * Return lexical ordering of *s1:*s2.
638 */
639 static int sstrcmp(const char *s1, const char *e1,
640 const char *s2, const char *e2)
641 {
642 for (;;) {
643 if (!*s1 || !*s2 || *s1 != *s2)
644 return (*s1 - *s2);
645 ++s1, ++s2;
646 if (s1 == e1 && s2 == e2)
647 return (0);
648 else if (s1 == e1)
649 return (*s2);
650 else if (s2 == e2)
651 return (*s1);
652 }
653 }
654
655 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
656 * '|' delimited (possibly empty) strings in which case search for a match
657 * within the alternatives proceeds left to right. Return 0 for success,
658 * non-zero otherwise.
659 */
/* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right. Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *start = altstr;

    for (;;) {
        /* Find the end of the current alternative */
        const char *end = start;
        while (*end && *end != '|') {
            end++;
        }
        if (start == end) {
            /* Empty alternative matches only the empty string */
            if (!*s) {
                return 0;
            }
        } else if (!sstrcmp(s, e, start, end)) {
            return 0;
        }
        if (!*end) {
            return 1;   /* no more alternatives */
        }
        start = end + 1;
    }
}
675
676 /* search featureset for flag *[s..e), if found set corresponding bit in
677 * *pval and return true, otherwise return false
678 */
679 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
680 const char **featureset)
681 {
682 uint32_t mask;
683 const char **ppc;
684 bool found = false;
685
686 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
687 if (*ppc && !altcmp(s, e, *ppc)) {
688 *pval |= mask;
689 found = true;
690 }
691 }
692 return found;
693 }
694
695 static void add_flagname_to_bitmaps(const char *flagname,
696 FeatureWordArray words,
697 Error **errp)
698 {
699 FeatureWord w;
700 for (w = 0; w < FEATURE_WORDS; w++) {
701 FeatureWordInfo *wi = &feature_word_info[w];
702 if (lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
703 break;
704 }
705 }
706 if (w == FEATURE_WORDS) {
707 error_setg(errp, "CPU feature %s not found", flagname);
708 }
709 }
710
711 /* CPU class name definitions: */
712
713 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
714 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
715
716 /* Return type name for a given CPU model name
717 * Caller is responsible for freeing the returned string.
718 */
719 static char *x86_cpu_type_name(const char *model_name)
720 {
721 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
722 }
723
724 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
725 {
726 ObjectClass *oc;
727 char *typename;
728
729 if (cpu_model == NULL) {
730 return NULL;
731 }
732
733 typename = x86_cpu_type_name(cpu_model);
734 oc = object_class_by_name(typename);
735 g_free(typename);
736 return oc;
737 }
738
739 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
740 {
741 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
742 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
743 return g_strndup(class_name,
744 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
745 }
746
/* Definition of a built-in CPU model: CPUID limits, identification
 * fields and default feature flags. */
struct X86CPUDefinition {
    const char *name;   /* model name, e.g. "qemu64", "Nehalem" */
    uint32_t level;     /* maximum basic CPUID leaf */
    uint32_t xlevel;    /* maximum extended CPUID leaf (0x8000xxxx) */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;  /* default feature bits per feature word */
    /* model name string shown to the guest — NOTE(review): presumably
     * the CPUID brand string; confirm against the leaf that reports it */
    char model_id[48];
};
759
760 static X86CPUDefinition builtin_x86_defs[] = {
761 {
762 .name = "qemu64",
763 .level = 0xd,
764 .vendor = CPUID_VENDOR_AMD,
765 .family = 6,
766 .model = 6,
767 .stepping = 3,
768 .features[FEAT_1_EDX] =
769 PPRO_FEATURES |
770 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
771 CPUID_PSE36,
772 .features[FEAT_1_ECX] =
773 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
774 .features[FEAT_8000_0001_EDX] =
775 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
776 .features[FEAT_8000_0001_ECX] =
777 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
778 .xlevel = 0x8000000A,
779 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
780 },
781 {
782 .name = "phenom",
783 .level = 5,
784 .vendor = CPUID_VENDOR_AMD,
785 .family = 16,
786 .model = 2,
787 .stepping = 3,
788 /* Missing: CPUID_HT */
789 .features[FEAT_1_EDX] =
790 PPRO_FEATURES |
791 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
792 CPUID_PSE36 | CPUID_VME,
793 .features[FEAT_1_ECX] =
794 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
795 CPUID_EXT_POPCNT,
796 .features[FEAT_8000_0001_EDX] =
797 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
798 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
799 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
800 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
801 CPUID_EXT3_CR8LEG,
802 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
803 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
804 .features[FEAT_8000_0001_ECX] =
805 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
806 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
807 /* Missing: CPUID_SVM_LBRV */
808 .features[FEAT_SVM] =
809 CPUID_SVM_NPT,
810 .xlevel = 0x8000001A,
811 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
812 },
813 {
814 .name = "core2duo",
815 .level = 10,
816 .vendor = CPUID_VENDOR_INTEL,
817 .family = 6,
818 .model = 15,
819 .stepping = 11,
820 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
821 .features[FEAT_1_EDX] =
822 PPRO_FEATURES |
823 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
824 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
825 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
826 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
827 .features[FEAT_1_ECX] =
828 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
829 CPUID_EXT_CX16,
830 .features[FEAT_8000_0001_EDX] =
831 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
832 .features[FEAT_8000_0001_ECX] =
833 CPUID_EXT3_LAHF_LM,
834 .xlevel = 0x80000008,
835 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
836 },
837 {
838 .name = "kvm64",
839 .level = 0xd,
840 .vendor = CPUID_VENDOR_INTEL,
841 .family = 15,
842 .model = 6,
843 .stepping = 1,
844 /* Missing: CPUID_HT */
845 .features[FEAT_1_EDX] =
846 PPRO_FEATURES | CPUID_VME |
847 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
848 CPUID_PSE36,
849 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
850 .features[FEAT_1_ECX] =
851 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
852 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
853 .features[FEAT_8000_0001_EDX] =
854 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
855 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
856 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
857 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
858 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
859 .features[FEAT_8000_0001_ECX] =
860 0,
861 .xlevel = 0x80000008,
862 .model_id = "Common KVM processor"
863 },
864 {
865 .name = "qemu32",
866 .level = 4,
867 .vendor = CPUID_VENDOR_INTEL,
868 .family = 6,
869 .model = 6,
870 .stepping = 3,
871 .features[FEAT_1_EDX] =
872 PPRO_FEATURES,
873 .features[FEAT_1_ECX] =
874 CPUID_EXT_SSE3,
875 .xlevel = 0x80000004,
876 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
877 },
878 {
879 .name = "kvm32",
880 .level = 5,
881 .vendor = CPUID_VENDOR_INTEL,
882 .family = 15,
883 .model = 6,
884 .stepping = 1,
885 .features[FEAT_1_EDX] =
886 PPRO_FEATURES | CPUID_VME |
887 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
888 .features[FEAT_1_ECX] =
889 CPUID_EXT_SSE3,
890 .features[FEAT_8000_0001_ECX] =
891 0,
892 .xlevel = 0x80000008,
893 .model_id = "Common 32-bit KVM processor"
894 },
895 {
896 .name = "coreduo",
897 .level = 10,
898 .vendor = CPUID_VENDOR_INTEL,
899 .family = 6,
900 .model = 14,
901 .stepping = 8,
902 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
903 .features[FEAT_1_EDX] =
904 PPRO_FEATURES | CPUID_VME |
905 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
906 CPUID_SS,
907 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
908 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
909 .features[FEAT_1_ECX] =
910 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
911 .features[FEAT_8000_0001_EDX] =
912 CPUID_EXT2_NX,
913 .xlevel = 0x80000008,
914 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
915 },
916 {
917 .name = "486",
918 .level = 1,
919 .vendor = CPUID_VENDOR_INTEL,
920 .family = 4,
921 .model = 8,
922 .stepping = 0,
923 .features[FEAT_1_EDX] =
924 I486_FEATURES,
925 .xlevel = 0,
926 },
927 {
928 .name = "pentium",
929 .level = 1,
930 .vendor = CPUID_VENDOR_INTEL,
931 .family = 5,
932 .model = 4,
933 .stepping = 3,
934 .features[FEAT_1_EDX] =
935 PENTIUM_FEATURES,
936 .xlevel = 0,
937 },
938 {
939 .name = "pentium2",
940 .level = 2,
941 .vendor = CPUID_VENDOR_INTEL,
942 .family = 6,
943 .model = 5,
944 .stepping = 2,
945 .features[FEAT_1_EDX] =
946 PENTIUM2_FEATURES,
947 .xlevel = 0,
948 },
949 {
950 .name = "pentium3",
951 .level = 3,
952 .vendor = CPUID_VENDOR_INTEL,
953 .family = 6,
954 .model = 7,
955 .stepping = 3,
956 .features[FEAT_1_EDX] =
957 PENTIUM3_FEATURES,
958 .xlevel = 0,
959 },
960 {
961 .name = "athlon",
962 .level = 2,
963 .vendor = CPUID_VENDOR_AMD,
964 .family = 6,
965 .model = 2,
966 .stepping = 3,
967 .features[FEAT_1_EDX] =
968 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
969 CPUID_MCA,
970 .features[FEAT_8000_0001_EDX] =
971 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
972 .xlevel = 0x80000008,
973 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
974 },
975 {
976 .name = "n270",
977 .level = 10,
978 .vendor = CPUID_VENDOR_INTEL,
979 .family = 6,
980 .model = 28,
981 .stepping = 2,
982 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
983 .features[FEAT_1_EDX] =
984 PPRO_FEATURES |
985 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
986 CPUID_ACPI | CPUID_SS,
987 /* Some CPUs got no CPUID_SEP */
988 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
989 * CPUID_EXT_XTPR */
990 .features[FEAT_1_ECX] =
991 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
992 CPUID_EXT_MOVBE,
993 .features[FEAT_8000_0001_EDX] =
994 CPUID_EXT2_NX,
995 .features[FEAT_8000_0001_ECX] =
996 CPUID_EXT3_LAHF_LM,
997 .xlevel = 0x80000008,
998 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
999 },
1000 {
1001 .name = "Conroe",
1002 .level = 10,
1003 .vendor = CPUID_VENDOR_INTEL,
1004 .family = 6,
1005 .model = 15,
1006 .stepping = 3,
1007 .features[FEAT_1_EDX] =
1008 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1009 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1010 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1011 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1012 CPUID_DE | CPUID_FP87,
1013 .features[FEAT_1_ECX] =
1014 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1015 .features[FEAT_8000_0001_EDX] =
1016 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1017 .features[FEAT_8000_0001_ECX] =
1018 CPUID_EXT3_LAHF_LM,
1019 .xlevel = 0x80000008,
1020 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1021 },
1022 {
1023 .name = "Penryn",
1024 .level = 10,
1025 .vendor = CPUID_VENDOR_INTEL,
1026 .family = 6,
1027 .model = 23,
1028 .stepping = 3,
1029 .features[FEAT_1_EDX] =
1030 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1031 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1032 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1033 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1034 CPUID_DE | CPUID_FP87,
1035 .features[FEAT_1_ECX] =
1036 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1037 CPUID_EXT_SSE3,
1038 .features[FEAT_8000_0001_EDX] =
1039 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1040 .features[FEAT_8000_0001_ECX] =
1041 CPUID_EXT3_LAHF_LM,
1042 .xlevel = 0x80000008,
1043 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1044 },
1045 {
1046 .name = "Nehalem",
1047 .level = 11,
1048 .vendor = CPUID_VENDOR_INTEL,
1049 .family = 6,
1050 .model = 26,
1051 .stepping = 3,
1052 .features[FEAT_1_EDX] =
1053 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1054 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1055 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1056 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1057 CPUID_DE | CPUID_FP87,
1058 .features[FEAT_1_ECX] =
1059 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1060 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1061 .features[FEAT_8000_0001_EDX] =
1062 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1063 .features[FEAT_8000_0001_ECX] =
1064 CPUID_EXT3_LAHF_LM,
1065 .xlevel = 0x80000008,
1066 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1067 },
1068 {
1069 .name = "Westmere",
1070 .level = 11,
1071 .vendor = CPUID_VENDOR_INTEL,
1072 .family = 6,
1073 .model = 44,
1074 .stepping = 1,
1075 .features[FEAT_1_EDX] =
1076 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1077 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1078 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1079 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1080 CPUID_DE | CPUID_FP87,
1081 .features[FEAT_1_ECX] =
1082 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1083 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1084 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1085 .features[FEAT_8000_0001_EDX] =
1086 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1087 .features[FEAT_8000_0001_ECX] =
1088 CPUID_EXT3_LAHF_LM,
1089 .features[FEAT_6_EAX] =
1090 CPUID_6_EAX_ARAT,
1091 .xlevel = 0x80000008,
1092 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1093 },
1094 {
1095 .name = "SandyBridge",
1096 .level = 0xd,
1097 .vendor = CPUID_VENDOR_INTEL,
1098 .family = 6,
1099 .model = 42,
1100 .stepping = 1,
1101 .features[FEAT_1_EDX] =
1102 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1103 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1104 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1105 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1106 CPUID_DE | CPUID_FP87,
1107 .features[FEAT_1_ECX] =
1108 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1109 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1110 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1111 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1112 CPUID_EXT_SSE3,
1113 .features[FEAT_8000_0001_EDX] =
1114 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1115 CPUID_EXT2_SYSCALL,
1116 .features[FEAT_8000_0001_ECX] =
1117 CPUID_EXT3_LAHF_LM,
1118 .features[FEAT_XSAVE] =
1119 CPUID_XSAVE_XSAVEOPT,
1120 .features[FEAT_6_EAX] =
1121 CPUID_6_EAX_ARAT,
1122 .xlevel = 0x80000008,
1123 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1124 },
1125 {
1126 .name = "IvyBridge",
1127 .level = 0xd,
1128 .vendor = CPUID_VENDOR_INTEL,
1129 .family = 6,
1130 .model = 58,
1131 .stepping = 9,
1132 .features[FEAT_1_EDX] =
1133 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1134 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1135 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1136 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1137 CPUID_DE | CPUID_FP87,
1138 .features[FEAT_1_ECX] =
1139 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1140 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1141 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1142 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1143 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1144 .features[FEAT_7_0_EBX] =
1145 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1146 CPUID_7_0_EBX_ERMS,
1147 .features[FEAT_8000_0001_EDX] =
1148 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1149 CPUID_EXT2_SYSCALL,
1150 .features[FEAT_8000_0001_ECX] =
1151 CPUID_EXT3_LAHF_LM,
1152 .features[FEAT_XSAVE] =
1153 CPUID_XSAVE_XSAVEOPT,
1154 .features[FEAT_6_EAX] =
1155 CPUID_6_EAX_ARAT,
1156 .xlevel = 0x80000008,
1157 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1158 },
1159 {
1160 .name = "Haswell-noTSX",
1161 .level = 0xd,
1162 .vendor = CPUID_VENDOR_INTEL,
1163 .family = 6,
1164 .model = 60,
1165 .stepping = 1,
1166 .features[FEAT_1_EDX] =
1167 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1168 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1169 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1170 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1171 CPUID_DE | CPUID_FP87,
1172 .features[FEAT_1_ECX] =
1173 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1174 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1175 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1176 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1177 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1178 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1179 .features[FEAT_8000_0001_EDX] =
1180 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1181 CPUID_EXT2_SYSCALL,
1182 .features[FEAT_8000_0001_ECX] =
1183 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1184 .features[FEAT_7_0_EBX] =
1185 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1186 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1187 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1188 .features[FEAT_XSAVE] =
1189 CPUID_XSAVE_XSAVEOPT,
1190 .features[FEAT_6_EAX] =
1191 CPUID_6_EAX_ARAT,
1192 .xlevel = 0x80000008,
1193 .model_id = "Intel Core Processor (Haswell, no TSX)",
1194 }, {
1195 .name = "Haswell",
1196 .level = 0xd,
1197 .vendor = CPUID_VENDOR_INTEL,
1198 .family = 6,
1199 .model = 60,
1200 .stepping = 1,
1201 .features[FEAT_1_EDX] =
1202 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1203 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1204 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1205 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1206 CPUID_DE | CPUID_FP87,
1207 .features[FEAT_1_ECX] =
1208 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1209 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1210 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1211 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1212 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1213 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1214 .features[FEAT_8000_0001_EDX] =
1215 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1216 CPUID_EXT2_SYSCALL,
1217 .features[FEAT_8000_0001_ECX] =
1218 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1219 .features[FEAT_7_0_EBX] =
1220 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1221 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1222 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1223 CPUID_7_0_EBX_RTM,
1224 .features[FEAT_XSAVE] =
1225 CPUID_XSAVE_XSAVEOPT,
1226 .features[FEAT_6_EAX] =
1227 CPUID_6_EAX_ARAT,
1228 .xlevel = 0x80000008,
1229 .model_id = "Intel Core Processor (Haswell)",
1230 },
1231 {
1232 .name = "Broadwell-noTSX",
1233 .level = 0xd,
1234 .vendor = CPUID_VENDOR_INTEL,
1235 .family = 6,
1236 .model = 61,
1237 .stepping = 2,
1238 .features[FEAT_1_EDX] =
1239 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1240 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1241 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1242 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1243 CPUID_DE | CPUID_FP87,
1244 .features[FEAT_1_ECX] =
1245 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1246 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1247 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1248 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1249 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1250 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1251 .features[FEAT_8000_0001_EDX] =
1252 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1253 CPUID_EXT2_SYSCALL,
1254 .features[FEAT_8000_0001_ECX] =
1255 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1256 .features[FEAT_7_0_EBX] =
1257 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1258 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1259 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1260 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1261 CPUID_7_0_EBX_SMAP,
1262 .features[FEAT_XSAVE] =
1263 CPUID_XSAVE_XSAVEOPT,
1264 .features[FEAT_6_EAX] =
1265 CPUID_6_EAX_ARAT,
1266 .xlevel = 0x80000008,
1267 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1268 },
1269 {
1270 .name = "Broadwell",
1271 .level = 0xd,
1272 .vendor = CPUID_VENDOR_INTEL,
1273 .family = 6,
1274 .model = 61,
1275 .stepping = 2,
1276 .features[FEAT_1_EDX] =
1277 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1278 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1279 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1280 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1281 CPUID_DE | CPUID_FP87,
1282 .features[FEAT_1_ECX] =
1283 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1284 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1285 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1286 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1287 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1288 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1289 .features[FEAT_8000_0001_EDX] =
1290 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1291 CPUID_EXT2_SYSCALL,
1292 .features[FEAT_8000_0001_ECX] =
1293 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1294 .features[FEAT_7_0_EBX] =
1295 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1296 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1297 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1298 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1299 CPUID_7_0_EBX_SMAP,
1300 .features[FEAT_XSAVE] =
1301 CPUID_XSAVE_XSAVEOPT,
1302 .features[FEAT_6_EAX] =
1303 CPUID_6_EAX_ARAT,
1304 .xlevel = 0x80000008,
1305 .model_id = "Intel Core Processor (Broadwell)",
1306 },
1307 {
1308 .name = "Skylake-Client",
1309 .level = 0xd,
1310 .vendor = CPUID_VENDOR_INTEL,
1311 .family = 6,
1312 .model = 94,
1313 .stepping = 3,
1314 .features[FEAT_1_EDX] =
1315 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1316 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1317 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1318 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1319 CPUID_DE | CPUID_FP87,
1320 .features[FEAT_1_ECX] =
1321 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1322 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1323 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1324 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1325 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1326 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1327 .features[FEAT_8000_0001_EDX] =
1328 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1329 CPUID_EXT2_SYSCALL,
1330 .features[FEAT_8000_0001_ECX] =
1331 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1332 .features[FEAT_7_0_EBX] =
1333 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1334 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1335 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1336 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1337 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
1338 /* Missing: XSAVES (not supported by some Linux versions,
1339 * including v4.1 to v4.6).
1340 * KVM doesn't yet expose any XSAVES state save component,
1341 * and the only one defined in Skylake (processor tracing)
1342 * probably will block migration anyway.
1343 */
1344 .features[FEAT_XSAVE] =
1345 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
1346 CPUID_XSAVE_XGETBV1,
1347 .features[FEAT_6_EAX] =
1348 CPUID_6_EAX_ARAT,
1349 .xlevel = 0x80000008,
1350 .model_id = "Intel Core Processor (Skylake)",
1351 },
1352 {
1353 .name = "Opteron_G1",
1354 .level = 5,
1355 .vendor = CPUID_VENDOR_AMD,
1356 .family = 15,
1357 .model = 6,
1358 .stepping = 1,
1359 .features[FEAT_1_EDX] =
1360 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1361 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1362 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1363 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1364 CPUID_DE | CPUID_FP87,
1365 .features[FEAT_1_ECX] =
1366 CPUID_EXT_SSE3,
1367 .features[FEAT_8000_0001_EDX] =
1368 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1369 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1370 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1371 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1372 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1373 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1374 .xlevel = 0x80000008,
1375 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1376 },
1377 {
1378 .name = "Opteron_G2",
1379 .level = 5,
1380 .vendor = CPUID_VENDOR_AMD,
1381 .family = 15,
1382 .model = 6,
1383 .stepping = 1,
1384 .features[FEAT_1_EDX] =
1385 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1386 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1387 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1388 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1389 CPUID_DE | CPUID_FP87,
1390 .features[FEAT_1_ECX] =
1391 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1392 /* Missing: CPUID_EXT2_RDTSCP */
1393 .features[FEAT_8000_0001_EDX] =
1394 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1395 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1396 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1397 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1398 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1399 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1400 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1401 .features[FEAT_8000_0001_ECX] =
1402 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1403 .xlevel = 0x80000008,
1404 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1405 },
1406 {
1407 .name = "Opteron_G3",
1408 .level = 5,
1409 .vendor = CPUID_VENDOR_AMD,
1410 .family = 15,
1411 .model = 6,
1412 .stepping = 1,
1413 .features[FEAT_1_EDX] =
1414 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1415 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1416 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1417 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1418 CPUID_DE | CPUID_FP87,
1419 .features[FEAT_1_ECX] =
1420 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1421 CPUID_EXT_SSE3,
1422 /* Missing: CPUID_EXT2_RDTSCP */
1423 .features[FEAT_8000_0001_EDX] =
1424 CPUID_EXT2_LM | CPUID_EXT2_FXSR |
1425 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1426 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1427 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1428 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1429 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1430 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1431 .features[FEAT_8000_0001_ECX] =
1432 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1433 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1434 .xlevel = 0x80000008,
1435 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1436 },
1437 {
1438 .name = "Opteron_G4",
1439 .level = 0xd,
1440 .vendor = CPUID_VENDOR_AMD,
1441 .family = 21,
1442 .model = 1,
1443 .stepping = 2,
1444 .features[FEAT_1_EDX] =
1445 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1446 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1447 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1448 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1449 CPUID_DE | CPUID_FP87,
1450 .features[FEAT_1_ECX] =
1451 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1452 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1453 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1454 CPUID_EXT_SSE3,
1455 /* Missing: CPUID_EXT2_RDTSCP */
1456 .features[FEAT_8000_0001_EDX] =
1457 CPUID_EXT2_LM |
1458 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1459 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1460 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1461 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1462 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1463 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1464 .features[FEAT_8000_0001_ECX] =
1465 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1466 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1467 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1468 CPUID_EXT3_LAHF_LM,
1469 /* no xsaveopt! */
1470 .xlevel = 0x8000001A,
1471 .model_id = "AMD Opteron 62xx class CPU",
1472 },
1473 {
1474 .name = "Opteron_G5",
1475 .level = 0xd,
1476 .vendor = CPUID_VENDOR_AMD,
1477 .family = 21,
1478 .model = 2,
1479 .stepping = 0,
1480 .features[FEAT_1_EDX] =
1481 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1482 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1483 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1484 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1485 CPUID_DE | CPUID_FP87,
1486 .features[FEAT_1_ECX] =
1487 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1488 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1489 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1490 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1491 /* Missing: CPUID_EXT2_RDTSCP */
1492 .features[FEAT_8000_0001_EDX] =
1493 CPUID_EXT2_LM |
1494 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1495 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1496 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1497 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1498 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1499 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1500 .features[FEAT_8000_0001_ECX] =
1501 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1502 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1503 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1504 CPUID_EXT3_LAHF_LM,
1505 /* no xsaveopt! */
1506 .xlevel = 0x8000001A,
1507 .model_id = "AMD Opteron 63xx class CPU",
1508 },
1509 };
1510
/* A (property name, property value) pair; used below for tables of CPU
 * property overrides such as kvm_default_props. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
1514
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },  /* end-of-table marker; relied on by the lookup loop */
};
1531
/* Override the default @value of property @prop in kvm_default_props.
 *
 * @prop must already be present in the table: if it is not found, the
 * loop stops on the { NULL, NULL } terminator and the assert below
 * fires.  @value is stored by pointer, not copied, so it must outlive
 * the table.
 */
void x86_cpu_change_kvm_default(const char *prop, const char *value)
{
    PropValue *pv;
    for (pv = kvm_default_props; pv->prop; pv++) {
        if (!strcmp(pv->prop, prop)) {
            pv->value = value;
            break;
        }
    }

    /* It is valid to call this function only for properties that
     * are already present in the kvm_default_props table.
     */
    assert(pv->prop);
}
1547
1548 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1549 bool migratable_only);
1550
1551 #ifdef CONFIG_KVM
1552
1553 static bool lmce_supported(void)
1554 {
1555 uint64_t mce_cap;
1556
1557 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
1558 return false;
1559 }
1560
1561 return !!(mce_cap & MCG_LMCE_P);
1562 }
1563
/* Fill @str with the host CPU's brand (model-id) string.
 *
 * Reads host CPUID leaves 0x80000002..0x80000004, each of which returns
 * 16 bytes of the string in EAX..EDX; @str must therefore have room for
 * at least 48 bytes.  Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        /* copy the four registers in EAX, EBX, ECX, EDX order */
        memcpy(str + i * 16 + 0, &eax, 4);
        memcpy(str + i * 16 + 4, &ebx, 4);
        memcpy(str + i * 16 + 8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
1578
1579 static X86CPUDefinition host_cpudef;
1580
/* Properties specific to the "host" CPU model */
static Property host_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
1586
/* class_init for the "host" CPU model
 *
 * This function may be called before KVM is initialized.
 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    /* "host" is only usable with KVM */
    xcc->kvm_required = true;

    /* CPUID leaf 0: vendor string is returned in EBX, EDX, ECX order */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* CPUID leaf 1 EAX: stepping [3:0], model [7:4], family [11:8],
     * ext model [19:16], ext family [27:20].
     * NOTE(review): the extended fields are folded in unconditionally;
     * the strict CPUID rule only adds them when the base field is
     * saturated — assumed harmless because they read as zero otherwise.
     */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
    /* Reason: host_x86_cpu_initfn() dies when !kvm_enabled() */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
1619
/* instance_init for the "host" CPU model: pull the host's CPUID level
 * limits and optional capabilities from KVM. */
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* If KVM is disabled, x86_cpu_realizefn() will report an error later */
    if (kvm_enabled()) {
        /* Maximum supported basic (0x0), extended (0x80000000) and
         * Centaur (0xC0000000) CPUID leaves, as reported by KVM */
        env->cpuid_min_level =
            kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
        env->cpuid_min_xlevel =
            kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
        env->cpuid_min_xlevel2 =
            kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1647
/* QOM type registration for the KVM-only "host" CPU model */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1654
1655 #endif
1656
1657 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1658 {
1659 FeatureWordInfo *f = &feature_word_info[w];
1660 int i;
1661
1662 for (i = 0; i < 32; ++i) {
1663 if ((1UL << i) & mask) {
1664 const char *reg = get_register_name_32(f->cpuid_reg);
1665 assert(reg);
1666 fprintf(stderr, "warning: %s doesn't support requested feature: "
1667 "CPUID.%02XH:%s%s%s [bit %d]\n",
1668 kvm_enabled() ? "host" : "TCG",
1669 f->cpuid_eax, reg,
1670 f->feat_names[i] ? "." : "",
1671 f->feat_names[i] ? f->feat_names[i] : "", i);
1672 }
1673 }
1674 }
1675
1676 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
1677 const char *name, void *opaque,
1678 Error **errp)
1679 {
1680 X86CPU *cpu = X86_CPU(obj);
1681 CPUX86State *env = &cpu->env;
1682 int64_t value;
1683
1684 value = (env->cpuid_version >> 8) & 0xf;
1685 if (value == 0xf) {
1686 value += (env->cpuid_version >> 20) & 0xff;
1687 }
1688 visit_type_int(v, name, &value, errp);
1689 }
1690
/* QOM setter for "family": encode the display family into
 * env->cpuid_version in CPUID[1].EAX format.  Values up to 0x0f go in
 * the base family field (bits 11:8); larger values saturate the base
 * field at 0xf and put the remainder in the extended family field
 * (bits 27:20), matching x86_cpuid_version_get_family(). */
static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
                                         const char *name, void *opaque,
                                         Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff + 0xf;  /* extended-field max + base-field max */
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* clear base (11:8) and extended (27:20) family fields, then re-encode */
    env->cpuid_version &= ~0xff00f00;
    if (value > 0x0f) {
        env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
    } else {
        env->cpuid_version |= value << 8;
    }
}
1720
1721 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
1722 const char *name, void *opaque,
1723 Error **errp)
1724 {
1725 X86CPU *cpu = X86_CPU(obj);
1726 CPUX86State *env = &cpu->env;
1727 int64_t value;
1728
1729 value = (env->cpuid_version >> 4) & 0xf;
1730 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1731 visit_type_int(v, name, &value, errp);
1732 }
1733
/* QOM setter for "model": the value's low nibble goes into the base
 * model field (bits 7:4) and its high nibble into the extended model
 * field (bits 19:16), mirroring x86_cpuid_version_get_model(). */
static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
                                        Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xff;  /* 4-bit base + 4-bit extended model */
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    /* clear base (7:4) and extended (19:16) model fields, then re-encode */
    env->cpuid_version &= ~0xf00f0;
    env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
}
1759
1760 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1761 const char *name, void *opaque,
1762 Error **errp)
1763 {
1764 X86CPU *cpu = X86_CPU(obj);
1765 CPUX86State *env = &cpu->env;
1766 int64_t value;
1767
1768 value = env->cpuid_version & 0xf;
1769 visit_type_int(v, name, &value, errp);
1770 }
1771
/* QOM setter for "stepping": stored in the low nibble (bits 3:0) of
 * env->cpuid_version; valid range is 0..0xf. */
static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
                                           const char *name, void *opaque,
                                           Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    const int64_t min = 0;
    const int64_t max = 0xf;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    env->cpuid_version &= ~0xf;
    env->cpuid_version |= value & 0xf;
}
1797
/* QOM getter for "vendor": rebuild the CPUID vendor string from the
 * three packed 32-bit vendor words.  The returned buffer is
 * heap-allocated; the caller owns it (g_free). */
static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    char *value;

    value = g_malloc(CPUID_VENDOR_SZ + 1);
    x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
                             env->cpuid_vendor3);
    return value;
}
1809
/* QOM setter for "vendor": @value must be exactly CPUID_VENDOR_SZ
 * characters; it is packed into cpuid_vendor1/2/3 as three
 * little-endian 32-bit words of four characters each. */
static void x86_cpuid_set_vendor(Object *obj, const char *value,
                                 Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    int i;

    if (strlen(value) != CPUID_VENDOR_SZ) {
        error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
        return;
    }

    env->cpuid_vendor1 = 0;
    env->cpuid_vendor2 = 0;
    env->cpuid_vendor3 = 0;
    for (i = 0; i < 4; i++) {
        /* byte i of each word comes from chars [0..3], [4..7], [8..11] */
        env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
        env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
        env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
    }
}
1831
1832 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1833 {
1834 X86CPU *cpu = X86_CPU(obj);
1835 CPUX86State *env = &cpu->env;
1836 char *value;
1837 int i;
1838
1839 value = g_malloc(48 + 1);
1840 for (i = 0; i < 48; i++) {
1841 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1842 }
1843 value[48] = '\0';
1844 return value;
1845 }
1846
1847 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1848 Error **errp)
1849 {
1850 X86CPU *cpu = X86_CPU(obj);
1851 CPUX86State *env = &cpu->env;
1852 int c, len, i;
1853
1854 if (model_id == NULL) {
1855 model_id = "";
1856 }
1857 len = strlen(model_id);
1858 memset(env->cpuid_model, 0, 48);
1859 for (i = 0; i < 48; i++) {
1860 if (i >= len) {
1861 c = '\0';
1862 } else {
1863 c = (uint8_t)model_id[i];
1864 }
1865 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1866 }
1867 }
1868
1869 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
1870 void *opaque, Error **errp)
1871 {
1872 X86CPU *cpu = X86_CPU(obj);
1873 int64_t value;
1874
1875 value = cpu->env.tsc_khz * 1000;
1876 visit_type_int(v, name, &value, errp);
1877 }
1878
/* QOM setter for "tsc-frequency": value is in Hz, stored internally in
 * kHz in both tsc_khz and user_tsc_khz.
 * NOTE(review): user_tsc_khz presumably records that the frequency was
 * explicitly requested by the user — confirm against its consumers. */
static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
                                   void *opaque, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    const int64_t min = 0;
    const int64_t max = INT64_MAX;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, name, &value, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
                   name ? name : "null", value, min, max);
        return;
    }

    cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
}
1901
/* Generic getter for "feature-words" and "filtered-features" properties.
 *
 * @opaque points at the CPU's uint32_t feature-word array to report.
 * Builds a QAPI X86CPUFeatureWordInfoList describing every feature word
 * (input leaf, optional input ECX, register, and bit values) and visits
 * it.  The list nodes live on the stack, which is safe only because the
 * visit fully consumes the list before this function returns. */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
1930
1931 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1932 void *opaque, Error **errp)
1933 {
1934 X86CPU *cpu = X86_CPU(obj);
1935 int64_t value = cpu->hyperv_spinlock_attempts;
1936
1937 visit_type_int(v, name, &value, errp);
1938 }
1939
1940 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
1941 void *opaque, Error **errp)
1942 {
1943 const int64_t min = 0xFFF;
1944 const int64_t max = UINT_MAX;
1945 X86CPU *cpu = X86_CPU(obj);
1946 Error *err = NULL;
1947 int64_t value;
1948
1949 visit_type_int(v, name, &value, &err);
1950 if (err) {
1951 error_propagate(errp, err);
1952 return;
1953 }
1954
1955 if (value < min || value > max) {
1956 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1957 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1958 object_get_typename(obj), name ? name : "null",
1959 value, min, max);
1960 return;
1961 }
1962 cpu->hyperv_spinlock_attempts = value;
1963 }
1964
/* qdev property type used for "hv-spinlocks": an int whose setter
 * enforces the 0xFFF..UINT_MAX valid range. */
static PropertyInfo qdev_prop_spinlocks = {
    .name  = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1970
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    for (char *p = strchr(s, '_'); p != NULL; p = strchr(p, '_')) {
        *p = '-';
    }
}
1980
/* Compatibility hack to maintain the legacy +-feat semantics, where
 * +feat/-feat overwrites any feature set by feat=on|off even if the
 * latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static FeatureWordArray plus_features = { 0 };
static FeatureWordArray minus_features = { 0 };
1988
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Registers each "feat=val" pair as a global property on @typename, so
 * it applies to every CPU of that type.  "+feat"/"-feat" are handled via
 * the legacy plus_features/minus_features bitmaps instead.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    Error *local_err = NULL;
    static bool cpu_globals_initialized;

    /* Only the first call does anything; globals are registered once. */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    /* NOTE: strtok() mutates the input string and keeps static state,
     * so this function is not reentrant. */
    for (featurestr = strtok(features, ",");
         featurestr && !local_err;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
            continue;
        } else if (featurestr[0] == '-') {
            add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
            continue;
        }

        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            /* bare "feat" is shorthand for "feat=on" */
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Special case: */
        if (!strcmp(name, "tsc-freq")) {
            int64_t tsc_freq;
            char *err;

            tsc_freq = qemu_strtosz_suffix_unit(val, &err,
                                           QEMU_STRTOSZ_DEFSUFFIX_B, 1000);
            if (tsc_freq < 0 || *err) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            /* "tsc-freq" is the legacy spelling of "tsc-frequency" */
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (local_err) {
        error_propagate(errp, local_err);
    }
}
2064
2065 /* Print all cpuid feature names in featureset
2066 */
2067 static void listflags(FILE *f, fprintf_function print, const char **featureset)
2068 {
2069 int bit;
2070 bool first = true;
2071
2072 for (bit = 0; bit < 32; bit++) {
2073 if (featureset[bit]) {
2074 print(f, "%s%s", first ? "" : " ", featureset[bit]);
2075 first = false;
2076 }
2077 }
2078 }
2079
/* generate CPU information. */
/* Print the list of built-in CPU models (plus "host" under KVM) and all
 * recognized CPUID feature-flag names, one feature word per line. */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    X86CPUDefinition *def;
    char buf[256];
    int i;

    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        def = &builtin_x86_defs[i];
        snprintf(buf, sizeof(buf), "%s", def->name);
        (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
    }
#ifdef CONFIG_KVM
    (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
                   "KVM processor with all supported host features "
                   "(only available in KVM mode)");
#endif

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        (*cpu_fprintf)(f, " ");
        listflags(f, cpu_fprintf, fw->feat_names);
        (*cpu_fprintf)(f, "\n");
    }
}
2107
2108 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
2109 {
2110 CpuDefinitionInfoList *cpu_list = NULL;
2111 X86CPUDefinition *def;
2112 int i;
2113
2114 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
2115 CpuDefinitionInfoList *entry;
2116 CpuDefinitionInfo *info;
2117
2118 def = &builtin_x86_defs[i];
2119 info = g_malloc0(sizeof(*info));
2120 info->name = g_strdup(def->name);
2121
2122 entry = g_malloc0(sizeof(*entry));
2123 entry->value = info;
2124 entry->next = cpu_list;
2125 cpu_list = entry;
2126 }
2127
2128 return cpu_list;
2129 }
2130
/* Return the bits of feature word @w that the current accelerator can
 * actually support, optionally restricted to migratable flags.
 * With neither KVM nor TCG enabled, no filtering is applied at all. */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        /* Ask KVM which bits of this word's CPUID leaf it supports */
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                                    wi->cpuid_ecx,
                                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        /* unknown accelerator: report everything as supported */
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
2151
/*
 * Filters CPU feature words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, cpu->migratable);
        uint32_t requested_features = env->features[w];
        /* drop unsupported bits and remember exactly which were dropped */
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            /* only warn when the user asked for check/enforce mode */
            if (cpu->check_cpuid || cpu->enforce_cpuid) {
                report_unavailable_features(w, cpu->filtered_features[w]);
            }
            rv = 1;
        }
    }

    return rv;
}
2179
2180 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
2181 {
2182 PropValue *pv;
2183 for (pv = props; pv->prop; pv++) {
2184 if (!pv->value) {
2185 continue;
2186 }
2187 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
2188 &error_abort);
2189 }
2190 }
2191
/* Load data from X86CPUDefinition
 *
 * Copies the model definition into the CPU object via its QOM
 * properties, applies KVM-specific defaults, and picks the vendor
 * string (the host's vendor under KVM, the model's otherwise).
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_int(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_int(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* Special cases not set in the X86CPUDefinition structs: */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    }

    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (kvm_enabled()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
2242
/* Create and realize an X86CPU from a "-cpu"-style model string.
 * Returns NULL on failure. */
X86CPU *cpu_x86_init(const char *cpu_model)
{
    return X86_CPU(cpu_generic_init(TYPE_X86_CPU, cpu_model));
}
2247
/* class_init for per-model CPU subclasses: stash the X86CPUDefinition
 * (passed via class_data) into the class for later use. */
static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
{
    X86CPUDefinition *cpudef = data;
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    xcc->cpu_def = cpudef;
}
2255
/* Register a QOM type for one built-in CPU model definition. */
static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    type_register(&ti);
    g_free(typename);
}
2269
2270 #if !defined(CONFIG_USER_ONLY)
2271
/* Clear the CPUID_APIC bit from the CPUID[1].EDX feature word. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2276
2277 #endif /* !CONFIG_USER_ONLY */
2278
2279 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2280 uint32_t *eax, uint32_t *ebx,
2281 uint32_t *ecx, uint32_t *edx)
2282 {
2283 X86CPU *cpu = x86_env_get_cpu(env);
2284 CPUState *cs = CPU(cpu);
2285 uint32_t pkg_offset;
2286
2287 /* test if maximum index reached */
2288 if (index & 0x80000000) {
2289 if (index > env->cpuid_xlevel) {
2290 if (env->cpuid_xlevel2 > 0) {
2291 /* Handle the Centaur's CPUID instruction. */
2292 if (index > env->cpuid_xlevel2) {
2293 index = env->cpuid_xlevel2;
2294 } else if (index < 0xC0000000) {
2295 index = env->cpuid_xlevel;
2296 }
2297 } else {
2298 /* Intel documentation states that invalid EAX input will
2299 * return the same information as EAX=cpuid_level
2300 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2301 */
2302 index = env->cpuid_level;
2303 }
2304 }
2305 } else {
2306 if (index > env->cpuid_level)
2307 index = env->cpuid_level;
2308 }
2309
2310 switch(index) {
2311 case 0:
2312 *eax = env->cpuid_level;
2313 *ebx = env->cpuid_vendor1;
2314 *edx = env->cpuid_vendor2;
2315 *ecx = env->cpuid_vendor3;
2316 break;
2317 case 1:
2318 *eax = env->cpuid_version;
2319 *ebx = (cpu->apic_id << 24) |
2320 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2321 *ecx = env->features[FEAT_1_ECX];
2322 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
2323 *ecx |= CPUID_EXT_OSXSAVE;
2324 }
2325 *edx = env->features[FEAT_1_EDX];
2326 if (cs->nr_cores * cs->nr_threads > 1) {
2327 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2328 *edx |= CPUID_HT;
2329 }
2330 break;
2331 case 2:
2332 /* cache info: needed for Pentium Pro compatibility */
2333 if (cpu->cache_info_passthrough) {
2334 host_cpuid(index, 0, eax, ebx, ecx, edx);
2335 break;
2336 }
2337 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2338 *ebx = 0;
2339 if (!cpu->enable_l3_cache) {
2340 *ecx = 0;
2341 } else {
2342 *ecx = L3_N_DESCRIPTOR;
2343 }
2344 *edx = (L1D_DESCRIPTOR << 16) | \
2345 (L1I_DESCRIPTOR << 8) | \
2346 (L2_DESCRIPTOR);
2347 break;
2348 case 4:
2349 /* cache info: needed for Core compatibility */
2350 if (cpu->cache_info_passthrough) {
2351 host_cpuid(index, count, eax, ebx, ecx, edx);
2352 *eax &= ~0xFC000000;
2353 } else {
2354 *eax = 0;
2355 switch (count) {
2356 case 0: /* L1 dcache info */
2357 *eax |= CPUID_4_TYPE_DCACHE | \
2358 CPUID_4_LEVEL(1) | \
2359 CPUID_4_SELF_INIT_LEVEL;
2360 *ebx = (L1D_LINE_SIZE - 1) | \
2361 ((L1D_PARTITIONS - 1) << 12) | \
2362 ((L1D_ASSOCIATIVITY - 1) << 22);
2363 *ecx = L1D_SETS - 1;
2364 *edx = CPUID_4_NO_INVD_SHARING;
2365 break;
2366 case 1: /* L1 icache info */
2367 *eax |= CPUID_4_TYPE_ICACHE | \
2368 CPUID_4_LEVEL(1) | \
2369 CPUID_4_SELF_INIT_LEVEL;
2370 *ebx = (L1I_LINE_SIZE - 1) | \
2371 ((L1I_PARTITIONS - 1) << 12) | \
2372 ((L1I_ASSOCIATIVITY - 1) << 22);
2373 *ecx = L1I_SETS - 1;
2374 *edx = CPUID_4_NO_INVD_SHARING;
2375 break;
2376 case 2: /* L2 cache info */
2377 *eax |= CPUID_4_TYPE_UNIFIED | \
2378 CPUID_4_LEVEL(2) | \
2379 CPUID_4_SELF_INIT_LEVEL;
2380 if (cs->nr_threads > 1) {
2381 *eax |= (cs->nr_threads - 1) << 14;
2382 }
2383 *ebx = (L2_LINE_SIZE - 1) | \
2384 ((L2_PARTITIONS - 1) << 12) | \
2385 ((L2_ASSOCIATIVITY - 1) << 22);
2386 *ecx = L2_SETS - 1;
2387 *edx = CPUID_4_NO_INVD_SHARING;
2388 break;
2389 case 3: /* L3 cache info */
2390 if (!cpu->enable_l3_cache) {
2391 *eax = 0;
2392 *ebx = 0;
2393 *ecx = 0;
2394 *edx = 0;
2395 break;
2396 }
2397 *eax |= CPUID_4_TYPE_UNIFIED | \
2398 CPUID_4_LEVEL(3) | \
2399 CPUID_4_SELF_INIT_LEVEL;
2400 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2401 *eax |= ((1 << pkg_offset) - 1) << 14;
2402 *ebx = (L3_N_LINE_SIZE - 1) | \
2403 ((L3_N_PARTITIONS - 1) << 12) | \
2404 ((L3_N_ASSOCIATIVITY - 1) << 22);
2405 *ecx = L3_N_SETS - 1;
2406 *edx = CPUID_4_INCLUSIVE | CPUID_4_COMPLEX_IDX;
2407 break;
2408 default: /* end of info */
2409 *eax = 0;
2410 *ebx = 0;
2411 *ecx = 0;
2412 *edx = 0;
2413 break;
2414 }
2415 }
2416
2417 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2418 if ((*eax & 31) && cs->nr_cores > 1) {
2419 *eax |= (cs->nr_cores - 1) << 26;
2420 }
2421 break;
2422 case 5:
2423 /* mwait info: needed for Core compatibility */
2424 *eax = 0; /* Smallest monitor-line size in bytes */
2425 *ebx = 0; /* Largest monitor-line size in bytes */
2426 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2427 *edx = 0;
2428 break;
2429 case 6:
2430 /* Thermal and Power Leaf */
2431 *eax = env->features[FEAT_6_EAX];
2432 *ebx = 0;
2433 *ecx = 0;
2434 *edx = 0;
2435 break;
2436 case 7:
2437 /* Structured Extended Feature Flags Enumeration Leaf */
2438 if (count == 0) {
2439 *eax = 0; /* Maximum ECX value for sub-leaves */
2440 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2441 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
2442 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
2443 *ecx |= CPUID_7_0_ECX_OSPKE;
2444 }
2445 *edx = 0; /* Reserved */
2446 } else {
2447 *eax = 0;
2448 *ebx = 0;
2449 *ecx = 0;
2450 *edx = 0;
2451 }
2452 break;
2453 case 9:
2454 /* Direct Cache Access Information Leaf */
2455 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2456 *ebx = 0;
2457 *ecx = 0;
2458 *edx = 0;
2459 break;
2460 case 0xA:
2461 /* Architectural Performance Monitoring Leaf */
2462 if (kvm_enabled() && cpu->enable_pmu) {
2463 KVMState *s = cs->kvm_state;
2464
2465 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2466 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2467 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2468 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2469 } else {
2470 *eax = 0;
2471 *ebx = 0;
2472 *ecx = 0;
2473 *edx = 0;
2474 }
2475 break;
2476 case 0xB:
2477 /* Extended Topology Enumeration Leaf */
2478 if (!cpu->enable_cpuid_0xb) {
2479 *eax = *ebx = *ecx = *edx = 0;
2480 break;
2481 }
2482
2483 *ecx = count & 0xff;
2484 *edx = cpu->apic_id;
2485
2486 switch (count) {
2487 case 0:
2488 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
2489 *ebx = cs->nr_threads;
2490 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
2491 break;
2492 case 1:
2493 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
2494 *ebx = cs->nr_cores * cs->nr_threads;
2495 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
2496 break;
2497 default:
2498 *eax = 0;
2499 *ebx = 0;
2500 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
2501 }
2502
2503 assert(!(*eax & ~0x1f));
2504 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
2505 break;
2506 case 0xD: {
2507 uint64_t ena_mask;
2508 int i;
2509
2510 /* Processor Extended State */
2511 *eax = 0;
2512 *ebx = 0;
2513 *ecx = 0;
2514 *edx = 0;
2515 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
2516 break;
2517 }
2518
2519 ena_mask = (XSTATE_FP_MASK | XSTATE_SSE_MASK);
2520 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
2521 const ExtSaveArea *esa = &x86_ext_save_areas[i];
2522 if (env->features[esa->feature] & esa->bits) {
2523 ena_mask |= (1ULL << i);
2524 }
2525 }
2526
2527 if (kvm_enabled()) {
2528 KVMState *s = cs->kvm_state;
2529 uint64_t kvm_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
2530 kvm_mask <<= 32;
2531 kvm_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
2532 ena_mask &= kvm_mask;
2533 }
2534
2535 if (count == 0) {
2536 *ecx = xsave_area_size(ena_mask);;
2537 *eax = ena_mask;
2538 *edx = ena_mask >> 32;
2539 *ebx = *ecx;
2540 } else if (count == 1) {
2541 *eax = env->features[FEAT_XSAVE];
2542 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
2543 const ExtSaveArea *esa = &x86_ext_save_areas[count];
2544 if ((ena_mask >> count) & 1) {
2545 *eax = esa->size;
2546 *ebx = esa->offset;
2547 }
2548 }
2549 break;
2550 }
2551 case 0x80000000:
2552 *eax = env->cpuid_xlevel;
2553 *ebx = env->cpuid_vendor1;
2554 *edx = env->cpuid_vendor2;
2555 *ecx = env->cpuid_vendor3;
2556 break;
2557 case 0x80000001:
2558 *eax = env->cpuid_version;
2559 *ebx = 0;
2560 *ecx = env->features[FEAT_8000_0001_ECX];
2561 *edx = env->features[FEAT_8000_0001_EDX];
2562
2563 /* The Linux kernel checks for the CMPLegacy bit and
2564 * discards multiple thread information if it is set.
2565 * So don't set it here for Intel to make Linux guests happy.
2566 */
2567 if (cs->nr_cores * cs->nr_threads > 1) {
2568 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
2569 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
2570 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
2571 *ecx |= 1 << 1; /* CmpLegacy bit */
2572 }
2573 }
2574 break;
2575 case 0x80000002:
2576 case 0x80000003:
2577 case 0x80000004:
2578 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2579 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2580 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2581 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2582 break;
2583 case 0x80000005:
2584 /* cache info (L1 cache) */
2585 if (cpu->cache_info_passthrough) {
2586 host_cpuid(index, 0, eax, ebx, ecx, edx);
2587 break;
2588 }
2589 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2590 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2591 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2592 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2593 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2594 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2595 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2596 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2597 break;
2598 case 0x80000006:
2599 /* cache info (L2 cache) */
2600 if (cpu->cache_info_passthrough) {
2601 host_cpuid(index, 0, eax, ebx, ecx, edx);
2602 break;
2603 }
2604 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2605 (L2_DTLB_2M_ENTRIES << 16) | \
2606 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2607 (L2_ITLB_2M_ENTRIES);
2608 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2609 (L2_DTLB_4K_ENTRIES << 16) | \
2610 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2611 (L2_ITLB_4K_ENTRIES);
2612 *ecx = (L2_SIZE_KB_AMD << 16) | \
2613 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2614 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2615 if (!cpu->enable_l3_cache) {
2616 *edx = ((L3_SIZE_KB / 512) << 18) | \
2617 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2618 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2619 } else {
2620 *edx = ((L3_N_SIZE_KB_AMD / 512) << 18) | \
2621 (AMD_ENC_ASSOC(L3_N_ASSOCIATIVITY) << 12) | \
2622 (L3_N_LINES_PER_TAG << 8) | (L3_N_LINE_SIZE);
2623 }
2624 break;
2625 case 0x80000007:
2626 *eax = 0;
2627 *ebx = 0;
2628 *ecx = 0;
2629 *edx = env->features[FEAT_8000_0007_EDX];
2630 break;
2631 case 0x80000008:
2632 /* virtual & phys address size in low 2 bytes. */
2633 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2634 /* 64 bit processor, 48 bits virtual, configurable
2635 * physical bits.
2636 */
2637 *eax = 0x00003000 + cpu->phys_bits;
2638 } else {
2639 *eax = cpu->phys_bits;
2640 }
2641 *ebx = 0;
2642 *ecx = 0;
2643 *edx = 0;
2644 if (cs->nr_cores * cs->nr_threads > 1) {
2645 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2646 }
2647 break;
2648 case 0x8000000A:
2649 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2650 *eax = 0x00000001; /* SVM Revision */
2651 *ebx = 0x00000010; /* nr of ASIDs */
2652 *ecx = 0;
2653 *edx = env->features[FEAT_SVM]; /* optional features */
2654 } else {
2655 *eax = 0;
2656 *ebx = 0;
2657 *ecx = 0;
2658 *edx = 0;
2659 }
2660 break;
2661 case 0xC0000000:
2662 *eax = env->cpuid_xlevel2;
2663 *ebx = 0;
2664 *ecx = 0;
2665 *edx = 0;
2666 break;
2667 case 0xC0000001:
2668 /* Support for VIA CPU's CPUID instruction */
2669 *eax = env->cpuid_version;
2670 *ebx = 0;
2671 *ecx = 0;
2672 *edx = env->features[FEAT_C000_0001_EDX];
2673 break;
2674 case 0xC0000002:
2675 case 0xC0000003:
2676 case 0xC0000004:
2677 /* Reserved for the future, and now filled with zero */
2678 *eax = 0;
2679 *ebx = 0;
2680 *ecx = 0;
2681 *edx = 0;
2682 break;
2683 default:
2684 /* reserved values: zero */
2685 *eax = 0;
2686 *ebx = 0;
2687 *ecx = 0;
2688 *edx = 0;
2689 break;
2690 }
2691 }
2692
/* CPUClass::reset()
 *
 * Bring the vCPU to its architectural power-on/reset state: real mode
 * at F000:FFF0, FPU/SSE/xsave state in INIT, MTRRs disabled, and (in
 * system mode) the BSP designation and KVM vCPU state re-applied.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Fields after the end_reset_fields marker survive reset */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* CS base 0xffff0000 + IP 0xfff0 gives the architectural reset
     * vector 0xfffffff0 */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state.  */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2816
2817 #ifndef CONFIG_USER_ONLY
/* Return true if this CPU is the bootstrap processor, as indicated by
 * the BSP bit of its APIC base MSR. */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
2822
/* Machine-reset callback: reset one CPU.
 * TODO: remove me, when reset over QOM tree is implemented */
static void x86_cpu_machine_reset_cb(void *opaque)
{
    X86CPU *cpu = opaque;
    cpu_reset(CPU(cpu));
}
2829 #endif
2830
/* Initialize Machine Check Architecture state (MCG_CAP, MCG_CTL and the
 * per-bank MCi_CTL MSRs), but only for family >= 6 CPUs that advertise
 * both MCE and MCA in CPUID[1].EDX. */
static void mce_init(X86CPU *cpu)
{
    CPUX86State *cenv = &cpu->env;
    unsigned int bank;

    if (((cenv->cpuid_version >> 8) & 0xf) >= 6
        && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
            (CPUID_MCE | CPUID_MCA)) {
        cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
                        (cpu->enable_lmce ? MCG_LMCE_P : 0);
        cenv->mcg_ctl = ~(uint64_t)0;
        /* enable all error-reporting banks */
        for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
            cenv->mce_banks[bank * 4] = ~(uint64_t)0;
        }
    }
}
2847
2848 #ifndef CONFIG_USER_ONLY
/* Create the local APIC device for @cpu, choosing the implementation
 * ("kvm-apic", "xen-apic" or the emulated "apic") based on the
 * accelerator in use.  The APIC is attached as the "lapic" child. */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_apic_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = DEVICE(object_new(apic_type));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* drop the reference held by object_new(); the child property keeps
     * the APIC alive */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint8(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
2872
/* Realize the CPU's local APIC (if one was created) and map the APIC
 * MMIO region into system memory.  The MMIO region is shared by all
 * CPUs and therefore mapped only once. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
     }
}
2895
/* machine-done notifier: if the machine exposes a /machine/smram region,
 * alias it into the CPU's address space (initially disabled; SMM entry
 * code is expected to enable it). */
static void x86_cpu_machine_done(Notifier *n, void *unused)
{
    X86CPU *cpu = container_of(n, X86CPU, machine_done);
    MemoryRegion *smram =
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);

    if (smram) {
        cpu->smram = g_new(MemoryRegion, 1);
        memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
                                 smram, 0, 1ull << 32);
        memory_region_set_enabled(cpu->smram, false);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
    }
}
2910 #else
/* user-mode emulation has no APIC: nothing to realize */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
2914 #endif
2915
/* Note: Only safe for use on x86(-64) hosts */
/* Query the host CPU's physical address width via CPUID leaf
 * 0x80000008, falling back to 36 bits when the leaf is absent. */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;
    uint32_t host_phys_bits;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax >= 0x80000008) {
        host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
        /* Note: According to AMD doc 25481 rev 2.34 they have a field
         * at 23:16 that can specify a maximum physical address bits for
         * the guest that can override this value; but I've not seen
         * anything with that set.
         */
        host_phys_bits = eax & 0xff;
    } else {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        host_phys_bits = 36;
    }

    return host_phys_bits;
}
2941
2942 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
2943 {
2944 if (*min < value) {
2945 *min = value;
2946 }
2947 }
2948
/* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
{
    CPUX86State *env = &cpu->env;
    FeatureWordInfo *fi = &feature_word_info[w];
    uint32_t eax = fi->cpuid_eax;
    /* top nibble selects basic (0), extended (0x8...) or Centaur
     * (0xC...) leaf range */
    uint32_t region = eax & 0xF0000000;

    /* nothing enabled in this word: no level requirement */
    if (!env->features[w]) {
        return;
    }

    switch (region) {
    case 0x00000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
        break;
    case 0x80000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
        break;
    case 0xC0000000:
        x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
        break;
    }
}
2973
/* True if the configured CPUID vendor words match the Intel vendor ID */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
/* True if the configured CPUID vendor words match the AMD vendor ID */
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* Realize an X86CPU device.
 *
 * Applies command-line feature overrides, auto-adjusts CPUID levels,
 * filters unsupported features, computes the physical address width,
 * creates the local APIC (softmmu only), sets up the per-CPU address
 * space (TCG only), and finally starts the vCPU and resets it.
 *
 * Errors are reported through @errp; on failure the CPU is left
 * unrealized.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;
    FeatureWord w;

    /* Models such as "host" only make sense under KVM. */
    if (xcc->kvm_required && !kvm_enabled()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    /*TODO: cpu->host_features incorrectly overwrites features
     * set using "feat=on|off". Once we fix this, we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->host_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            env->features[w] =
                x86_cpu_get_supported_feature_word(w, cpu->migratable);
        }
    }

    /* Apply the "+feat"/"-feat" overrides parsed from the -cpu option. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        cpu->env.features[w] |= plus_features[w];
        cpu->env.features[w] &= ~minus_features[w];
    }


    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        /* Raise cpuid_min_{level,xlevel,xlevel2} for every feature word
         * that has at least one bit enabled.
         */
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

    /* Drop features the accelerator can't provide; with enforce_cpuid
     * any filtered feature is a hard error instead of a warning.
     */
    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (kvm_enabled()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                error_report("Warning: Host physical bits (%u)"
                                 " does not match phys-bits property (%u)",
                                 host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            /* TCG hard-codes the physical address width. */
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }
    cpu_exec_init(cs, &error_abort);

    if (tcg_enabled()) {
        tcg_x86_init();
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed whenever the feature set advertises one, or for SMP. */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        AddressSpace *newas = g_new(AddressSpace, 1);

        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);
        address_space_init(newas, cpu->cpu_as_root, "CPU");
        cs->num_ases = 1;
        cpu_address_space_init(cs, newas, 0);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
3208
3209 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
3210 {
3211 X86CPU *cpu = X86_CPU(dev);
3212
3213 #ifndef CONFIG_USER_ONLY
3214 cpu_remove_sync(CPU(dev));
3215 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
3216 #endif
3217
3218 if (cpu->apic_state) {
3219 object_unparent(OBJECT(cpu->apic_state));
3220 cpu->apic_state = NULL;
3221 }
3222 }
3223
/* Backing state for a boolean feature-bit property: a pointer into a
 * feature word and the mask of bits the property controls.  Registering
 * the same property name again ORs additional bits into @mask.
 */
typedef struct BitProperty {
    uint32_t *ptr;  /* feature word the property reads/writes */
    uint32_t mask;  /* bit(s) within *ptr controlled by the property */
} BitProperty;
3228
3229 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
3230 void *opaque, Error **errp)
3231 {
3232 BitProperty *fp = opaque;
3233 bool value = (*fp->ptr & fp->mask) == fp->mask;
3234 visit_type_bool(v, name, &value, errp);
3235 }
3236
3237 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
3238 void *opaque, Error **errp)
3239 {
3240 DeviceState *dev = DEVICE(obj);
3241 BitProperty *fp = opaque;
3242 Error *local_err = NULL;
3243 bool value;
3244
3245 if (dev->realized) {
3246 qdev_prop_set_after_realize(dev, name, errp);
3247 return;
3248 }
3249
3250 visit_type_bool(v, name, &value, &local_err);
3251 if (local_err) {
3252 error_propagate(errp, local_err);
3253 return;
3254 }
3255
3256 if (value) {
3257 *fp->ptr |= fp->mask;
3258 } else {
3259 *fp->ptr &= ~fp->mask;
3260 }
3261 }
3262
3263 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
3264 void *opaque)
3265 {
3266 BitProperty *prop = opaque;
3267 g_free(prop);
3268 }
3269
3270 /* Register a boolean property to get/set a single bit in a uint32_t field.
3271 *
3272 * The same property name can be registered multiple times to make it affect
3273 * multiple bits in the same FeatureWord. In that case, the getter will return
3274 * true only if all bits are set.
3275 */
3276 static void x86_cpu_register_bit_prop(X86CPU *cpu,
3277 const char *prop_name,
3278 uint32_t *field,
3279 int bitnr)
3280 {
3281 BitProperty *fp;
3282 ObjectProperty *op;
3283 uint32_t mask = (1UL << bitnr);
3284
3285 op = object_property_find(OBJECT(cpu), prop_name, NULL);
3286 if (op) {
3287 fp = op->opaque;
3288 assert(fp->ptr == field);
3289 fp->mask |= mask;
3290 } else {
3291 fp = g_new0(BitProperty, 1);
3292 fp->ptr = field;
3293 fp->mask = mask;
3294 object_property_add(OBJECT(cpu), prop_name, "bool",
3295 x86_cpu_get_bit_prop,
3296 x86_cpu_set_bit_prop,
3297 x86_cpu_release_bit_prop, fp, &error_abort);
3298 }
3299 }
3300
3301 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
3302 FeatureWord w,
3303 int bitnr)
3304 {
3305 Object *obj = OBJECT(cpu);
3306 int i;
3307 char **names;
3308 FeatureWordInfo *fi = &feature_word_info[w];
3309
3310 if (!fi->feat_names[bitnr]) {
3311 return;
3312 }
3313
3314 names = g_strsplit(fi->feat_names[bitnr], "|", 0);
3315
3316 feat2prop(names[0]);
3317 x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
3318
3319 for (i = 1; names[i]; i++) {
3320 feat2prop(names[i]);
3321 object_property_add_alias(obj, names[i], obj, names[0],
3322 &error_abort);
3323 }
3324
3325 g_strfreev(names);
3326 }
3327
/* QOM instance init for X86CPU.
 *
 * Registers the per-CPU properties (CPUID version fields, vendor,
 * model-id, tsc-frequency, feature-word introspection, and one boolean
 * property per named feature bit), then loads the class's model
 * defaults.  Anything that can fail is deferred to realize.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only introspection of the configured and filtered features. */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
}
3375
3376 static int64_t x86_cpu_get_arch_id(CPUState *cs)
3377 {
3378 X86CPU *cpu = X86_CPU(cs);
3379
3380 return cpu->apic_id;
3381 }
3382
3383 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
3384 {
3385 X86CPU *cpu = X86_CPU(cs);
3386
3387 return cpu->env.cr[0] & CR0_PG_MASK;
3388 }
3389
3390 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
3391 {
3392 X86CPU *cpu = X86_CPU(cs);
3393
3394 cpu->env.eip = value;
3395 }
3396
3397 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
3398 {
3399 X86CPU *cpu = X86_CPU(cs);
3400
3401 cpu->env.eip = tb->pc - tb->cs_base;
3402 }
3403
3404 static bool x86_cpu_has_work(CPUState *cs)
3405 {
3406 X86CPU *cpu = X86_CPU(cs);
3407 CPUX86State *env = &cpu->env;
3408
3409 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
3410 CPU_INTERRUPT_POLL)) &&
3411 (env->eflags & IF_MASK)) ||
3412 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
3413 CPU_INTERRUPT_INIT |
3414 CPU_INTERRUPT_SIPI |
3415 CPU_INTERRUPT_MCE)) ||
3416 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
3417 !(env->hflags & HF_SMM_MASK));
3418 }
3419
/* qdev properties common to all X86CPU subtypes. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* Softmmu: topology IDs must be assigned before realize. */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* Hyper-V enlightenments (see hyperv_* handling in kvm_i386). */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* CPUID level overrides; UINT32_MAX means "derive from min-*level"
     * in x86_cpu_realizefn().
     */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_END_OF_LIST()
};
3463
/* Class init for the abstract TYPE_X86_CPU: wire up the CPUClass and
 * DeviceClass hooks shared by every x86 CPU model.
 */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Save the parent hooks before overriding them, so the overrides
     * can chain to them.
     */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->unrealize = x86_cpu_unrealizefn;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;

    dc->cannot_instantiate_with_device_add_yet = false;
    /*
     * Reason: x86_cpu_initfn() calls cpu_exec_init(), which saves the
     * object in cpus -> dangling pointer after final object_unref().
     */
    dc->cannot_destroy_with_object_finalize_yet = true;
}
3516
/* Abstract base type for all x86 CPU models; concrete model subtypes
 * are registered by x86_cpu_register_types() below.
 */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3526
3527 static void x86_cpu_register_types(void)
3528 {
3529 int i;
3530
3531 type_register_static(&x86_cpu_type_info);
3532 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3533 x86_register_cpudef_type(&builtin_x86_defs[i]);
3534 }
3535 #ifdef CONFIG_KVM
3536 type_register_static(&host_x86_cpu_type_info);
3537 #endif
3538 }
3539
3540 type_init(x86_cpu_register_types)