mirror_qemu.git / target-i386 / cpu.c ("target-i386: Simplify listflags() function")
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
23
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28 #include "topology.h"
29
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
33
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
38
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
43
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/cpu/icc_bus.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
53 /* Cache topology CPUID constants: */
54
55 /* CPUID Leaf 2 Descriptors */
56
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60
61
62 /* CPUID Leaf 4 constants: */
63
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
68
69 #define CPUID_4_LEVEL(l) ((l) << 5)
70
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
73
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
78
79 #define ASSOC_FULL 0xFF
80
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
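/* For example, AMD_ENC_ASSOC(L2_ASSOCIATIVITY) with the 16-way L2 cache
 * defined below evaluates to 0x8, and AMD_ENC_ASSOC(ASSOC_FULL) to 0xF;
 * any associativity not listed in the table above encodes as 0 (invalid).
 */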
94
95
96 /* Definitions of the hardcoded cache entries we expose: */
97
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
109
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
121
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
133
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
139
140 /* TLB definitions: */
141
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
146
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
151
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
156
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
161
162
163
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
166 {
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
172 }
173 dst[CPUID_VENDOR_SZ] = '\0';
174 }
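/* Example: the Intel vendor registers EBX=0x756e6547 ("Genu"),
 * EDX=0x49656e69 ("ineI"), ECX=0x6c65746e ("ntel") are decoded above,
 * least-significant byte first, into the string "GenuineIntel".
 */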
175
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
179 */
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
189 };
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
199 };
200 /* Feature names that are already defined in feature_name[] but are set in
201  * CPUID[8000_0001].EDX on AMD CPUs don't have their names in
202 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203 * if and only if CPU vendor is AMD.
204 */
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
214 };
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
224 };
225
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
235 };
236
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
246 };
247
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257 };
258
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
263 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
264 };
265
266 static const char *cpuid_apm_edx_feature_name[] = {
267 NULL, NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 "invtsc", NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
275 };
276
277 static const char *cpuid_xsave_feature_name[] = {
278 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
279 NULL, NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286 };
287
288 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
289 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
290 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
291 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
292 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
293 CPUID_PSE36 | CPUID_FXSR)
294 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
295 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
296 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
297 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
298 CPUID_PAE | CPUID_SEP | CPUID_APIC)
299
300 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
301 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
302 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
303 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
304 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
305 /* partly implemented:
306 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
307 /* missing:
308 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
309 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
310 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
311 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
312 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
313 /* missing:
314 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
315 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
316 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
317 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
318 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
319 CPUID_EXT_RDRAND */
320
321 #ifdef TARGET_X86_64
322 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
323 #else
324 #define TCG_EXT2_X86_64_FEATURES 0
325 #endif
326
327 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
328 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
329 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
330 TCG_EXT2_X86_64_FEATURES)
331 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
332 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
333 #define TCG_EXT4_FEATURES 0
334 #define TCG_SVM_FEATURES 0
335 #define TCG_KVM_FEATURES 0
336 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
337 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
338 /* missing:
339 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
340 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
341 CPUID_7_0_EBX_RDSEED */
342 #define TCG_APM_FEATURES 0
343
344
345 typedef struct FeatureWordInfo {
346 const char **feat_names;
347 uint32_t cpuid_eax; /* Input EAX for CPUID */
348 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
349 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
350 int cpuid_reg; /* output register (R_* constant) */
351 uint32_t tcg_features; /* Feature flags supported by TCG */
352 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
353 } FeatureWordInfo;
354
355 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
356 [FEAT_1_EDX] = {
357 .feat_names = feature_name,
358 .cpuid_eax = 1, .cpuid_reg = R_EDX,
359 .tcg_features = TCG_FEATURES,
360 },
361 [FEAT_1_ECX] = {
362 .feat_names = ext_feature_name,
363 .cpuid_eax = 1, .cpuid_reg = R_ECX,
364 .tcg_features = TCG_EXT_FEATURES,
365 },
366 [FEAT_8000_0001_EDX] = {
367 .feat_names = ext2_feature_name,
368 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
369 .tcg_features = TCG_EXT2_FEATURES,
370 },
371 [FEAT_8000_0001_ECX] = {
372 .feat_names = ext3_feature_name,
373 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
374 .tcg_features = TCG_EXT3_FEATURES,
375 },
376 [FEAT_C000_0001_EDX] = {
377 .feat_names = ext4_feature_name,
378 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
379 .tcg_features = TCG_EXT4_FEATURES,
380 },
381 [FEAT_KVM] = {
382 .feat_names = kvm_feature_name,
383 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
384 .tcg_features = TCG_KVM_FEATURES,
385 },
386 [FEAT_SVM] = {
387 .feat_names = svm_feature_name,
388 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
389 .tcg_features = TCG_SVM_FEATURES,
390 },
391 [FEAT_7_0_EBX] = {
392 .feat_names = cpuid_7_0_ebx_feature_name,
393 .cpuid_eax = 7,
394 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
395 .cpuid_reg = R_EBX,
396 .tcg_features = TCG_7_0_EBX_FEATURES,
397 },
398 [FEAT_8000_0007_EDX] = {
399 .feat_names = cpuid_apm_edx_feature_name,
400 .cpuid_eax = 0x80000007,
401 .cpuid_reg = R_EDX,
402 .tcg_features = TCG_APM_FEATURES,
403 .unmigratable_flags = CPUID_APM_INVTSC,
404 },
405 [FEAT_XSAVE] = {
406 .feat_names = cpuid_xsave_feature_name,
407 .cpuid_eax = 0xd,
408 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
409 .cpuid_reg = R_EAX,
410 .tcg_features = 0,
411 },
412 };
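/* Reading the table above: the FEAT_7_0_EBX entry says that each name in
 * cpuid_7_0_ebx_feature_name[] corresponds to a bit of
 * CPUID.(EAX=07H,ECX=0):EBX; e.g. "smep" is index 7 in that array, so it
 * maps to bit 7 of that register.
 */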
413
414 typedef struct X86RegisterInfo32 {
415 /* Name of register */
416 const char *name;
417 /* QAPI enum value register */
418 X86CPURegister32 qapi_enum;
419 } X86RegisterInfo32;
420
421 #define REGISTER(reg) \
422 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
423 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
424 REGISTER(EAX),
425 REGISTER(ECX),
426 REGISTER(EDX),
427 REGISTER(EBX),
428 REGISTER(ESP),
429 REGISTER(EBP),
430 REGISTER(ESI),
431 REGISTER(EDI),
432 };
433 #undef REGISTER
434
435 typedef struct ExtSaveArea {
436 uint32_t feature, bits;
437 uint32_t offset, size;
438 } ExtSaveArea;
439
440 static const ExtSaveArea ext_save_areas[] = {
441 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
442 .offset = 0x240, .size = 0x100 },
443 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
444 .offset = 0x3c0, .size = 0x40 },
445 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
446 .offset = 0x400, .size = 0x40 },
447 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
448 .offset = 0x440, .size = 0x40 },
449 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
450 .offset = 0x480, .size = 0x200 },
451 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
452 .offset = 0x680, .size = 0x400 },
453 };
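/* The offsets and sizes above follow the standard (non-compacted) XSAVE
 * layout: e.g. state component 2 (AVX, the YMM_Hi128 region) is 256 bytes
 * starting at byte 576 (0x240), immediately after the 512-byte legacy area
 * and the 64-byte XSAVE header.
 */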
454
455 const char *get_register_name_32(unsigned int reg)
456 {
457 if (reg >= CPU_NB_REGS32) {
458 return NULL;
459 }
460 return x86_reg_info_32[reg].name;
461 }
462
463 /* KVM-specific features that are automatically added to all CPU models
464 * when KVM is enabled.
465 */
466 static uint32_t kvm_default_features[FEATURE_WORDS] = {
467 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
468 (1 << KVM_FEATURE_NOP_IO_DELAY) |
469 (1 << KVM_FEATURE_CLOCKSOURCE2) |
470 (1 << KVM_FEATURE_ASYNC_PF) |
471 (1 << KVM_FEATURE_STEAL_TIME) |
472 (1 << KVM_FEATURE_PV_EOI) |
473 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
474 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
475 };
476
477 /* Features that are not added by default to any CPU model when KVM is enabled.
478 */
479 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
480 [FEAT_1_EDX] = CPUID_ACPI,
481 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
482 [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
483 };
484
485 void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
486 {
487 kvm_default_features[w] &= ~features;
488 }
489
490 void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
491 {
492 kvm_default_unset_features[w] &= ~features;
493 }
494
495 /*
496 * Returns the set of feature flags that are supported and migratable by
497 * QEMU, for a given FeatureWord.
498 */
499 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
500 {
501 FeatureWordInfo *wi = &feature_word_info[w];
502 uint32_t r = 0;
503 int i;
504
505 for (i = 0; i < 32; i++) {
506 uint32_t f = 1U << i;
507 /* If the feature name is unknown, it is not supported by QEMU yet */
508 if (!wi->feat_names[i]) {
509 continue;
510 }
511 /* Skip features known to QEMU, but explicitly marked as unmigratable */
512 if (wi->unmigratable_flags & f) {
513 continue;
514 }
515 r |= f;
516 }
517 return r;
518 }
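/* For instance, "invtsc" in FEAT_8000_0007_EDX has a name in the table but
 * is listed in unmigratable_flags, so it is filtered out of the value
 * returned here.
 */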
519
520 void host_cpuid(uint32_t function, uint32_t count,
521 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
522 {
523 uint32_t vec[4];
524
525 #ifdef __x86_64__
526 asm volatile("cpuid"
527 : "=a"(vec[0]), "=b"(vec[1]),
528 "=c"(vec[2]), "=d"(vec[3])
529 : "0"(function), "c"(count) : "cc");
530 #elif defined(__i386__)
531 asm volatile("pusha \n\t"
532 "cpuid \n\t"
533 "mov %%eax, 0(%2) \n\t"
534 "mov %%ebx, 4(%2) \n\t"
535 "mov %%ecx, 8(%2) \n\t"
536 "mov %%edx, 12(%2) \n\t"
537 "popa"
538 : : "a"(function), "c"(count), "S"(vec)
539 : "memory", "cc");
540 #else
541 abort();
542 #endif
543
544 if (eax)
545 *eax = vec[0];
546 if (ebx)
547 *ebx = vec[1];
548 if (ecx)
549 *ecx = vec[2];
550 if (edx)
551 *edx = vec[3];
552 }
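/* Typical use (see host_x86_cpu_class_init() below):
 * host_cpuid(0, 0, &eax, &ebx, &ecx, &edx) returns the highest basic CPUID
 * leaf in EAX and the vendor string words in EBX/EDX/ECX.
 */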
553
554 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
555
556 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
557 * a substring. ex if !NULL points to the first char after a substring,
558  * otherwise the string is assumed to be sized by a terminating nul.
559 * Return lexical ordering of *s1:*s2.
560 */
561 static int sstrcmp(const char *s1, const char *e1,
562 const char *s2, const char *e2)
563 {
564 for (;;) {
565 if (!*s1 || !*s2 || *s1 != *s2)
566 return (*s1 - *s2);
567 ++s1, ++s2;
568 if (s1 == e1 && s2 == e2)
569 return (0);
570 else if (s1 == e1)
571 return (*s2);
572 else if (s2 == e2)
573 return (*s1);
574 }
575 }
576
577 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
578 * '|' delimited (possibly empty) strings in which case search for a match
579 * within the alternatives proceeds left to right. Return 0 for success,
580 * non-zero otherwise.
581 */
582 static int altcmp(const char *s, const char *e, const char *altstr)
583 {
584 const char *p, *q;
585
586 for (q = p = altstr; ; ) {
587 while (*p && *p != '|')
588 ++p;
589 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
590 return (0);
591 if (!*p)
592 return (1);
593 else
594 q = ++p;
595 }
596 }
597
598 /* search featureset for flag *[s..e), if found set corresponding bit in
599 * *pval and return true, otherwise return false
600 */
601 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
602 const char **featureset)
603 {
604 uint32_t mask;
605 const char **ppc;
606 bool found = false;
607
608 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
609 if (*ppc && !altcmp(s, e, *ppc)) {
610 *pval |= mask;
611 found = true;
612 }
613 }
614 return found;
615 }
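/* Example: the table entry "sse4.1|sse4_1" (bit 19 of ext_feature_name[])
 * matches either spelling, so lookup_feature(&val, "sse4_1", NULL,
 * ext_feature_name) sets bit 19 (CPUID_EXT_SSE41) in *pval.
 */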
616
617 static void add_flagname_to_bitmaps(const char *flagname,
618 FeatureWordArray words,
619 Error **errp)
620 {
621 FeatureWord w;
622 for (w = 0; w < FEATURE_WORDS; w++) {
623 FeatureWordInfo *wi = &feature_word_info[w];
624 if (wi->feat_names &&
625 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
626 break;
627 }
628 }
629 if (w == FEATURE_WORDS) {
630 error_setg(errp, "CPU feature %s not found", flagname);
631 }
632 }
633
634 /* CPU class name definitions: */
635
636 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
637 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
638
639 /* Return type name for a given CPU model name
640 * Caller is responsible for freeing the returned string.
641 */
642 static char *x86_cpu_type_name(const char *model_name)
643 {
644 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
645 }
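/* e.g. x86_cpu_type_name("qemu64") yields "qemu64-" TYPE_X86_CPU, which on a
 * 64-bit target build expands to "qemu64-x86_64-cpu".
 */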
646
647 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
648 {
649 ObjectClass *oc;
650 char *typename;
651
652 if (cpu_model == NULL) {
653 return NULL;
654 }
655
656 typename = x86_cpu_type_name(cpu_model);
657 oc = object_class_by_name(typename);
658 g_free(typename);
659 return oc;
660 }
661
662 struct X86CPUDefinition {
663 const char *name;
664 uint32_t level;
665 uint32_t xlevel;
666 uint32_t xlevel2;
667 /* vendor is zero-terminated, 12 character ASCII string */
668 char vendor[CPUID_VENDOR_SZ + 1];
669 int family;
670 int model;
671 int stepping;
672 FeatureWordArray features;
673 char model_id[48];
674 bool cache_info_passthrough;
675 };
676
677 static X86CPUDefinition builtin_x86_defs[] = {
678 {
679 .name = "qemu64",
680 .level = 4,
681 .vendor = CPUID_VENDOR_AMD,
682 .family = 6,
683 .model = 6,
684 .stepping = 3,
685 .features[FEAT_1_EDX] =
686 PPRO_FEATURES |
687 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
688 CPUID_PSE36,
689 .features[FEAT_1_ECX] =
690 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
691 .features[FEAT_8000_0001_EDX] =
692 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
693 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
694 .features[FEAT_8000_0001_ECX] =
695 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
696 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
697 .xlevel = 0x8000000A,
698 },
699 {
700 .name = "phenom",
701 .level = 5,
702 .vendor = CPUID_VENDOR_AMD,
703 .family = 16,
704 .model = 2,
705 .stepping = 3,
706 /* Missing: CPUID_HT */
707 .features[FEAT_1_EDX] =
708 PPRO_FEATURES |
709 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
710 CPUID_PSE36 | CPUID_VME,
711 .features[FEAT_1_ECX] =
712 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
713 CPUID_EXT_POPCNT,
714 .features[FEAT_8000_0001_EDX] =
715 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
716 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
717 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
718 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
719 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
720 CPUID_EXT3_CR8LEG,
721 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
722 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
723 .features[FEAT_8000_0001_ECX] =
724 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
725 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
726 /* Missing: CPUID_SVM_LBRV */
727 .features[FEAT_SVM] =
728 CPUID_SVM_NPT,
729 .xlevel = 0x8000001A,
730 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
731 },
732 {
733 .name = "core2duo",
734 .level = 10,
735 .vendor = CPUID_VENDOR_INTEL,
736 .family = 6,
737 .model = 15,
738 .stepping = 11,
739 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
740 .features[FEAT_1_EDX] =
741 PPRO_FEATURES |
742 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
743 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
744 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
745 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
746 .features[FEAT_1_ECX] =
747 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
748 CPUID_EXT_CX16,
749 .features[FEAT_8000_0001_EDX] =
750 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
751 .features[FEAT_8000_0001_ECX] =
752 CPUID_EXT3_LAHF_LM,
753 .xlevel = 0x80000008,
754 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
755 },
756 {
757 .name = "kvm64",
758 .level = 5,
759 .vendor = CPUID_VENDOR_INTEL,
760 .family = 15,
761 .model = 6,
762 .stepping = 1,
763 /* Missing: CPUID_HT */
764 .features[FEAT_1_EDX] =
765 PPRO_FEATURES | CPUID_VME |
766 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
767 CPUID_PSE36,
768 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
769 .features[FEAT_1_ECX] =
770 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
771 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
772 .features[FEAT_8000_0001_EDX] =
773 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
774 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
775 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
776 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
777 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
778 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
779 .features[FEAT_8000_0001_ECX] =
780 0,
781 .xlevel = 0x80000008,
782 .model_id = "Common KVM processor"
783 },
784 {
785 .name = "qemu32",
786 .level = 4,
787 .vendor = CPUID_VENDOR_INTEL,
788 .family = 6,
789 .model = 6,
790 .stepping = 3,
791 .features[FEAT_1_EDX] =
792 PPRO_FEATURES,
793 .features[FEAT_1_ECX] =
794 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
795 .xlevel = 0x80000004,
796 },
797 {
798 .name = "kvm32",
799 .level = 5,
800 .vendor = CPUID_VENDOR_INTEL,
801 .family = 15,
802 .model = 6,
803 .stepping = 1,
804 .features[FEAT_1_EDX] =
805 PPRO_FEATURES | CPUID_VME |
806 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
807 .features[FEAT_1_ECX] =
808 CPUID_EXT_SSE3,
809 .features[FEAT_8000_0001_EDX] =
810 PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
811 .features[FEAT_8000_0001_ECX] =
812 0,
813 .xlevel = 0x80000008,
814 .model_id = "Common 32-bit KVM processor"
815 },
816 {
817 .name = "coreduo",
818 .level = 10,
819 .vendor = CPUID_VENDOR_INTEL,
820 .family = 6,
821 .model = 14,
822 .stepping = 8,
823 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
824 .features[FEAT_1_EDX] =
825 PPRO_FEATURES | CPUID_VME |
826 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
827 CPUID_SS,
828 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
829 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
830 .features[FEAT_1_ECX] =
831 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
832 .features[FEAT_8000_0001_EDX] =
833 CPUID_EXT2_NX,
834 .xlevel = 0x80000008,
835 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
836 },
837 {
838 .name = "486",
839 .level = 1,
840 .vendor = CPUID_VENDOR_INTEL,
841 .family = 4,
842 .model = 8,
843 .stepping = 0,
844 .features[FEAT_1_EDX] =
845 I486_FEATURES,
846 .xlevel = 0,
847 },
848 {
849 .name = "pentium",
850 .level = 1,
851 .vendor = CPUID_VENDOR_INTEL,
852 .family = 5,
853 .model = 4,
854 .stepping = 3,
855 .features[FEAT_1_EDX] =
856 PENTIUM_FEATURES,
857 .xlevel = 0,
858 },
859 {
860 .name = "pentium2",
861 .level = 2,
862 .vendor = CPUID_VENDOR_INTEL,
863 .family = 6,
864 .model = 5,
865 .stepping = 2,
866 .features[FEAT_1_EDX] =
867 PENTIUM2_FEATURES,
868 .xlevel = 0,
869 },
870 {
871 .name = "pentium3",
872 .level = 2,
873 .vendor = CPUID_VENDOR_INTEL,
874 .family = 6,
875 .model = 7,
876 .stepping = 3,
877 .features[FEAT_1_EDX] =
878 PENTIUM3_FEATURES,
879 .xlevel = 0,
880 },
881 {
882 .name = "athlon",
883 .level = 2,
884 .vendor = CPUID_VENDOR_AMD,
885 .family = 6,
886 .model = 2,
887 .stepping = 3,
888 .features[FEAT_1_EDX] =
889 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
890 CPUID_MCA,
891 .features[FEAT_8000_0001_EDX] =
892 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
893 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
894 .xlevel = 0x80000008,
895 },
896 {
897 .name = "n270",
898 /* original is on level 10 */
899 .level = 5,
900 .vendor = CPUID_VENDOR_INTEL,
901 .family = 6,
902 .model = 28,
903 .stepping = 2,
904 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
905 .features[FEAT_1_EDX] =
906 PPRO_FEATURES |
907 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
908 CPUID_ACPI | CPUID_SS,
909         /* Some CPUs lack CPUID_SEP */
910 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
911 * CPUID_EXT_XTPR */
912 .features[FEAT_1_ECX] =
913 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
914 CPUID_EXT_MOVBE,
915 .features[FEAT_8000_0001_EDX] =
916 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
917 CPUID_EXT2_NX,
918 .features[FEAT_8000_0001_ECX] =
919 CPUID_EXT3_LAHF_LM,
920 .xlevel = 0x8000000A,
921 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
922 },
923 {
924 .name = "Conroe",
925 .level = 4,
926 .vendor = CPUID_VENDOR_INTEL,
927 .family = 6,
928 .model = 15,
929 .stepping = 3,
930 .features[FEAT_1_EDX] =
931 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
932 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
933 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
934 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
935 CPUID_DE | CPUID_FP87,
936 .features[FEAT_1_ECX] =
937 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
938 .features[FEAT_8000_0001_EDX] =
939 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
940 .features[FEAT_8000_0001_ECX] =
941 CPUID_EXT3_LAHF_LM,
942 .xlevel = 0x8000000A,
943 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
944 },
945 {
946 .name = "Penryn",
947 .level = 4,
948 .vendor = CPUID_VENDOR_INTEL,
949 .family = 6,
950 .model = 23,
951 .stepping = 3,
952 .features[FEAT_1_EDX] =
953 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
954 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
955 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
956 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
957 CPUID_DE | CPUID_FP87,
958 .features[FEAT_1_ECX] =
959 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
960 CPUID_EXT_SSE3,
961 .features[FEAT_8000_0001_EDX] =
962 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
963 .features[FEAT_8000_0001_ECX] =
964 CPUID_EXT3_LAHF_LM,
965 .xlevel = 0x8000000A,
966 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
967 },
968 {
969 .name = "Nehalem",
970 .level = 4,
971 .vendor = CPUID_VENDOR_INTEL,
972 .family = 6,
973 .model = 26,
974 .stepping = 3,
975 .features[FEAT_1_EDX] =
976 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
977 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
978 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
979 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
980 CPUID_DE | CPUID_FP87,
981 .features[FEAT_1_ECX] =
982 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
983 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
984 .features[FEAT_8000_0001_EDX] =
985 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
986 .features[FEAT_8000_0001_ECX] =
987 CPUID_EXT3_LAHF_LM,
988 .xlevel = 0x8000000A,
989 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
990 },
991 {
992 .name = "Westmere",
993 .level = 11,
994 .vendor = CPUID_VENDOR_INTEL,
995 .family = 6,
996 .model = 44,
997 .stepping = 1,
998 .features[FEAT_1_EDX] =
999 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1000 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1001 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1002 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1003 CPUID_DE | CPUID_FP87,
1004 .features[FEAT_1_ECX] =
1005 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1006 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1007 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1008 .features[FEAT_8000_0001_EDX] =
1009 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1010 .features[FEAT_8000_0001_ECX] =
1011 CPUID_EXT3_LAHF_LM,
1012 .xlevel = 0x8000000A,
1013 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1014 },
1015 {
1016 .name = "SandyBridge",
1017 .level = 0xd,
1018 .vendor = CPUID_VENDOR_INTEL,
1019 .family = 6,
1020 .model = 42,
1021 .stepping = 1,
1022 .features[FEAT_1_EDX] =
1023 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1024 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1025 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1026 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1027 CPUID_DE | CPUID_FP87,
1028 .features[FEAT_1_ECX] =
1029 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1030 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1031 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1032 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1033 CPUID_EXT_SSE3,
1034 .features[FEAT_8000_0001_EDX] =
1035 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1036 CPUID_EXT2_SYSCALL,
1037 .features[FEAT_8000_0001_ECX] =
1038 CPUID_EXT3_LAHF_LM,
1039 .features[FEAT_XSAVE] =
1040 CPUID_XSAVE_XSAVEOPT,
1041 .xlevel = 0x8000000A,
1042 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1043 },
1044 {
1045 .name = "IvyBridge",
1046 .level = 0xd,
1047 .vendor = CPUID_VENDOR_INTEL,
1048 .family = 6,
1049 .model = 58,
1050 .stepping = 9,
1051 .features[FEAT_1_EDX] =
1052 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1053 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1054 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1055 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1056 CPUID_DE | CPUID_FP87,
1057 .features[FEAT_1_ECX] =
1058 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1059 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1060 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1061 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1062 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1063 .features[FEAT_7_0_EBX] =
1064 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1065 CPUID_7_0_EBX_ERMS,
1066 .features[FEAT_8000_0001_EDX] =
1067 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1068 CPUID_EXT2_SYSCALL,
1069 .features[FEAT_8000_0001_ECX] =
1070 CPUID_EXT3_LAHF_LM,
1071 .features[FEAT_XSAVE] =
1072 CPUID_XSAVE_XSAVEOPT,
1073 .xlevel = 0x8000000A,
1074 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1075 },
1076 {
1077 .name = "Haswell",
1078 .level = 0xd,
1079 .vendor = CPUID_VENDOR_INTEL,
1080 .family = 6,
1081 .model = 60,
1082 .stepping = 1,
1083 .features[FEAT_1_EDX] =
1084 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1085 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1086 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1087 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1088 CPUID_DE | CPUID_FP87,
1089 .features[FEAT_1_ECX] =
1090 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1091 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1092 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1093 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1094 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1095 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1096 .features[FEAT_8000_0001_EDX] =
1097 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1098 CPUID_EXT2_SYSCALL,
1099 .features[FEAT_8000_0001_ECX] =
1100 CPUID_EXT3_LAHF_LM,
1101 .features[FEAT_7_0_EBX] =
1102 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1103 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1104 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1105 .features[FEAT_XSAVE] =
1106 CPUID_XSAVE_XSAVEOPT,
1107 .xlevel = 0x8000000A,
1108 .model_id = "Intel Core Processor (Haswell)",
1109 },
1110 {
1111 .name = "Broadwell",
1112 .level = 0xd,
1113 .vendor = CPUID_VENDOR_INTEL,
1114 .family = 6,
1115 .model = 61,
1116 .stepping = 2,
1117 .features[FEAT_1_EDX] =
1118 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1119 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1120 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1121 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1122 CPUID_DE | CPUID_FP87,
1123 .features[FEAT_1_ECX] =
1124 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1125 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1126 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1127 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1128 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1129 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1130 .features[FEAT_8000_0001_EDX] =
1131 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1132 CPUID_EXT2_SYSCALL,
1133 .features[FEAT_8000_0001_ECX] =
1134 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1135 .features[FEAT_7_0_EBX] =
1136 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1137 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1138 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1139 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1140 CPUID_7_0_EBX_SMAP,
1141 .features[FEAT_XSAVE] =
1142 CPUID_XSAVE_XSAVEOPT,
1143 .xlevel = 0x8000000A,
1144 .model_id = "Intel Core Processor (Broadwell)",
1145 },
1146 {
1147 .name = "Opteron_G1",
1148 .level = 5,
1149 .vendor = CPUID_VENDOR_AMD,
1150 .family = 15,
1151 .model = 6,
1152 .stepping = 1,
1153 .features[FEAT_1_EDX] =
1154 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1155 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1156 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1157 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1158 CPUID_DE | CPUID_FP87,
1159 .features[FEAT_1_ECX] =
1160 CPUID_EXT_SSE3,
1161 .features[FEAT_8000_0001_EDX] =
1162 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1163 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1164 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1165 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1166 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1167 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1168 .xlevel = 0x80000008,
1169 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1170 },
1171 {
1172 .name = "Opteron_G2",
1173 .level = 5,
1174 .vendor = CPUID_VENDOR_AMD,
1175 .family = 15,
1176 .model = 6,
1177 .stepping = 1,
1178 .features[FEAT_1_EDX] =
1179 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1180 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1181 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1182 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1183 CPUID_DE | CPUID_FP87,
1184 .features[FEAT_1_ECX] =
1185 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1186 .features[FEAT_8000_0001_EDX] =
1187 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1188 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1189 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1190 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1191 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1192 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1193 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1194 .features[FEAT_8000_0001_ECX] =
1195 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1196 .xlevel = 0x80000008,
1197 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1198 },
1199 {
1200 .name = "Opteron_G3",
1201 .level = 5,
1202 .vendor = CPUID_VENDOR_AMD,
1203 .family = 15,
1204 .model = 6,
1205 .stepping = 1,
1206 .features[FEAT_1_EDX] =
1207 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1208 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1209 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1210 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1211 CPUID_DE | CPUID_FP87,
1212 .features[FEAT_1_ECX] =
1213 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1214 CPUID_EXT_SSE3,
1215 .features[FEAT_8000_0001_EDX] =
1216 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1217 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1218 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1219 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1220 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1221 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1222 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1223 .features[FEAT_8000_0001_ECX] =
1224 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1225 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1226 .xlevel = 0x80000008,
1227 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1228 },
1229 {
1230 .name = "Opteron_G4",
1231 .level = 0xd,
1232 .vendor = CPUID_VENDOR_AMD,
1233 .family = 21,
1234 .model = 1,
1235 .stepping = 2,
1236 .features[FEAT_1_EDX] =
1237 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1238 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1239 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1240 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1241 CPUID_DE | CPUID_FP87,
1242 .features[FEAT_1_ECX] =
1243 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1244 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1245 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1246 CPUID_EXT_SSE3,
1247 .features[FEAT_8000_0001_EDX] =
1248 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1249 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1250 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1251 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1252 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1253 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1254 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1255 .features[FEAT_8000_0001_ECX] =
1256 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1257 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1258 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1259 CPUID_EXT3_LAHF_LM,
1260 /* no xsaveopt! */
1261 .xlevel = 0x8000001A,
1262 .model_id = "AMD Opteron 62xx class CPU",
1263 },
1264 {
1265 .name = "Opteron_G5",
1266 .level = 0xd,
1267 .vendor = CPUID_VENDOR_AMD,
1268 .family = 21,
1269 .model = 2,
1270 .stepping = 0,
1271 .features[FEAT_1_EDX] =
1272 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1273 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1274 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1275 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1276 CPUID_DE | CPUID_FP87,
1277 .features[FEAT_1_ECX] =
1278 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1279 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1280 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1281 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1282 .features[FEAT_8000_0001_EDX] =
1283 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1284 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1285 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1286 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1287 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1288 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1289 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1290 .features[FEAT_8000_0001_ECX] =
1291 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1292 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1293 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1294 CPUID_EXT3_LAHF_LM,
1295 /* no xsaveopt! */
1296 .xlevel = 0x8000001A,
1297 .model_id = "AMD Opteron 63xx class CPU",
1298 },
1299 };
1300
1301 /**
1302 * x86_cpu_compat_set_features:
1303 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1304 * @w: Identifies the feature word to be changed.
1305 * @feat_add: Feature bits to be added to feature word
1306 * @feat_remove: Feature bits to be removed from feature word
1307 *
1308 * Change CPU model feature bits for compatibility.
1309 *
1310 * This function may be used by machine-type compatibility functions
1311 * to enable or disable feature bits on specific CPU models.
1312 */
1313 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1314 uint32_t feat_add, uint32_t feat_remove)
1315 {
1316 X86CPUDefinition *def;
1317 int i;
1318 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1319 def = &builtin_x86_defs[i];
1320 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1321 def->features[w] |= feat_add;
1322 def->features[w] &= ~feat_remove;
1323 }
1324 }
1325 }
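/* Illustrative (hypothetical) caller: a machine-type compat function could
 * use x86_cpu_compat_set_features("Westmere", FEAT_1_ECX, 0,
 * CPUID_EXT_PCLMULQDQ) to strip a bit from one model, or pass NULL as the
 * model name to change every built-in definition.
 */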
1326
1327 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1328 bool migratable_only);
1329
1330 #ifdef CONFIG_KVM
1331
1332 static int cpu_x86_fill_model_id(char *str)
1333 {
1334 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1335 int i;
1336
1337 for (i = 0; i < 3; i++) {
1338 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1339 memcpy(str + i * 16 + 0, &eax, 4);
1340 memcpy(str + i * 16 + 4, &ebx, 4);
1341 memcpy(str + i * 16 + 8, &ecx, 4);
1342 memcpy(str + i * 16 + 12, &edx, 4);
1343 }
1344 return 0;
1345 }
1346
1347 static X86CPUDefinition host_cpudef;
1348
1349 static Property host_x86_cpu_properties[] = {
1350 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1351 DEFINE_PROP_END_OF_LIST()
1352 };
1353
1354 /* class_init for the "host" CPU model
1355 *
1356 * This function may be called before KVM is initialized.
1357 */
1358 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1359 {
1360 DeviceClass *dc = DEVICE_CLASS(oc);
1361 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1362 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1363
1364 xcc->kvm_required = true;
1365
1366 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1367 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1368
1369 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1370 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1371 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1372 host_cpudef.stepping = eax & 0x0F;
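    /* Worked example: EAX=0x000306C3 (a Haswell part) decodes to
     * family 6 + 0 = 6, model 0xC | 0x30 = 60, stepping 3.
     */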
1373
1374 cpu_x86_fill_model_id(host_cpudef.model_id);
1375
1376 xcc->cpu_def = &host_cpudef;
1377 host_cpudef.cache_info_passthrough = true;
1378
1379 /* level, xlevel, xlevel2, and the feature words are initialized on
1380 * instance_init, because they require KVM to be initialized.
1381 */
1382
1383 dc->props = host_x86_cpu_properties;
1384 }
1385
1386 static void host_x86_cpu_initfn(Object *obj)
1387 {
1388 X86CPU *cpu = X86_CPU(obj);
1389 CPUX86State *env = &cpu->env;
1390 KVMState *s = kvm_state;
1391
1392 assert(kvm_enabled());
1393
1394 /* We can't fill the features array here because we don't know yet if
1395 * "migratable" is true or false.
1396 */
1397 cpu->host_features = true;
1398
1399 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1400 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1401 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1402
1403 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1404 }
1405
1406 static const TypeInfo host_x86_cpu_type_info = {
1407 .name = X86_CPU_TYPE_NAME("host"),
1408 .parent = TYPE_X86_CPU,
1409 .instance_init = host_x86_cpu_initfn,
1410 .class_init = host_x86_cpu_class_init,
1411 };
1412
1413 #endif
1414
1415 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1416 {
1417 FeatureWordInfo *f = &feature_word_info[w];
1418 int i;
1419
1420 for (i = 0; i < 32; ++i) {
1421 if (1 << i & mask) {
1422 const char *reg = get_register_name_32(f->cpuid_reg);
1423 assert(reg);
1424 fprintf(stderr, "warning: %s doesn't support requested feature: "
1425 "CPUID.%02XH:%s%s%s [bit %d]\n",
1426 kvm_enabled() ? "host" : "TCG",
1427 f->cpuid_eax, reg,
1428 f->feat_names[i] ? "." : "",
1429 f->feat_names[i] ? f->feat_names[i] : "", i);
1430 }
1431 }
1432 }
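/* Sample output: requesting "vmx" under TCG would print
 * "warning: TCG doesn't support requested feature: CPUID.01H:ECX.vmx [bit 5]".
 */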
1433
1434 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1435 const char *name, Error **errp)
1436 {
1437 X86CPU *cpu = X86_CPU(obj);
1438 CPUX86State *env = &cpu->env;
1439 int64_t value;
1440
1441 value = (env->cpuid_version >> 8) & 0xf;
1442 if (value == 0xf) {
1443 value += (env->cpuid_version >> 20) & 0xff;
1444 }
1445 visit_type_int(v, &value, name, errp);
1446 }
1447
1448 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1449 const char *name, Error **errp)
1450 {
1451 X86CPU *cpu = X86_CPU(obj);
1452 CPUX86State *env = &cpu->env;
1453 const int64_t min = 0;
1454 const int64_t max = 0xff + 0xf;
1455 Error *local_err = NULL;
1456 int64_t value;
1457
1458 visit_type_int(v, &value, name, &local_err);
1459 if (local_err) {
1460 error_propagate(errp, local_err);
1461 return;
1462 }
1463 if (value < min || value > max) {
1464 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1465 name ? name : "null", value, min, max);
1466 return;
1467 }
1468
1469 env->cpuid_version &= ~0xff00f00;
1470 if (value > 0x0f) {
1471 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1472 } else {
1473 env->cpuid_version |= value << 8;
1474 }
1475 }
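/* Example: setting family=21 (0x15) stores 0xF in the base family field and
 * 21 - 15 = 6 in the extended family field, i.e. cpuid_version bits
 * 0x00600F00, which the getter above sums back to 21.
 */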
1476
1477 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1478 const char *name, Error **errp)
1479 {
1480 X86CPU *cpu = X86_CPU(obj);
1481 CPUX86State *env = &cpu->env;
1482 int64_t value;
1483
1484 value = (env->cpuid_version >> 4) & 0xf;
1485 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1486 visit_type_int(v, &value, name, errp);
1487 }
1488
1489 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1490 const char *name, Error **errp)
1491 {
1492 X86CPU *cpu = X86_CPU(obj);
1493 CPUX86State *env = &cpu->env;
1494 const int64_t min = 0;
1495 const int64_t max = 0xff;
1496 Error *local_err = NULL;
1497 int64_t value;
1498
1499 visit_type_int(v, &value, name, &local_err);
1500 if (local_err) {
1501 error_propagate(errp, local_err);
1502 return;
1503 }
1504 if (value < min || value > max) {
1505 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1506 name ? name : "null", value, min, max);
1507 return;
1508 }
1509
1510 env->cpuid_version &= ~0xf00f0;
1511 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1512 }
1513
1514 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1515 void *opaque, const char *name,
1516 Error **errp)
1517 {
1518 X86CPU *cpu = X86_CPU(obj);
1519 CPUX86State *env = &cpu->env;
1520 int64_t value;
1521
1522 value = env->cpuid_version & 0xf;
1523 visit_type_int(v, &value, name, errp);
1524 }
1525
1526 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1527 void *opaque, const char *name,
1528 Error **errp)
1529 {
1530 X86CPU *cpu = X86_CPU(obj);
1531 CPUX86State *env = &cpu->env;
1532 const int64_t min = 0;
1533 const int64_t max = 0xf;
1534 Error *local_err = NULL;
1535 int64_t value;
1536
1537 visit_type_int(v, &value, name, &local_err);
1538 if (local_err) {
1539 error_propagate(errp, local_err);
1540 return;
1541 }
1542 if (value < min || value > max) {
1543 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1544 name ? name : "null", value, min, max);
1545 return;
1546 }
1547
1548 env->cpuid_version &= ~0xf;
1549 env->cpuid_version |= value & 0xf;
1550 }
1551
1552 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
1553 const char *name, Error **errp)
1554 {
1555 X86CPU *cpu = X86_CPU(obj);
1556
1557 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1558 }
1559
1560 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
1561 const char *name, Error **errp)
1562 {
1563 X86CPU *cpu = X86_CPU(obj);
1564
1565 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1566 }
1567
1568 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
1569 const char *name, Error **errp)
1570 {
1571 X86CPU *cpu = X86_CPU(obj);
1572
1573 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1574 }
1575
1576 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
1577 const char *name, Error **errp)
1578 {
1579 X86CPU *cpu = X86_CPU(obj);
1580
1581 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1582 }
1583
1584 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1585 {
1586 X86CPU *cpu = X86_CPU(obj);
1587 CPUX86State *env = &cpu->env;
1588 char *value;
1589
1590 value = g_malloc(CPUID_VENDOR_SZ + 1);
1591 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1592 env->cpuid_vendor3);
1593 return value;
1594 }
1595
1596 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1597 Error **errp)
1598 {
1599 X86CPU *cpu = X86_CPU(obj);
1600 CPUX86State *env = &cpu->env;
1601 int i;
1602
1603 if (strlen(value) != CPUID_VENDOR_SZ) {
1604 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1605 "vendor", value);
1606 return;
1607 }
1608
1609 env->cpuid_vendor1 = 0;
1610 env->cpuid_vendor2 = 0;
1611 env->cpuid_vendor3 = 0;
1612 for (i = 0; i < 4; i++) {
1613 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1614 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1615 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1616 }
1617 }
1618
1619 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1620 {
1621 X86CPU *cpu = X86_CPU(obj);
1622 CPUX86State *env = &cpu->env;
1623 char *value;
1624 int i;
1625
1626 value = g_malloc(48 + 1);
1627 for (i = 0; i < 48; i++) {
1628 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1629 }
1630 value[48] = '\0';
1631 return value;
1632 }
1633
1634 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1635 Error **errp)
1636 {
1637 X86CPU *cpu = X86_CPU(obj);
1638 CPUX86State *env = &cpu->env;
1639 int c, len, i;
1640
1641 if (model_id == NULL) {
1642 model_id = "";
1643 }
1644 len = strlen(model_id);
1645 memset(env->cpuid_model, 0, 48);
1646 for (i = 0; i < 48; i++) {
1647 if (i >= len) {
1648 c = '\0';
1649 } else {
1650 c = (uint8_t)model_id[i];
1651 }
1652 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1653 }
1654 }
1655
1656 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1657 const char *name, Error **errp)
1658 {
1659 X86CPU *cpu = X86_CPU(obj);
1660 int64_t value;
1661
1662 value = cpu->env.tsc_khz * 1000;
1663 visit_type_int(v, &value, name, errp);
1664 }
1665
1666 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1667 const char *name, Error **errp)
1668 {
1669 X86CPU *cpu = X86_CPU(obj);
1670 const int64_t min = 0;
1671 const int64_t max = INT64_MAX;
1672 Error *local_err = NULL;
1673 int64_t value;
1674
1675 visit_type_int(v, &value, name, &local_err);
1676 if (local_err) {
1677 error_propagate(errp, local_err);
1678 return;
1679 }
1680 if (value < min || value > max) {
1681 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1682 name ? name : "null", value, min, max);
1683 return;
1684 }
1685
1686 cpu->env.tsc_khz = value / 1000;
1687 }
1688
1689 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1690 const char *name, Error **errp)
1691 {
1692 X86CPU *cpu = X86_CPU(obj);
1693 int64_t value = cpu->env.cpuid_apic_id;
1694
1695 visit_type_int(v, &value, name, errp);
1696 }
1697
1698 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1699 const char *name, Error **errp)
1700 {
1701 X86CPU *cpu = X86_CPU(obj);
1702 DeviceState *dev = DEVICE(obj);
1703 const int64_t min = 0;
1704 const int64_t max = UINT32_MAX;
1705 Error *error = NULL;
1706 int64_t value;
1707
1708 if (dev->realized) {
1709 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1710 "it was realized", name, object_get_typename(obj));
1711 return;
1712 }
1713
1714 visit_type_int(v, &value, name, &error);
1715 if (error) {
1716 error_propagate(errp, error);
1717 return;
1718 }
1719 if (value < min || value > max) {
1720 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1721 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1722 object_get_typename(obj), name, value, min, max);
1723 return;
1724 }
1725
1726 if ((value != cpu->env.cpuid_apic_id) && cpu_exists(value)) {
1727 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1728 return;
1729 }
1730 cpu->env.cpuid_apic_id = value;
1731 }
1732
1733 /* Generic getter for "feature-words" and "filtered-features" properties */
1734 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1735 const char *name, Error **errp)
1736 {
1737 uint32_t *array = (uint32_t *)opaque;
1738 FeatureWord w;
1739 Error *err = NULL;
1740 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1741 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1742 X86CPUFeatureWordInfoList *list = NULL;
1743
1744 for (w = 0; w < FEATURE_WORDS; w++) {
1745 FeatureWordInfo *wi = &feature_word_info[w];
1746 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1747 qwi->cpuid_input_eax = wi->cpuid_eax;
1748 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1749 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1750 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1751 qwi->features = array[w];
1752
1753 /* List will be in reverse order, but order shouldn't matter */
1754 list_entries[w].next = list;
1755 list_entries[w].value = &word_infos[w];
1756 list = &list_entries[w];
1757 }
1758
1759 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1760 error_propagate(errp, err);
1761 }
1762
1763 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1764 const char *name, Error **errp)
1765 {
1766 X86CPU *cpu = X86_CPU(obj);
1767 int64_t value = cpu->hyperv_spinlock_attempts;
1768
1769 visit_type_int(v, &value, name, errp);
1770 }
1771
1772 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1773 const char *name, Error **errp)
1774 {
1775 const int64_t min = 0xFFF;
1776 const int64_t max = UINT_MAX;
1777 X86CPU *cpu = X86_CPU(obj);
1778 Error *err = NULL;
1779 int64_t value;
1780
1781 visit_type_int(v, &value, name, &err);
1782 if (err) {
1783 error_propagate(errp, err);
1784 return;
1785 }
1786
1787 if (value < min || value > max) {
1788 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1789 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1790 object_get_typename(obj), name ? name : "null",
1791 value, min, max);
1792 return;
1793 }
1794 cpu->hyperv_spinlock_attempts = value;
1795 }
1796
1797 static PropertyInfo qdev_prop_spinlocks = {
1798 .name = "int",
1799 .get = x86_get_hv_spinlocks,
1800 .set = x86_set_hv_spinlocks,
1801 };
1802
1803 /* Convert all '_' in a feature string option name to '-', so that the feature
1804 * name conforms to the QOM property naming rule, which uses '-' instead of '_'.
1805 */
1806 static inline void feat2prop(char *s)
1807 {
1808 while ((s = strchr(s, '_'))) {
1809 *s = '-';
1810 }
1811 }
1812
1813 /* Parse "+feature,-feature,feature=foo" CPU feature string
1814 */
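/* Illustrative examples of strings accepted below (as passed to "-cpu MODEL,..."):
 *   "+foo" / "-foo"    force the named CPUID feature bit on or off
 *   "xlevel=0xVAL"     numeric property; values below 0x80000000 are fixed up
 *   "hv-spinlocks=N"   Hyper-V spinlock retry count, raised to the 0xFFF minimum
 *   any other "key=value" pair is parsed as the QOM property of the same name
 */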
1815 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1816 Error **errp)
1817 {
1818 X86CPU *cpu = X86_CPU(cs);
1819 char *featurestr; /* Single "key=value" string being parsed */
1820 FeatureWord w;
1821 /* Features to be added */
1822 FeatureWordArray plus_features = { 0 };
1823 /* Features to be removed */
1824 FeatureWordArray minus_features = { 0 };
1825 uint32_t numvalue;
1826 CPUX86State *env = &cpu->env;
1827 Error *local_err = NULL;
1828
1829 featurestr = features ? strtok(features, ",") : NULL;
1830
1831 while (featurestr) {
1832 char *val;
1833 if (featurestr[0] == '+') {
1834 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1835 } else if (featurestr[0] == '-') {
1836 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1837 } else if ((val = strchr(featurestr, '='))) {
1838 *val = 0; val++;
1839 feat2prop(featurestr);
1840 if (!strcmp(featurestr, "xlevel")) {
1841 char *err;
1842 char num[32];
1843
1844 numvalue = strtoul(val, &err, 0);
1845 if (!*val || *err) {
1846 error_setg(errp, "bad numerical value %s", val);
1847 return;
1848 }
1849 if (numvalue < 0x80000000) {
1850 error_report("xlevel value shall always be >= 0x80000000"
1851 ", fixup will be removed in future versions");
1852 numvalue += 0x80000000;
1853 }
1854 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1855 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1856 } else if (!strcmp(featurestr, "tsc-freq")) {
1857 int64_t tsc_freq;
1858 char *err;
1859 char num[32];
1860
1861 tsc_freq = strtosz_suffix_unit(val, &err,
1862 STRTOSZ_DEFSUFFIX_B, 1000);
1863 if (tsc_freq < 0 || *err) {
1864 error_setg(errp, "bad numerical value %s", val);
1865 return;
1866 }
1867 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1868 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1869 &local_err);
1870 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1871 char *err;
1872 const int min = 0xFFF;
1873 char num[32];
1874 numvalue = strtoul(val, &err, 0);
1875 if (!*val || *err) {
1876 error_setg(errp, "bad numerical value %s", val);
1877 return;
1878 }
1879 if (numvalue < min) {
1880 error_report("hv-spinlocks value shall always be >= 0x%x"
1881 ", fixup will be removed in future versions",
1882 min);
1883 numvalue = min;
1884 }
1885 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1886 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1887 } else {
1888 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1889 }
1890 } else {
1891 feat2prop(featurestr);
1892 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1893 }
1894 if (local_err) {
1895 error_propagate(errp, local_err);
1896 return;
1897 }
1898 featurestr = strtok(NULL, ",");
1899 }
1900
1901 if (cpu->host_features) {
1902 for (w = 0; w < FEATURE_WORDS; w++) {
1903 env->features[w] =
1904 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1905 }
1906 }
1907
1908 for (w = 0; w < FEATURE_WORDS; w++) {
1909 env->features[w] |= plus_features[w];
1910 env->features[w] &= ~minus_features[w];
1911 }
1912 }
1913
1914 /* Print all cpuid feature names in featureset
1915 */
1916 static void listflags(FILE *f, fprintf_function print, const char **featureset)
1917 {
1918 int bit;
1919 bool first = true;
1920
1921 for (bit = 0; bit < 32; bit++) {
1922 if (featureset[bit]) {
1923 print(f, "%s%s", first ? "" : " ", featureset[bit]);
1924 first = false;
1925 }
1926 }
1927 }
1928
1929 /* Generate CPU information. */
1930 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1931 {
1932 X86CPUDefinition *def;
1933 char buf[256];
1934 int i;
1935
1936 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1937 def = &builtin_x86_defs[i];
1938 snprintf(buf, sizeof(buf), "%s", def->name);
1939 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1940 }
1941 #ifdef CONFIG_KVM
1942 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1943 "KVM processor with all supported host features "
1944 "(only available in KVM mode)");
1945 #endif
1946
1947 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1948 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1949 FeatureWordInfo *fw = &feature_word_info[i];
1950
1951 (*cpu_fprintf)(f, " ");
1952 listflags(f, cpu_fprintf, fw->feat_names);
1953 (*cpu_fprintf)(f, "\n");
1954 }
1955 }
1956
1957 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1958 {
1959 CpuDefinitionInfoList *cpu_list = NULL;
1960 X86CPUDefinition *def;
1961 int i;
1962
1963 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1964 CpuDefinitionInfoList *entry;
1965 CpuDefinitionInfo *info;
1966
1967 def = &builtin_x86_defs[i];
1968 info = g_malloc0(sizeof(*info));
1969 info->name = g_strdup(def->name);
1970
1971 entry = g_malloc0(sizeof(*entry));
1972 entry->value = info;
1973 entry->next = cpu_list;
1974 cpu_list = entry;
1975 }
1976
1977 return cpu_list;
1978 }
1979
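/* Return the feature bits in word 'w' that the current accelerator can provide:
 * KVM reports them via kvm_arch_get_supported_cpuid(), TCG uses the static
 * tcg_features mask, and ~0 is returned when neither accelerator is enabled.
 * With migratable_only set, the result is further restricted to flags known to
 * be safe for migration. */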
1980 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1981 bool migratable_only)
1982 {
1983 FeatureWordInfo *wi = &feature_word_info[w];
1984 uint32_t r;
1985
1986 if (kvm_enabled()) {
1987 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
1988 wi->cpuid_ecx,
1989 wi->cpuid_reg);
1990 } else if (tcg_enabled()) {
1991 r = wi->tcg_features;
1992 } else {
1993 return ~0;
1994 }
1995 if (migratable_only) {
1996 r &= x86_cpu_get_migratable_flags(w);
1997 }
1998 return r;
1999 }
2000
2001 /*
2002 * Filters CPU feature words based on host availability of each feature.
2003 *
2004 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2005 */
2006 static int x86_cpu_filter_features(X86CPU *cpu)
2007 {
2008 CPUX86State *env = &cpu->env;
2009 FeatureWord w;
2010 int rv = 0;
2011
2012 for (w = 0; w < FEATURE_WORDS; w++) {
2013 uint32_t host_feat =
2014 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2015 uint32_t requested_features = env->features[w];
2016 env->features[w] &= host_feat;
2017 cpu->filtered_features[w] = requested_features & ~env->features[w];
2018 if (cpu->filtered_features[w]) {
2019 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2020 report_unavailable_features(w, cpu->filtered_features[w]);
2021 }
2022 rv = 1;
2023 }
2024 }
2025
2026 return rv;
2027 }
2028
2029 /* Load data from X86CPUDefinition
2030 */
2031 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2032 {
2033 CPUX86State *env = &cpu->env;
2034 const char *vendor;
2035 char host_vendor[CPUID_VENDOR_SZ + 1];
2036 FeatureWord w;
2037
2038 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2039 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2040 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2041 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2042 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2043 env->cpuid_xlevel2 = def->xlevel2;
2044 cpu->cache_info_passthrough = def->cache_info_passthrough;
2045 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2046 for (w = 0; w < FEATURE_WORDS; w++) {
2047 env->features[w] = def->features[w];
2048 }
2049
2050 /* Special cases not set in the X86CPUDefinition structs: */
2051 if (kvm_enabled()) {
2052 FeatureWord w;
2053 for (w = 0; w < FEATURE_WORDS; w++) {
2054 env->features[w] |= kvm_default_features[w];
2055 env->features[w] &= ~kvm_default_unset_features[w];
2056 }
2057 }
2058
2059 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2060
2061 /* sysenter isn't supported in compatibility mode on AMD,
2062 * syscall isn't supported in compatibility mode on Intel.
2063 * Normally we advertise the actual CPU vendor, but you can
2064 * override this using the 'vendor' property if you want to use
2065 * KVM's sysenter/syscall emulation in compatibility mode and
2066 * when doing cross-vendor migration.
2067 */
2068 vendor = def->vendor;
2069 if (kvm_enabled()) {
2070 uint32_t ebx = 0, ecx = 0, edx = 0;
2071 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2072 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2073 vendor = host_vendor;
2074 }
2075
2076 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2077
2078 }
2079
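/* Build an X86CPU object from a "model[,features...]" string. The returned CPU
 * is not yet realized; callers such as cpu_x86_init() set the "realized"
 * property afterwards. In system-emulation builds the CPU is also attached to
 * the ICC bus provided via icc_bridge. */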
2080 X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
2081 Error **errp)
2082 {
2083 X86CPU *cpu = NULL;
2084 X86CPUClass *xcc;
2085 ObjectClass *oc;
2086 gchar **model_pieces;
2087 char *name, *features;
2088 Error *error = NULL;
2089
2090 model_pieces = g_strsplit(cpu_model, ",", 2);
2091 if (!model_pieces[0]) {
2092 error_setg(&error, "Invalid/empty CPU model name");
2093 goto out;
2094 }
2095 name = model_pieces[0];
2096 features = model_pieces[1];
2097
2098 oc = x86_cpu_class_by_name(name);
2099 if (oc == NULL) {
2100 error_setg(&error, "Unable to find CPU definition: %s", name);
2101 goto out;
2102 }
2103 xcc = X86_CPU_CLASS(oc);
2104
2105 if (xcc->kvm_required && !kvm_enabled()) {
2106 error_setg(&error, "CPU model '%s' requires KVM", name);
2107 goto out;
2108 }
2109
2110 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2111
2112 #ifndef CONFIG_USER_ONLY
2113 if (icc_bridge == NULL) {
2114 error_setg(&error, "Invalid icc-bridge value");
2115 goto out;
2116 }
2117 qdev_set_parent_bus(DEVICE(cpu), qdev_get_child_bus(icc_bridge, "icc"));
2118 object_unref(OBJECT(cpu));
2119 #endif
2120
2121 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2122 if (error) {
2123 goto out;
2124 }
2125
2126 out:
2127 if (error != NULL) {
2128 error_propagate(errp, error);
2129 if (cpu) {
2130 object_unref(OBJECT(cpu));
2131 cpu = NULL;
2132 }
2133 }
2134 g_strfreev(model_pieces);
2135 return cpu;
2136 }
2137
2138 X86CPU *cpu_x86_init(const char *cpu_model)
2139 {
2140 Error *error = NULL;
2141 X86CPU *cpu;
2142
2143 cpu = cpu_x86_create(cpu_model, NULL, &error);
2144 if (error) {
2145 goto out;
2146 }
2147
2148 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2149
2150 out:
2151 if (error) {
2152 error_report("%s", error_get_pretty(error));
2153 error_free(error);
2154 if (cpu != NULL) {
2155 object_unref(OBJECT(cpu));
2156 cpu = NULL;
2157 }
2158 }
2159 return cpu;
2160 }
2161
2162 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2163 {
2164 X86CPUDefinition *cpudef = data;
2165 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2166
2167 xcc->cpu_def = cpudef;
2168 }
2169
2170 static void x86_register_cpudef_type(X86CPUDefinition *def)
2171 {
2172 char *typename = x86_cpu_type_name(def->name);
2173 TypeInfo ti = {
2174 .name = typename,
2175 .parent = TYPE_X86_CPU,
2176 .class_init = x86_cpu_cpudef_class_init,
2177 .class_data = def,
2178 };
2179
2180 type_register(&ti);
2181 g_free(typename);
2182 }
2183
2184 #if !defined(CONFIG_USER_ONLY)
2185
2186 void cpu_clear_apic_feature(CPUX86State *env)
2187 {
2188 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2189 }
2190
2191 #endif /* !CONFIG_USER_ONLY */
2192
2193 /* Initialize list of CPU models, filling some non-static fields if necessary
2194 */
2195 void x86_cpudef_setup(void)
2196 {
2197 int i, j;
2198 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2199
2200 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2201 X86CPUDefinition *def = &builtin_x86_defs[i];
2202
2203 /* Look for specific "cpudef" models that
2204 * have the QEMU version in .model_id */
2205 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2206 if (strcmp(model_with_versions[j], def->name) == 0) {
2207 pstrcpy(def->model_id, sizeof(def->model_id),
2208 "QEMU Virtual CPU version ");
2209 pstrcat(def->model_id, sizeof(def->model_id),
2210 qemu_get_version());
2211 break;
2212 }
2213 }
2214 }
2215 }
2216
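/* Return the vendor string registers in the EBX/EDX/ECX order expected by
 * CPUID leaves 0 and 0x80000000. */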
2217 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
2218 uint32_t *ecx, uint32_t *edx)
2219 {
2220 *ebx = env->cpuid_vendor1;
2221 *edx = env->cpuid_vendor2;
2222 *ecx = env->cpuid_vendor3;
2223 }
2224
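/* Compute the guest-visible CPUID result for the given leaf (index) and
 * sub-leaf (count). Out-of-range leaves fall back to cpuid_level, cpuid_xlevel
 * or cpuid_xlevel2, mirroring the behaviour documented for real CPUs. */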
2225 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2226 uint32_t *eax, uint32_t *ebx,
2227 uint32_t *ecx, uint32_t *edx)
2228 {
2229 X86CPU *cpu = x86_env_get_cpu(env);
2230 CPUState *cs = CPU(cpu);
2231
2232 /* Clamp out-of-range leaves to a supported index */
2233 if (index & 0x80000000) {
2234 if (index > env->cpuid_xlevel) {
2235 if (env->cpuid_xlevel2 > 0) {
2236 /* Handle the Centaur's CPUID instruction. */
2237 if (index > env->cpuid_xlevel2) {
2238 index = env->cpuid_xlevel2;
2239 } else if (index < 0xC0000000) {
2240 index = env->cpuid_xlevel;
2241 }
2242 } else {
2243 /* Intel documentation states that invalid EAX input will
2244 * return the same information as EAX=cpuid_level
2245 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2246 */
2247 index = env->cpuid_level;
2248 }
2249 }
2250 } else {
2251 if (index > env->cpuid_level)
2252 index = env->cpuid_level;
2253 }
2254
2255 switch(index) {
2256 case 0:
2257 *eax = env->cpuid_level;
2258 get_cpuid_vendor(env, ebx, ecx, edx);
2259 break;
2260 case 1:
2261 *eax = env->cpuid_version;
2262 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2263 *ecx = env->features[FEAT_1_ECX];
2264 *edx = env->features[FEAT_1_EDX];
2265 if (cs->nr_cores * cs->nr_threads > 1) {
2266 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2267 *edx |= 1 << 28; /* HTT bit */
2268 }
2269 break;
2270 case 2:
2271 /* cache info: needed for Pentium Pro compatibility */
2272 if (cpu->cache_info_passthrough) {
2273 host_cpuid(index, 0, eax, ebx, ecx, edx);
2274 break;
2275 }
2276 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2277 *ebx = 0;
2278 *ecx = 0;
2279 *edx = (L1D_DESCRIPTOR << 16) | \
2280 (L1I_DESCRIPTOR << 8) | \
2281 (L2_DESCRIPTOR);
2282 break;
2283 case 4:
2284 /* cache info: needed for Core compatibility */
2285 if (cpu->cache_info_passthrough) {
2286 host_cpuid(index, count, eax, ebx, ecx, edx);
2287 *eax &= ~0xFC000000;
2288 } else {
2289 *eax = 0;
2290 switch (count) {
2291 case 0: /* L1 dcache info */
2292 *eax |= CPUID_4_TYPE_DCACHE | \
2293 CPUID_4_LEVEL(1) | \
2294 CPUID_4_SELF_INIT_LEVEL;
2295 *ebx = (L1D_LINE_SIZE - 1) | \
2296 ((L1D_PARTITIONS - 1) << 12) | \
2297 ((L1D_ASSOCIATIVITY - 1) << 22);
2298 *ecx = L1D_SETS - 1;
2299 *edx = CPUID_4_NO_INVD_SHARING;
2300 break;
2301 case 1: /* L1 icache info */
2302 *eax |= CPUID_4_TYPE_ICACHE | \
2303 CPUID_4_LEVEL(1) | \
2304 CPUID_4_SELF_INIT_LEVEL;
2305 *ebx = (L1I_LINE_SIZE - 1) | \
2306 ((L1I_PARTITIONS - 1) << 12) | \
2307 ((L1I_ASSOCIATIVITY - 1) << 22);
2308 *ecx = L1I_SETS - 1;
2309 *edx = CPUID_4_NO_INVD_SHARING;
2310 break;
2311 case 2: /* L2 cache info */
2312 *eax |= CPUID_4_TYPE_UNIFIED | \
2313 CPUID_4_LEVEL(2) | \
2314 CPUID_4_SELF_INIT_LEVEL;
2315 if (cs->nr_threads > 1) {
2316 *eax |= (cs->nr_threads - 1) << 14;
2317 }
2318 *ebx = (L2_LINE_SIZE - 1) | \
2319 ((L2_PARTITIONS - 1) << 12) | \
2320 ((L2_ASSOCIATIVITY - 1) << 22);
2321 *ecx = L2_SETS - 1;
2322 *edx = CPUID_4_NO_INVD_SHARING;
2323 break;
2324 default: /* end of info */
2325 *eax = 0;
2326 *ebx = 0;
2327 *ecx = 0;
2328 *edx = 0;
2329 break;
2330 }
2331 }
2332
2333 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2334 if ((*eax & 31) && cs->nr_cores > 1) {
2335 *eax |= (cs->nr_cores - 1) << 26;
2336 }
2337 break;
2338 case 5:
2339 /* mwait info: needed for Core compatibility */
2340 *eax = 0; /* Smallest monitor-line size in bytes */
2341 *ebx = 0; /* Largest monitor-line size in bytes */
2342 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2343 *edx = 0;
2344 break;
2345 case 6:
2346 /* Thermal and Power Leaf */
2347 *eax = 0;
2348 *ebx = 0;
2349 *ecx = 0;
2350 *edx = 0;
2351 break;
2352 case 7:
2353 /* Structured Extended Feature Flags Enumeration Leaf */
2354 if (count == 0) {
2355 *eax = 0; /* Maximum ECX value for sub-leaves */
2356 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2357 *ecx = 0; /* Reserved */
2358 *edx = 0; /* Reserved */
2359 } else {
2360 *eax = 0;
2361 *ebx = 0;
2362 *ecx = 0;
2363 *edx = 0;
2364 }
2365 break;
2366 case 9:
2367 /* Direct Cache Access Information Leaf */
2368 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2369 *ebx = 0;
2370 *ecx = 0;
2371 *edx = 0;
2372 break;
2373 case 0xA:
2374 /* Architectural Performance Monitoring Leaf */
2375 if (kvm_enabled() && cpu->enable_pmu) {
2376 KVMState *s = cs->kvm_state;
2377
2378 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2379 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2380 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2381 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2382 } else {
2383 *eax = 0;
2384 *ebx = 0;
2385 *ecx = 0;
2386 *edx = 0;
2387 }
2388 break;
2389 case 0xD: {
2390 KVMState *s = cs->kvm_state;
2391 uint64_t kvm_mask;
2392 int i;
2393
2394 /* Processor Extended State */
2395 *eax = 0;
2396 *ebx = 0;
2397 *ecx = 0;
2398 *edx = 0;
2399 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2400 break;
2401 }
2402 kvm_mask =
2403 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2404 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2405
2406 if (count == 0) {
2407 *ecx = 0x240; /* legacy 512-byte FXSAVE area + 64-byte XSAVE header */
2408 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2409 const ExtSaveArea *esa = &ext_save_areas[i];
2410 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2411 (kvm_mask & (1 << i)) != 0) {
2412 if (i < 32) {
2413 *eax |= 1 << i;
2414 } else {
2415 *edx |= 1 << (i - 32);
2416 }
2417 *ecx = MAX(*ecx, esa->offset + esa->size);
2418 }
2419 }
2420 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2421 *ebx = *ecx;
2422 } else if (count == 1) {
2423 *eax = env->features[FEAT_XSAVE];
2424 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2425 const ExtSaveArea *esa = &ext_save_areas[count];
2426 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2427 (kvm_mask & (1 << count)) != 0) {
2428 *eax = esa->size;
2429 *ebx = esa->offset;
2430 }
2431 }
2432 break;
2433 }
2434 case 0x80000000:
2435 *eax = env->cpuid_xlevel;
2436 *ebx = env->cpuid_vendor1;
2437 *edx = env->cpuid_vendor2;
2438 *ecx = env->cpuid_vendor3;
2439 break;
2440 case 0x80000001:
2441 *eax = env->cpuid_version;
2442 *ebx = 0;
2443 *ecx = env->features[FEAT_8000_0001_ECX];
2444 *edx = env->features[FEAT_8000_0001_EDX];
2445
2446 /* The Linux kernel checks for the CMPLegacy bit and
2447 * discards multiple thread information if it is set.
2448 * So don't set it here for Intel, to make Linux guests happy.
2449 */
2450 if (cs->nr_cores * cs->nr_threads > 1) {
2451 uint32_t tebx, tecx, tedx;
2452 get_cpuid_vendor(env, &tebx, &tecx, &tedx);
2453 if (tebx != CPUID_VENDOR_INTEL_1 ||
2454 tedx != CPUID_VENDOR_INTEL_2 ||
2455 tecx != CPUID_VENDOR_INTEL_3) {
2456 *ecx |= 1 << 1; /* CmpLegacy bit */
2457 }
2458 }
2459 break;
2460 case 0x80000002:
2461 case 0x80000003:
2462 case 0x80000004:
2463 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2464 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2465 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2466 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2467 break;
2468 case 0x80000005:
2469 /* cache info (L1 cache) */
2470 if (cpu->cache_info_passthrough) {
2471 host_cpuid(index, 0, eax, ebx, ecx, edx);
2472 break;
2473 }
2474 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2475 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2476 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2477 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2478 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2479 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2480 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2481 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2482 break;
2483 case 0x80000006:
2484 /* cache info (L2 cache) */
2485 if (cpu->cache_info_passthrough) {
2486 host_cpuid(index, 0, eax, ebx, ecx, edx);
2487 break;
2488 }
2489 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2490 (L2_DTLB_2M_ENTRIES << 16) | \
2491 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2492 (L2_ITLB_2M_ENTRIES);
2493 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2494 (L2_DTLB_4K_ENTRIES << 16) | \
2495 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2496 (L2_ITLB_4K_ENTRIES);
2497 *ecx = (L2_SIZE_KB_AMD << 16) | \
2498 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2499 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2500 *edx = ((L3_SIZE_KB/512) << 18) | \
2501 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2502 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2503 break;
2504 case 0x80000007:
2505 *eax = 0;
2506 *ebx = 0;
2507 *ecx = 0;
2508 *edx = env->features[FEAT_8000_0007_EDX];
2509 break;
2510 case 0x80000008:
2511 /* virtual & phys address size in low 2 bytes. */
2512 /* XXX: This value must match the one used in the MMU code. */
2513 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2514 /* 64 bit processor */
2515 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2516 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2517 } else {
2518 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2519 *eax = 0x00000024; /* 36 bits physical */
2520 } else {
2521 *eax = 0x00000020; /* 32 bits physical */
2522 }
2523 }
2524 *ebx = 0;
2525 *ecx = 0;
2526 *edx = 0;
2527 if (cs->nr_cores * cs->nr_threads > 1) {
2528 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2529 }
2530 break;
2531 case 0x8000000A:
2532 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2533 *eax = 0x00000001; /* SVM Revision */
2534 *ebx = 0x00000010; /* nr of ASIDs */
2535 *ecx = 0;
2536 *edx = env->features[FEAT_SVM]; /* optional features */
2537 } else {
2538 *eax = 0;
2539 *ebx = 0;
2540 *ecx = 0;
2541 *edx = 0;
2542 }
2543 break;
2544 case 0xC0000000:
2545 *eax = env->cpuid_xlevel2;
2546 *ebx = 0;
2547 *ecx = 0;
2548 *edx = 0;
2549 break;
2550 case 0xC0000001:
2551 /* Support for VIA CPU's CPUID instruction */
2552 *eax = env->cpuid_version;
2553 *ebx = 0;
2554 *ecx = 0;
2555 *edx = env->features[FEAT_C000_0001_EDX];
2556 break;
2557 case 0xC0000002:
2558 case 0xC0000003:
2559 case 0xC0000004:
2560 /* Reserved for future use; currently filled with zero */
2561 *eax = 0;
2562 *ebx = 0;
2563 *ecx = 0;
2564 *edx = 0;
2565 break;
2566 default:
2567 /* reserved values: zero */
2568 *eax = 0;
2569 *ebx = 0;
2570 *ecx = 0;
2571 *edx = 0;
2572 break;
2573 }
2574 }
2575
2576 /* CPUClass::reset() */
2577 static void x86_cpu_reset(CPUState *s)
2578 {
2579 X86CPU *cpu = X86_CPU(s);
2580 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2581 CPUX86State *env = &cpu->env;
2582 int i;
2583
2584 xcc->parent_reset(s);
2585
2586 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2587
2588 tlb_flush(s, 1);
2589
2590 env->old_exception = -1;
2591
2592 /* init to reset state */
2593
2594 #ifdef CONFIG_SOFTMMU
2595 env->hflags |= HF_SOFTMMU_MASK;
2596 #endif
2597 env->hflags2 |= HF2_GIF_MASK;
2598
2599 cpu_x86_update_cr0(env, 0x60000010);
2600 env->a20_mask = ~0x0;
2601 env->smbase = 0x30000;
2602
2603 env->idt.limit = 0xffff;
2604 env->gdt.limit = 0xffff;
2605 env->ldt.limit = 0xffff;
2606 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2607 env->tr.limit = 0xffff;
2608 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2609
2610 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2611 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2612 DESC_R_MASK | DESC_A_MASK);
2613 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2614 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2615 DESC_A_MASK);
2616 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2617 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2618 DESC_A_MASK);
2619 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2620 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2621 DESC_A_MASK);
2622 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2623 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2624 DESC_A_MASK);
2625 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2626 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2627 DESC_A_MASK);
2628
2629 env->eip = 0xfff0;
2630 env->regs[R_EDX] = env->cpuid_version;
2631
2632 env->eflags = 0x2;
2633
2634 /* FPU init */
2635 for (i = 0; i < 8; i++) {
2636 env->fptags[i] = 1;
2637 }
2638 cpu_set_fpuc(env, 0x37f);
2639
2640 env->mxcsr = 0x1f80;
2641 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2642
2643 env->pat = 0x0007040600070406ULL;
2644 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2645
2646 memset(env->dr, 0, sizeof(env->dr));
2647 env->dr[6] = DR6_FIXED_1;
2648 env->dr[7] = DR7_FIXED_1;
2649 cpu_breakpoint_remove_all(s, BP_CPU);
2650 cpu_watchpoint_remove_all(s, BP_CPU);
2651
2652 env->xcr0 = 1;
2653
2654 /*
2655 * SDM 11.11.5 requires:
2656 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2657 * - IA32_MTRR_PHYSMASKn.V = 0
2658 * All other bits are undefined. For simplification, zero it all.
2659 */
2660 env->mtrr_deftype = 0;
2661 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2662 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2663
2664 #if !defined(CONFIG_USER_ONLY)
2665 /* We hard-wire the BSP to the first CPU. */
2666 if (s->cpu_index == 0) {
2667 apic_designate_bsp(cpu->apic_state);
2668 }
2669
2670 s->halted = !cpu_is_bsp(cpu);
2671
2672 if (kvm_enabled()) {
2673 kvm_arch_reset_vcpu(cpu);
2674 }
2675 #endif
2676 }
2677
2678 #ifndef CONFIG_USER_ONLY
2679 bool cpu_is_bsp(X86CPU *cpu)
2680 {
2681 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2682 }
2683
2684 /* TODO: remove me when reset over the QOM tree is implemented */
2685 static void x86_cpu_machine_reset_cb(void *opaque)
2686 {
2687 X86CPU *cpu = opaque;
2688 cpu_reset(CPU(cpu));
2689 }
2690 #endif
2691
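/* Advertise the Machine Check Architecture to the guest: family >= 6 CPUs with
 * both the MCE and MCA CPUID bits get the default MCG capabilities and all
 * default banks enabled. */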
2692 static void mce_init(X86CPU *cpu)
2693 {
2694 CPUX86State *cenv = &cpu->env;
2695 unsigned int bank;
2696
2697 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2698 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2699 (CPUID_MCE | CPUID_MCA)) {
2700 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2701 cenv->mcg_ctl = ~(uint64_t)0;
2702 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2703 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2704 }
2705 }
2706 }
2707
2708 #ifndef CONFIG_USER_ONLY
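/* Create the local APIC device backing this CPU, preferring the in-kernel
 * "kvm-apic" or the "xen-apic" variant when the corresponding accelerator is
 * active. */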
2709 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2710 {
2711 CPUX86State *env = &cpu->env;
2712 DeviceState *dev = DEVICE(cpu);
2713 APICCommonState *apic;
2714 const char *apic_type = "apic";
2715
2716 if (kvm_irqchip_in_kernel()) {
2717 apic_type = "kvm-apic";
2718 } else if (xen_enabled()) {
2719 apic_type = "xen-apic";
2720 }
2721
2722 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2723 if (cpu->apic_state == NULL) {
2724 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2725 return;
2726 }
2727
2728 object_property_add_child(OBJECT(cpu), "apic",
2729 OBJECT(cpu->apic_state), NULL);
2730 qdev_prop_set_uint8(cpu->apic_state, "id", env->cpuid_apic_id);
2731 /* TODO: convert to link<> */
2732 apic = APIC_COMMON(cpu->apic_state);
2733 apic->cpu = cpu;
2734 }
2735
2736 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2737 {
2738 if (cpu->apic_state == NULL) {
2739 return;
2740 }
2741
2742 if (qdev_init(cpu->apic_state)) {
2743 error_setg(errp, "APIC device '%s' could not be initialized",
2744 object_get_typename(OBJECT(cpu->apic_state)));
2745 return;
2746 }
2747 }
2748 #else
2749 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2750 {
2751 }
2752 #endif
2753
2754
2755 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2756 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2757 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2758 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2759 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2760 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2761 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2762 {
2763 CPUState *cs = CPU(dev);
2764 X86CPU *cpu = X86_CPU(dev);
2765 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2766 CPUX86State *env = &cpu->env;
2767 Error *local_err = NULL;
2768 static bool ht_warned;
2769
2770 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2771 env->cpuid_level = 7;
2772 }
2773
2774 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2775 * CPUID[1].EDX.
2776 */
2777 if (IS_AMD_CPU(env)) {
2778 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2779 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2780 & CPUID_EXT2_AMD_ALIASES);
2781 }
2782
2783
2784 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2785 error_setg(&local_err,
2786 kvm_enabled() ?
2787 "Host doesn't support requested features" :
2788 "TCG doesn't support requested features");
2789 goto out;
2790 }
2791
2792 #ifndef CONFIG_USER_ONLY
2793 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2794
2795 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2796 x86_cpu_apic_create(cpu, &local_err);
2797 if (local_err != NULL) {
2798 goto out;
2799 }
2800 }
2801 #endif
2802
2803 mce_init(cpu);
2804 qemu_init_vcpu(cs);
2805
2806 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2807 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2808 * based on inputs (sockets, cores, threads), it is still better to give
2809 * users a warning.
2810 *
2811 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2812 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
2813 */
2814 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2815 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2816 " -smp options properly.");
2817 ht_warned = true;
2818 }
2819
2820 x86_cpu_apic_realize(cpu, &local_err);
2821 if (local_err != NULL) {
2822 goto out;
2823 }
2824 cpu_reset(cs);
2825
2826 xcc->parent_realize(dev, &local_err);
2827 out:
2828 if (local_err != NULL) {
2829 error_propagate(errp, local_err);
2830 return;
2831 }
2832 }
2833
2834 /* Enables contiguous-apic-ID mode, for compatibility */
2835 static bool compat_apic_id_mode;
2836
2837 void enable_compat_apic_id_mode(void)
2838 {
2839 compat_apic_id_mode = true;
2840 }
2841
2842 /* Calculates initial APIC ID for a specific CPU index
2843 *
2844 * Currently we need to be able to calculate the APIC ID from the CPU index
2845 * alone (without requiring a CPU object), as the QEMU<->SeaBIOS interfaces have
2846 * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of
2847 * all CPUs up to max_cpus.
2848 */
2849 uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index)
2850 {
2851 uint32_t correct_id;
2852 static bool warned;
2853
2854 correct_id = x86_apicid_from_cpu_idx(smp_cores, smp_threads, cpu_index);
2855 if (compat_apic_id_mode) {
2856 if (cpu_index != correct_id && !warned) {
2857 error_report("APIC IDs set in compatibility mode, "
2858 "CPU topology won't match the configuration");
2859 warned = true;
2860 }
2861 return cpu_index;
2862 } else {
2863 return correct_id;
2864 }
2865 }
2866
2867 static void x86_cpu_initfn(Object *obj)
2868 {
2869 CPUState *cs = CPU(obj);
2870 X86CPU *cpu = X86_CPU(obj);
2871 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
2872 CPUX86State *env = &cpu->env;
2873 static int inited;
2874
2875 cs->env_ptr = env;
2876 cpu_exec_init(env);
2877
2878 object_property_add(obj, "family", "int",
2879 x86_cpuid_version_get_family,
2880 x86_cpuid_version_set_family, NULL, NULL, NULL);
2881 object_property_add(obj, "model", "int",
2882 x86_cpuid_version_get_model,
2883 x86_cpuid_version_set_model, NULL, NULL, NULL);
2884 object_property_add(obj, "stepping", "int",
2885 x86_cpuid_version_get_stepping,
2886 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
2887 object_property_add(obj, "level", "int",
2888 x86_cpuid_get_level,
2889 x86_cpuid_set_level, NULL, NULL, NULL);
2890 object_property_add(obj, "xlevel", "int",
2891 x86_cpuid_get_xlevel,
2892 x86_cpuid_set_xlevel, NULL, NULL, NULL);
2893 object_property_add_str(obj, "vendor",
2894 x86_cpuid_get_vendor,
2895 x86_cpuid_set_vendor, NULL);
2896 object_property_add_str(obj, "model-id",
2897 x86_cpuid_get_model_id,
2898 x86_cpuid_set_model_id, NULL);
2899 object_property_add(obj, "tsc-frequency", "int",
2900 x86_cpuid_get_tsc_freq,
2901 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
2902 object_property_add(obj, "apic-id", "int",
2903 x86_cpuid_get_apic_id,
2904 x86_cpuid_set_apic_id, NULL, NULL, NULL);
2905 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
2906 x86_cpu_get_feature_words,
2907 NULL, NULL, (void *)env->features, NULL);
2908 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
2909 x86_cpu_get_feature_words,
2910 NULL, NULL, (void *)cpu->filtered_features, NULL);
2911
2912 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
2913 env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index);
2914
2915 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
2916
2917 /* init various static tables used in TCG mode */
2918 if (tcg_enabled() && !inited) {
2919 inited = 1;
2920 optimize_flags_init();
2921 }
2922 }
2923
2924 static int64_t x86_cpu_get_arch_id(CPUState *cs)
2925 {
2926 X86CPU *cpu = X86_CPU(cs);
2927 CPUX86State *env = &cpu->env;
2928
2929 return env->cpuid_apic_id;
2930 }
2931
2932 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
2933 {
2934 X86CPU *cpu = X86_CPU(cs);
2935
2936 return cpu->env.cr[0] & CR0_PG_MASK;
2937 }
2938
2939 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
2940 {
2941 X86CPU *cpu = X86_CPU(cs);
2942
2943 cpu->env.eip = value;
2944 }
2945
2946 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
2947 {
2948 X86CPU *cpu = X86_CPU(cs);
2949
2950 cpu->env.eip = tb->pc - tb->cs_base;
2951 }
2952
2953 static bool x86_cpu_has_work(CPUState *cs)
2954 {
2955 X86CPU *cpu = X86_CPU(cs);
2956 CPUX86State *env = &cpu->env;
2957
2958 #if !defined(CONFIG_USER_ONLY)
2959 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
2960 apic_poll_irq(cpu->apic_state);
2961 cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
2962 }
2963 #endif
2964
2965 return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
2966 (env->eflags & IF_MASK)) ||
2967 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
2968 CPU_INTERRUPT_INIT |
2969 CPU_INTERRUPT_SIPI |
2970 CPU_INTERRUPT_MCE));
2971 }
2972
2973 static Property x86_cpu_properties[] = {
2974 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
2975 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
2976 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
2977 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
2978 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
2979 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
2980 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
2981 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
2982 DEFINE_PROP_END_OF_LIST()
2983 };
2984
2985 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
2986 {
2987 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2988 CPUClass *cc = CPU_CLASS(oc);
2989 DeviceClass *dc = DEVICE_CLASS(oc);
2990
2991 xcc->parent_realize = dc->realize;
2992 dc->realize = x86_cpu_realizefn;
2993 dc->bus_type = TYPE_ICC_BUS;
2994 dc->props = x86_cpu_properties;
2995
2996 xcc->parent_reset = cc->reset;
2997 cc->reset = x86_cpu_reset;
2998 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
2999
3000 cc->class_by_name = x86_cpu_class_by_name;
3001 cc->parse_features = x86_cpu_parse_featurestr;
3002 cc->has_work = x86_cpu_has_work;
3003 cc->do_interrupt = x86_cpu_do_interrupt;
3004 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3005 cc->dump_state = x86_cpu_dump_state;
3006 cc->set_pc = x86_cpu_set_pc;
3007 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3008 cc->gdb_read_register = x86_cpu_gdb_read_register;
3009 cc->gdb_write_register = x86_cpu_gdb_write_register;
3010 cc->get_arch_id = x86_cpu_get_arch_id;
3011 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3012 #ifdef CONFIG_USER_ONLY
3013 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3014 #else
3015 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3016 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3017 cc->write_elf64_note = x86_cpu_write_elf64_note;
3018 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3019 cc->write_elf32_note = x86_cpu_write_elf32_note;
3020 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3021 cc->vmsd = &vmstate_x86_cpu;
3022 #endif
3023 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3024 #ifndef CONFIG_USER_ONLY
3025 cc->debug_excp_handler = breakpoint_handler;
3026 #endif
3027 cc->cpu_exec_enter = x86_cpu_exec_enter;
3028 cc->cpu_exec_exit = x86_cpu_exec_exit;
3029 }
3030
3031 static const TypeInfo x86_cpu_type_info = {
3032 .name = TYPE_X86_CPU,
3033 .parent = TYPE_CPU,
3034 .instance_size = sizeof(X86CPU),
3035 .instance_init = x86_cpu_initfn,
3036 .abstract = true,
3037 .class_size = sizeof(X86CPUClass),
3038 .class_init = x86_cpu_common_class_init,
3039 };
3040
3041 static void x86_cpu_register_types(void)
3042 {
3043 int i;
3044
3045 type_register_static(&x86_cpu_type_info);
3046 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3047 x86_register_cpudef_type(&builtin_x86_defs[i]);
3048 }
3049 #ifdef CONFIG_KVM
3050 type_register_static(&host_x86_cpu_type_info);
3051 #endif
3052 }
3053
3054 type_init(x86_cpu_register_types)