target-i386/cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
23
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28 #include "topology.h"
29
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
33
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
38
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
43
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/cpu/icc_bus.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
53 /* Cache topology CPUID constants: */
54
55 /* CPUID Leaf 2 Descriptors */
56
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60
61
62 /* CPUID Leaf 4 constants: */
63
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
68
69 #define CPUID_4_LEVEL(l) ((l) << 5)
70
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
73
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
78
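/* Editorial sketch (not part of the upstream file): the leaf-4 constants above
 * are meant to be OR'ed together into the EAX value for one cache level; the
 * exact combination below is an assumption based on the macro names and on how
 * the CPUID-filling code later in this file is expected to use them:
 *
 *   uint32_t eax = CPUID_4_TYPE_DCACHE        // cache type = data cache
 *                | CPUID_4_LEVEL(1)           // cache level 1 in bits 7:5
 *                | CPUID_4_SELF_INIT_LEVEL;   // self-initializing cache
 */
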
79 #define ASSOC_FULL 0xFF
80
81 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
82 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
83 a == 2 ? 0x2 : \
84 a == 4 ? 0x4 : \
85 a == 8 ? 0x6 : \
86 a == 16 ? 0x8 : \
87 a == 32 ? 0xA : \
88 a == 48 ? 0xB : \
89 a == 64 ? 0xC : \
90 a == 96 ? 0xD : \
91 a == 128 ? 0xE : \
92 a == ASSOC_FULL ? 0xF : \
93 0 /* invalid value */)
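/* Editorial example (hedged): AMD's CPUID Fn8000_0006 ECX is assumed here to
 * report the L2 cache as (size_kb << 16) | (assoc << 12) | (lines_per_tag << 8)
 * | line_size, with the associativity field using the encoding above.  With the
 * constants defined below, that packing would look roughly like:
 *
 *   uint32_t ecx = (L2_SIZE_KB_AMD << 16)
 *                | (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12)
 *                | (L2_LINES_PER_TAG << 8)
 *                | L2_LINE_SIZE;
 *   // 512 KiB, 16-way (encoded as 0x8), 1 line per tag, 64-byte lines
 */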
94
95
96 /* Definitions of the hardcoded cache entries we expose: */
97
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
109
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
121
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
133
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
139
140 /* TLB definitions: */
141
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
146
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
151
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
156
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
161
162
163
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
166 {
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
172 }
173 dst[CPUID_VENDOR_SZ] = '\0';
174 }
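
/* Editorial usage sketch: the three 32-bit vendor words come from CPUID leaf 0,
 * where the 12-byte vendor string is returned in EBX, EDX, ECX order (see the
 * call in host_x86_cpu_class_init below).  For an Intel host:
 *
 *   char vendor[CPUID_VENDOR_SZ + 1];
 *   x86_cpu_vendor_words2str(vendor, 0x756e6547, 0x49656e69, 0x6c65746e);
 *   // vendor == "GenuineIntel"
 */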
175
176 /* feature flags taken from "Intel Processor Identification and the CPUID
177 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
178 * between feature naming conventions, aliases may be added.
179 */
180 static const char *feature_name[] = {
181 "fpu", "vme", "de", "pse",
182 "tsc", "msr", "pae", "mce",
183 "cx8", "apic", NULL, "sep",
184 "mtrr", "pge", "mca", "cmov",
185 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
186 NULL, "ds" /* Intel dts */, "acpi", "mmx",
187 "fxsr", "sse", "sse2", "ss",
188 "ht" /* Intel htt */, "tm", "ia64", "pbe",
189 };
190 static const char *ext_feature_name[] = {
191 "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
192 "ds_cpl", "vmx", "smx", "est",
193 "tm2", "ssse3", "cid", NULL,
194 "fma", "cx16", "xtpr", "pdcm",
195 NULL, "pcid", "dca", "sse4.1|sse4_1",
196 "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
197 "tsc-deadline", "aes", "xsave", "osxsave",
198 "avx", "f16c", "rdrand", "hypervisor",
199 };
200 /* Feature names that are already defined in feature_name[], but are also set
201  * in CPUID[8000_0001].EDX on AMD CPUs, don't have their names repeated in
202  * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
203  * if and only if the CPU vendor is AMD.
204  */
205 static const char *ext2_feature_name[] = {
206 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
207 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
208 NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
209 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
210 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
211 "nx|xd", NULL, "mmxext", NULL /* mmx */,
212 NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
213 NULL, "lm|i64", "3dnowext", "3dnow",
214 };
215 static const char *ext3_feature_name[] = {
216 "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
217 "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
218 "3dnowprefetch", "osvw", "ibs", "xop",
219 "skinit", "wdt", NULL, "lwp",
220 "fma4", "tce", NULL, "nodeid_msr",
221 NULL, "tbm", "topoext", "perfctr_core",
222 "perfctr_nb", NULL, NULL, NULL,
223 NULL, NULL, NULL, NULL,
224 };
225
226 static const char *ext4_feature_name[] = {
227 NULL, NULL, "xstore", "xstore-en",
228 NULL, NULL, "xcrypt", "xcrypt-en",
229 "ace2", "ace2-en", "phe", "phe-en",
230 "pmm", "pmm-en", NULL, NULL,
231 NULL, NULL, NULL, NULL,
232 NULL, NULL, NULL, NULL,
233 NULL, NULL, NULL, NULL,
234 NULL, NULL, NULL, NULL,
235 };
236
237 static const char *kvm_feature_name[] = {
238 "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
239 "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
240 NULL, NULL, NULL, NULL,
241 NULL, NULL, NULL, NULL,
242 NULL, NULL, NULL, NULL,
243 NULL, NULL, NULL, NULL,
244 "kvmclock-stable-bit", NULL, NULL, NULL,
245 NULL, NULL, NULL, NULL,
246 };
247
248 static const char *svm_feature_name[] = {
249 "npt", "lbrv", "svm_lock", "nrip_save",
250 "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
251 NULL, NULL, "pause_filter", NULL,
252 "pfthreshold", NULL, NULL, NULL,
253 NULL, NULL, NULL, NULL,
254 NULL, NULL, NULL, NULL,
255 NULL, NULL, NULL, NULL,
256 NULL, NULL, NULL, NULL,
257 };
258
259 static const char *cpuid_7_0_ebx_feature_name[] = {
260 "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
261 "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
262 "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
263 NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
264 };
265
266 static const char *cpuid_apm_edx_feature_name[] = {
267 NULL, NULL, NULL, NULL,
268 NULL, NULL, NULL, NULL,
269 "invtsc", NULL, NULL, NULL,
270 NULL, NULL, NULL, NULL,
271 NULL, NULL, NULL, NULL,
272 NULL, NULL, NULL, NULL,
273 NULL, NULL, NULL, NULL,
274 NULL, NULL, NULL, NULL,
275 };
276
277 static const char *cpuid_xsave_feature_name[] = {
278 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
279 NULL, NULL, NULL, NULL,
280 NULL, NULL, NULL, NULL,
281 NULL, NULL, NULL, NULL,
282 NULL, NULL, NULL, NULL,
283 NULL, NULL, NULL, NULL,
284 NULL, NULL, NULL, NULL,
285 NULL, NULL, NULL, NULL,
286 };
287
288 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
289 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
290 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
291 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
292 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
293 CPUID_PSE36 | CPUID_FXSR)
294 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
295 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
296 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
297 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
298 CPUID_PAE | CPUID_SEP | CPUID_APIC)
299
300 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
301 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
302 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
303 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
304 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
305 /* partly implemented:
306 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
307 /* missing:
308 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
309 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
310 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
311 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
312 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
313 /* missing:
314 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
315 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
316 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
317 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
318 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
319 CPUID_EXT_RDRAND */
320
321 #ifdef TARGET_X86_64
322 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
323 #else
324 #define TCG_EXT2_X86_64_FEATURES 0
325 #endif
326
327 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
328 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
329 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
330 TCG_EXT2_X86_64_FEATURES)
331 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
332 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
333 #define TCG_EXT4_FEATURES 0
334 #define TCG_SVM_FEATURES 0
335 #define TCG_KVM_FEATURES 0
336 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
337 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
338 /* missing:
339 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
340 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
341 CPUID_7_0_EBX_RDSEED */
342 #define TCG_APM_FEATURES 0
343
344
345 typedef struct FeatureWordInfo {
346 const char **feat_names;
347 uint32_t cpuid_eax; /* Input EAX for CPUID */
348 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
349 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
350 int cpuid_reg; /* output register (R_* constant) */
351 uint32_t tcg_features; /* Feature flags supported by TCG */
352 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
353 } FeatureWordInfo;
354
355 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
356 [FEAT_1_EDX] = {
357 .feat_names = feature_name,
358 .cpuid_eax = 1, .cpuid_reg = R_EDX,
359 .tcg_features = TCG_FEATURES,
360 },
361 [FEAT_1_ECX] = {
362 .feat_names = ext_feature_name,
363 .cpuid_eax = 1, .cpuid_reg = R_ECX,
364 .tcg_features = TCG_EXT_FEATURES,
365 },
366 [FEAT_8000_0001_EDX] = {
367 .feat_names = ext2_feature_name,
368 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
369 .tcg_features = TCG_EXT2_FEATURES,
370 },
371 [FEAT_8000_0001_ECX] = {
372 .feat_names = ext3_feature_name,
373 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
374 .tcg_features = TCG_EXT3_FEATURES,
375 },
376 [FEAT_C000_0001_EDX] = {
377 .feat_names = ext4_feature_name,
378 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
379 .tcg_features = TCG_EXT4_FEATURES,
380 },
381 [FEAT_KVM] = {
382 .feat_names = kvm_feature_name,
383 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
384 .tcg_features = TCG_KVM_FEATURES,
385 },
386 [FEAT_SVM] = {
387 .feat_names = svm_feature_name,
388 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
389 .tcg_features = TCG_SVM_FEATURES,
390 },
391 [FEAT_7_0_EBX] = {
392 .feat_names = cpuid_7_0_ebx_feature_name,
393 .cpuid_eax = 7,
394 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
395 .cpuid_reg = R_EBX,
396 .tcg_features = TCG_7_0_EBX_FEATURES,
397 },
398 [FEAT_8000_0007_EDX] = {
399 .feat_names = cpuid_apm_edx_feature_name,
400 .cpuid_eax = 0x80000007,
401 .cpuid_reg = R_EDX,
402 .tcg_features = TCG_APM_FEATURES,
403 .unmigratable_flags = CPUID_APM_INVTSC,
404 },
405 [FEAT_XSAVE] = {
406 .feat_names = cpuid_xsave_feature_name,
407 .cpuid_eax = 0xd,
408 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
409 .cpuid_reg = R_EAX,
410 .tcg_features = 0,
411 },
412 };
413
414 typedef struct X86RegisterInfo32 {
415 /* Name of register */
416 const char *name;
417 /* QAPI enum value register */
418 X86CPURegister32 qapi_enum;
419 } X86RegisterInfo32;
420
421 #define REGISTER(reg) \
422 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
423 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
424 REGISTER(EAX),
425 REGISTER(ECX),
426 REGISTER(EDX),
427 REGISTER(EBX),
428 REGISTER(ESP),
429 REGISTER(EBP),
430 REGISTER(ESI),
431 REGISTER(EDI),
432 };
433 #undef REGISTER
434
435 typedef struct ExtSaveArea {
436 uint32_t feature, bits;
437 uint32_t offset, size;
438 } ExtSaveArea;
439
440 static const ExtSaveArea ext_save_areas[] = {
441 [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
442 .offset = 0x240, .size = 0x100 },
443 [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
444 .offset = 0x3c0, .size = 0x40 },
445 [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
446 .offset = 0x400, .size = 0x40 },
447 [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
448 .offset = 0x440, .size = 0x40 },
449 [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
450 .offset = 0x480, .size = 0x200 },
451 [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
452 .offset = 0x680, .size = 0x400 },
453 };
454
455 const char *get_register_name_32(unsigned int reg)
456 {
457 if (reg >= CPU_NB_REGS32) {
458 return NULL;
459 }
460 return x86_reg_info_32[reg].name;
461 }
462
463 /* KVM-specific features that are automatically added to all CPU models
464 * when KVM is enabled.
465 */
466 static uint32_t kvm_default_features[FEATURE_WORDS] = {
467 [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
468 (1 << KVM_FEATURE_NOP_IO_DELAY) |
469 (1 << KVM_FEATURE_CLOCKSOURCE2) |
470 (1 << KVM_FEATURE_ASYNC_PF) |
471 (1 << KVM_FEATURE_STEAL_TIME) |
472 (1 << KVM_FEATURE_PV_EOI) |
473 (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
474 [FEAT_1_ECX] = CPUID_EXT_X2APIC,
475 };
476
477 /* Features that are not added by default to any CPU model when KVM is enabled.
478 */
479 static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
480 [FEAT_1_EDX] = CPUID_ACPI,
481 [FEAT_1_ECX] = CPUID_EXT_MONITOR,
482 [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
483 };
484
485 void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
486 {
487 kvm_default_features[w] &= ~features;
488 }
489
490 void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
491 {
492 kvm_default_unset_features[w] &= ~features;
493 }
494
495 /*
496 * Returns the set of feature flags that are supported and migratable by
497 * QEMU, for a given FeatureWord.
498 */
499 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
500 {
501 FeatureWordInfo *wi = &feature_word_info[w];
502 uint32_t r = 0;
503 int i;
504
505 for (i = 0; i < 32; i++) {
506 uint32_t f = 1U << i;
507 /* If the feature name is unknown, it is not supported by QEMU yet */
508 if (!wi->feat_names[i]) {
509 continue;
510 }
511 /* Skip features known to QEMU, but explicitly marked as unmigratable */
512 if (wi->unmigratable_flags & f) {
513 continue;
514 }
515 r |= f;
516 }
517 return r;
518 }
519
520 void host_cpuid(uint32_t function, uint32_t count,
521 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
522 {
523 uint32_t vec[4];
524
525 #ifdef __x86_64__
526 asm volatile("cpuid"
527 : "=a"(vec[0]), "=b"(vec[1]),
528 "=c"(vec[2]), "=d"(vec[3])
529 : "0"(function), "c"(count) : "cc");
530 #elif defined(__i386__)
531 asm volatile("pusha \n\t"
532 "cpuid \n\t"
533 "mov %%eax, 0(%2) \n\t"
534 "mov %%ebx, 4(%2) \n\t"
535 "mov %%ecx, 8(%2) \n\t"
536 "mov %%edx, 12(%2) \n\t"
537 "popa"
538 : : "a"(function), "c"(count), "S"(vec)
539 : "memory", "cc");
540 #else
541 abort();
542 #endif
543
544 if (eax)
545 *eax = vec[0];
546 if (ebx)
547 *ebx = vec[1];
548 if (ecx)
549 *ecx = vec[2];
550 if (edx)
551 *edx = vec[3];
552 }
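
/* Editorial usage sketch: query the host's maximum basic CPUID leaf and vendor
 * string.  This is only meaningful when QEMU itself runs on an x86 host; on any
 * other host the function aborts, as seen above.
 *
 *   uint32_t eax, ebx, ecx, edx;
 *   host_cpuid(0, 0, &eax, &ebx, &ecx, &edx);
 *   // eax = highest supported basic leaf; ebx/edx/ecx hold the vendor string
 */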
553
554 #define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))
555
556 /* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
557 * a substring. ex if !NULL points to the first char after a substring,
558 * otherwise the string is assumed to be sized by a terminating nul.
559 * Return lexical ordering of *s1:*s2.
560 */
561 static int sstrcmp(const char *s1, const char *e1,
562 const char *s2, const char *e2)
563 {
564 for (;;) {
565 if (!*s1 || !*s2 || *s1 != *s2)
566 return (*s1 - *s2);
567 ++s1, ++s2;
568 if (s1 == e1 && s2 == e2)
569 return (0);
570 else if (s1 == e1)
571 return (*s2);
572 else if (s2 == e2)
573 return (*s1);
574 }
575 }
576
577 /* compare *[s..e) to *altstr. *altstr may be a simple string or multiple
578 * '|' delimited (possibly empty) strings in which case search for a match
579 * within the alternatives proceeds left to right. Return 0 for success,
580 * non-zero otherwise.
581 */
582 static int altcmp(const char *s, const char *e, const char *altstr)
583 {
584 const char *p, *q;
585
586 for (q = p = altstr; ; ) {
587 while (*p && *p != '|')
588 ++p;
589 if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
590 return (0);
591 if (!*p)
592 return (1);
593 else
594 q = ++p;
595 }
596 }
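
/* Editorial example: altcmp() is what makes the "a|b" aliases in the feature
 * name tables above work.  With altstr = "sse4.1|sse4_1", both spellings match
 * (a NULL end pointer means the flag string is nul-terminated):
 *
 *   altcmp("sse4.1", NULL, "sse4.1|sse4_1");  // returns 0 (match)
 *   altcmp("sse4_1", NULL, "sse4.1|sse4_1");  // returns 0 (match)
 *   altcmp("sse4",   NULL, "sse4.1|sse4_1");  // returns non-zero (no match)
 */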
597
598 /* search featureset for flag *[s..e), if found set corresponding bit in
599 * *pval and return true, otherwise return false
600 */
601 static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
602 const char **featureset)
603 {
604 uint32_t mask;
605 const char **ppc;
606 bool found = false;
607
608 for (mask = 1, ppc = featureset; mask; mask <<= 1, ++ppc) {
609 if (*ppc && !altcmp(s, e, *ppc)) {
610 *pval |= mask;
611 found = true;
612 }
613 }
614 return found;
615 }
616
617 static void add_flagname_to_bitmaps(const char *flagname,
618 FeatureWordArray words,
619 Error **errp)
620 {
621 FeatureWord w;
622 for (w = 0; w < FEATURE_WORDS; w++) {
623 FeatureWordInfo *wi = &feature_word_info[w];
624 if (wi->feat_names &&
625 lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
626 break;
627 }
628 }
629 if (w == FEATURE_WORDS) {
630 error_setg(errp, "CPU feature %s not found", flagname);
631 }
632 }
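
/* Editorial example: a "+avx2" option on the -cpu command line reaches this
 * function as flagname = "avx2".  Based on cpuid_7_0_ebx_feature_name[] above,
 * that should set bit 5 of the FEAT_7_0_EBX word:
 *
 *   FeatureWordArray plus_features = { 0 };
 *   Error *err = NULL;
 *   add_flagname_to_bitmaps("avx2", plus_features, &err);
 *   // on success, plus_features[FEAT_7_0_EBX] now has (1 << 5) set
 */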
633
634 /* CPU class name definitions: */
635
636 #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
637 #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)
638
639 /* Return type name for a given CPU model name
640 * Caller is responsible for freeing the returned string.
641 */
642 static char *x86_cpu_type_name(const char *model_name)
643 {
644 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
645 }
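
/* Editorial example (hedged): on a 64-bit target, where TYPE_X86_CPU is
 * expected to expand to "x86_64-cpu", this yields a QOM type name such as:
 *
 *   char *t = x86_cpu_type_name("qemu64");   // "qemu64-x86_64-cpu"
 *   ...
 *   g_free(t);                               // caller owns the returned string
 */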
646
647 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
648 {
649 ObjectClass *oc;
650 char *typename;
651
652 if (cpu_model == NULL) {
653 return NULL;
654 }
655
656 typename = x86_cpu_type_name(cpu_model);
657 oc = object_class_by_name(typename);
658 g_free(typename);
659 return oc;
660 }
661
662 struct X86CPUDefinition {
663 const char *name;
664 uint32_t level;
665 uint32_t xlevel;
666 uint32_t xlevel2;
667 /* vendor is zero-terminated, 12 character ASCII string */
668 char vendor[CPUID_VENDOR_SZ + 1];
669 int family;
670 int model;
671 int stepping;
672 FeatureWordArray features;
673 char model_id[48];
674 bool cache_info_passthrough;
675 };
676
677 static X86CPUDefinition builtin_x86_defs[] = {
678 {
679 .name = "qemu64",
680 .level = 4,
681 .vendor = CPUID_VENDOR_AMD,
682 .family = 6,
683 .model = 6,
684 .stepping = 3,
685 .features[FEAT_1_EDX] =
686 PPRO_FEATURES |
687 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
688 CPUID_PSE36,
689 .features[FEAT_1_ECX] =
690 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
691 .features[FEAT_8000_0001_EDX] =
692 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
693 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
694 .features[FEAT_8000_0001_ECX] =
695 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
696 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
697 .xlevel = 0x8000000A,
698 },
699 {
700 .name = "phenom",
701 .level = 5,
702 .vendor = CPUID_VENDOR_AMD,
703 .family = 16,
704 .model = 2,
705 .stepping = 3,
706 /* Missing: CPUID_HT */
707 .features[FEAT_1_EDX] =
708 PPRO_FEATURES |
709 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
710 CPUID_PSE36 | CPUID_VME,
711 .features[FEAT_1_ECX] =
712 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
713 CPUID_EXT_POPCNT,
714 .features[FEAT_8000_0001_EDX] =
715 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
716 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
717 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
718 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
719 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
720 CPUID_EXT3_CR8LEG,
721 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
722 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
723 .features[FEAT_8000_0001_ECX] =
724 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
725 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
726 /* Missing: CPUID_SVM_LBRV */
727 .features[FEAT_SVM] =
728 CPUID_SVM_NPT,
729 .xlevel = 0x8000001A,
730 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
731 },
732 {
733 .name = "core2duo",
734 .level = 10,
735 .vendor = CPUID_VENDOR_INTEL,
736 .family = 6,
737 .model = 15,
738 .stepping = 11,
739 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
740 .features[FEAT_1_EDX] =
741 PPRO_FEATURES |
742 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
743 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
744 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
745 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
746 .features[FEAT_1_ECX] =
747 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
748 CPUID_EXT_CX16,
749 .features[FEAT_8000_0001_EDX] =
750 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
751 .features[FEAT_8000_0001_ECX] =
752 CPUID_EXT3_LAHF_LM,
753 .xlevel = 0x80000008,
754 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
755 },
756 {
757 .name = "kvm64",
758 .level = 5,
759 .vendor = CPUID_VENDOR_INTEL,
760 .family = 15,
761 .model = 6,
762 .stepping = 1,
763 /* Missing: CPUID_HT */
764 .features[FEAT_1_EDX] =
765 PPRO_FEATURES | CPUID_VME |
766 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
767 CPUID_PSE36,
768 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
769 .features[FEAT_1_ECX] =
770 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
771 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
772 .features[FEAT_8000_0001_EDX] =
773 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
774 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
775 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
776 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
777 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
778 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
779 .features[FEAT_8000_0001_ECX] =
780 0,
781 .xlevel = 0x80000008,
782 .model_id = "Common KVM processor"
783 },
784 {
785 .name = "qemu32",
786 .level = 4,
787 .vendor = CPUID_VENDOR_INTEL,
788 .family = 6,
789 .model = 6,
790 .stepping = 3,
791 .features[FEAT_1_EDX] =
792 PPRO_FEATURES,
793 .features[FEAT_1_ECX] =
794 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
795 .xlevel = 0x80000004,
796 },
797 {
798 .name = "kvm32",
799 .level = 5,
800 .vendor = CPUID_VENDOR_INTEL,
801 .family = 15,
802 .model = 6,
803 .stepping = 1,
804 .features[FEAT_1_EDX] =
805 PPRO_FEATURES | CPUID_VME |
806 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
807 .features[FEAT_1_ECX] =
808 CPUID_EXT_SSE3,
809 .features[FEAT_8000_0001_EDX] =
810 PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
811 .features[FEAT_8000_0001_ECX] =
812 0,
813 .xlevel = 0x80000008,
814 .model_id = "Common 32-bit KVM processor"
815 },
816 {
817 .name = "coreduo",
818 .level = 10,
819 .vendor = CPUID_VENDOR_INTEL,
820 .family = 6,
821 .model = 14,
822 .stepping = 8,
823 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
824 .features[FEAT_1_EDX] =
825 PPRO_FEATURES | CPUID_VME |
826 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
827 CPUID_SS,
828 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
829 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
830 .features[FEAT_1_ECX] =
831 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
832 .features[FEAT_8000_0001_EDX] =
833 CPUID_EXT2_NX,
834 .xlevel = 0x80000008,
835 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
836 },
837 {
838 .name = "486",
839 .level = 1,
840 .vendor = CPUID_VENDOR_INTEL,
841 .family = 4,
842 .model = 8,
843 .stepping = 0,
844 .features[FEAT_1_EDX] =
845 I486_FEATURES,
846 .xlevel = 0,
847 },
848 {
849 .name = "pentium",
850 .level = 1,
851 .vendor = CPUID_VENDOR_INTEL,
852 .family = 5,
853 .model = 4,
854 .stepping = 3,
855 .features[FEAT_1_EDX] =
856 PENTIUM_FEATURES,
857 .xlevel = 0,
858 },
859 {
860 .name = "pentium2",
861 .level = 2,
862 .vendor = CPUID_VENDOR_INTEL,
863 .family = 6,
864 .model = 5,
865 .stepping = 2,
866 .features[FEAT_1_EDX] =
867 PENTIUM2_FEATURES,
868 .xlevel = 0,
869 },
870 {
871 .name = "pentium3",
872 .level = 2,
873 .vendor = CPUID_VENDOR_INTEL,
874 .family = 6,
875 .model = 7,
876 .stepping = 3,
877 .features[FEAT_1_EDX] =
878 PENTIUM3_FEATURES,
879 .xlevel = 0,
880 },
881 {
882 .name = "athlon",
883 .level = 2,
884 .vendor = CPUID_VENDOR_AMD,
885 .family = 6,
886 .model = 2,
887 .stepping = 3,
888 .features[FEAT_1_EDX] =
889 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
890 CPUID_MCA,
891 .features[FEAT_8000_0001_EDX] =
892 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
893 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
894 .xlevel = 0x80000008,
895 },
896 {
897 .name = "n270",
898 /* original is on level 10 */
899 .level = 5,
900 .vendor = CPUID_VENDOR_INTEL,
901 .family = 6,
902 .model = 28,
903 .stepping = 2,
904 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
905 .features[FEAT_1_EDX] =
906 PPRO_FEATURES |
907 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
908 CPUID_ACPI | CPUID_SS,
909 /* Some CPUs have no CPUID_SEP */
910 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
911 * CPUID_EXT_XTPR */
912 .features[FEAT_1_ECX] =
913 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
914 CPUID_EXT_MOVBE,
915 .features[FEAT_8000_0001_EDX] =
916 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
917 CPUID_EXT2_NX,
918 .features[FEAT_8000_0001_ECX] =
919 CPUID_EXT3_LAHF_LM,
920 .xlevel = 0x8000000A,
921 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
922 },
923 {
924 .name = "Conroe",
925 .level = 4,
926 .vendor = CPUID_VENDOR_INTEL,
927 .family = 6,
928 .model = 15,
929 .stepping = 3,
930 .features[FEAT_1_EDX] =
931 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
932 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
933 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
934 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
935 CPUID_DE | CPUID_FP87,
936 .features[FEAT_1_ECX] =
937 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
938 .features[FEAT_8000_0001_EDX] =
939 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
940 .features[FEAT_8000_0001_ECX] =
941 CPUID_EXT3_LAHF_LM,
942 .xlevel = 0x8000000A,
943 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
944 },
945 {
946 .name = "Penryn",
947 .level = 4,
948 .vendor = CPUID_VENDOR_INTEL,
949 .family = 6,
950 .model = 23,
951 .stepping = 3,
952 .features[FEAT_1_EDX] =
953 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
954 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
955 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
956 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
957 CPUID_DE | CPUID_FP87,
958 .features[FEAT_1_ECX] =
959 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
960 CPUID_EXT_SSE3,
961 .features[FEAT_8000_0001_EDX] =
962 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
963 .features[FEAT_8000_0001_ECX] =
964 CPUID_EXT3_LAHF_LM,
965 .xlevel = 0x8000000A,
966 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
967 },
968 {
969 .name = "Nehalem",
970 .level = 4,
971 .vendor = CPUID_VENDOR_INTEL,
972 .family = 6,
973 .model = 26,
974 .stepping = 3,
975 .features[FEAT_1_EDX] =
976 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
977 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
978 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
979 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
980 CPUID_DE | CPUID_FP87,
981 .features[FEAT_1_ECX] =
982 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
983 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
984 .features[FEAT_8000_0001_EDX] =
985 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
986 .features[FEAT_8000_0001_ECX] =
987 CPUID_EXT3_LAHF_LM,
988 .xlevel = 0x8000000A,
989 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
990 },
991 {
992 .name = "Westmere",
993 .level = 11,
994 .vendor = CPUID_VENDOR_INTEL,
995 .family = 6,
996 .model = 44,
997 .stepping = 1,
998 .features[FEAT_1_EDX] =
999 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1000 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1001 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1002 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1003 CPUID_DE | CPUID_FP87,
1004 .features[FEAT_1_ECX] =
1005 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1006 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1007 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1008 .features[FEAT_8000_0001_EDX] =
1009 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1010 .features[FEAT_8000_0001_ECX] =
1011 CPUID_EXT3_LAHF_LM,
1012 .xlevel = 0x8000000A,
1013 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1014 },
1015 {
1016 .name = "SandyBridge",
1017 .level = 0xd,
1018 .vendor = CPUID_VENDOR_INTEL,
1019 .family = 6,
1020 .model = 42,
1021 .stepping = 1,
1022 .features[FEAT_1_EDX] =
1023 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1024 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1025 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1026 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1027 CPUID_DE | CPUID_FP87,
1028 .features[FEAT_1_ECX] =
1029 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1030 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1031 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1032 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1033 CPUID_EXT_SSE3,
1034 .features[FEAT_8000_0001_EDX] =
1035 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1036 CPUID_EXT2_SYSCALL,
1037 .features[FEAT_8000_0001_ECX] =
1038 CPUID_EXT3_LAHF_LM,
1039 .features[FEAT_XSAVE] =
1040 CPUID_XSAVE_XSAVEOPT,
1041 .xlevel = 0x8000000A,
1042 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1043 },
1044 {
1045 .name = "IvyBridge",
1046 .level = 0xd,
1047 .vendor = CPUID_VENDOR_INTEL,
1048 .family = 6,
1049 .model = 58,
1050 .stepping = 9,
1051 .features[FEAT_1_EDX] =
1052 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1053 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1054 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1055 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1056 CPUID_DE | CPUID_FP87,
1057 .features[FEAT_1_ECX] =
1058 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1059 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1060 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1061 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1062 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1063 .features[FEAT_7_0_EBX] =
1064 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1065 CPUID_7_0_EBX_ERMS,
1066 .features[FEAT_8000_0001_EDX] =
1067 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1068 CPUID_EXT2_SYSCALL,
1069 .features[FEAT_8000_0001_ECX] =
1070 CPUID_EXT3_LAHF_LM,
1071 .features[FEAT_XSAVE] =
1072 CPUID_XSAVE_XSAVEOPT,
1073 .xlevel = 0x8000000A,
1074 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1075 },
1076 {
1077 .name = "Haswell",
1078 .level = 0xd,
1079 .vendor = CPUID_VENDOR_INTEL,
1080 .family = 6,
1081 .model = 60,
1082 .stepping = 1,
1083 .features[FEAT_1_EDX] =
1084 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1085 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1086 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1087 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1088 CPUID_DE | CPUID_FP87,
1089 .features[FEAT_1_ECX] =
1090 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1091 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1092 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1093 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1094 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1095 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1096 .features[FEAT_8000_0001_EDX] =
1097 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1098 CPUID_EXT2_SYSCALL,
1099 .features[FEAT_8000_0001_ECX] =
1100 CPUID_EXT3_LAHF_LM,
1101 .features[FEAT_7_0_EBX] =
1102 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1103 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1104 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1105 CPUID_7_0_EBX_RTM,
1106 .features[FEAT_XSAVE] =
1107 CPUID_XSAVE_XSAVEOPT,
1108 .xlevel = 0x8000000A,
1109 .model_id = "Intel Core Processor (Haswell)",
1110 },
1111 {
1112 .name = "Broadwell",
1113 .level = 0xd,
1114 .vendor = CPUID_VENDOR_INTEL,
1115 .family = 6,
1116 .model = 61,
1117 .stepping = 2,
1118 .features[FEAT_1_EDX] =
1119 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1120 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1121 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1122 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1123 CPUID_DE | CPUID_FP87,
1124 .features[FEAT_1_ECX] =
1125 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1126 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1127 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1128 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1129 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1130 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1131 .features[FEAT_8000_0001_EDX] =
1132 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1133 CPUID_EXT2_SYSCALL,
1134 .features[FEAT_8000_0001_ECX] =
1135 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1136 .features[FEAT_7_0_EBX] =
1137 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1138 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1139 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1140 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1141 CPUID_7_0_EBX_SMAP,
1142 .features[FEAT_XSAVE] =
1143 CPUID_XSAVE_XSAVEOPT,
1144 .xlevel = 0x8000000A,
1145 .model_id = "Intel Core Processor (Broadwell)",
1146 },
1147 {
1148 .name = "Opteron_G1",
1149 .level = 5,
1150 .vendor = CPUID_VENDOR_AMD,
1151 .family = 15,
1152 .model = 6,
1153 .stepping = 1,
1154 .features[FEAT_1_EDX] =
1155 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1156 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1157 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1158 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1159 CPUID_DE | CPUID_FP87,
1160 .features[FEAT_1_ECX] =
1161 CPUID_EXT_SSE3,
1162 .features[FEAT_8000_0001_EDX] =
1163 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1164 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1165 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1166 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1167 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1168 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1169 .xlevel = 0x80000008,
1170 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1171 },
1172 {
1173 .name = "Opteron_G2",
1174 .level = 5,
1175 .vendor = CPUID_VENDOR_AMD,
1176 .family = 15,
1177 .model = 6,
1178 .stepping = 1,
1179 .features[FEAT_1_EDX] =
1180 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1181 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1182 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1183 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1184 CPUID_DE | CPUID_FP87,
1185 .features[FEAT_1_ECX] =
1186 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1187 .features[FEAT_8000_0001_EDX] =
1188 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1189 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1190 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1191 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1192 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1193 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1194 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1195 .features[FEAT_8000_0001_ECX] =
1196 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1197 .xlevel = 0x80000008,
1198 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1199 },
1200 {
1201 .name = "Opteron_G3",
1202 .level = 5,
1203 .vendor = CPUID_VENDOR_AMD,
1204 .family = 15,
1205 .model = 6,
1206 .stepping = 1,
1207 .features[FEAT_1_EDX] =
1208 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1209 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1210 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1211 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1212 CPUID_DE | CPUID_FP87,
1213 .features[FEAT_1_ECX] =
1214 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1215 CPUID_EXT_SSE3,
1216 .features[FEAT_8000_0001_EDX] =
1217 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1218 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1219 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1220 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1221 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1222 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1223 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1224 .features[FEAT_8000_0001_ECX] =
1225 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1226 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1227 .xlevel = 0x80000008,
1228 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1229 },
1230 {
1231 .name = "Opteron_G4",
1232 .level = 0xd,
1233 .vendor = CPUID_VENDOR_AMD,
1234 .family = 21,
1235 .model = 1,
1236 .stepping = 2,
1237 .features[FEAT_1_EDX] =
1238 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1239 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1240 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1241 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1242 CPUID_DE | CPUID_FP87,
1243 .features[FEAT_1_ECX] =
1244 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1245 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1246 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1247 CPUID_EXT_SSE3,
1248 .features[FEAT_8000_0001_EDX] =
1249 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1250 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1251 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1252 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1253 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1254 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1255 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1256 .features[FEAT_8000_0001_ECX] =
1257 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1258 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1259 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1260 CPUID_EXT3_LAHF_LM,
1261 /* no xsaveopt! */
1262 .xlevel = 0x8000001A,
1263 .model_id = "AMD Opteron 62xx class CPU",
1264 },
1265 {
1266 .name = "Opteron_G5",
1267 .level = 0xd,
1268 .vendor = CPUID_VENDOR_AMD,
1269 .family = 21,
1270 .model = 2,
1271 .stepping = 0,
1272 .features[FEAT_1_EDX] =
1273 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1274 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1275 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1276 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1277 CPUID_DE | CPUID_FP87,
1278 .features[FEAT_1_ECX] =
1279 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1280 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1281 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1282 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1283 .features[FEAT_8000_0001_EDX] =
1284 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1285 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1286 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1287 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1288 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1289 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1290 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1291 .features[FEAT_8000_0001_ECX] =
1292 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1293 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1294 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1295 CPUID_EXT3_LAHF_LM,
1296 /* no xsaveopt! */
1297 .xlevel = 0x8000001A,
1298 .model_id = "AMD Opteron 63xx class CPU",
1299 },
1300 };
1301
1302 /**
1303 * x86_cpu_compat_set_features:
1304 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1305 * @w: Identifies the feature word to be changed.
1306 * @feat_add: Feature bits to be added to feature word
1307 * @feat_remove: Feature bits to be removed from feature word
1308 *
1309 * Change CPU model feature bits for compatibility.
1310 *
1311 * This function may be used by machine-type compatibility functions
1312 * to enable or disable feature bits on specific CPU models.
1313 */
1314 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1315 uint32_t feat_add, uint32_t feat_remove)
1316 {
1317 X86CPUDefinition *def;
1318 int i;
1319 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1320 def = &builtin_x86_defs[i];
1321 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1322 def->features[w] |= feat_add;
1323 def->features[w] &= ~feat_remove;
1324 }
1325 }
1326 }
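
/* Editorial usage sketch (hypothetical call, not taken from this file): a
 * machine-type compatibility function could mask a feature out of one model,
 * for example dropping MONITOR/MWAIT from the "Opteron_G3" definition:
 *
 *   x86_cpu_compat_set_features("Opteron_G3", FEAT_1_ECX, 0, CPUID_EXT_MONITOR);
 *
 * Passing NULL as the model name applies the change to every entry in
 * builtin_x86_defs[].
 */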
1327
1328 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1329 bool migratable_only);
1330
1331 #ifdef CONFIG_KVM
1332
1333 static int cpu_x86_fill_model_id(char *str)
1334 {
1335 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1336 int i;
1337
1338 for (i = 0; i < 3; i++) {
1339 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
1340 memcpy(str + i * 16 + 0, &eax, 4);
1341 memcpy(str + i * 16 + 4, &ebx, 4);
1342 memcpy(str + i * 16 + 8, &ecx, 4);
1343 memcpy(str + i * 16 + 12, &edx, 4);
1344 }
1345 return 0;
1346 }
1347
1348 static X86CPUDefinition host_cpudef;
1349
1350 static Property host_x86_cpu_properties[] = {
1351 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
1352 DEFINE_PROP_END_OF_LIST()
1353 };
1354
1355 /* class_init for the "host" CPU model
1356 *
1357 * This function may be called before KVM is initialized.
1358 */
1359 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
1360 {
1361 DeviceClass *dc = DEVICE_CLASS(oc);
1362 X86CPUClass *xcc = X86_CPU_CLASS(oc);
1363 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
1364
1365 xcc->kvm_required = true;
1366
1367 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1368 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
1369
1370 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1371 host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1372 host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1373 host_cpudef.stepping = eax & 0x0F;
1374
1375 cpu_x86_fill_model_id(host_cpudef.model_id);
1376
1377 xcc->cpu_def = &host_cpudef;
1378 host_cpudef.cache_info_passthrough = true;
1379
1380 /* level, xlevel, xlevel2, and the feature words are initialized on
1381 * instance_init, because they require KVM to be initialized.
1382 */
1383
1384 dc->props = host_x86_cpu_properties;
1385 }
1386
1387 static void host_x86_cpu_initfn(Object *obj)
1388 {
1389 X86CPU *cpu = X86_CPU(obj);
1390 CPUX86State *env = &cpu->env;
1391 KVMState *s = kvm_state;
1392
1393 assert(kvm_enabled());
1394
1395 /* We can't fill the features array here because we don't know yet if
1396 * "migratable" is true or false.
1397 */
1398 cpu->host_features = true;
1399
1400 env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
1401 env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
1402 env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
1403
1404 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
1405 }
1406
1407 static const TypeInfo host_x86_cpu_type_info = {
1408 .name = X86_CPU_TYPE_NAME("host"),
1409 .parent = TYPE_X86_CPU,
1410 .instance_init = host_x86_cpu_initfn,
1411 .class_init = host_x86_cpu_class_init,
1412 };
1413
1414 #endif
1415
1416 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1417 {
1418 FeatureWordInfo *f = &feature_word_info[w];
1419 int i;
1420
1421 for (i = 0; i < 32; ++i) {
1422 if (1 << i & mask) {
1423 const char *reg = get_register_name_32(f->cpuid_reg);
1424 assert(reg);
1425 fprintf(stderr, "warning: %s doesn't support requested feature: "
1426 "CPUID.%02XH:%s%s%s [bit %d]\n",
1427 kvm_enabled() ? "host" : "TCG",
1428 f->cpuid_eax, reg,
1429 f->feat_names[i] ? "." : "",
1430 f->feat_names[i] ? f->feat_names[i] : "", i);
1431 }
1432 }
1433 }
1434
1435 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1436 const char *name, Error **errp)
1437 {
1438 X86CPU *cpu = X86_CPU(obj);
1439 CPUX86State *env = &cpu->env;
1440 int64_t value;
1441
1442 value = (env->cpuid_version >> 8) & 0xf;
1443 if (value == 0xf) {
1444 value += (env->cpuid_version >> 20) & 0xff;
1445 }
1446 visit_type_int(v, &value, name, errp);
1447 }
1448
1449 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1450 const char *name, Error **errp)
1451 {
1452 X86CPU *cpu = X86_CPU(obj);
1453 CPUX86State *env = &cpu->env;
1454 const int64_t min = 0;
1455 const int64_t max = 0xff + 0xf;
1456 Error *local_err = NULL;
1457 int64_t value;
1458
1459 visit_type_int(v, &value, name, &local_err);
1460 if (local_err) {
1461 error_propagate(errp, local_err);
1462 return;
1463 }
1464 if (value < min || value > max) {
1465 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1466 name ? name : "null", value, min, max);
1467 return;
1468 }
1469
1470 env->cpuid_version &= ~0xff00f00;
1471 if (value > 0x0f) {
1472 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1473 } else {
1474 env->cpuid_version |= value << 8;
1475 }
1476 }
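
/* Editorial worked example: the CPUID family is split across the base field
 * (bits 11:8) and the extended field (bits 27:20).  Setting family 21 (0x15,
 * as used by the Opteron_G4/G5 definitions above) therefore stores 0xF in the
 * base field and 21 - 15 = 6 in the extended field:
 *
 *   env->cpuid_version |= 0xf00 | ((21 - 0x0f) << 20);   // 0x600f00
 */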
1477
1478 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1479 const char *name, Error **errp)
1480 {
1481 X86CPU *cpu = X86_CPU(obj);
1482 CPUX86State *env = &cpu->env;
1483 int64_t value;
1484
1485 value = (env->cpuid_version >> 4) & 0xf;
1486 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1487 visit_type_int(v, &value, name, errp);
1488 }
1489
1490 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1491 const char *name, Error **errp)
1492 {
1493 X86CPU *cpu = X86_CPU(obj);
1494 CPUX86State *env = &cpu->env;
1495 const int64_t min = 0;
1496 const int64_t max = 0xff;
1497 Error *local_err = NULL;
1498 int64_t value;
1499
1500 visit_type_int(v, &value, name, &local_err);
1501 if (local_err) {
1502 error_propagate(errp, local_err);
1503 return;
1504 }
1505 if (value < min || value > max) {
1506 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1507 name ? name : "null", value, min, max);
1508 return;
1509 }
1510
1511 env->cpuid_version &= ~0xf00f0;
1512 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1513 }
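
/* Editorial worked example: the model number is likewise split, with its low
 * nibble in bits 7:4 and its high nibble in bits 19:16.  Model 58 (0x3A, the
 * IvyBridge definition above) becomes:
 *
 *   env->cpuid_version |= ((58 & 0xf) << 4) | ((58 >> 4) << 16);  // 0x300a0
 */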
1514
1515 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1516 void *opaque, const char *name,
1517 Error **errp)
1518 {
1519 X86CPU *cpu = X86_CPU(obj);
1520 CPUX86State *env = &cpu->env;
1521 int64_t value;
1522
1523 value = env->cpuid_version & 0xf;
1524 visit_type_int(v, &value, name, errp);
1525 }
1526
1527 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1528 void *opaque, const char *name,
1529 Error **errp)
1530 {
1531 X86CPU *cpu = X86_CPU(obj);
1532 CPUX86State *env = &cpu->env;
1533 const int64_t min = 0;
1534 const int64_t max = 0xf;
1535 Error *local_err = NULL;
1536 int64_t value;
1537
1538 visit_type_int(v, &value, name, &local_err);
1539 if (local_err) {
1540 error_propagate(errp, local_err);
1541 return;
1542 }
1543 if (value < min || value > max) {
1544 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1545 name ? name : "null", value, min, max);
1546 return;
1547 }
1548
1549 env->cpuid_version &= ~0xf;
1550 env->cpuid_version |= value & 0xf;
1551 }
1552
1553 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
1554 const char *name, Error **errp)
1555 {
1556 X86CPU *cpu = X86_CPU(obj);
1557
1558 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1559 }
1560
1561 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
1562 const char *name, Error **errp)
1563 {
1564 X86CPU *cpu = X86_CPU(obj);
1565
1566 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1567 }
1568
1569 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
1570 const char *name, Error **errp)
1571 {
1572 X86CPU *cpu = X86_CPU(obj);
1573
1574 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1575 }
1576
1577 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
1578 const char *name, Error **errp)
1579 {
1580 X86CPU *cpu = X86_CPU(obj);
1581
1582 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1583 }
1584
1585 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1586 {
1587 X86CPU *cpu = X86_CPU(obj);
1588 CPUX86State *env = &cpu->env;
1589 char *value;
1590
1591 value = g_malloc(CPUID_VENDOR_SZ + 1);
1592 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1593 env->cpuid_vendor3);
1594 return value;
1595 }
1596
1597 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1598 Error **errp)
1599 {
1600 X86CPU *cpu = X86_CPU(obj);
1601 CPUX86State *env = &cpu->env;
1602 int i;
1603
1604 if (strlen(value) != CPUID_VENDOR_SZ) {
1605 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1606 "vendor", value);
1607 return;
1608 }
1609
1610 env->cpuid_vendor1 = 0;
1611 env->cpuid_vendor2 = 0;
1612 env->cpuid_vendor3 = 0;
1613 for (i = 0; i < 4; i++) {
1614 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1615 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1616 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1617 }
1618 }
1619
1620 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1621 {
1622 X86CPU *cpu = X86_CPU(obj);
1623 CPUX86State *env = &cpu->env;
1624 char *value;
1625 int i;
1626
1627 value = g_malloc(48 + 1);
1628 for (i = 0; i < 48; i++) {
1629 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1630 }
1631 value[48] = '\0';
1632 return value;
1633 }
1634
1635 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1636 Error **errp)
1637 {
1638 X86CPU *cpu = X86_CPU(obj);
1639 CPUX86State *env = &cpu->env;
1640 int c, len, i;
1641
1642 if (model_id == NULL) {
1643 model_id = "";
1644 }
1645 len = strlen(model_id);
1646 memset(env->cpuid_model, 0, 48);
1647 for (i = 0; i < 48; i++) {
1648 if (i >= len) {
1649 c = '\0';
1650 } else {
1651 c = (uint8_t)model_id[i];
1652 }
1653 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1654 }
1655 }
1656
1657 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1658 const char *name, Error **errp)
1659 {
1660 X86CPU *cpu = X86_CPU(obj);
1661 int64_t value;
1662
1663 value = cpu->env.tsc_khz * 1000;
1664 visit_type_int(v, &value, name, errp);
1665 }
1666
1667 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1668 const char *name, Error **errp)
1669 {
1670 X86CPU *cpu = X86_CPU(obj);
1671 const int64_t min = 0;
1672 const int64_t max = INT64_MAX;
1673 Error *local_err = NULL;
1674 int64_t value;
1675
1676 visit_type_int(v, &value, name, &local_err);
1677 if (local_err) {
1678 error_propagate(errp, local_err);
1679 return;
1680 }
1681 if (value < min || value > max) {
1682 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1683 name ? name : "null", value, min, max);
1684 return;
1685 }
1686
1687 cpu->env.tsc_khz = value / 1000;
1688 }
1689
1690 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1691 const char *name, Error **errp)
1692 {
1693 X86CPU *cpu = X86_CPU(obj);
1694 int64_t value = cpu->env.cpuid_apic_id;
1695
1696 visit_type_int(v, &value, name, errp);
1697 }
1698
1699 static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
1700 const char *name, Error **errp)
1701 {
1702 X86CPU *cpu = X86_CPU(obj);
1703 DeviceState *dev = DEVICE(obj);
1704 const int64_t min = 0;
1705 const int64_t max = UINT32_MAX;
1706 Error *error = NULL;
1707 int64_t value;
1708
1709 if (dev->realized) {
1710 error_setg(errp, "Attempt to set property '%s' on '%s' after "
1711 "it was realized", name, object_get_typename(obj));
1712 return;
1713 }
1714
1715 visit_type_int(v, &value, name, &error);
1716 if (error) {
1717 error_propagate(errp, error);
1718 return;
1719 }
1720 if (value < min || value > max) {
1721 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1722 " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
1723 object_get_typename(obj), name, value, min, max);
1724 return;
1725 }
1726
1727 if ((value != cpu->env.cpuid_apic_id) && cpu_exists(value)) {
1728 error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
1729 return;
1730 }
1731 cpu->env.cpuid_apic_id = value;
1732 }
1733
1734 /* Generic getter for "feature-words" and "filtered-features" properties */
1735 static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
1736 const char *name, Error **errp)
1737 {
1738 uint32_t *array = (uint32_t *)opaque;
1739 FeatureWord w;
1740 Error *err = NULL;
1741 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
1742 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
1743 X86CPUFeatureWordInfoList *list = NULL;
1744
1745 for (w = 0; w < FEATURE_WORDS; w++) {
1746 FeatureWordInfo *wi = &feature_word_info[w];
1747 X86CPUFeatureWordInfo *qwi = &word_infos[w];
1748 qwi->cpuid_input_eax = wi->cpuid_eax;
1749 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
1750 qwi->cpuid_input_ecx = wi->cpuid_ecx;
1751 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
1752 qwi->features = array[w];
1753
1754 /* List will be in reverse order, but order shouldn't matter */
1755 list_entries[w].next = list;
1756 list_entries[w].value = &word_infos[w];
1757 list = &list_entries[w];
1758 }
1759
1760 visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
1761 error_propagate(errp, err);
1762 }
1763
1764 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1765 const char *name, Error **errp)
1766 {
1767 X86CPU *cpu = X86_CPU(obj);
1768 int64_t value = cpu->hyperv_spinlock_attempts;
1769
1770 visit_type_int(v, &value, name, errp);
1771 }
1772
1773 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1774 const char *name, Error **errp)
1775 {
1776 const int64_t min = 0xFFF;
1777 const int64_t max = UINT_MAX;
1778 X86CPU *cpu = X86_CPU(obj);
1779 Error *err = NULL;
1780 int64_t value;
1781
1782 visit_type_int(v, &value, name, &err);
1783 if (err) {
1784 error_propagate(errp, err);
1785 return;
1786 }
1787
1788 if (value < min || value > max) {
1789 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1790 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1791 object_get_typename(obj), name ? name : "null",
1792 value, min, max);
1793 return;
1794 }
1795 cpu->hyperv_spinlock_attempts = value;
1796 }
1797
1798 static PropertyInfo qdev_prop_spinlocks = {
1799 .name = "int",
1800 .get = x86_get_hv_spinlocks,
1801 .set = x86_set_hv_spinlocks,
1802 };
1803
1804 /* Convert all '_' in a feature string option name to '-', to make the
1805 * feature name conform to the QOM property naming rule, which uses '-' instead of '_'.
1806 */
1807 static inline void feat2prop(char *s)
1808 {
1809 while ((s = strchr(s, '_'))) {
1810 *s = '-';
1811 }
1812 }
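/* Illustrative example: a command-line key such as "hv_spinlocks" or
 * "tsc_freq" is rewritten to "hv-spinlocks"/"tsc-freq", so it matches the
 * QOM-style spellings checked in x86_cpu_parse_featurestr() below. */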
1813
1814 /* Parse a "+feature,-feature,feature=foo" CPU feature string.
1815 */
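/* Usage sketch (illustrative, not exhaustive): a feature string such as
 *   "+avx,-sse3,xlevel=0x80000008,tsc_freq=2000000000,hv_spinlocks=0x1fff"
 * adds AVX, removes SSE3, and forwards each key=value pair to the matching
 * QOM property via object_property_parse(). */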
1816 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1817 Error **errp)
1818 {
1819 X86CPU *cpu = X86_CPU(cs);
1820 char *featurestr; /* Single "key=value" string being parsed */
1821 FeatureWord w;
1822 /* Features to be added */
1823 FeatureWordArray plus_features = { 0 };
1824 /* Features to be removed */
1825 FeatureWordArray minus_features = { 0 };
1826 uint32_t numvalue;
1827 CPUX86State *env = &cpu->env;
1828 Error *local_err = NULL;
1829
1830 featurestr = features ? strtok(features, ",") : NULL;
1831
1832 while (featurestr) {
1833 char *val;
1834 if (featurestr[0] == '+') {
1835 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1836 } else if (featurestr[0] == '-') {
1837 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1838 } else if ((val = strchr(featurestr, '='))) {
1839 *val = 0; val++;
1840 feat2prop(featurestr);
1841 if (!strcmp(featurestr, "xlevel")) {
1842 char *err;
1843 char num[32];
1844
1845 numvalue = strtoul(val, &err, 0);
1846 if (!*val || *err) {
1847 error_setg(errp, "bad numerical value %s", val);
1848 return;
1849 }
1850 if (numvalue < 0x80000000) {
1851 error_report("xlevel value shall always be >= 0x80000000"
1852 ", fixup will be removed in future versions");
1853 numvalue += 0x80000000;
1854 }
1855 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1856 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1857 } else if (!strcmp(featurestr, "tsc-freq")) {
1858 int64_t tsc_freq;
1859 char *err;
1860 char num[32];
1861
1862 tsc_freq = strtosz_suffix_unit(val, &err,
1863 STRTOSZ_DEFSUFFIX_B, 1000);
1864 if (tsc_freq < 0 || *err) {
1865 error_setg(errp, "bad numerical value %s", val);
1866 return;
1867 }
1868 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1869 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1870 &local_err);
1871 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1872 char *err;
1873 const int min = 0xFFF;
1874 char num[32];
1875 numvalue = strtoul(val, &err, 0);
1876 if (!*val || *err) {
1877 error_setg(errp, "bad numerical value %s", val);
1878 return;
1879 }
1880 if (numvalue < min) {
1881 error_report("hv-spinlocks value shall always be >= 0x%x"
1882 ", fixup will be removed in future versions",
1883 min);
1884 numvalue = min;
1885 }
1886 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1887 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1888 } else {
1889 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1890 }
1891 } else {
1892 feat2prop(featurestr);
1893 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1894 }
1895 if (local_err) {
1896 error_propagate(errp, local_err);
1897 return;
1898 }
1899 featurestr = strtok(NULL, ",");
1900 }
1901
1902 if (cpu->host_features) {
1903 for (w = 0; w < FEATURE_WORDS; w++) {
1904 env->features[w] =
1905 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1906 }
1907 }
1908
1909 for (w = 0; w < FEATURE_WORDS; w++) {
1910 env->features[w] |= plus_features[w];
1911 env->features[w] &= ~minus_features[w];
1912 }
1913 }
1914
1915 /* Generate a composite string into buf of all CPUID feature names in
1916 * featureset selected by fbits. Indicate truncation at bufsize in the
1917 * event of overflow. If flags is non-zero, suppress names undefined in featureset.
1918 */
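/* Illustrative example: with flags non-zero and fbits == 0x5, a featureset
 * of { "fpu", NULL, "de", ... } produces "de fpu" in buf -- names are
 * emitted from bit 31 down to bit 0, NULL names are skipped when flags is
 * set, and printed as "[bit]" placeholders when flags is zero. */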
1919 static void listflags(char *buf, int bufsize, uint32_t fbits,
1920 const char **featureset, uint32_t flags)
1921 {
1922 const char **p = &featureset[31];
1923 char *q, *b, bit;
1924 int nc;
1925
1926 b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
1927 *buf = '\0';
1928 for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
1929 if (fbits & 1 << bit && (*p || !flags)) {
1930 if (*p)
1931 nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
1932 else
1933 nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
1934 if (bufsize <= nc) {
1935 if (b) {
1936 memcpy(b, "...", sizeof("..."));
1937 }
1938 return;
1939 }
1940 q += nc;
1941 bufsize -= nc;
1942 }
1943 }
1944
1945 /* Print the list of known CPU models and recognized CPUID flags. */
1946 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
1947 {
1948 X86CPUDefinition *def;
1949 char buf[256];
1950 int i;
1951
1952 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1953 def = &builtin_x86_defs[i];
1954 snprintf(buf, sizeof(buf), "%s", def->name);
1955 (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
1956 }
1957 #ifdef CONFIG_KVM
1958 (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
1959 "KVM processor with all supported host features "
1960 "(only available in KVM mode)");
1961 #endif
1962
1963 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
1964 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
1965 FeatureWordInfo *fw = &feature_word_info[i];
1966
1967 listflags(buf, sizeof(buf), (uint32_t)~0, fw->feat_names, 1);
1968 (*cpu_fprintf)(f, " %s\n", buf);
1969 }
1970 }
1971
1972 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1973 {
1974 CpuDefinitionInfoList *cpu_list = NULL;
1975 X86CPUDefinition *def;
1976 int i;
1977
1978 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1979 CpuDefinitionInfoList *entry;
1980 CpuDefinitionInfo *info;
1981
1982 def = &builtin_x86_defs[i];
1983 info = g_malloc0(sizeof(*info));
1984 info->name = g_strdup(def->name);
1985
1986 entry = g_malloc0(sizeof(*entry));
1987 entry->value = info;
1988 entry->next = cpu_list;
1989 cpu_list = entry;
1990 }
1991
1992 return cpu_list;
1993 }
1994
1995 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1996 bool migratable_only)
1997 {
1998 FeatureWordInfo *wi = &feature_word_info[w];
1999 uint32_t r;
2000
2001 if (kvm_enabled()) {
2002 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
2003 wi->cpuid_ecx,
2004 wi->cpuid_reg);
2005 } else if (tcg_enabled()) {
2006 r = wi->tcg_features;
2007 } else {
2008 return ~0;
2009 }
2010 if (migratable_only) {
2011 r &= x86_cpu_get_migratable_flags(w);
2012 }
2013 return r;
2014 }
2015
2016 /*
2017 * Filters CPU feature words based on host availability of each feature.
2018 *
2019 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
2020 */
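/* Illustrative example: if the configuration requests a feature bit that
 * x86_cpu_get_supported_feature_word() does not report for the current
 * accelerator, the bit is cleared from env->features[] and recorded in
 * filtered_features[], which the "filtered-features" property exposes. */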
2021 static int x86_cpu_filter_features(X86CPU *cpu)
2022 {
2023 CPUX86State *env = &cpu->env;
2024 FeatureWord w;
2025 int rv = 0;
2026
2027 for (w = 0; w < FEATURE_WORDS; w++) {
2028 uint32_t host_feat =
2029 x86_cpu_get_supported_feature_word(w, cpu->migratable);
2030 uint32_t requested_features = env->features[w];
2031 env->features[w] &= host_feat;
2032 cpu->filtered_features[w] = requested_features & ~env->features[w];
2033 if (cpu->filtered_features[w]) {
2034 if (cpu->check_cpuid || cpu->enforce_cpuid) {
2035 report_unavailable_features(w, cpu->filtered_features[w]);
2036 }
2037 rv = 1;
2038 }
2039 }
2040
2041 return rv;
2042 }
2043
2044 /* Load data from X86CPUDefinition
2045 */
2046 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2047 {
2048 CPUX86State *env = &cpu->env;
2049 const char *vendor;
2050 char host_vendor[CPUID_VENDOR_SZ + 1];
2051 FeatureWord w;
2052
2053 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2054 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2055 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2056 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2057 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2058 env->cpuid_xlevel2 = def->xlevel2;
2059 cpu->cache_info_passthrough = def->cache_info_passthrough;
2060 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2061 for (w = 0; w < FEATURE_WORDS; w++) {
2062 env->features[w] = def->features[w];
2063 }
2064
2065 /* Special cases not set in the X86CPUDefinition structs: */
2066 if (kvm_enabled()) {
2067 FeatureWord w;
2068 for (w = 0; w < FEATURE_WORDS; w++) {
2069 env->features[w] |= kvm_default_features[w];
2070 env->features[w] &= ~kvm_default_unset_features[w];
2071 }
2072 }
2073
2074 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2075
2076 /* sysenter isn't supported in compatibility mode on AMD, and
2077 * syscall isn't supported in compatibility mode on Intel.
2078 * Normally we advertise the actual CPU vendor, but you can
2079 * override this using the 'vendor' property if you want to use
2080 * KVM's sysenter/syscall emulation in compatibility mode and
2081 * when doing cross-vendor migration.
2082 */
2083 vendor = def->vendor;
2084 if (kvm_enabled()) {
2085 uint32_t ebx = 0, ecx = 0, edx = 0;
2086 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2087 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2088 vendor = host_vendor;
2089 }
2090
2091 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2092
2093 }
2094
2095 X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
2096 Error **errp)
2097 {
2098 X86CPU *cpu = NULL;
2099 X86CPUClass *xcc;
2100 ObjectClass *oc;
2101 gchar **model_pieces;
2102 char *name, *features;
2103 Error *error = NULL;
2104
2105 model_pieces = g_strsplit(cpu_model, ",", 2);
2106 if (!model_pieces[0]) {
2107 error_setg(&error, "Invalid/empty CPU model name");
2108 goto out;
2109 }
2110 name = model_pieces[0];
2111 features = model_pieces[1];
2112
2113 oc = x86_cpu_class_by_name(name);
2114 if (oc == NULL) {
2115 error_setg(&error, "Unable to find CPU definition: %s", name);
2116 goto out;
2117 }
2118 xcc = X86_CPU_CLASS(oc);
2119
2120 if (xcc->kvm_required && !kvm_enabled()) {
2121 error_setg(&error, "CPU model '%s' requires KVM", name);
2122 goto out;
2123 }
2124
2125 cpu = X86_CPU(object_new(object_class_get_name(oc)));
2126
2127 #ifndef CONFIG_USER_ONLY
2128 if (icc_bridge == NULL) {
2129 error_setg(&error, "Invalid icc-bridge value");
2130 goto out;
2131 }
2132 qdev_set_parent_bus(DEVICE(cpu), qdev_get_child_bus(icc_bridge, "icc"));
2133 object_unref(OBJECT(cpu));
2134 #endif
2135
2136 x86_cpu_parse_featurestr(CPU(cpu), features, &error);
2137 if (error) {
2138 goto out;
2139 }
2140
2141 out:
2142 if (error != NULL) {
2143 error_propagate(errp, error);
2144 if (cpu) {
2145 object_unref(OBJECT(cpu));
2146 cpu = NULL;
2147 }
2148 }
2149 g_strfreev(model_pieces);
2150 return cpu;
2151 }
2152
2153 X86CPU *cpu_x86_init(const char *cpu_model)
2154 {
2155 Error *error = NULL;
2156 X86CPU *cpu;
2157
2158 cpu = cpu_x86_create(cpu_model, NULL, &error);
2159 if (error) {
2160 goto out;
2161 }
2162
2163 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2164
2165 out:
2166 if (error) {
2167 error_report("%s", error_get_pretty(error));
2168 error_free(error);
2169 if (cpu != NULL) {
2170 object_unref(OBJECT(cpu));
2171 cpu = NULL;
2172 }
2173 }
2174 return cpu;
2175 }
2176
2177 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2178 {
2179 X86CPUDefinition *cpudef = data;
2180 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2181
2182 xcc->cpu_def = cpudef;
2183 }
2184
2185 static void x86_register_cpudef_type(X86CPUDefinition *def)
2186 {
2187 char *typename = x86_cpu_type_name(def->name);
2188 TypeInfo ti = {
2189 .name = typename,
2190 .parent = TYPE_X86_CPU,
2191 .class_init = x86_cpu_cpudef_class_init,
2192 .class_data = def,
2193 };
2194
2195 type_register(&ti);
2196 g_free(typename);
2197 }
2198
2199 #if !defined(CONFIG_USER_ONLY)
2200
2201 void cpu_clear_apic_feature(CPUX86State *env)
2202 {
2203 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
2204 }
2205
2206 #endif /* !CONFIG_USER_ONLY */
2207
2208 /* Initialize list of CPU models, filling some non-static fields if necessary
2209 */
2210 void x86_cpudef_setup(void)
2211 {
2212 int i, j;
2213 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2214
2215 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2216 X86CPUDefinition *def = &builtin_x86_defs[i];
2217
2218 /* Look for specific "cpudef" models that
2219 * have the QEMU version in .model_id */
2220 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2221 if (strcmp(model_with_versions[j], def->name) == 0) {
2222 pstrcpy(def->model_id, sizeof(def->model_id),
2223 "QEMU Virtual CPU version ");
2224 pstrcat(def->model_id, sizeof(def->model_id),
2225 qemu_get_version());
2226 break;
2227 }
2228 }
2229 }
2230 }
2231
2232 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
2233 uint32_t *ecx, uint32_t *edx)
2234 {
2235 *ebx = env->cpuid_vendor1;
2236 *edx = env->cpuid_vendor2;
2237 *ecx = env->cpuid_vendor3;
2238 }
2239
2240 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2241 uint32_t *eax, uint32_t *ebx,
2242 uint32_t *ecx, uint32_t *edx)
2243 {
2244 X86CPU *cpu = x86_env_get_cpu(env);
2245 CPUState *cs = CPU(cpu);
2246
2247 /* test if maximum index reached */
2248 if (index & 0x80000000) {
2249 if (index > env->cpuid_xlevel) {
2250 if (env->cpuid_xlevel2 > 0) {
2251 /* Handle the Centaur's CPUID instruction. */
2252 if (index > env->cpuid_xlevel2) {
2253 index = env->cpuid_xlevel2;
2254 } else if (index < 0xC0000000) {
2255 index = env->cpuid_xlevel;
2256 }
2257 } else {
2258 /* Intel documentation states that invalid EAX input will
2259 * return the same information as EAX=cpuid_level
2260 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2261 */
2262 index = env->cpuid_level;
2263 }
2264 }
2265 } else {
2266 if (index > env->cpuid_level)
2267 index = env->cpuid_level;
2268 }
2269
2270 switch(index) {
2271 case 0:
2272 *eax = env->cpuid_level;
2273 get_cpuid_vendor(env, ebx, ecx, edx);
2274 break;
2275 case 1:
2276 *eax = env->cpuid_version;
2277 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH line size in quadwords (8 * 8 = 64 bytes); Linux reads this field. */
2278 *ecx = env->features[FEAT_1_ECX];
2279 *edx = env->features[FEAT_1_EDX];
2280 if (cs->nr_cores * cs->nr_threads > 1) {
2281 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2282 *edx |= 1 << 28; /* HTT bit */
2283 }
2284 break;
2285 case 2:
2286 /* cache info: needed for Pentium Pro compatibility */
2287 if (cpu->cache_info_passthrough) {
2288 host_cpuid(index, 0, eax, ebx, ecx, edx);
2289 break;
2290 }
2291 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2292 *ebx = 0;
2293 *ecx = 0;
2294 *edx = (L1D_DESCRIPTOR << 16) | \
2295 (L1I_DESCRIPTOR << 8) | \
2296 (L2_DESCRIPTOR);
2297 break;
2298 case 4:
2299 /* cache info: needed for Core compatibility */
2300 if (cpu->cache_info_passthrough) {
2301 host_cpuid(index, count, eax, ebx, ecx, edx);
2302 *eax &= ~0xFC000000;
2303 } else {
2304 *eax = 0;
2305 switch (count) {
2306 case 0: /* L1 dcache info */
2307 *eax |= CPUID_4_TYPE_DCACHE | \
2308 CPUID_4_LEVEL(1) | \
2309 CPUID_4_SELF_INIT_LEVEL;
2310 *ebx = (L1D_LINE_SIZE - 1) | \
2311 ((L1D_PARTITIONS - 1) << 12) | \
2312 ((L1D_ASSOCIATIVITY - 1) << 22);
2313 *ecx = L1D_SETS - 1;
2314 *edx = CPUID_4_NO_INVD_SHARING;
2315 break;
2316 case 1: /* L1 icache info */
2317 *eax |= CPUID_4_TYPE_ICACHE | \
2318 CPUID_4_LEVEL(1) | \
2319 CPUID_4_SELF_INIT_LEVEL;
2320 *ebx = (L1I_LINE_SIZE - 1) | \
2321 ((L1I_PARTITIONS - 1) << 12) | \
2322 ((L1I_ASSOCIATIVITY - 1) << 22);
2323 *ecx = L1I_SETS - 1;
2324 *edx = CPUID_4_NO_INVD_SHARING;
2325 break;
2326 case 2: /* L2 cache info */
2327 *eax |= CPUID_4_TYPE_UNIFIED | \
2328 CPUID_4_LEVEL(2) | \
2329 CPUID_4_SELF_INIT_LEVEL;
2330 if (cs->nr_threads > 1) {
2331 *eax |= (cs->nr_threads - 1) << 14;
2332 }
2333 *ebx = (L2_LINE_SIZE - 1) | \
2334 ((L2_PARTITIONS - 1) << 12) | \
2335 ((L2_ASSOCIATIVITY - 1) << 22);
2336 *ecx = L2_SETS - 1;
2337 *edx = CPUID_4_NO_INVD_SHARING;
2338 break;
2339 default: /* end of info */
2340 *eax = 0;
2341 *ebx = 0;
2342 *ecx = 0;
2343 *edx = 0;
2344 break;
2345 }
2346 }
2347
2348 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2349 if ((*eax & 31) && cs->nr_cores > 1) {
2350 *eax |= (cs->nr_cores - 1) << 26;
2351 }
2352 break;
2353 case 5:
2354 /* mwait info: needed for Core compatibility */
2355 *eax = 0; /* Smallest monitor-line size in bytes */
2356 *ebx = 0; /* Largest monitor-line size in bytes */
2357 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2358 *edx = 0;
2359 break;
2360 case 6:
2361 /* Thermal and Power Leaf */
2362 *eax = 0;
2363 *ebx = 0;
2364 *ecx = 0;
2365 *edx = 0;
2366 break;
2367 case 7:
2368 /* Structured Extended Feature Flags Enumeration Leaf */
2369 if (count == 0) {
2370 *eax = 0; /* Maximum ECX value for sub-leaves */
2371 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2372 *ecx = 0; /* Reserved */
2373 *edx = 0; /* Reserved */
2374 } else {
2375 *eax = 0;
2376 *ebx = 0;
2377 *ecx = 0;
2378 *edx = 0;
2379 }
2380 break;
2381 case 9:
2382 /* Direct Cache Access Information Leaf */
2383 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2384 *ebx = 0;
2385 *ecx = 0;
2386 *edx = 0;
2387 break;
2388 case 0xA:
2389 /* Architectural Performance Monitoring Leaf */
2390 if (kvm_enabled() && cpu->enable_pmu) {
2391 KVMState *s = cs->kvm_state;
2392
2393 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2394 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2395 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2396 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2397 } else {
2398 *eax = 0;
2399 *ebx = 0;
2400 *ecx = 0;
2401 *edx = 0;
2402 }
2403 break;
2404 case 0xD: {
2405 KVMState *s = cs->kvm_state;
2406 uint64_t kvm_mask;
2407 int i;
2408
2409 /* Processor Extended State */
2410 *eax = 0;
2411 *ebx = 0;
2412 *ecx = 0;
2413 *edx = 0;
2414 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2415 break;
2416 }
2417 kvm_mask =
2418 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2419 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2420
2421 if (count == 0) {
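/* Note: 0x240 (576) covers the 512-byte legacy FPU/SSE region plus the
 * 64-byte XSAVE header; the loop below grows it to cover every enabled
 * extended save area. */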
2422 *ecx = 0x240;
2423 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2424 const ExtSaveArea *esa = &ext_save_areas[i];
2425 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2426 (kvm_mask & (1 << i)) != 0) {
2427 if (i < 32) {
2428 *eax |= 1 << i;
2429 } else {
2430 *edx |= 1 << (i - 32);
2431 }
2432 *ecx = MAX(*ecx, esa->offset + esa->size);
2433 }
2434 }
2435 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2436 *ebx = *ecx;
2437 } else if (count == 1) {
2438 *eax = env->features[FEAT_XSAVE];
2439 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2440 const ExtSaveArea *esa = &ext_save_areas[count];
2441 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2442 (kvm_mask & (1 << count)) != 0) {
2443 *eax = esa->size;
2444 *ebx = esa->offset;
2445 }
2446 }
2447 break;
2448 }
2449 case 0x80000000:
2450 *eax = env->cpuid_xlevel;
2451 *ebx = env->cpuid_vendor1;
2452 *edx = env->cpuid_vendor2;
2453 *ecx = env->cpuid_vendor3;
2454 break;
2455 case 0x80000001:
2456 *eax = env->cpuid_version;
2457 *ebx = 0;
2458 *ecx = env->features[FEAT_8000_0001_ECX];
2459 *edx = env->features[FEAT_8000_0001_EDX];
2460
2461 /* The Linux kernel checks for the CMPLegacy bit and
2462 * discards multiple thread information if it is set.
2463 * So don't set it here for Intel to make Linux guests happy.
2464 */
2465 if (cs->nr_cores * cs->nr_threads > 1) {
2466 uint32_t tebx, tecx, tedx;
2467 get_cpuid_vendor(env, &tebx, &tecx, &tedx);
2468 if (tebx != CPUID_VENDOR_INTEL_1 ||
2469 tedx != CPUID_VENDOR_INTEL_2 ||
2470 tecx != CPUID_VENDOR_INTEL_3) {
2471 *ecx |= 1 << 1; /* CmpLegacy bit */
2472 }
2473 }
2474 break;
2475 case 0x80000002:
2476 case 0x80000003:
2477 case 0x80000004:
2478 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2479 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2480 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2481 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2482 break;
2483 case 0x80000005:
2484 /* cache info (L1 cache) */
2485 if (cpu->cache_info_passthrough) {
2486 host_cpuid(index, 0, eax, ebx, ecx, edx);
2487 break;
2488 }
2489 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2490 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2491 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2492 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2493 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2494 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2495 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2496 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2497 break;
2498 case 0x80000006:
2499 /* cache info (L2 cache) */
2500 if (cpu->cache_info_passthrough) {
2501 host_cpuid(index, 0, eax, ebx, ecx, edx);
2502 break;
2503 }
2504 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2505 (L2_DTLB_2M_ENTRIES << 16) | \
2506 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2507 (L2_ITLB_2M_ENTRIES);
2508 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2509 (L2_DTLB_4K_ENTRIES << 16) | \
2510 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2511 (L2_ITLB_4K_ENTRIES);
2512 *ecx = (L2_SIZE_KB_AMD << 16) | \
2513 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2514 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2515 *edx = ((L3_SIZE_KB/512) << 18) | \
2516 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2517 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2518 break;
2519 case 0x80000007:
2520 *eax = 0;
2521 *ebx = 0;
2522 *ecx = 0;
2523 *edx = env->features[FEAT_8000_0007_EDX];
2524 break;
2525 case 0x80000008:
2526 /* virtual & phys address size in low 2 bytes. */
2527 /* XXX: This value must match the one used in the MMU code. */
2528 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2529 /* 64 bit processor */
2530 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2531 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2532 } else {
2533 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2534 *eax = 0x00000024; /* 36 bits physical */
2535 } else {
2536 *eax = 0x00000020; /* 32 bits physical */
2537 }
2538 }
2539 *ebx = 0;
2540 *ecx = 0;
2541 *edx = 0;
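/* Note (following the AMD definition, assumed here): ECX[7:0] of leaf
 * 0x80000008 is the core count minus one; QEMU fills it with the total
 * number of logical CPUs per package minus one. */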
2542 if (cs->nr_cores * cs->nr_threads > 1) {
2543 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2544 }
2545 break;
2546 case 0x8000000A:
2547 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2548 *eax = 0x00000001; /* SVM Revision */
2549 *ebx = 0x00000010; /* nr of ASIDs */
2550 *ecx = 0;
2551 *edx = env->features[FEAT_SVM]; /* optional features */
2552 } else {
2553 *eax = 0;
2554 *ebx = 0;
2555 *ecx = 0;
2556 *edx = 0;
2557 }
2558 break;
2559 case 0xC0000000:
2560 *eax = env->cpuid_xlevel2;
2561 *ebx = 0;
2562 *ecx = 0;
2563 *edx = 0;
2564 break;
2565 case 0xC0000001:
2566 /* Support for VIA CPU's CPUID instruction */
2567 *eax = env->cpuid_version;
2568 *ebx = 0;
2569 *ecx = 0;
2570 *edx = env->features[FEAT_C000_0001_EDX];
2571 break;
2572 case 0xC0000002:
2573 case 0xC0000003:
2574 case 0xC0000004:
2575 /* Reserved for the future, and now filled with zero */
2576 *eax = 0;
2577 *ebx = 0;
2578 *ecx = 0;
2579 *edx = 0;
2580 break;
2581 default:
2582 /* reserved values: zero */
2583 *eax = 0;
2584 *ebx = 0;
2585 *ecx = 0;
2586 *edx = 0;
2587 break;
2588 }
2589 }
2590
2591 /* CPUClass::reset() */
2592 static void x86_cpu_reset(CPUState *s)
2593 {
2594 X86CPU *cpu = X86_CPU(s);
2595 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
2596 CPUX86State *env = &cpu->env;
2597 int i;
2598
2599 xcc->parent_reset(s);
2600
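/* Note: only the fields laid out before cpuid_level in CPUX86State are
 * cleared; cpuid_level and everything after it (the CPUID configuration,
 * feature words, etc.) survive the reset. */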
2601 memset(env, 0, offsetof(CPUX86State, cpuid_level));
2602
2603 tlb_flush(s, 1);
2604
2605 env->old_exception = -1;
2606
2607 /* init to reset state */
2608
2609 #ifdef CONFIG_SOFTMMU
2610 env->hflags |= HF_SOFTMMU_MASK;
2611 #endif
2612 env->hflags2 |= HF2_GIF_MASK;
2613
2614 cpu_x86_update_cr0(env, 0x60000010);
2615 env->a20_mask = ~0x0;
2616 env->smbase = 0x30000;
2617
2618 env->idt.limit = 0xffff;
2619 env->gdt.limit = 0xffff;
2620 env->ldt.limit = 0xffff;
2621 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
2622 env->tr.limit = 0xffff;
2623 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
2624
2625 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
2626 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
2627 DESC_R_MASK | DESC_A_MASK);
2628 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
2629 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2630 DESC_A_MASK);
2631 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
2632 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2633 DESC_A_MASK);
2634 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
2635 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2636 DESC_A_MASK);
2637 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
2638 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2639 DESC_A_MASK);
2640 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
2641 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
2642 DESC_A_MASK);
2643
2644 env->eip = 0xfff0;
2645 env->regs[R_EDX] = env->cpuid_version;
2646
2647 env->eflags = 0x2;
2648
2649 /* FPU init */
2650 for (i = 0; i < 8; i++) {
2651 env->fptags[i] = 1;
2652 }
2653 cpu_set_fpuc(env, 0x37f);
2654
2655 env->mxcsr = 0x1f80;
2656 env->xstate_bv = XSTATE_FP | XSTATE_SSE;
2657
2658 env->pat = 0x0007040600070406ULL;
2659 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
2660
2661 memset(env->dr, 0, sizeof(env->dr));
2662 env->dr[6] = DR6_FIXED_1;
2663 env->dr[7] = DR7_FIXED_1;
2664 cpu_breakpoint_remove_all(s, BP_CPU);
2665 cpu_watchpoint_remove_all(s, BP_CPU);
2666
2667 env->xcr0 = 1;
2668
2669 /*
2670 * SDM 11.11.5 requires:
2671 * - IA32_MTRR_DEF_TYPE MSR.E = 0
2672 * - IA32_MTRR_PHYSMASKn.V = 0
2673 * All other bits are undefined. For simplification, zero it all.
2674 */
2675 env->mtrr_deftype = 0;
2676 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
2677 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
2678
2679 #if !defined(CONFIG_USER_ONLY)
2680 /* We hard-wire the BSP to the first CPU. */
2681 if (s->cpu_index == 0) {
2682 apic_designate_bsp(cpu->apic_state);
2683 }
2684
2685 s->halted = !cpu_is_bsp(cpu);
2686
2687 if (kvm_enabled()) {
2688 kvm_arch_reset_vcpu(cpu);
2689 }
2690 #endif
2691 }
2692
2693 #ifndef CONFIG_USER_ONLY
2694 bool cpu_is_bsp(X86CPU *cpu)
2695 {
2696 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
2697 }
2698
2699 /* TODO: remove me when reset over QOM tree is implemented */
2700 static void x86_cpu_machine_reset_cb(void *opaque)
2701 {
2702 X86CPU *cpu = opaque;
2703 cpu_reset(CPU(cpu));
2704 }
2705 #endif
2706
2707 static void mce_init(X86CPU *cpu)
2708 {
2709 CPUX86State *cenv = &cpu->env;
2710 unsigned int bank;
2711
2712 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2713 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2714 (CPUID_MCE | CPUID_MCA)) {
2715 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2716 cenv->mcg_ctl = ~(uint64_t)0;
2717 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2718 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2719 }
2720 }
2721 }
2722
2723 #ifndef CONFIG_USER_ONLY
2724 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
2725 {
2726 CPUX86State *env = &cpu->env;
2727 DeviceState *dev = DEVICE(cpu);
2728 APICCommonState *apic;
2729 const char *apic_type = "apic";
2730
2731 if (kvm_irqchip_in_kernel()) {
2732 apic_type = "kvm-apic";
2733 } else if (xen_enabled()) {
2734 apic_type = "xen-apic";
2735 }
2736
2737 cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
2738 if (cpu->apic_state == NULL) {
2739 error_setg(errp, "APIC device '%s' could not be created", apic_type);
2740 return;
2741 }
2742
2743 object_property_add_child(OBJECT(cpu), "apic",
2744 OBJECT(cpu->apic_state), NULL);
2745 qdev_prop_set_uint8(cpu->apic_state, "id", env->cpuid_apic_id);
2746 /* TODO: convert to link<> */
2747 apic = APIC_COMMON(cpu->apic_state);
2748 apic->cpu = cpu;
2749 }
2750
2751 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2752 {
2753 if (cpu->apic_state == NULL) {
2754 return;
2755 }
2756
2757 if (qdev_init(cpu->apic_state)) {
2758 error_setg(errp, "APIC device '%s' could not be initialized",
2759 object_get_typename(OBJECT(cpu->apic_state)));
2760 return;
2761 }
2762 }
2763 #else
2764 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2765 {
2766 }
2767 #endif
2768
2769
2770 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
2771 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
2772 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
2773 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
2774 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
2775 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
2776 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
2777 {
2778 CPUState *cs = CPU(dev);
2779 X86CPU *cpu = X86_CPU(dev);
2780 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
2781 CPUX86State *env = &cpu->env;
2782 Error *local_err = NULL;
2783 static bool ht_warned;
2784
2785 if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
2786 env->cpuid_level = 7;
2787 }
2788
2789 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
2790 * CPUID[1].EDX.
2791 */
2792 if (IS_AMD_CPU(env)) {
2793 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
2794 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
2795 & CPUID_EXT2_AMD_ALIASES);
2796 }
2797
2798
2799 if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
2800 error_setg(&local_err,
2801 kvm_enabled() ?
2802 "Host doesn't support requested features" :
2803 "TCG doesn't support requested features");
2804 goto out;
2805 }
2806
2807 #ifndef CONFIG_USER_ONLY
2808 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
2809
2810 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
2811 x86_cpu_apic_create(cpu, &local_err);
2812 if (local_err != NULL) {
2813 goto out;
2814 }
2815 }
2816 #endif
2817
2818 mce_init(cpu);
2819 qemu_init_vcpu(cs);
2820
2821 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
2822 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
2823 * based on inputs (sockets, cores, threads), it is still better to give
2824 * users a warning.
2825 *
2826 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
2827 * cs->nr_threads hasn't been populated yet and the check is incorrect.
2828 */
2829 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
2830 error_report("AMD CPU doesn't support hyperthreading. Please configure"
2831 " -smp options properly.");
2832 ht_warned = true;
2833 }
2834
2835 x86_cpu_apic_realize(cpu, &local_err);
2836 if (local_err != NULL) {
2837 goto out;
2838 }
2839 cpu_reset(cs);
2840
2841 xcc->parent_realize(dev, &local_err);
2842 out:
2843 if (local_err != NULL) {
2844 error_propagate(errp, local_err);
2845 return;
2846 }
2847 }
2848
2849 /* Enables contiguous-apic-ID mode, for compatibility */
2850 static bool compat_apic_id_mode;
2851
2852 void enable_compat_apic_id_mode(void)
2853 {
2854 compat_apic_id_mode = true;
2855 }
2856
2857 /* Calculates initial APIC ID for a specific CPU index
2858 *
2859 * Currently we need to be able to calculate the APIC ID from the CPU index
2860 * alone (without requiring a CPU object), as the QEMU<->SeaBIOS interfaces have
2861 * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of
2862 * all CPUs up to max_cpus.
2863 */
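/* Illustrative example (assuming the usual power-of-two rounding of the
 * topology field widths): with -smp cores=3,threads=1 the core field is two
 * bits wide, so cpu_index 3 maps to APIC ID 4 and IDs are no longer
 * contiguous; enable_compat_apic_id_mode() above preserves the old
 * index == APIC ID scheme for machine types that opt into it. */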
2864 uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index)
2865 {
2866 uint32_t correct_id;
2867 static bool warned;
2868
2869 correct_id = x86_apicid_from_cpu_idx(smp_cores, smp_threads, cpu_index);
2870 if (compat_apic_id_mode) {
2871 if (cpu_index != correct_id && !warned) {
2872 error_report("APIC IDs set in compatibility mode, "
2873 "CPU topology won't match the configuration");
2874 warned = true;
2875 }
2876 return cpu_index;
2877 } else {
2878 return correct_id;
2879 }
2880 }
2881
2882 static void x86_cpu_initfn(Object *obj)
2883 {
2884 CPUState *cs = CPU(obj);
2885 X86CPU *cpu = X86_CPU(obj);
2886 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
2887 CPUX86State *env = &cpu->env;
2888 static int inited;
2889
2890 cs->env_ptr = env;
2891 cpu_exec_init(env);
2892
2893 object_property_add(obj, "family", "int",
2894 x86_cpuid_version_get_family,
2895 x86_cpuid_version_set_family, NULL, NULL, NULL);
2896 object_property_add(obj, "model", "int",
2897 x86_cpuid_version_get_model,
2898 x86_cpuid_version_set_model, NULL, NULL, NULL);
2899 object_property_add(obj, "stepping", "int",
2900 x86_cpuid_version_get_stepping,
2901 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
2902 object_property_add(obj, "level", "int",
2903 x86_cpuid_get_level,
2904 x86_cpuid_set_level, NULL, NULL, NULL);
2905 object_property_add(obj, "xlevel", "int",
2906 x86_cpuid_get_xlevel,
2907 x86_cpuid_set_xlevel, NULL, NULL, NULL);
2908 object_property_add_str(obj, "vendor",
2909 x86_cpuid_get_vendor,
2910 x86_cpuid_set_vendor, NULL);
2911 object_property_add_str(obj, "model-id",
2912 x86_cpuid_get_model_id,
2913 x86_cpuid_set_model_id, NULL);
2914 object_property_add(obj, "tsc-frequency", "int",
2915 x86_cpuid_get_tsc_freq,
2916 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
2917 object_property_add(obj, "apic-id", "int",
2918 x86_cpuid_get_apic_id,
2919 x86_cpuid_set_apic_id, NULL, NULL, NULL);
2920 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
2921 x86_cpu_get_feature_words,
2922 NULL, NULL, (void *)env->features, NULL);
2923 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
2924 x86_cpu_get_feature_words,
2925 NULL, NULL, (void *)cpu->filtered_features, NULL);
2926
2927 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
2928 env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index);
2929
2930 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
2931
2932 /* init various static tables used in TCG mode */
2933 if (tcg_enabled() && !inited) {
2934 inited = 1;
2935 optimize_flags_init();
2936 }
2937 }
2938
2939 static int64_t x86_cpu_get_arch_id(CPUState *cs)
2940 {
2941 X86CPU *cpu = X86_CPU(cs);
2942 CPUX86State *env = &cpu->env;
2943
2944 return env->cpuid_apic_id;
2945 }
2946
2947 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
2948 {
2949 X86CPU *cpu = X86_CPU(cs);
2950
2951 return cpu->env.cr[0] & CR0_PG_MASK;
2952 }
2953
2954 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
2955 {
2956 X86CPU *cpu = X86_CPU(cs);
2957
2958 cpu->env.eip = value;
2959 }
2960
2961 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
2962 {
2963 X86CPU *cpu = X86_CPU(cs);
2964
2965 cpu->env.eip = tb->pc - tb->cs_base;
2966 }
2967
2968 static bool x86_cpu_has_work(CPUState *cs)
2969 {
2970 X86CPU *cpu = X86_CPU(cs);
2971 CPUX86State *env = &cpu->env;
2972
2973 #if !defined(CONFIG_USER_ONLY)
2974 if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
2975 apic_poll_irq(cpu->apic_state);
2976 cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
2977 }
2978 #endif
2979
2980 return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
2981 (env->eflags & IF_MASK)) ||
2982 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
2983 CPU_INTERRUPT_INIT |
2984 CPU_INTERRUPT_SIPI |
2985 CPU_INTERRUPT_MCE));
2986 }
2987
2988 static Property x86_cpu_properties[] = {
2989 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
2990 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
2991 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
2992 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
2993 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
2994 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
2995 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
2996 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
2997 DEFINE_PROP_END_OF_LIST()
2998 };
2999
3000 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
3001 {
3002 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3003 CPUClass *cc = CPU_CLASS(oc);
3004 DeviceClass *dc = DEVICE_CLASS(oc);
3005
3006 xcc->parent_realize = dc->realize;
3007 dc->realize = x86_cpu_realizefn;
3008 dc->bus_type = TYPE_ICC_BUS;
3009 dc->props = x86_cpu_properties;
3010
3011 xcc->parent_reset = cc->reset;
3012 cc->reset = x86_cpu_reset;
3013 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
3014
3015 cc->class_by_name = x86_cpu_class_by_name;
3016 cc->parse_features = x86_cpu_parse_featurestr;
3017 cc->has_work = x86_cpu_has_work;
3018 cc->do_interrupt = x86_cpu_do_interrupt;
3019 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
3020 cc->dump_state = x86_cpu_dump_state;
3021 cc->set_pc = x86_cpu_set_pc;
3022 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
3023 cc->gdb_read_register = x86_cpu_gdb_read_register;
3024 cc->gdb_write_register = x86_cpu_gdb_write_register;
3025 cc->get_arch_id = x86_cpu_get_arch_id;
3026 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
3027 #ifdef CONFIG_USER_ONLY
3028 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
3029 #else
3030 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
3031 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
3032 cc->write_elf64_note = x86_cpu_write_elf64_note;
3033 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
3034 cc->write_elf32_note = x86_cpu_write_elf32_note;
3035 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
3036 cc->vmsd = &vmstate_x86_cpu;
3037 #endif
3038 cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
3039 #ifndef CONFIG_USER_ONLY
3040 cc->debug_excp_handler = breakpoint_handler;
3041 #endif
3042 cc->cpu_exec_enter = x86_cpu_exec_enter;
3043 cc->cpu_exec_exit = x86_cpu_exec_exit;
3044 }
3045
3046 static const TypeInfo x86_cpu_type_info = {
3047 .name = TYPE_X86_CPU,
3048 .parent = TYPE_CPU,
3049 .instance_size = sizeof(X86CPU),
3050 .instance_init = x86_cpu_initfn,
3051 .abstract = true,
3052 .class_size = sizeof(X86CPUClass),
3053 .class_init = x86_cpu_common_class_init,
3054 };
3055
3056 static void x86_cpu_register_types(void)
3057 {
3058 int i;
3059
3060 type_register_static(&x86_cpu_type_info);
3061 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3062 x86_register_cpudef_type(&builtin_x86_defs[i]);
3063 }
3064 #ifdef CONFIG_KVM
3065 type_register_static(&host_x86_cpu_type_info);
3066 #endif
3067 }
3068
3069 type_init(x86_cpu_register_types)