/*
 * target-i386/cpu.c — i386 CPUID helper functions (QEMU)
 */
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19 #include <stdlib.h>
20 #include <stdio.h>
21 #include <string.h>
22 #include <inttypes.h>
23
24 #include "cpu.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/cpus.h"
27 #include "kvm_i386.h"
28 #include "topology.h"
29
30 #include "qemu/option.h"
31 #include "qemu/config-file.h"
32 #include "qapi/qmp/qerror.h"
33
34 #include "qapi-types.h"
35 #include "qapi-visit.h"
36 #include "qapi/visitor.h"
37 #include "sysemu/arch_init.h"
38
39 #include "hw/hw.h"
40 #if defined(CONFIG_KVM)
41 #include <linux/kvm_para.h>
42 #endif
43
44 #include "sysemu/sysemu.h"
45 #include "hw/qdev-properties.h"
46 #include "hw/cpu/icc_bus.h"
47 #ifndef CONFIG_USER_ONLY
48 #include "hw/xen/xen.h"
49 #include "hw/i386/apic_internal.h"
50 #endif
51
52
53 /* Cache topology CPUID constants: */
54
55 /* CPUID Leaf 2 Descriptors */
56
57 #define CPUID_2_L1D_32KB_8WAY_64B 0x2c
58 #define CPUID_2_L1I_32KB_8WAY_64B 0x30
59 #define CPUID_2_L2_2MB_8WAY_64B 0x7d
60
61
62 /* CPUID Leaf 4 constants: */
63
64 /* EAX: */
65 #define CPUID_4_TYPE_DCACHE 1
66 #define CPUID_4_TYPE_ICACHE 2
67 #define CPUID_4_TYPE_UNIFIED 3
68
69 #define CPUID_4_LEVEL(l) ((l) << 5)
70
71 #define CPUID_4_SELF_INIT_LEVEL (1 << 8)
72 #define CPUID_4_FULLY_ASSOC (1 << 9)
73
74 /* EDX: */
75 #define CPUID_4_NO_INVD_SHARING (1 << 0)
76 #define CPUID_4_INCLUSIVE (1 << 1)
77 #define CPUID_4_COMPLEX_IDX (1 << 2)
78
#define ASSOC_FULL 0xFF

/* AMD associativity encoding used on CPUID Leaf 0x80000006:
 * maps a cache way count to the 4-bit field value; 0 and 1 encode
 * themselves ("disabled" / direct mapped), 0 is returned for way counts
 * with no defined encoding.
 * NOTE: the argument is evaluated multiple times, so it must be a
 * side-effect-free expression.  Every use of the argument is
 * parenthesized so compound expressions (e.g. "x | y") group correctly.
 */
#define AMD_ENC_ASSOC(a) ((a) <=   1 ? (a)  : \
                          (a) ==   2 ? 0x2  : \
                          (a) ==   4 ? 0x4  : \
                          (a) ==   8 ? 0x6  : \
                          (a) ==  16 ? 0x8  : \
                          (a) ==  32 ? 0xA  : \
                          (a) ==  48 ? 0xB  : \
                          (a) ==  64 ? 0xC  : \
                          (a) ==  96 ? 0xD  : \
                          (a) == 128 ? 0xE  : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
94
95
96 /* Definitions of the hardcoded cache entries we expose: */
97
98 /* L1 data cache: */
99 #define L1D_LINE_SIZE 64
100 #define L1D_ASSOCIATIVITY 8
101 #define L1D_SETS 64
102 #define L1D_PARTITIONS 1
103 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
104 #define L1D_DESCRIPTOR CPUID_2_L1D_32KB_8WAY_64B
105 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
106 #define L1D_LINES_PER_TAG 1
107 #define L1D_SIZE_KB_AMD 64
108 #define L1D_ASSOCIATIVITY_AMD 2
109
110 /* L1 instruction cache: */
111 #define L1I_LINE_SIZE 64
112 #define L1I_ASSOCIATIVITY 8
113 #define L1I_SETS 64
114 #define L1I_PARTITIONS 1
115 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 32KiB */
116 #define L1I_DESCRIPTOR CPUID_2_L1I_32KB_8WAY_64B
117 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
118 #define L1I_LINES_PER_TAG 1
119 #define L1I_SIZE_KB_AMD 64
120 #define L1I_ASSOCIATIVITY_AMD 2
121
122 /* Level 2 unified cache: */
123 #define L2_LINE_SIZE 64
124 #define L2_ASSOCIATIVITY 16
125 #define L2_SETS 4096
126 #define L2_PARTITIONS 1
127 /* Size = LINE_SIZE*ASSOCIATIVITY*SETS*PARTITIONS = 4MiB */
128 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
129 #define L2_DESCRIPTOR CPUID_2_L2_2MB_8WAY_64B
130 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
131 #define L2_LINES_PER_TAG 1
132 #define L2_SIZE_KB_AMD 512
133
134 /* No L3 cache: */
135 #define L3_SIZE_KB 0 /* disabled */
136 #define L3_ASSOCIATIVITY 0 /* disabled */
137 #define L3_LINES_PER_TAG 0 /* disabled */
138 #define L3_LINE_SIZE 0 /* disabled */
139
140 /* TLB definitions: */
141
142 #define L1_DTLB_2M_ASSOC 1
143 #define L1_DTLB_2M_ENTRIES 255
144 #define L1_DTLB_4K_ASSOC 1
145 #define L1_DTLB_4K_ENTRIES 255
146
147 #define L1_ITLB_2M_ASSOC 1
148 #define L1_ITLB_2M_ENTRIES 255
149 #define L1_ITLB_4K_ASSOC 1
150 #define L1_ITLB_4K_ENTRIES 255
151
152 #define L2_DTLB_2M_ASSOC 0 /* disabled */
153 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
154 #define L2_DTLB_4K_ASSOC 4
155 #define L2_DTLB_4K_ENTRIES 512
156
157 #define L2_ITLB_2M_ASSOC 0 /* disabled */
158 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
159 #define L2_ITLB_4K_ASSOC 4
160 #define L2_ITLB_4K_ENTRIES 512
161
162
163
164 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
165 uint32_t vendor2, uint32_t vendor3)
166 {
167 int i;
168 for (i = 0; i < 4; i++) {
169 dst[i] = vendor1 >> (8 * i);
170 dst[i + 4] = vendor2 >> (8 * i);
171 dst[i + 8] = vendor3 >> (8 * i);
172 }
173 dst[CPUID_VENDOR_SZ] = '\0';
174 }
175
/* feature flags taken from "Intel Processor Identification and the CPUID
 * Instruction" and AMD's "CPUID Specification". In cases of disagreement
 * between feature naming conventions, aliases may be added.
 *
 * Each table below names bits 0..31 of one CPUID feature register.
 * NULL entries are bits with no name known to QEMU; "a|b" entries list
 * accepted alias spellings for the same bit (see altcmp()).
 */
/* CPUID[1].EDX feature names */
static const char *feature_name[] = {
    "fpu", "vme", "de", "pse",
    "tsc", "msr", "pae", "mce",
    "cx8", "apic", NULL, "sep",
    "mtrr", "pge", "mca", "cmov",
    "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
    NULL, "ds" /* Intel dts */, "acpi", "mmx",
    "fxsr", "sse", "sse2", "ss",
    "ht" /* Intel htt */, "tm", "ia64", "pbe",
};
/* CPUID[1].ECX feature names */
static const char *ext_feature_name[] = {
    "pni|sse3" /* Intel,AMD sse3 */, "pclmulqdq|pclmuldq", "dtes64", "monitor",
    "ds_cpl", "vmx", "smx", "est",
    "tm2", "ssse3", "cid", NULL,
    "fma", "cx16", "xtpr", "pdcm",
    NULL, "pcid", "dca", "sse4.1|sse4_1",
    "sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
    "tsc-deadline", "aes", "xsave", "osxsave",
    "avx", "f16c", "rdrand", "hypervisor",
};
/* Feature names that are already defined on feature_name[] but are set on
 * CPUID[8000_0001].EDX on AMD CPUs don't have their names on
 * ext2_feature_name[]. They are copied automatically to cpuid_ext2_features
 * if and only if CPU vendor is AMD.
 */
/* CPUID[8000_0001].EDX feature names (AMD-only bits) */
static const char *ext2_feature_name[] = {
    NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
    NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
    NULL /* cx8 */ /* AMD CMPXCHG8B */, NULL /* apic */, NULL, "syscall",
    NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
    NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
    "nx|xd", NULL, "mmxext", NULL /* mmx */,
    NULL /* fxsr */, "fxsr_opt|ffxsr", "pdpe1gb" /* AMD Page1GB */, "rdtscp",
    NULL, "lm|i64", "3dnowext", "3dnow",
};
/* CPUID[8000_0001].ECX feature names */
static const char *ext3_feature_name[] = {
    "lahf_lm" /* AMD LahfSahf */, "cmp_legacy", "svm", "extapic" /* AMD ExtApicSpace */,
    "cr8legacy" /* AMD AltMovCr8 */, "abm", "sse4a", "misalignsse",
    "3dnowprefetch", "osvw", "ibs", "xop",
    "skinit", "wdt", NULL, "lwp",
    "fma4", "tce", NULL, "nodeid_msr",
    NULL, "tbm", "topoext", "perfctr_core",
    "perfctr_nb", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[C000_0001].EDX feature names (VIA/Centaur PadLock) */
static const char *ext4_feature_name[] = {
    NULL, NULL, "xstore", "xstore-en",
    NULL, NULL, "xcrypt", "xcrypt-en",
    "ace2", "ace2-en", "phe", "phe-en",
    "pmm", "pmm-en", NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[KVM_CPUID_FEATURES].EAX paravirtual feature names.
 * NOTE: "kvmclock" intentionally appears twice: bits 0 and 3 are the
 * two kvmclock MSR interfaces, exposed under one flag name.
 */
static const char *kvm_feature_name[] = {
    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", "kvm_pv_unhalt",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "kvmclock-stable-bit", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[8000_000A].EDX SVM sub-feature names */
static const char *svm_feature_name[] = {
    "npt", "lbrv", "svm_lock", "nrip_save",
    "tsc_scale", "vmcb_clean", "flushbyasid", "decodeassists",
    NULL, NULL, "pause_filter", NULL,
    "pfthreshold", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[EAX=7,ECX=0].EBX structured extended feature names */
static const char *cpuid_7_0_ebx_feature_name[] = {
    "fsgsbase", "tsc_adjust", NULL, "bmi1", "hle", "avx2", NULL, "smep",
    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, "mpx", NULL,
    "avx512f", NULL, "rdseed", "adx", "smap", NULL, NULL, NULL,
    NULL, NULL, "avx512pf", "avx512er", "avx512cd", NULL, NULL, NULL,
};

/* CPUID[8000_0007].EDX (APM) feature names; only invtsc (bit 8) is known */
static const char *cpuid_apm_edx_feature_name[] = {
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    "invtsc", NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};

/* CPUID[EAX=0xd,ECX=1].EAX XSAVE sub-feature names */
static const char *cpuid_xsave_feature_name[] = {
    "xsaveopt", "xsavec", "xgetbv1", "xsaves",
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
    NULL, NULL, NULL, NULL,
};
287
288 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
289 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
290 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
291 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
292 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
293 CPUID_PSE36 | CPUID_FXSR)
294 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
295 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
296 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
297 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
298 CPUID_PAE | CPUID_SEP | CPUID_APIC)
299
300 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
301 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
302 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
303 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
304 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS)
305 /* partly implemented:
306 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
307 /* missing:
308 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
309 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
310 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
311 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
312 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
313 /* missing:
314 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
315 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
316 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
317 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
318 CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
319 CPUID_EXT_RDRAND */
320
321 #ifdef TARGET_X86_64
322 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
323 #else
324 #define TCG_EXT2_X86_64_FEATURES 0
325 #endif
326
327 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
328 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
329 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
330 TCG_EXT2_X86_64_FEATURES)
331 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
332 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
333 #define TCG_EXT4_FEATURES 0
334 #define TCG_SVM_FEATURES 0
335 #define TCG_KVM_FEATURES 0
336 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
337 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX)
338 /* missing:
339 CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
340 CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
341 CPUID_7_0_EBX_RDSEED */
342 #define TCG_APM_FEATURES 0
343
344
/* Describes how to read one feature word from CPUID: which leaf/subleaf
 * to query, which output register holds it, and what TCG can emulate.
 */
typedef struct FeatureWordInfo {
    const char **feat_names;  /* bit-name table (NULL-padded, 32 entries) */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
} FeatureWordInfo;

/* One entry per FeatureWord enum value. */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = ext_feature_name,
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    [FEAT_8000_0001_EDX] = {
        .feat_names = ext2_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = ext3_feature_name,
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = ext4_feature_name,
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = kvm_feature_name,
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_SVM] = {
        .feat_names = svm_feature_name,
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = cpuid_7_0_ebx_feature_name,
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = cpuid_apm_edx_feature_name,
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        /* invtsc cannot be migrated safely: TSC frequency may differ
         * on the destination host */
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_XSAVE] = {
        .feat_names = cpuid_xsave_feature_name,
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = 0,
    },
};
413
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;

/* Build one table entry from a register name; expands to a designated
 * initializer keyed by the R_* constant. */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* Per-register name/QAPI-enum lookup table, indexed by R_* constants. */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER

/* One XSAVE state component: the feature bit that enables it and its
 * offset/size within the XSAVE area. */
typedef struct ExtSaveArea {
    uint32_t feature, bits;
    uint32_t offset, size;
} ExtSaveArea;

/* Indexed by XSAVE state-component number (2 = AVX, 3/4 = MPX,
 * 5/6/7 = AVX-512); components 0/1 (x87/SSE) are always present and
 * not listed here. */
static const ExtSaveArea ext_save_areas[] = {
    [2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = 0x240, .size = 0x100 },
    [3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = 0x3c0, .size = 0x40  },
    [4] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = 0x400, .size = 0x40  },
    [5] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = 0x440, .size = 0x40 },
    [6] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = 0x480, .size = 0x200 },
    [7] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = 0x680, .size = 0x400 },
};
454
455 const char *get_register_name_32(unsigned int reg)
456 {
457 if (reg >= CPU_NB_REGS32) {
458 return NULL;
459 }
460 return x86_reg_info_32[reg].name;
461 }
462
/* KVM-specific features that are automatically added to all CPU models
 * when KVM is enabled.
 */
static uint32_t kvm_default_features[FEATURE_WORDS] = {
    [FEAT_KVM] = (1 << KVM_FEATURE_CLOCKSOURCE) |
        (1 << KVM_FEATURE_NOP_IO_DELAY) |
        (1 << KVM_FEATURE_CLOCKSOURCE2) |
        (1 << KVM_FEATURE_ASYNC_PF) |
        (1 << KVM_FEATURE_STEAL_TIME) |
        (1 << KVM_FEATURE_PV_EOI) |
        (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT),
    [FEAT_1_ECX] = CPUID_EXT_X2APIC,
};

/* Features that are not added by default to any CPU model when KVM is enabled.
 * These stay off even if the model definition would normally include them.
 */
static uint32_t kvm_default_unset_features[FEATURE_WORDS] = {
    [FEAT_1_EDX] = CPUID_ACPI,
    [FEAT_1_ECX] = CPUID_EXT_MONITOR,
    [FEAT_8000_0001_ECX] = CPUID_EXT3_SVM,
};
484
/* Machine-compat hook: stop KVM from auto-enabling @features in
 * feature word @w (clears them from kvm_default_features). */
void x86_cpu_compat_kvm_no_autoenable(FeatureWord w, uint32_t features)
{
    kvm_default_features[w] &= ~features;
}
489
/* Machine-compat hook: stop KVM from auto-disabling @features in
 * feature word @w (clears them from kvm_default_unset_features). */
void x86_cpu_compat_kvm_no_autodisable(FeatureWord w, uint32_t features)
{
    kvm_default_unset_features[w] &= ~features;
}
494
495 /*
496 * Returns the set of feature flags that are supported and migratable by
497 * QEMU, for a given FeatureWord.
498 */
499 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
500 {
501 FeatureWordInfo *wi = &feature_word_info[w];
502 uint32_t r = 0;
503 int i;
504
505 for (i = 0; i < 32; i++) {
506 uint32_t f = 1U << i;
507 /* If the feature name is unknown, it is not supported by QEMU yet */
508 if (!wi->feat_names[i]) {
509 continue;
510 }
511 /* Skip features known to QEMU, but explicitly marked as unmigratable */
512 if (wi->unmigratable_flags & f) {
513 continue;
514 }
515 r |= f;
516 }
517 return r;
518 }
519
/* Execute the CPUID instruction on the host CPU.
 * @function and @count are the input EAX (leaf) and ECX (subleaf).
 * Each of @eax/@ebx/@ecx/@edx may be NULL if the caller does not need
 * that output register.  Aborts at runtime on non-x86 hosts.
 */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* pusha/popa save and restore all GPRs, so EBX (presumably the PIC
     * base register here — hence no "=b" constraint) is preserved;
     * results are stored through %esi into vec[] instead. */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    /* Copy out only the registers the caller asked for. */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
553
/* True for any non-NUL character outside the printable ASCII range
 * (<= ' ' or > '~').  Despite the name this matches control characters
 * and high-bit bytes too, not just whitespace. */
#define iswhite(c) ((c) && ((c) <= ' ' || '~' < (c)))

/* general substring compare of *[s1..e1) and *[s2..e2). sx is start of
 * a substring. ex if !NULL points to the first char after a substring,
 * otherwise the string is assumed to sized by a terminating nul.
 * Return lexical ordering of *s1:*s2.
 */
static int sstrcmp(const char *s1, const char *e1,
                   const char *s2, const char *e2)
{
    for (;;) {
        /* Mismatch or NUL terminator on either side: classic strcmp
         * ordering of the differing characters. */
        if (!*s1 || !*s2 || *s1 != *s2)
            return (*s1 - *s2);
        ++s1, ++s2;
        /* Both substrings exhausted simultaneously: equal. */
        if (s1 == e1 && s2 == e2)
            return (0);
        /* Only one side exhausted: the longer side's next char decides
         * the ordering (shorter string sorts first). */
        else if (s1 == e1)
            return (*s2);
        else if (s2 == e2)
            return (*s1);
    }
}
576
/* compare *[s..e) to *altstr.  *altstr may be a simple string or multiple
 * '|' delimited (possibly empty) strings in which case search for a match
 * within the alternatives proceeds left to right.  Return 0 for success,
 * non-zero otherwise.
 */
static int altcmp(const char *s, const char *e, const char *altstr)
{
    const char *p, *q;

    /* q marks the start of the current alternative, p scans to its end. */
    for (q = p = altstr; ; ) {
        while (*p && *p != '|')
            ++p;
        /* Match: either both the alternative and *s are empty, or the
         * substring compares equal to the non-empty alternative. */
        if ((q == p && !*s) || (q != p && !sstrcmp(s, e, q, p)))
            return (0);
        /* End of altstr with no match: failure. */
        if (!*p)
            return (1);
        else
            q = ++p;  /* advance past '|' to the next alternative */
    }
}
597
/* Search @featureset (a 32-entry table of bit names, NULL for unnamed
 * bits) for the flag named by *[s..e); set the bit(s) whose name
 * matches in *pval and return true, otherwise return false.  All 32
 * entries are checked, so several bits sharing one name all get set.
 */
static bool lookup_feature(uint32_t *pval, const char *s, const char *e,
                           const char **featureset)
{
    bool matched = false;
    int bit;

    for (bit = 0; bit < 32; bit++) {
        const char *names = featureset[bit];

        if (names && !altcmp(s, e, names)) {
            *pval |= 1U << bit;
            matched = true;
        }
    }
    return matched;
}
616
/* Set the bit for CPU flag @flagname in the appropriate word of @words.
 * Feature words are searched in enum order and the search stops at the
 * first word containing a matching name.  Sets @errp when no feature
 * word knows the flag.
 */
static void add_flagname_to_bitmaps(const char *flagname,
                                    FeatureWordArray words,
                                    Error **errp)
{
    FeatureWord w;
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        if (wi->feat_names &&
            lookup_feature(&words[w], flagname, NULL, wi->feat_names)) {
            break;
        }
    }
    /* Loop ran to completion without a match: unknown flag name. */
    if (w == FEATURE_WORDS) {
        error_setg(errp, "CPU feature %s not found", flagname);
    }
}
633
/* CPU class name definitions: */

/* QOM type names for CPU models are "<model>" + "-" + TYPE_X86_CPU. */
#define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU
#define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX)

/* Return type name for a given CPU model name
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
}
646
647 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
648 {
649 ObjectClass *oc;
650 char *typename;
651
652 if (cpu_model == NULL) {
653 return NULL;
654 }
655
656 typename = x86_cpu_type_name(cpu_model);
657 oc = object_class_by_name(typename);
658 g_free(typename);
659 return oc;
660 }
661
/* Static definition of one built-in CPU model (see builtin_x86_defs). */
struct X86CPUDefinition {
    const char *name;     /* model name, e.g. "qemu64" */
    uint32_t level;       /* highest basic CPUID leaf */
    uint32_t xlevel;      /* highest extended CPUID leaf (0x8000xxxx) */
    uint32_t xlevel2;     /* highest Centaur CPUID leaf (0xC000xxxx) — see FEAT_C000_0001_EDX */
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;  /* initial feature bits, one word per FeatureWord */
    char model_id[48];          /* processor brand string (CPUID 0x80000002..4) */
    bool cache_info_passthrough; /* report host cache topology instead of built-in values */
};
676
677 static X86CPUDefinition builtin_x86_defs[] = {
678 {
679 .name = "qemu64",
680 .level = 4,
681 .vendor = CPUID_VENDOR_AMD,
682 .family = 6,
683 .model = 6,
684 .stepping = 3,
685 .features[FEAT_1_EDX] =
686 PPRO_FEATURES |
687 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
688 CPUID_PSE36,
689 .features[FEAT_1_ECX] =
690 CPUID_EXT_SSE3 | CPUID_EXT_CX16 | CPUID_EXT_POPCNT,
691 .features[FEAT_8000_0001_EDX] =
692 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
693 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
694 .features[FEAT_8000_0001_ECX] =
695 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
696 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
697 .xlevel = 0x8000000A,
698 },
699 {
700 .name = "phenom",
701 .level = 5,
702 .vendor = CPUID_VENDOR_AMD,
703 .family = 16,
704 .model = 2,
705 .stepping = 3,
706 /* Missing: CPUID_HT */
707 .features[FEAT_1_EDX] =
708 PPRO_FEATURES |
709 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
710 CPUID_PSE36 | CPUID_VME,
711 .features[FEAT_1_ECX] =
712 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
713 CPUID_EXT_POPCNT,
714 .features[FEAT_8000_0001_EDX] =
715 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
716 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
717 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
718 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
719 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
720 CPUID_EXT3_CR8LEG,
721 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
722 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
723 .features[FEAT_8000_0001_ECX] =
724 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
725 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
726 /* Missing: CPUID_SVM_LBRV */
727 .features[FEAT_SVM] =
728 CPUID_SVM_NPT,
729 .xlevel = 0x8000001A,
730 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
731 },
732 {
733 .name = "core2duo",
734 .level = 10,
735 .vendor = CPUID_VENDOR_INTEL,
736 .family = 6,
737 .model = 15,
738 .stepping = 11,
739 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
740 .features[FEAT_1_EDX] =
741 PPRO_FEATURES |
742 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
743 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
744 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
745 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
746 .features[FEAT_1_ECX] =
747 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
748 CPUID_EXT_CX16,
749 .features[FEAT_8000_0001_EDX] =
750 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
751 .features[FEAT_8000_0001_ECX] =
752 CPUID_EXT3_LAHF_LM,
753 .xlevel = 0x80000008,
754 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
755 },
756 {
757 .name = "kvm64",
758 .level = 5,
759 .vendor = CPUID_VENDOR_INTEL,
760 .family = 15,
761 .model = 6,
762 .stepping = 1,
763 /* Missing: CPUID_VME, CPUID_HT */
764 .features[FEAT_1_EDX] =
765 PPRO_FEATURES |
766 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
767 CPUID_PSE36,
768 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
769 .features[FEAT_1_ECX] =
770 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
771 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
772 .features[FEAT_8000_0001_EDX] =
773 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
774 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
775 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
776 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
777 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
778 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
779 .features[FEAT_8000_0001_ECX] =
780 0,
781 .xlevel = 0x80000008,
782 .model_id = "Common KVM processor"
783 },
784 {
785 .name = "qemu32",
786 .level = 4,
787 .vendor = CPUID_VENDOR_INTEL,
788 .family = 6,
789 .model = 6,
790 .stepping = 3,
791 .features[FEAT_1_EDX] =
792 PPRO_FEATURES,
793 .features[FEAT_1_ECX] =
794 CPUID_EXT_SSE3 | CPUID_EXT_POPCNT,
795 .xlevel = 0x80000004,
796 },
797 {
798 .name = "kvm32",
799 .level = 5,
800 .vendor = CPUID_VENDOR_INTEL,
801 .family = 15,
802 .model = 6,
803 .stepping = 1,
804 .features[FEAT_1_EDX] =
805 PPRO_FEATURES |
806 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
807 .features[FEAT_1_ECX] =
808 CPUID_EXT_SSE3,
809 .features[FEAT_8000_0001_EDX] =
810 PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES,
811 .features[FEAT_8000_0001_ECX] =
812 0,
813 .xlevel = 0x80000008,
814 .model_id = "Common 32-bit KVM processor"
815 },
816 {
817 .name = "coreduo",
818 .level = 10,
819 .vendor = CPUID_VENDOR_INTEL,
820 .family = 6,
821 .model = 14,
822 .stepping = 8,
823 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
824 .features[FEAT_1_EDX] =
825 PPRO_FEATURES | CPUID_VME |
826 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
827 CPUID_SS,
828 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
829 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
830 .features[FEAT_1_ECX] =
831 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
832 .features[FEAT_8000_0001_EDX] =
833 CPUID_EXT2_NX,
834 .xlevel = 0x80000008,
835 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
836 },
837 {
838 .name = "486",
839 .level = 1,
840 .vendor = CPUID_VENDOR_INTEL,
841 .family = 4,
842 .model = 8,
843 .stepping = 0,
844 .features[FEAT_1_EDX] =
845 I486_FEATURES,
846 .xlevel = 0,
847 },
848 {
849 .name = "pentium",
850 .level = 1,
851 .vendor = CPUID_VENDOR_INTEL,
852 .family = 5,
853 .model = 4,
854 .stepping = 3,
855 .features[FEAT_1_EDX] =
856 PENTIUM_FEATURES,
857 .xlevel = 0,
858 },
859 {
860 .name = "pentium2",
861 .level = 2,
862 .vendor = CPUID_VENDOR_INTEL,
863 .family = 6,
864 .model = 5,
865 .stepping = 2,
866 .features[FEAT_1_EDX] =
867 PENTIUM2_FEATURES,
868 .xlevel = 0,
869 },
870 {
871 .name = "pentium3",
872 .level = 2,
873 .vendor = CPUID_VENDOR_INTEL,
874 .family = 6,
875 .model = 7,
876 .stepping = 3,
877 .features[FEAT_1_EDX] =
878 PENTIUM3_FEATURES,
879 .xlevel = 0,
880 },
881 {
882 .name = "athlon",
883 .level = 2,
884 .vendor = CPUID_VENDOR_AMD,
885 .family = 6,
886 .model = 2,
887 .stepping = 3,
888 .features[FEAT_1_EDX] =
889 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
890 CPUID_MCA,
891 .features[FEAT_8000_0001_EDX] =
892 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
893 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
894 .xlevel = 0x80000008,
895 },
896 {
897 .name = "n270",
898 /* original is on level 10 */
899 .level = 5,
900 .vendor = CPUID_VENDOR_INTEL,
901 .family = 6,
902 .model = 28,
903 .stepping = 2,
904 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
905 .features[FEAT_1_EDX] =
906 PPRO_FEATURES |
907 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
908 CPUID_ACPI | CPUID_SS,
909 /* Some CPUs got no CPUID_SEP */
910 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
911 * CPUID_EXT_XTPR */
912 .features[FEAT_1_ECX] =
913 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
914 CPUID_EXT_MOVBE,
915 .features[FEAT_8000_0001_EDX] =
916 (PPRO_FEATURES & CPUID_EXT2_AMD_ALIASES) |
917 CPUID_EXT2_NX,
918 .features[FEAT_8000_0001_ECX] =
919 CPUID_EXT3_LAHF_LM,
920 .xlevel = 0x8000000A,
921 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
922 },
923 {
924 .name = "Conroe",
925 .level = 4,
926 .vendor = CPUID_VENDOR_INTEL,
927 .family = 6,
928 .model = 15,
929 .stepping = 3,
930 .features[FEAT_1_EDX] =
931 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
932 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
933 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
934 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
935 CPUID_DE | CPUID_FP87,
936 .features[FEAT_1_ECX] =
937 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
938 .features[FEAT_8000_0001_EDX] =
939 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
940 .features[FEAT_8000_0001_ECX] =
941 CPUID_EXT3_LAHF_LM,
942 .xlevel = 0x8000000A,
943 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
944 },
945 {
946 .name = "Penryn",
947 .level = 4,
948 .vendor = CPUID_VENDOR_INTEL,
949 .family = 6,
950 .model = 23,
951 .stepping = 3,
952 .features[FEAT_1_EDX] =
953 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
954 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
955 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
956 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
957 CPUID_DE | CPUID_FP87,
958 .features[FEAT_1_ECX] =
959 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
960 CPUID_EXT_SSE3,
961 .features[FEAT_8000_0001_EDX] =
962 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
963 .features[FEAT_8000_0001_ECX] =
964 CPUID_EXT3_LAHF_LM,
965 .xlevel = 0x8000000A,
966 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
967 },
968 {
969 .name = "Nehalem",
970 .level = 4,
971 .vendor = CPUID_VENDOR_INTEL,
972 .family = 6,
973 .model = 26,
974 .stepping = 3,
975 .features[FEAT_1_EDX] =
976 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
977 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
978 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
979 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
980 CPUID_DE | CPUID_FP87,
981 .features[FEAT_1_ECX] =
982 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
983 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
984 .features[FEAT_8000_0001_EDX] =
985 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
986 .features[FEAT_8000_0001_ECX] =
987 CPUID_EXT3_LAHF_LM,
988 .xlevel = 0x8000000A,
989 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
990 },
991 {
992 .name = "Westmere",
993 .level = 11,
994 .vendor = CPUID_VENDOR_INTEL,
995 .family = 6,
996 .model = 44,
997 .stepping = 1,
998 .features[FEAT_1_EDX] =
999 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1000 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1001 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1002 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1003 CPUID_DE | CPUID_FP87,
1004 .features[FEAT_1_ECX] =
1005 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1006 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1007 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1008 .features[FEAT_8000_0001_EDX] =
1009 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1010 .features[FEAT_8000_0001_ECX] =
1011 CPUID_EXT3_LAHF_LM,
1012 .xlevel = 0x8000000A,
1013 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1014 },
1015 {
1016 .name = "SandyBridge",
1017 .level = 0xd,
1018 .vendor = CPUID_VENDOR_INTEL,
1019 .family = 6,
1020 .model = 42,
1021 .stepping = 1,
1022 .features[FEAT_1_EDX] =
1023 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1024 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1025 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1026 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1027 CPUID_DE | CPUID_FP87,
1028 .features[FEAT_1_ECX] =
1029 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1030 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1031 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1032 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1033 CPUID_EXT_SSE3,
1034 .features[FEAT_8000_0001_EDX] =
1035 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1036 CPUID_EXT2_SYSCALL,
1037 .features[FEAT_8000_0001_ECX] =
1038 CPUID_EXT3_LAHF_LM,
1039 .features[FEAT_XSAVE] =
1040 CPUID_XSAVE_XSAVEOPT,
1041 .xlevel = 0x8000000A,
1042 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1043 },
1044 {
1045 .name = "Haswell",
1046 .level = 0xd,
1047 .vendor = CPUID_VENDOR_INTEL,
1048 .family = 6,
1049 .model = 60,
1050 .stepping = 1,
1051 .features[FEAT_1_EDX] =
1052 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1053 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1054 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1055 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1056 CPUID_DE | CPUID_FP87,
1057 .features[FEAT_1_ECX] =
1058 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1059 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1060 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1061 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1062 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1063 CPUID_EXT_PCID,
1064 .features[FEAT_8000_0001_EDX] =
1065 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1066 CPUID_EXT2_SYSCALL,
1067 .features[FEAT_8000_0001_ECX] =
1068 CPUID_EXT3_LAHF_LM,
1069 .features[FEAT_7_0_EBX] =
1070 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1071 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1072 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1073 CPUID_7_0_EBX_RTM,
1074 .features[FEAT_XSAVE] =
1075 CPUID_XSAVE_XSAVEOPT,
1076 .xlevel = 0x8000000A,
1077 .model_id = "Intel Core Processor (Haswell)",
1078 },
1079 {
1080 .name = "Broadwell",
1081 .level = 0xd,
1082 .vendor = CPUID_VENDOR_INTEL,
1083 .family = 6,
1084 .model = 61,
1085 .stepping = 2,
1086 .features[FEAT_1_EDX] =
1087 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1088 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1089 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1090 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1091 CPUID_DE | CPUID_FP87,
1092 .features[FEAT_1_ECX] =
1093 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1094 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1095 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1096 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1097 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1098 CPUID_EXT_PCID,
1099 .features[FEAT_8000_0001_EDX] =
1100 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1101 CPUID_EXT2_SYSCALL,
1102 .features[FEAT_8000_0001_ECX] =
1103 CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1104 .features[FEAT_7_0_EBX] =
1105 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1106 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1107 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1108 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1109 CPUID_7_0_EBX_SMAP,
1110 .features[FEAT_XSAVE] =
1111 CPUID_XSAVE_XSAVEOPT,
1112 .xlevel = 0x8000000A,
1113 .model_id = "Intel Core Processor (Broadwell)",
1114 },
1115 {
1116 .name = "Opteron_G1",
1117 .level = 5,
1118 .vendor = CPUID_VENDOR_AMD,
1119 .family = 15,
1120 .model = 6,
1121 .stepping = 1,
1122 .features[FEAT_1_EDX] =
1123 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1124 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1125 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1126 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1127 CPUID_DE | CPUID_FP87,
1128 .features[FEAT_1_ECX] =
1129 CPUID_EXT_SSE3,
1130 .features[FEAT_8000_0001_EDX] =
1131 CPUID_EXT2_LM | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1132 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1133 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1134 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1135 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1136 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1137 .xlevel = 0x80000008,
1138 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
1139 },
1140 {
1141 .name = "Opteron_G2",
1142 .level = 5,
1143 .vendor = CPUID_VENDOR_AMD,
1144 .family = 15,
1145 .model = 6,
1146 .stepping = 1,
1147 .features[FEAT_1_EDX] =
1148 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1149 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1150 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1151 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1152 CPUID_DE | CPUID_FP87,
1153 .features[FEAT_1_ECX] =
1154 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
1155 .features[FEAT_8000_0001_EDX] =
1156 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1157 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1158 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1159 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1160 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1161 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1162 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1163 .features[FEAT_8000_0001_ECX] =
1164 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1165 .xlevel = 0x80000008,
1166 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
1167 },
1168 {
1169 .name = "Opteron_G3",
1170 .level = 5,
1171 .vendor = CPUID_VENDOR_AMD,
1172 .family = 15,
1173 .model = 6,
1174 .stepping = 1,
1175 .features[FEAT_1_EDX] =
1176 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1177 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1178 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1179 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1180 CPUID_DE | CPUID_FP87,
1181 .features[FEAT_1_ECX] =
1182 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
1183 CPUID_EXT_SSE3,
1184 .features[FEAT_8000_0001_EDX] =
1185 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_FXSR |
1186 CPUID_EXT2_MMX | CPUID_EXT2_NX | CPUID_EXT2_PSE36 |
1187 CPUID_EXT2_PAT | CPUID_EXT2_CMOV | CPUID_EXT2_MCA |
1188 CPUID_EXT2_PGE | CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL |
1189 CPUID_EXT2_APIC | CPUID_EXT2_CX8 | CPUID_EXT2_MCE |
1190 CPUID_EXT2_PAE | CPUID_EXT2_MSR | CPUID_EXT2_TSC | CPUID_EXT2_PSE |
1191 CPUID_EXT2_DE | CPUID_EXT2_FPU,
1192 .features[FEAT_8000_0001_ECX] =
1193 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
1194 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
1195 .xlevel = 0x80000008,
1196 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
1197 },
1198 {
1199 .name = "Opteron_G4",
1200 .level = 0xd,
1201 .vendor = CPUID_VENDOR_AMD,
1202 .family = 21,
1203 .model = 1,
1204 .stepping = 2,
1205 .features[FEAT_1_EDX] =
1206 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1207 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1208 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1209 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1210 CPUID_DE | CPUID_FP87,
1211 .features[FEAT_1_ECX] =
1212 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1213 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1214 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1215 CPUID_EXT_SSE3,
1216 .features[FEAT_8000_0001_EDX] =
1217 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1218 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1219 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1220 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1221 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1222 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1223 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1224 .features[FEAT_8000_0001_ECX] =
1225 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1226 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1227 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1228 CPUID_EXT3_LAHF_LM,
1229 /* no xsaveopt! */
1230 .xlevel = 0x8000001A,
1231 .model_id = "AMD Opteron 62xx class CPU",
1232 },
1233 {
1234 .name = "Opteron_G5",
1235 .level = 0xd,
1236 .vendor = CPUID_VENDOR_AMD,
1237 .family = 21,
1238 .model = 2,
1239 .stepping = 0,
1240 .features[FEAT_1_EDX] =
1241 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1242 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1243 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1244 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1245 CPUID_DE | CPUID_FP87,
1246 .features[FEAT_1_ECX] =
1247 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
1248 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1249 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
1250 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1251 .features[FEAT_8000_0001_EDX] =
1252 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP |
1253 CPUID_EXT2_PDPE1GB | CPUID_EXT2_FXSR | CPUID_EXT2_MMX |
1254 CPUID_EXT2_NX | CPUID_EXT2_PSE36 | CPUID_EXT2_PAT |
1255 CPUID_EXT2_CMOV | CPUID_EXT2_MCA | CPUID_EXT2_PGE |
1256 CPUID_EXT2_MTRR | CPUID_EXT2_SYSCALL | CPUID_EXT2_APIC |
1257 CPUID_EXT2_CX8 | CPUID_EXT2_MCE | CPUID_EXT2_PAE | CPUID_EXT2_MSR |
1258 CPUID_EXT2_TSC | CPUID_EXT2_PSE | CPUID_EXT2_DE | CPUID_EXT2_FPU,
1259 .features[FEAT_8000_0001_ECX] =
1260 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
1261 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
1262 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
1263 CPUID_EXT3_LAHF_LM,
1264 /* no xsaveopt! */
1265 .xlevel = 0x8000001A,
1266 .model_id = "AMD Opteron 63xx class CPU",
1267 },
1268 };
1269
1270 /**
1271 * x86_cpu_compat_set_features:
1272 * @cpu_model: CPU model name to be changed. If NULL, all CPU models are changed
1273 * @w: Identifies the feature word to be changed.
1274 * @feat_add: Feature bits to be added to feature word
1275 * @feat_remove: Feature bits to be removed from feature word
1276 *
1277 * Change CPU model feature bits for compatibility.
1278 *
1279 * This function may be used by machine-type compatibility functions
1280 * to enable or disable feature bits on specific CPU models.
1281 */
1282 void x86_cpu_compat_set_features(const char *cpu_model, FeatureWord w,
1283 uint32_t feat_add, uint32_t feat_remove)
1284 {
1285 X86CPUDefinition *def;
1286 int i;
1287 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1288 def = &builtin_x86_defs[i];
1289 if (!cpu_model || !strcmp(cpu_model, def->name)) {
1290 def->features[w] |= feat_add;
1291 def->features[w] &= ~feat_remove;
1292 }
1293 }
1294 }
1295
1296 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1297 bool migratable_only);
1298
1299 #ifdef CONFIG_KVM
1300
/* Fill @str (must hold at least 48 bytes) with the host CPU's model-ID
 * string, read from CPUID leaves 0x80000002..0x80000004.  Each leaf
 * contributes 16 bytes, copied register by register (EAX, EBX, ECX, EDX).
 * Always returns 0.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
    int i;

    for (i = 0; i < 3; i++) {
        host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
        memcpy(str + i * 16 + 0, &eax, 4);
        memcpy(str + i * 16 + 4, &ebx, 4);
        memcpy(str + i * 16 + 8, &ecx, 4);
        memcpy(str + i * 16 + 12, &edx, 4);
    }
    return 0;
}
1315
1316 static X86CPUDefinition host_cpudef;
1317
/* qdev properties of the "host" CPU model; "migratable" (default true)
 * restricts the exposed feature set to migration-safe flags. */
static Property host_x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    DEFINE_PROP_END_OF_LIST()
};
1322
1323 /* class_init for the "host" CPU model
1324 *
1325 * This function may be called before KVM is initialized.
1326 */
static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(oc);
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

    /* "-cpu host" is only usable with the KVM accelerator */
    xcc->kvm_required = true;

    /* CPUID leaf 0: vendor string is packed into EBX:EDX:ECX */
    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

    /* CPUID leaf 1 EAX: stepping[3:0], model[7:4] + ext-model[19:16],
     * family[11:8] + ext-family[27:20] */
    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    host_cpudef.family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
    host_cpudef.model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
    host_cpudef.stepping = eax & 0x0F;

    cpu_x86_fill_model_id(host_cpudef.model_id);

    xcc->cpu_def = &host_cpudef;
    /* report the host's real cache layout instead of synthetic values */
    host_cpudef.cache_info_passthrough = true;

    /* level, xlevel, xlevel2, and the feature words are initialized on
     * instance_init, because they require KVM to be initialized.
     */

    dc->props = host_x86_cpu_properties;
}
1354
/* instance_init for the "host" CPU model: copy the accelerator's CPUID
 * limits into the new instance.  Requires KVM to be up already. */
static void host_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;

    assert(kvm_enabled());

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->host_features = true;

    /* Maximum basic / extended / Centaur CPUID leaves supported by KVM */
    env->cpuid_level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
    env->cpuid_xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
    env->cpuid_xlevel2 = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
1374
/* QOM type registration for "-cpu host": a TYPE_X86_CPU subclass that
 * replaces the instance/class init hooks with the host-probing variants. */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = TYPE_X86_CPU,
    .instance_init = host_x86_cpu_initfn,
    .class_init = host_x86_cpu_class_init,
};
1381
1382 #endif
1383
1384 static void report_unavailable_features(FeatureWord w, uint32_t mask)
1385 {
1386 FeatureWordInfo *f = &feature_word_info[w];
1387 int i;
1388
1389 for (i = 0; i < 32; ++i) {
1390 if (1 << i & mask) {
1391 const char *reg = get_register_name_32(f->cpuid_reg);
1392 assert(reg);
1393 fprintf(stderr, "warning: %s doesn't support requested feature: "
1394 "CPUID.%02XH:%s%s%s [bit %d]\n",
1395 kvm_enabled() ? "host" : "TCG",
1396 f->cpuid_eax, reg,
1397 f->feat_names[i] ? "." : "",
1398 f->feat_names[i] ? f->feat_names[i] : "", i);
1399 }
1400 }
1401 }
1402
1403 static void x86_cpuid_version_get_family(Object *obj, Visitor *v, void *opaque,
1404 const char *name, Error **errp)
1405 {
1406 X86CPU *cpu = X86_CPU(obj);
1407 CPUX86State *env = &cpu->env;
1408 int64_t value;
1409
1410 value = (env->cpuid_version >> 8) & 0xf;
1411 if (value == 0xf) {
1412 value += (env->cpuid_version >> 20) & 0xff;
1413 }
1414 visit_type_int(v, &value, name, errp);
1415 }
1416
1417 static void x86_cpuid_version_set_family(Object *obj, Visitor *v, void *opaque,
1418 const char *name, Error **errp)
1419 {
1420 X86CPU *cpu = X86_CPU(obj);
1421 CPUX86State *env = &cpu->env;
1422 const int64_t min = 0;
1423 const int64_t max = 0xff + 0xf;
1424 Error *local_err = NULL;
1425 int64_t value;
1426
1427 visit_type_int(v, &value, name, &local_err);
1428 if (local_err) {
1429 error_propagate(errp, local_err);
1430 return;
1431 }
1432 if (value < min || value > max) {
1433 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1434 name ? name : "null", value, min, max);
1435 return;
1436 }
1437
1438 env->cpuid_version &= ~0xff00f00;
1439 if (value > 0x0f) {
1440 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
1441 } else {
1442 env->cpuid_version |= value << 8;
1443 }
1444 }
1445
1446 static void x86_cpuid_version_get_model(Object *obj, Visitor *v, void *opaque,
1447 const char *name, Error **errp)
1448 {
1449 X86CPU *cpu = X86_CPU(obj);
1450 CPUX86State *env = &cpu->env;
1451 int64_t value;
1452
1453 value = (env->cpuid_version >> 4) & 0xf;
1454 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
1455 visit_type_int(v, &value, name, errp);
1456 }
1457
1458 static void x86_cpuid_version_set_model(Object *obj, Visitor *v, void *opaque,
1459 const char *name, Error **errp)
1460 {
1461 X86CPU *cpu = X86_CPU(obj);
1462 CPUX86State *env = &cpu->env;
1463 const int64_t min = 0;
1464 const int64_t max = 0xff;
1465 Error *local_err = NULL;
1466 int64_t value;
1467
1468 visit_type_int(v, &value, name, &local_err);
1469 if (local_err) {
1470 error_propagate(errp, local_err);
1471 return;
1472 }
1473 if (value < min || value > max) {
1474 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1475 name ? name : "null", value, min, max);
1476 return;
1477 }
1478
1479 env->cpuid_version &= ~0xf00f0;
1480 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
1481 }
1482
1483 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
1484 void *opaque, const char *name,
1485 Error **errp)
1486 {
1487 X86CPU *cpu = X86_CPU(obj);
1488 CPUX86State *env = &cpu->env;
1489 int64_t value;
1490
1491 value = env->cpuid_version & 0xf;
1492 visit_type_int(v, &value, name, errp);
1493 }
1494
1495 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
1496 void *opaque, const char *name,
1497 Error **errp)
1498 {
1499 X86CPU *cpu = X86_CPU(obj);
1500 CPUX86State *env = &cpu->env;
1501 const int64_t min = 0;
1502 const int64_t max = 0xf;
1503 Error *local_err = NULL;
1504 int64_t value;
1505
1506 visit_type_int(v, &value, name, &local_err);
1507 if (local_err) {
1508 error_propagate(errp, local_err);
1509 return;
1510 }
1511 if (value < min || value > max) {
1512 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1513 name ? name : "null", value, min, max);
1514 return;
1515 }
1516
1517 env->cpuid_version &= ~0xf;
1518 env->cpuid_version |= value & 0xf;
1519 }
1520
1521 static void x86_cpuid_get_level(Object *obj, Visitor *v, void *opaque,
1522 const char *name, Error **errp)
1523 {
1524 X86CPU *cpu = X86_CPU(obj);
1525
1526 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1527 }
1528
1529 static void x86_cpuid_set_level(Object *obj, Visitor *v, void *opaque,
1530 const char *name, Error **errp)
1531 {
1532 X86CPU *cpu = X86_CPU(obj);
1533
1534 visit_type_uint32(v, &cpu->env.cpuid_level, name, errp);
1535 }
1536
1537 static void x86_cpuid_get_xlevel(Object *obj, Visitor *v, void *opaque,
1538 const char *name, Error **errp)
1539 {
1540 X86CPU *cpu = X86_CPU(obj);
1541
1542 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1543 }
1544
1545 static void x86_cpuid_set_xlevel(Object *obj, Visitor *v, void *opaque,
1546 const char *name, Error **errp)
1547 {
1548 X86CPU *cpu = X86_CPU(obj);
1549
1550 visit_type_uint32(v, &cpu->env.cpuid_xlevel, name, errp);
1551 }
1552
1553 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
1554 {
1555 X86CPU *cpu = X86_CPU(obj);
1556 CPUX86State *env = &cpu->env;
1557 char *value;
1558
1559 value = g_malloc(CPUID_VENDOR_SZ + 1);
1560 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
1561 env->cpuid_vendor3);
1562 return value;
1563 }
1564
1565 static void x86_cpuid_set_vendor(Object *obj, const char *value,
1566 Error **errp)
1567 {
1568 X86CPU *cpu = X86_CPU(obj);
1569 CPUX86State *env = &cpu->env;
1570 int i;
1571
1572 if (strlen(value) != CPUID_VENDOR_SZ) {
1573 error_set(errp, QERR_PROPERTY_VALUE_BAD, "",
1574 "vendor", value);
1575 return;
1576 }
1577
1578 env->cpuid_vendor1 = 0;
1579 env->cpuid_vendor2 = 0;
1580 env->cpuid_vendor3 = 0;
1581 for (i = 0; i < 4; i++) {
1582 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
1583 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
1584 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
1585 }
1586 }
1587
1588 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
1589 {
1590 X86CPU *cpu = X86_CPU(obj);
1591 CPUX86State *env = &cpu->env;
1592 char *value;
1593 int i;
1594
1595 value = g_malloc(48 + 1);
1596 for (i = 0; i < 48; i++) {
1597 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
1598 }
1599 value[48] = '\0';
1600 return value;
1601 }
1602
1603 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
1604 Error **errp)
1605 {
1606 X86CPU *cpu = X86_CPU(obj);
1607 CPUX86State *env = &cpu->env;
1608 int c, len, i;
1609
1610 if (model_id == NULL) {
1611 model_id = "";
1612 }
1613 len = strlen(model_id);
1614 memset(env->cpuid_model, 0, 48);
1615 for (i = 0; i < 48; i++) {
1616 if (i >= len) {
1617 c = '\0';
1618 } else {
1619 c = (uint8_t)model_id[i];
1620 }
1621 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
1622 }
1623 }
1624
1625 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, void *opaque,
1626 const char *name, Error **errp)
1627 {
1628 X86CPU *cpu = X86_CPU(obj);
1629 int64_t value;
1630
1631 value = cpu->env.tsc_khz * 1000;
1632 visit_type_int(v, &value, name, errp);
1633 }
1634
1635 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, void *opaque,
1636 const char *name, Error **errp)
1637 {
1638 X86CPU *cpu = X86_CPU(obj);
1639 const int64_t min = 0;
1640 const int64_t max = INT64_MAX;
1641 Error *local_err = NULL;
1642 int64_t value;
1643
1644 visit_type_int(v, &value, name, &local_err);
1645 if (local_err) {
1646 error_propagate(errp, local_err);
1647 return;
1648 }
1649 if (value < min || value > max) {
1650 error_set(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
1651 name ? name : "null", value, min, max);
1652 return;
1653 }
1654
1655 cpu->env.tsc_khz = value / 1000;
1656 }
1657
1658 static void x86_cpuid_get_apic_id(Object *obj, Visitor *v, void *opaque,
1659 const char *name, Error **errp)
1660 {
1661 X86CPU *cpu = X86_CPU(obj);
1662 int64_t value = cpu->env.cpuid_apic_id;
1663
1664 visit_type_int(v, &value, name, errp);
1665 }
1666
/* "apic-id" property setter.  Rejects: changes after the device is
 * realized, values outside [0, UINT32_MAX], and IDs already in use by
 * another CPU (re-setting the current ID is allowed). */
static void x86_cpuid_set_apic_id(Object *obj, Visitor *v, void *opaque,
                                  const char *name, Error **errp)
{
    X86CPU *cpu = X86_CPU(obj);
    DeviceState *dev = DEVICE(obj);
    const int64_t min = 0;
    const int64_t max = UINT32_MAX;
    Error *error = NULL;
    int64_t value;

    /* the APIC ID is fixed once the CPU has been realized */
    if (dev->realized) {
        error_setg(errp, "Attempt to set property '%s' on '%s' after "
                   "it was realized", name, object_get_typename(obj));
        return;
    }

    visit_type_int(v, &value, name, &error);
    if (error) {
        error_propagate(errp, error);
        return;
    }
    if (value < min || value > max) {
        error_setg(errp, "Property %s.%s doesn't take value %" PRId64
                   " (minimum: %" PRId64 ", maximum: %" PRId64 ")" ,
                   object_get_typename(obj), name, value, min, max);
        return;
    }

    /* reject duplicates; setting the same ID again is a no-op */
    if ((value != cpu->env.cpuid_apic_id) && cpu_exists(value)) {
        error_setg(errp, "CPU with APIC ID %" PRIi64 " exists", value);
        return;
    }
    cpu->env.cpuid_apic_id = value;
}
1701
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v, void *opaque,
                                      const char *name, Error **errp)
{
    /* @opaque points at the feature-word array to report (one uint32_t
     * per FeatureWord), selected at property registration time */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    Error *err = NULL;
    /* QAPI structs and list nodes are stack-allocated: the visitor
     * consumes them before this function returns, so nothing escapes */
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    visit_type_X86CPUFeatureWordInfoList(v, &list, "feature-words", &err);
    error_propagate(errp, err);
}
1731
1732 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1733 const char *name, Error **errp)
1734 {
1735 X86CPU *cpu = X86_CPU(obj);
1736 int64_t value = cpu->hyperv_spinlock_attempts;
1737
1738 visit_type_int(v, &value, name, errp);
1739 }
1740
1741 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, void *opaque,
1742 const char *name, Error **errp)
1743 {
1744 const int64_t min = 0xFFF;
1745 const int64_t max = UINT_MAX;
1746 X86CPU *cpu = X86_CPU(obj);
1747 Error *err = NULL;
1748 int64_t value;
1749
1750 visit_type_int(v, &value, name, &err);
1751 if (err) {
1752 error_propagate(errp, err);
1753 return;
1754 }
1755
1756 if (value < min || value > max) {
1757 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
1758 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
1759 object_get_typename(obj), name ? name : "null",
1760 value, min, max);
1761 return;
1762 }
1763 cpu->hyperv_spinlock_attempts = value;
1764 }
1765
/* qdev property type for "hv-spinlocks": a plain int whose setter
 * enforces the [0xFFF, UINT_MAX] range. */
static PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
1771
1772 /* Convert all '_' in a feature string option name to '-', to make feature
1773 * name conform to QOM property naming rule, which uses '-' instead of '_'.
1774 */
/* Convert all '_' in a feature string option name to '-', to make feature
 * name conform to QOM property naming rule, which uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    char *p;

    for (p = s; *p != '\0'; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
1781
1782 /* Parse "+feature,-feature,feature=foo" CPU feature string
1783 */
1784 static void x86_cpu_parse_featurestr(CPUState *cs, char *features,
1785 Error **errp)
1786 {
1787 X86CPU *cpu = X86_CPU(cs);
1788 char *featurestr; /* Single 'key=value" string being parsed */
1789 FeatureWord w;
1790 /* Features to be added */
1791 FeatureWordArray plus_features = { 0 };
1792 /* Features to be removed */
1793 FeatureWordArray minus_features = { 0 };
1794 uint32_t numvalue;
1795 CPUX86State *env = &cpu->env;
1796 Error *local_err = NULL;
1797
1798 featurestr = features ? strtok(features, ",") : NULL;
1799
1800 while (featurestr) {
1801 char *val;
1802 if (featurestr[0] == '+') {
1803 add_flagname_to_bitmaps(featurestr + 1, plus_features, &local_err);
1804 } else if (featurestr[0] == '-') {
1805 add_flagname_to_bitmaps(featurestr + 1, minus_features, &local_err);
1806 } else if ((val = strchr(featurestr, '='))) {
1807 *val = 0; val++;
1808 feat2prop(featurestr);
1809 if (!strcmp(featurestr, "xlevel")) {
1810 char *err;
1811 char num[32];
1812
1813 numvalue = strtoul(val, &err, 0);
1814 if (!*val || *err) {
1815 error_setg(errp, "bad numerical value %s", val);
1816 return;
1817 }
1818 if (numvalue < 0x80000000) {
1819 error_report("xlevel value shall always be >= 0x80000000"
1820 ", fixup will be removed in future versions");
1821 numvalue += 0x80000000;
1822 }
1823 snprintf(num, sizeof(num), "%" PRIu32, numvalue);
1824 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1825 } else if (!strcmp(featurestr, "tsc-freq")) {
1826 int64_t tsc_freq;
1827 char *err;
1828 char num[32];
1829
1830 tsc_freq = strtosz_suffix_unit(val, &err,
1831 STRTOSZ_DEFSUFFIX_B, 1000);
1832 if (tsc_freq < 0 || *err) {
1833 error_setg(errp, "bad numerical value %s", val);
1834 return;
1835 }
1836 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
1837 object_property_parse(OBJECT(cpu), num, "tsc-frequency",
1838 &local_err);
1839 } else if (!strcmp(featurestr, "hv-spinlocks")) {
1840 char *err;
1841 const int min = 0xFFF;
1842 char num[32];
1843 numvalue = strtoul(val, &err, 0);
1844 if (!*val || *err) {
1845 error_setg(errp, "bad numerical value %s", val);
1846 return;
1847 }
1848 if (numvalue < min) {
1849 error_report("hv-spinlocks value shall always be >= 0x%x"
1850 ", fixup will be removed in future versions",
1851 min);
1852 numvalue = min;
1853 }
1854 snprintf(num, sizeof(num), "%" PRId32, numvalue);
1855 object_property_parse(OBJECT(cpu), num, featurestr, &local_err);
1856 } else {
1857 object_property_parse(OBJECT(cpu), val, featurestr, &local_err);
1858 }
1859 } else {
1860 feat2prop(featurestr);
1861 object_property_parse(OBJECT(cpu), "on", featurestr, &local_err);
1862 }
1863 if (local_err) {
1864 error_propagate(errp, local_err);
1865 return;
1866 }
1867 featurestr = strtok(NULL, ",");
1868 }
1869
1870 if (cpu->host_features) {
1871 for (w = 0; w < FEATURE_WORDS; w++) {
1872 env->features[w] =
1873 x86_cpu_get_supported_feature_word(w, cpu->migratable);
1874 }
1875 }
1876
1877 for (w = 0; w < FEATURE_WORDS; w++) {
1878 env->features[w] |= plus_features[w];
1879 env->features[w] &= ~minus_features[w];
1880 }
1881 }
1882
1883 /* generate a composite string into buf of all cpuid names in featureset
1884 * selected by fbits. indicate truncation at bufsize in the event of overflow.
1885 * if flags, suppress names undefined in featureset.
1886 */
static void listflags(char *buf, int bufsize, uint32_t fbits,
                      const char **featureset, uint32_t flags)
{
    const char **p = &featureset[31];
    char *q, *b, bit;
    int nc;

    /* b: where "..." is written on truncation; bufsize is shrunk by 3 to
     * reserve room for it.  NULL (no marker) if buf can't hold 4 chars. */
    b = 4 <= bufsize ? buf + (bufsize -= 3) - 1 : NULL;
    *buf = '\0';
    /* Walk bits 31..0; each examined bit is cleared from fbits so the
     * loop ends as soon as no selected bits remain.
     * NOTE(review): 1 << 31 on a signed int is formally UB; 1U << bit
     * would be cleaner -- verify before changing generated output. */
    for (q = buf, bit = 31; fbits && bufsize; --p, fbits &= ~(1 << bit), --bit)
        if (fbits & 1 << bit && (*p || !flags)) {
            if (*p)
                /* named feature: space-separated after the first entry */
                nc = snprintf(q, bufsize, "%s%s", q == buf ? "" : " ", *p);
            else
                /* unnamed bit: print its index, e.g. "[13]" */
                nc = snprintf(q, bufsize, "%s[%d]", q == buf ? "" : " ", bit);
            if (bufsize <= nc) {
                /* output didn't fit: overwrite the tail with "..." */
                if (b) {
                    memcpy(b, "...", sizeof("..."));
                }
                return;
            }
            q += nc;
            bufsize -= nc;
        }
}
1912
1913 /* generate CPU information. */
/* Print the list of builtin CPU models and all recognized CPUID flag
 * names to @f via @cpu_fprintf (used by "-cpu help"). */
void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
    X86CPUDefinition *def;
    char buf[256];
    int i;

    /* one line per builtin model: name plus model-id string */
    for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
        def = &builtin_x86_defs[i];
        snprintf(buf, sizeof(buf), "%s", def->name);
        (*cpu_fprintf)(f, "x86 %16s %-48s\n", buf, def->model_id);
    }
#ifdef CONFIG_KVM
    (*cpu_fprintf)(f, "x86 %16s %-48s\n", "host",
                   "KVM processor with all supported host features "
                   "(only available in KVM mode)");
#endif

    (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
    for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
        FeatureWordInfo *fw = &feature_word_info[i];

        /* fbits=~0: consider all 32 bits; flags=1: skip unnamed bits */
        listflags(buf, sizeof(buf), (uint32_t)~0, fw->feat_names, 1);
        (*cpu_fprintf)(f, " %s\n", buf);
    }
}
1939
1940 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
1941 {
1942 CpuDefinitionInfoList *cpu_list = NULL;
1943 X86CPUDefinition *def;
1944 int i;
1945
1946 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
1947 CpuDefinitionInfoList *entry;
1948 CpuDefinitionInfo *info;
1949
1950 def = &builtin_x86_defs[i];
1951 info = g_malloc0(sizeof(*info));
1952 info->name = g_strdup(def->name);
1953
1954 entry = g_malloc0(sizeof(*entry));
1955 entry->value = info;
1956 entry->next = cpu_list;
1957 cpu_list = entry;
1958 }
1959
1960 return cpu_list;
1961 }
1962
1963 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
1964 bool migratable_only)
1965 {
1966 FeatureWordInfo *wi = &feature_word_info[w];
1967 uint32_t r;
1968
1969 if (kvm_enabled()) {
1970 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
1971 wi->cpuid_ecx,
1972 wi->cpuid_reg);
1973 } else if (tcg_enabled()) {
1974 r = wi->tcg_features;
1975 } else {
1976 return ~0;
1977 }
1978 if (migratable_only) {
1979 r &= x86_cpu_get_migratable_flags(w);
1980 }
1981 return r;
1982 }
1983
1984 /*
1985 * Filters CPU feature words based on host availability of each feature.
1986 *
1987 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
1988 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, cpu->migratable);
        uint32_t requested_features = env->features[w];
        /* drop everything the accelerator can't provide... */
        env->features[w] &= host_feat;
        /* ...and record which requested bits were dropped */
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            /* warn only under -cpu ...,check or ...,enforce */
            if (cpu->check_cpuid || cpu->enforce_cpuid) {
                report_unavailable_features(w, cpu->filtered_features[w]);
            }
            rv = 1;
        }
    }

    return rv;
}
2011
2012 /* Load data from X86CPUDefinition
2013 */
2014 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
2015 {
2016 CPUX86State *env = &cpu->env;
2017 const char *vendor;
2018 char host_vendor[CPUID_VENDOR_SZ + 1];
2019 FeatureWord w;
2020
2021 object_property_set_int(OBJECT(cpu), def->level, "level", errp);
2022 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
2023 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
2024 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
2025 object_property_set_int(OBJECT(cpu), def->xlevel, "xlevel", errp);
2026 env->cpuid_xlevel2 = def->xlevel2;
2027 cpu->cache_info_passthrough = def->cache_info_passthrough;
2028 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
2029 for (w = 0; w < FEATURE_WORDS; w++) {
2030 env->features[w] = def->features[w];
2031 }
2032
2033 /* Special cases not set in the X86CPUDefinition structs: */
2034 if (kvm_enabled()) {
2035 FeatureWord w;
2036 for (w = 0; w < FEATURE_WORDS; w++) {
2037 env->features[w] |= kvm_default_features[w];
2038 env->features[w] &= ~kvm_default_unset_features[w];
2039 }
2040 }
2041
2042 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
2043
2044 /* sysenter isn't supported in compatibility mode on AMD,
2045 * syscall isn't supported in compatibility mode on Intel.
2046 * Normally we advertise the actual CPU vendor, but you can
2047 * override this using the 'vendor' property if you want to use
2048 * KVM's sysenter/syscall emulation in compatibility mode and
2049 * when doing cross vendor migration
2050 */
2051 vendor = def->vendor;
2052 if (kvm_enabled()) {
2053 uint32_t ebx = 0, ecx = 0, edx = 0;
2054 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
2055 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
2056 vendor = host_vendor;
2057 }
2058
2059 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
2060
2061 }
2062
/* Create (but do not realize) an X86CPU from a "model[,features]" string.
 *
 * @icc_bridge: parent bridge providing the ICC bus (required for
 *              system-mode emulation; unused under CONFIG_USER_ONLY).
 *
 * Returns the new, unrealized CPU object, or NULL on error (reported
 * through @errp).  The caller is expected to realize the CPU.
 */
X86CPU *cpu_x86_create(const char *cpu_model, DeviceState *icc_bridge,
                       Error **errp)
{
    X86CPU *cpu = NULL;
    X86CPUClass *xcc;
    ObjectClass *oc;
    gchar **model_pieces;
    char *name, *features;
    Error *error = NULL;

    /* Split "name,feat1,feat2,..." into the model name and the rest */
    model_pieces = g_strsplit(cpu_model, ",", 2);
    if (!model_pieces[0]) {
        error_setg(&error, "Invalid/empty CPU model name");
        goto out;
    }
    name = model_pieces[0];
    features = model_pieces[1];

    oc = x86_cpu_class_by_name(name);
    if (oc == NULL) {
        error_setg(&error, "Unable to find CPU definition: %s", name);
        goto out;
    }
    xcc = X86_CPU_CLASS(oc);

    /* e.g. the "host" model only exists when KVM is active */
    if (xcc->kvm_required && !kvm_enabled()) {
        error_setg(&error, "CPU model '%s' requires KVM", name);
        goto out;
    }

    cpu = X86_CPU(object_new(object_class_get_name(oc)));

#ifndef CONFIG_USER_ONLY
    if (icc_bridge == NULL) {
        error_setg(&error, "Invalid icc-bridge value");
        goto out;
    }
    /* Reparent onto the ICC bus; the bus now holds a reference, so the
     * creation reference is dropped here.
     * NOTE(review): if x86_cpu_parse_featurestr() fails afterwards, the
     * error path below unrefs the object again — verify this cannot
     * release the bus's reference prematurely. */
    qdev_set_parent_bus(DEVICE(cpu), qdev_get_child_bus(icc_bridge, "icc"));
    object_unref(OBJECT(cpu));
#endif

    x86_cpu_parse_featurestr(CPU(cpu), features, &error);
    if (error) {
        goto out;
    }

out:
    if (error != NULL) {
        error_propagate(errp, error);
        if (cpu) {
            object_unref(OBJECT(cpu));
            cpu = NULL;
        }
    }
    g_strfreev(model_pieces);
    return cpu;
}
2120
2121 X86CPU *cpu_x86_init(const char *cpu_model)
2122 {
2123 Error *error = NULL;
2124 X86CPU *cpu;
2125
2126 cpu = cpu_x86_create(cpu_model, NULL, &error);
2127 if (error) {
2128 goto out;
2129 }
2130
2131 object_property_set_bool(OBJECT(cpu), true, "realized", &error);
2132
2133 out:
2134 if (error) {
2135 error_report("%s", error_get_pretty(error));
2136 error_free(error);
2137 if (cpu != NULL) {
2138 object_unref(OBJECT(cpu));
2139 cpu = NULL;
2140 }
2141 }
2142 return cpu;
2143 }
2144
2145 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
2146 {
2147 X86CPUDefinition *cpudef = data;
2148 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2149
2150 xcc->cpu_def = cpudef;
2151 }
2152
2153 static void x86_register_cpudef_type(X86CPUDefinition *def)
2154 {
2155 char *typename = x86_cpu_type_name(def->name);
2156 TypeInfo ti = {
2157 .name = typename,
2158 .parent = TYPE_X86_CPU,
2159 .class_init = x86_cpu_cpudef_class_init,
2160 .class_data = def,
2161 };
2162
2163 type_register(&ti);
2164 g_free(typename);
2165 }
2166
2167 #if !defined(CONFIG_USER_ONLY)
2168
/* Remove the APIC bit from CPUID[1].EDX, for boards without an APIC. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
2173
2174 #endif /* !CONFIG_USER_ONLY */
2175
2176 /* Initialize list of CPU models, filling some non-static fields if necessary
2177 */
2178 void x86_cpudef_setup(void)
2179 {
2180 int i, j;
2181 static const char *model_with_versions[] = { "qemu32", "qemu64", "athlon" };
2182
2183 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); ++i) {
2184 X86CPUDefinition *def = &builtin_x86_defs[i];
2185
2186 /* Look for specific "cpudef" models that */
2187 /* have the QEMU version in .model_id */
2188 for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
2189 if (strcmp(model_with_versions[j], def->name) == 0) {
2190 pstrcpy(def->model_id, sizeof(def->model_id),
2191 "QEMU Virtual CPU version ");
2192 pstrcat(def->model_id, sizeof(def->model_id),
2193 qemu_get_version());
2194 break;
2195 }
2196 }
2197 }
2198 }
2199
2200 static void get_cpuid_vendor(CPUX86State *env, uint32_t *ebx,
2201 uint32_t *ecx, uint32_t *edx)
2202 {
2203 *ebx = env->cpuid_vendor1;
2204 *edx = env->cpuid_vendor2;
2205 *ecx = env->cpuid_vendor3;
2206 }
2207
2208 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
2209 uint32_t *eax, uint32_t *ebx,
2210 uint32_t *ecx, uint32_t *edx)
2211 {
2212 X86CPU *cpu = x86_env_get_cpu(env);
2213 CPUState *cs = CPU(cpu);
2214
2215 /* test if maximum index reached */
2216 if (index & 0x80000000) {
2217 if (index > env->cpuid_xlevel) {
2218 if (env->cpuid_xlevel2 > 0) {
2219 /* Handle the Centaur's CPUID instruction. */
2220 if (index > env->cpuid_xlevel2) {
2221 index = env->cpuid_xlevel2;
2222 } else if (index < 0xC0000000) {
2223 index = env->cpuid_xlevel;
2224 }
2225 } else {
2226 /* Intel documentation states that invalid EAX input will
2227 * return the same information as EAX=cpuid_level
2228 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
2229 */
2230 index = env->cpuid_level;
2231 }
2232 }
2233 } else {
2234 if (index > env->cpuid_level)
2235 index = env->cpuid_level;
2236 }
2237
2238 switch(index) {
2239 case 0:
2240 *eax = env->cpuid_level;
2241 get_cpuid_vendor(env, ebx, ecx, edx);
2242 break;
2243 case 1:
2244 *eax = env->cpuid_version;
2245 *ebx = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
2246 *ecx = env->features[FEAT_1_ECX];
2247 *edx = env->features[FEAT_1_EDX];
2248 if (cs->nr_cores * cs->nr_threads > 1) {
2249 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
2250 *edx |= 1 << 28; /* HTT bit */
2251 }
2252 break;
2253 case 2:
2254 /* cache info: needed for Pentium Pro compatibility */
2255 if (cpu->cache_info_passthrough) {
2256 host_cpuid(index, 0, eax, ebx, ecx, edx);
2257 break;
2258 }
2259 *eax = 1; /* Number of CPUID[EAX=2] calls required */
2260 *ebx = 0;
2261 *ecx = 0;
2262 *edx = (L1D_DESCRIPTOR << 16) | \
2263 (L1I_DESCRIPTOR << 8) | \
2264 (L2_DESCRIPTOR);
2265 break;
2266 case 4:
2267 /* cache info: needed for Core compatibility */
2268 if (cpu->cache_info_passthrough) {
2269 host_cpuid(index, count, eax, ebx, ecx, edx);
2270 *eax &= ~0xFC000000;
2271 } else {
2272 *eax = 0;
2273 switch (count) {
2274 case 0: /* L1 dcache info */
2275 *eax |= CPUID_4_TYPE_DCACHE | \
2276 CPUID_4_LEVEL(1) | \
2277 CPUID_4_SELF_INIT_LEVEL;
2278 *ebx = (L1D_LINE_SIZE - 1) | \
2279 ((L1D_PARTITIONS - 1) << 12) | \
2280 ((L1D_ASSOCIATIVITY - 1) << 22);
2281 *ecx = L1D_SETS - 1;
2282 *edx = CPUID_4_NO_INVD_SHARING;
2283 break;
2284 case 1: /* L1 icache info */
2285 *eax |= CPUID_4_TYPE_ICACHE | \
2286 CPUID_4_LEVEL(1) | \
2287 CPUID_4_SELF_INIT_LEVEL;
2288 *ebx = (L1I_LINE_SIZE - 1) | \
2289 ((L1I_PARTITIONS - 1) << 12) | \
2290 ((L1I_ASSOCIATIVITY - 1) << 22);
2291 *ecx = L1I_SETS - 1;
2292 *edx = CPUID_4_NO_INVD_SHARING;
2293 break;
2294 case 2: /* L2 cache info */
2295 *eax |= CPUID_4_TYPE_UNIFIED | \
2296 CPUID_4_LEVEL(2) | \
2297 CPUID_4_SELF_INIT_LEVEL;
2298 if (cs->nr_threads > 1) {
2299 *eax |= (cs->nr_threads - 1) << 14;
2300 }
2301 *ebx = (L2_LINE_SIZE - 1) | \
2302 ((L2_PARTITIONS - 1) << 12) | \
2303 ((L2_ASSOCIATIVITY - 1) << 22);
2304 *ecx = L2_SETS - 1;
2305 *edx = CPUID_4_NO_INVD_SHARING;
2306 break;
2307 default: /* end of info */
2308 *eax = 0;
2309 *ebx = 0;
2310 *ecx = 0;
2311 *edx = 0;
2312 break;
2313 }
2314 }
2315
2316 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
2317 if ((*eax & 31) && cs->nr_cores > 1) {
2318 *eax |= (cs->nr_cores - 1) << 26;
2319 }
2320 break;
2321 case 5:
2322 /* mwait info: needed for Core compatibility */
2323 *eax = 0; /* Smallest monitor-line size in bytes */
2324 *ebx = 0; /* Largest monitor-line size in bytes */
2325 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
2326 *edx = 0;
2327 break;
2328 case 6:
2329 /* Thermal and Power Leaf */
2330 *eax = 0;
2331 *ebx = 0;
2332 *ecx = 0;
2333 *edx = 0;
2334 break;
2335 case 7:
2336 /* Structured Extended Feature Flags Enumeration Leaf */
2337 if (count == 0) {
2338 *eax = 0; /* Maximum ECX value for sub-leaves */
2339 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
2340 *ecx = 0; /* Reserved */
2341 *edx = 0; /* Reserved */
2342 } else {
2343 *eax = 0;
2344 *ebx = 0;
2345 *ecx = 0;
2346 *edx = 0;
2347 }
2348 break;
2349 case 9:
2350 /* Direct Cache Access Information Leaf */
2351 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
2352 *ebx = 0;
2353 *ecx = 0;
2354 *edx = 0;
2355 break;
2356 case 0xA:
2357 /* Architectural Performance Monitoring Leaf */
2358 if (kvm_enabled() && cpu->enable_pmu) {
2359 KVMState *s = cs->kvm_state;
2360
2361 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
2362 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
2363 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
2364 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
2365 } else {
2366 *eax = 0;
2367 *ebx = 0;
2368 *ecx = 0;
2369 *edx = 0;
2370 }
2371 break;
2372 case 0xD: {
2373 KVMState *s = cs->kvm_state;
2374 uint64_t kvm_mask;
2375 int i;
2376
2377 /* Processor Extended State */
2378 *eax = 0;
2379 *ebx = 0;
2380 *ecx = 0;
2381 *edx = 0;
2382 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
2383 break;
2384 }
2385 kvm_mask =
2386 kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
2387 ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
2388
2389 if (count == 0) {
2390 *ecx = 0x240;
2391 for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
2392 const ExtSaveArea *esa = &ext_save_areas[i];
2393 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2394 (kvm_mask & (1 << i)) != 0) {
2395 if (i < 32) {
2396 *eax |= 1 << i;
2397 } else {
2398 *edx |= 1 << (i - 32);
2399 }
2400 *ecx = MAX(*ecx, esa->offset + esa->size);
2401 }
2402 }
2403 *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
2404 *ebx = *ecx;
2405 } else if (count == 1) {
2406 *eax = env->features[FEAT_XSAVE];
2407 } else if (count < ARRAY_SIZE(ext_save_areas)) {
2408 const ExtSaveArea *esa = &ext_save_areas[count];
2409 if ((env->features[esa->feature] & esa->bits) == esa->bits &&
2410 (kvm_mask & (1 << count)) != 0) {
2411 *eax = esa->size;
2412 *ebx = esa->offset;
2413 }
2414 }
2415 break;
2416 }
2417 case 0x80000000:
2418 *eax = env->cpuid_xlevel;
2419 *ebx = env->cpuid_vendor1;
2420 *edx = env->cpuid_vendor2;
2421 *ecx = env->cpuid_vendor3;
2422 break;
2423 case 0x80000001:
2424 *eax = env->cpuid_version;
2425 *ebx = 0;
2426 *ecx = env->features[FEAT_8000_0001_ECX];
2427 *edx = env->features[FEAT_8000_0001_EDX];
2428
2429 /* The Linux kernel checks for the CMPLegacy bit and
2430 * discards multiple thread information if it is set.
2431 * So dont set it here for Intel to make Linux guests happy.
2432 */
2433 if (cs->nr_cores * cs->nr_threads > 1) {
2434 uint32_t tebx, tecx, tedx;
2435 get_cpuid_vendor(env, &tebx, &tecx, &tedx);
2436 if (tebx != CPUID_VENDOR_INTEL_1 ||
2437 tedx != CPUID_VENDOR_INTEL_2 ||
2438 tecx != CPUID_VENDOR_INTEL_3) {
2439 *ecx |= 1 << 1; /* CmpLegacy bit */
2440 }
2441 }
2442 break;
2443 case 0x80000002:
2444 case 0x80000003:
2445 case 0x80000004:
2446 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
2447 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
2448 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
2449 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
2450 break;
2451 case 0x80000005:
2452 /* cache info (L1 cache) */
2453 if (cpu->cache_info_passthrough) {
2454 host_cpuid(index, 0, eax, ebx, ecx, edx);
2455 break;
2456 }
2457 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
2458 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
2459 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
2460 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
2461 *ecx = (L1D_SIZE_KB_AMD << 24) | (L1D_ASSOCIATIVITY_AMD << 16) | \
2462 (L1D_LINES_PER_TAG << 8) | (L1D_LINE_SIZE);
2463 *edx = (L1I_SIZE_KB_AMD << 24) | (L1I_ASSOCIATIVITY_AMD << 16) | \
2464 (L1I_LINES_PER_TAG << 8) | (L1I_LINE_SIZE);
2465 break;
2466 case 0x80000006:
2467 /* cache info (L2 cache) */
2468 if (cpu->cache_info_passthrough) {
2469 host_cpuid(index, 0, eax, ebx, ecx, edx);
2470 break;
2471 }
2472 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
2473 (L2_DTLB_2M_ENTRIES << 16) | \
2474 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
2475 (L2_ITLB_2M_ENTRIES);
2476 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
2477 (L2_DTLB_4K_ENTRIES << 16) | \
2478 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
2479 (L2_ITLB_4K_ENTRIES);
2480 *ecx = (L2_SIZE_KB_AMD << 16) | \
2481 (AMD_ENC_ASSOC(L2_ASSOCIATIVITY) << 12) | \
2482 (L2_LINES_PER_TAG << 8) | (L2_LINE_SIZE);
2483 *edx = ((L3_SIZE_KB/512) << 18) | \
2484 (AMD_ENC_ASSOC(L3_ASSOCIATIVITY) << 12) | \
2485 (L3_LINES_PER_TAG << 8) | (L3_LINE_SIZE);
2486 break;
2487 case 0x80000007:
2488 *eax = 0;
2489 *ebx = 0;
2490 *ecx = 0;
2491 *edx = env->features[FEAT_8000_0007_EDX];
2492 break;
2493 case 0x80000008:
2494 /* virtual & phys address size in low 2 bytes. */
2495 /* XXX: This value must match the one used in the MMU code. */
2496 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
2497 /* 64 bit processor */
2498 /* XXX: The physical address space is limited to 42 bits in exec.c. */
2499 *eax = 0x00003028; /* 48 bits virtual, 40 bits physical */
2500 } else {
2501 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
2502 *eax = 0x00000024; /* 36 bits physical */
2503 } else {
2504 *eax = 0x00000020; /* 32 bits physical */
2505 }
2506 }
2507 *ebx = 0;
2508 *ecx = 0;
2509 *edx = 0;
2510 if (cs->nr_cores * cs->nr_threads > 1) {
2511 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
2512 }
2513 break;
2514 case 0x8000000A:
2515 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
2516 *eax = 0x00000001; /* SVM Revision */
2517 *ebx = 0x00000010; /* nr of ASIDs */
2518 *ecx = 0;
2519 *edx = env->features[FEAT_SVM]; /* optional features */
2520 } else {
2521 *eax = 0;
2522 *ebx = 0;
2523 *ecx = 0;
2524 *edx = 0;
2525 }
2526 break;
2527 case 0xC0000000:
2528 *eax = env->cpuid_xlevel2;
2529 *ebx = 0;
2530 *ecx = 0;
2531 *edx = 0;
2532 break;
2533 case 0xC0000001:
2534 /* Support for VIA CPU's CPUID instruction */
2535 *eax = env->cpuid_version;
2536 *ebx = 0;
2537 *ecx = 0;
2538 *edx = env->features[FEAT_C000_0001_EDX];
2539 break;
2540 case 0xC0000002:
2541 case 0xC0000003:
2542 case 0xC0000004:
2543 /* Reserved for the future, and now filled with zero */
2544 *eax = 0;
2545 *ebx = 0;
2546 *ecx = 0;
2547 *edx = 0;
2548 break;
2549 default:
2550 /* reserved values: zero */
2551 *eax = 0;
2552 *ebx = 0;
2553 *ecx = 0;
2554 *edx = 0;
2555 break;
2556 }
2557 }
2558
/* CPUClass::reset(): put the CPU into its architectural power-on state.
 *
 * Everything before the cpuid_level field in CPUX86State is zeroed;
 * cpuid_* and feature words survive reset by design.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    int i;

    xcc->parent_reset(s);

    /* Zero only the dynamic state; fields from cpuid_level onward are
     * configuration that must persist across reset. */
    memset(env, 0, offsetof(CPUX86State, cpuid_level));

    tlb_flush(s, 1);

    env->old_exception = -1;

    /* init to reset state */

#ifdef CONFIG_SOFTMMU
    env->hflags |= HF_SOFTMMU_MASK;
#endif
    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 reset value: ET/NW/CD set, paging and protection off */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segments; CS base 0xffff0000 so that CS:IP = f000:fff0
     * points at the reset vector 0xfffffff0. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    env->xstate_bv = XSTATE_FP | XSTATE_SSE;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    env->xcr0 = 1;

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    if (s->cpu_index == 0) {
        apic_designate_bsp(cpu->apic_state);
    }

    /* Application processors start halted, waiting for INIT/SIPI */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
#endif
}
2660
2661 #ifndef CONFIG_USER_ONLY
/* Return true if @cpu is the bootstrap processor, i.e. the BSP bit is
 * set in its APIC base MSR. */
bool cpu_is_bsp(X86CPU *cpu)
{
    return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
}
2666
2667 /* TODO: remove me, when reset over QOM tree is implemented */
2668 static void x86_cpu_machine_reset_cb(void *opaque)
2669 {
2670 X86CPU *cpu = opaque;
2671 cpu_reset(CPU(cpu));
2672 }
2673 #endif
2674
2675 static void mce_init(X86CPU *cpu)
2676 {
2677 CPUX86State *cenv = &cpu->env;
2678 unsigned int bank;
2679
2680 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
2681 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
2682 (CPUID_MCE | CPUID_MCA)) {
2683 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF;
2684 cenv->mcg_ctl = ~(uint64_t)0;
2685 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
2686 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
2687 }
2688 }
2689 }
2690
2691 #ifndef CONFIG_USER_ONLY
/* Create (but do not realize) the APIC device for @cpu.
 *
 * The APIC flavor depends on the accelerator: in-kernel irqchip uses
 * "kvm-apic", Xen uses "xen-apic", otherwise the emulated "apic".
 * The device is attached as a QOM child and given the CPU's APIC ID.
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    DeviceState *dev = DEVICE(cpu);
    APICCommonState *apic;
    const char *apic_type = "apic";

    if (kvm_irqchip_in_kernel()) {
        apic_type = "kvm-apic";
    } else if (xen_enabled()) {
        apic_type = "xen-apic";
    }

    cpu->apic_state = qdev_try_create(qdev_get_parent_bus(dev), apic_type);
    if (cpu->apic_state == NULL) {
        error_setg(errp, "APIC device '%s' could not be created", apic_type);
        return;
    }

    object_property_add_child(OBJECT(cpu), "apic",
                              OBJECT(cpu->apic_state), NULL);
    qdev_prop_set_uint8(cpu->apic_state, "id", env->cpuid_apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
}
2718
2719 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
2720 {
2721 if (cpu->apic_state == NULL) {
2722 return;
2723 }
2724
2725 if (qdev_init(cpu->apic_state)) {
2726 error_setg(errp, "APIC device '%s' could not be initialized",
2727 object_get_typename(OBJECT(cpu->apic_state)));
2728 return;
2729 }
2730 }
2731 #else
/* No APIC exists in user-mode emulation: nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
2735 #endif
2736
2737
/* Vendor-check helpers: compare the packed CPUID[0] vendor words in
 * CPUX86State against the Intel/AMD vendor-string constants. */
#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
                           (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
                           (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
                         (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
                         (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize: final CPU setup before the vCPU starts.
 *
 * Applies feature fixups, filters features against accelerator support,
 * creates/realizes the APIC (sysemu), initializes MCE state, starts the
 * vCPU thread, and resets the CPU.  Errors are propagated via @errp.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    /* Leaf 7 features require cpuid_level >= 7 to be visible */
    if (env->features[FEAT_7_0_EBX] && env->cpuid_level < 7) {
        env->cpuid_level = 7;
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }


    if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
        error_setg(&local_err,
                   kvm_enabled() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
        goto out;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed when the feature bit is set or with SMP */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);
    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);
out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
2816
/* Enables contiguous-apic-ID mode, for compatibility */
static bool compat_apic_id_mode;

/* Called by old machine types to keep APIC ID == CPU index. */
void enable_compat_apic_id_mode(void)
{
    compat_apic_id_mode = true;
}
2824
2825 /* Calculates initial APIC ID for a specific CPU index
2826 *
2827 * Currently we need to be able to calculate the APIC ID from the CPU index
2828 * alone (without requiring a CPU object), as the QEMU<->Seabios interfaces have
2829 * no concept of "CPU index", and the NUMA tables on fw_cfg need the APIC ID of
2830 * all CPUs up to max_cpus.
2831 */
2832 uint32_t x86_cpu_apic_id_from_index(unsigned int cpu_index)
2833 {
2834 uint32_t correct_id;
2835 static bool warned;
2836
2837 correct_id = x86_apicid_from_cpu_idx(smp_cores, smp_threads, cpu_index);
2838 if (compat_apic_id_mode) {
2839 if (cpu_index != correct_id && !warned) {
2840 error_report("APIC IDs set in compatibility mode, "
2841 "CPU topology won't match the configuration");
2842 warned = true;
2843 }
2844 return cpu_index;
2845 } else {
2846 return correct_id;
2847 }
2848 }
2849
/* QOM instance_init: register per-CPU properties and load the model
 * definition selected by the concrete subclass.
 *
 * Runs before realize; property registration order is part of the
 * observable QOM interface.
 */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    static int inited;   /* one-time TCG table init across all CPUs */

    cs->env_ptr = env;
    cpu_exec_init(env);

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add(obj, "level", "int",
                        x86_cpuid_get_level,
                        x86_cpuid_set_level, NULL, NULL, NULL);
    object_property_add(obj, "xlevel", "int",
                        x86_cpuid_get_xlevel,
                        x86_cpuid_set_xlevel, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    object_property_add(obj, "apic-id", "int",
                        x86_cpuid_get_apic_id,
                        x86_cpuid_set_apic_id, NULL, NULL, NULL);
    /* Read-only views of the feature words (opaque points at the arrays) */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
    env->cpuid_apic_id = x86_cpu_apic_id_from_index(cs->cpu_index);

    x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);

    /* init various static tables used in TCG mode */
    if (tcg_enabled() && !inited) {
        inited = 1;
        optimize_flags_init();
    }
}
2906
2907 static int64_t x86_cpu_get_arch_id(CPUState *cs)
2908 {
2909 X86CPU *cpu = X86_CPU(cs);
2910 CPUX86State *env = &cpu->env;
2911
2912 return env->cpuid_apic_id;
2913 }
2914
2915 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
2916 {
2917 X86CPU *cpu = X86_CPU(cs);
2918
2919 return cpu->env.cr[0] & CR0_PG_MASK;
2920 }
2921
2922 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
2923 {
2924 X86CPU *cpu = X86_CPU(cs);
2925
2926 cpu->env.eip = value;
2927 }
2928
2929 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
2930 {
2931 X86CPU *cpu = X86_CPU(cs);
2932
2933 cpu->env.eip = tb->pc - tb->cs_base;
2934 }
2935
/* CPUClass::has_work: decide whether the CPU has a pending event to run.
 *
 * Hard interrupts count only when EFLAGS.IF is set; NMI, INIT, SIPI and
 * MCE are always delivered.  As a side effect, a pending APIC poll
 * request is serviced here.
 */
static bool x86_cpu_has_work(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    /* Service a deferred APIC poll before checking interrupt lines */
    if (cs->interrupt_request & CPU_INTERRUPT_POLL) {
        apic_poll_irq(cpu->apic_state);
        cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL);
    }
#endif

    return ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
            (env->eflags & IF_MASK)) ||
           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
                                     CPU_INTERRUPT_INIT |
                                     CPU_INTERRUPT_SIPI |
                                     CPU_INTERRUPT_MCE));
}
2955
/* qdev properties common to every X86CPU subclass */
static Property x86_cpu_properties[] = {
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    /* hv-spinlocks takes a retry count, so it uses a custom property type */
    { .name  = "hv-spinlocks", .info  = &qdev_prop_spinlocks },
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, false),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_END_OF_LIST()
};
2967
/* class_init for the abstract TYPE_X86_CPU base class: wire up the
 * CPUClass/DeviceClass hooks shared by all x86 CPU models. */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain realize/reset so our implementations can call the parents' */
    xcc->parent_realize = dc->realize;
    dc->realize = x86_cpu_realizefn;
    dc->bus_type = TYPE_ICC_BUS;
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
    cc->dump_state = x86_cpu_dump_state;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_num_core_regs = CPU_NB_REGS * 2 + 25;
#ifndef CONFIG_USER_ONLY
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
}
3013
/* Abstract base type; concrete models are registered as subclasses by
 * x86_register_cpudef_type(). */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
3023
3024 static void x86_cpu_register_types(void)
3025 {
3026 int i;
3027
3028 type_register_static(&x86_cpu_type_info);
3029 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
3030 x86_register_cpudef_type(&builtin_x86_defs[i]);
3031 }
3032 #ifdef CONFIG_KVM
3033 type_register_static(&host_x86_cpu_type_info);
3034 #endif
3035 }
3036
3037 type_init(x86_cpu_register_types)