i386: Initialize cache information for EPYC family processors
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22
23 #include "cpu.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
28 #include "kvm_i386.h"
29 #include "sev_i386.h"
30
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "qemu/config-file.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-visit-misc.h"
36 #include "qapi/qapi-visit-run-state.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qerror.h"
39 #include "qapi/visitor.h"
40 #include "qom/qom-qobject.h"
41 #include "sysemu/arch_init.h"
42
43 #if defined(CONFIG_KVM)
44 #include <linux/kvm_para.h>
45 #endif
46
47 #include "sysemu/sysemu.h"
48 #include "hw/qdev-properties.h"
49 #include "hw/i386/topology.h"
50 #ifndef CONFIG_USER_ONLY
51 #include "exec/address-spaces.h"
52 #include "hw/hw.h"
53 #include "hw/xen/xen.h"
54 #include "hw/i386/apic_internal.h"
55 #endif
56
57 #include "disas/capstone.h"
58
59 /* Helpers for building CPUID[2] descriptors: */
60
61 struct CPUID2CacheDescriptorInfo {
62 enum CacheType type;
63 int level;
64 int size;
65 int line_size;
66 int associativity;
67 };
68
69 #define KiB 1024
70 #define MiB (1024 * 1024)
71
72 /*
73 * Known CPUID 2 cache descriptors.
74 * From Intel SDM Volume 2A, CPUID instruction
75 */
76 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
77 [0x06] = { .level = 1, .type = ICACHE, .size = 8 * KiB,
78 .associativity = 4, .line_size = 32, },
79 [0x08] = { .level = 1, .type = ICACHE, .size = 16 * KiB,
80 .associativity = 4, .line_size = 32, },
81 [0x09] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
82 .associativity = 4, .line_size = 64, },
83 [0x0A] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
84 .associativity = 2, .line_size = 32, },
85 [0x0C] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
86 .associativity = 4, .line_size = 32, },
87 [0x0D] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
88 .associativity = 4, .line_size = 64, },
89 [0x0E] = { .level = 1, .type = DCACHE, .size = 24 * KiB,
90 .associativity = 6, .line_size = 64, },
91 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
92 .associativity = 2, .line_size = 64, },
93 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
94 .associativity = 8, .line_size = 64, },
95 /* lines per sector is not supported by cpuid2_cache_descriptor(),
96 * so descriptors 0x22, 0x23 are not included
97 */
98 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
99 .associativity = 16, .line_size = 64, },
100 /* lines per sector is not supported by cpuid2_cache_descriptor(),
101 * so descriptors 0x25, 0x20 are not included
102 */
103 [0x2C] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
104 .associativity = 8, .line_size = 64, },
105 [0x30] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
106 .associativity = 8, .line_size = 64, },
107 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
108 .associativity = 4, .line_size = 32, },
109 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
112 .associativity = 4, .line_size = 32, },
113 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
114 .associativity = 4, .line_size = 32, },
115 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
116 .associativity = 4, .line_size = 32, },
117 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
118 .associativity = 4, .line_size = 64, },
119 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
120 .associativity = 8, .line_size = 64, },
121 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
122 .associativity = 12, .line_size = 64, },
123 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
124 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
125 .associativity = 12, .line_size = 64, },
126 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
127 .associativity = 16, .line_size = 64, },
128 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
129 .associativity = 12, .line_size = 64, },
130 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
131 .associativity = 16, .line_size = 64, },
132 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
133 .associativity = 24, .line_size = 64, },
134 [0x60] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
135 .associativity = 8, .line_size = 64, },
136 [0x66] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
137 .associativity = 4, .line_size = 64, },
138 [0x67] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x68] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
141 .associativity = 4, .line_size = 64, },
142 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
143 .associativity = 4, .line_size = 64, },
144 /* lines per sector is not supported by cpuid2_cache_descriptor(),
145 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
146 */
147 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
148 .associativity = 8, .line_size = 64, },
149 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
150 .associativity = 2, .line_size = 64, },
151 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
152 .associativity = 8, .line_size = 64, },
153 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
154 .associativity = 8, .line_size = 32, },
155 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
156 .associativity = 8, .line_size = 32, },
157 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
158 .associativity = 8, .line_size = 32, },
159 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
160 .associativity = 8, .line_size = 32, },
161 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
162 .associativity = 4, .line_size = 64, },
163 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
164 .associativity = 8, .line_size = 64, },
165 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
166 .associativity = 4, .line_size = 64, },
167 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
170 .associativity = 4, .line_size = 64, },
171 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
172 .associativity = 8, .line_size = 64, },
173 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
176 .associativity = 8, .line_size = 64, },
177 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
178 .associativity = 12, .line_size = 64, },
179 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
182 .associativity = 12, .line_size = 64, },
183 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
184 .associativity = 16, .line_size = 64, },
185 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
188 .associativity = 16, .line_size = 64, },
189 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
190 .associativity = 24, .line_size = 64, },
191 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
192 .associativity = 24, .line_size = 64, },
193 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
194 .associativity = 24, .line_size = 64, },
195 };
196
197 /*
198 * "CPUID leaf 2 does not report cache descriptor information,
199 * use CPUID leaf 4 to query cache parameters"
200 */
201 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
202
203 /*
204 * Return a CPUID 2 cache descriptor for a given cache.
205 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
206 */
207 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
208 {
209 int i;
210
211 assert(cache->size > 0);
212 assert(cache->level > 0);
213 assert(cache->line_size > 0);
214 assert(cache->associativity > 0);
215 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
216 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
217 if (d->level == cache->level && d->type == cache->type &&
218 d->size == cache->size && d->line_size == cache->line_size &&
219 d->associativity == cache->associativity) {
220 return i;
221 }
222 }
223
224 return CACHE_DESCRIPTOR_UNAVAILABLE;
225 }
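
/*
 * Worked example: the hard-coded l2_cache_cpuid2 below (unified, level 2,
 * 2 MiB, 8-way, 64-byte lines) matches descriptor 0x7D in the table above,
 * so cpuid2_cache_descriptor(&l2_cache_cpuid2) returns 0x7D.  A cache whose
 * geometry has no entry in the table (e.g. a hypothetical 48 KiB L1) would
 * instead get CACHE_DESCRIPTOR_UNAVAILABLE (0xFF), telling guests to query
 * leaf 4 for the real parameters.
 */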
226
227 /* CPUID Leaf 4 constants: */
228
229 /* EAX: */
230 #define CACHE_TYPE_D 1
231 #define CACHE_TYPE_I 2
232 #define CACHE_TYPE_UNIFIED 3
233
234 #define CACHE_LEVEL(l) (l << 5)
235
236 #define CACHE_SELF_INIT_LEVEL (1 << 8)
237
238 /* EDX: */
239 #define CACHE_NO_INVD_SHARING (1 << 0)
240 #define CACHE_INCLUSIVE (1 << 1)
241 #define CACHE_COMPLEX_IDX (1 << 2)
242
243 /* Encode CacheType for CPUID[4].EAX */
244 #define CACHE_TYPE(t) (((t) == DCACHE) ? CACHE_TYPE_D : \
245 ((t) == ICACHE) ? CACHE_TYPE_I : \
246 ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
247 0 /* Invalid value */)
248
249
250 /* Encode cache info for CPUID[4] */
251 static void encode_cache_cpuid4(CPUCacheInfo *cache,
252 int num_apic_ids, int num_cores,
253 uint32_t *eax, uint32_t *ebx,
254 uint32_t *ecx, uint32_t *edx)
255 {
256 assert(cache->size == cache->line_size * cache->associativity *
257 cache->partitions * cache->sets);
258
259 assert(num_apic_ids > 0);
260 *eax = CACHE_TYPE(cache->type) |
261 CACHE_LEVEL(cache->level) |
262 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
263 ((num_cores - 1) << 26) |
264 ((num_apic_ids - 1) << 14);
265
266 assert(cache->line_size > 0);
267 assert(cache->partitions > 0);
268 assert(cache->associativity > 0);
269 /* We don't implement fully-associative caches */
270 assert(cache->associativity < cache->sets);
271 *ebx = (cache->line_size - 1) |
272 ((cache->partitions - 1) << 12) |
273 ((cache->associativity - 1) << 22);
274
275 assert(cache->sets > 0);
276 *ecx = cache->sets - 1;
277
278 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
279 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
280 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
281 }
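
/*
 * Worked example (assuming a cache private to one logical processor, i.e.
 * num_apic_ids = 1 and num_cores = 1): encoding the hard-coded l1d_cache
 * below (DCACHE, level 1, 32 KiB, 8-way, 64-byte lines, 64 sets,
 * 1 partition, self-initializing, no_invd_sharing) gives
 *
 *   eax = 0x00000121   (type = data | level 1 << 5 | self-init)
 *   ebx = 0x01C0003F   ((64 - 1) | (1 - 1) << 12 | (8 - 1) << 22)
 *   ecx = 0x0000003F   (64 sets - 1)
 *   edx = 0x00000001   (CACHE_NO_INVD_SHARING)
 */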
282
283 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
284 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
285 {
286 assert(cache->size % 1024 == 0);
287 assert(cache->lines_per_tag > 0);
288 assert(cache->associativity > 0);
289 assert(cache->line_size > 0);
290 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
291 (cache->lines_per_tag << 8) | (cache->line_size);
292 }
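
/*
 * Worked example: the hard-coded l1d_cache_amd below (64 KiB, 2-way,
 * 1 line per tag, 64-byte lines) encodes as
 *
 *   (64 << 24) | (2 << 16) | (1 << 8) | 64 == 0x40020140
 *
 * i.e. size in KiB, associativity, lines per tag and line size packed into
 * the single register that CPUID leaf 0x80000005 expects.
 */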
293
294 #define ASSOC_FULL 0xFF
295
296 /* AMD associativity encoding used on CPUID Leaf 0x80000006: */
297 #define AMD_ENC_ASSOC(a) (a <= 1 ? a : \
298 a == 2 ? 0x2 : \
299 a == 4 ? 0x4 : \
300 a == 8 ? 0x6 : \
301 a == 16 ? 0x8 : \
302 a == 32 ? 0xA : \
303 a == 48 ? 0xB : \
304 a == 64 ? 0xC : \
305 a == 96 ? 0xD : \
306 a == 128 ? 0xE : \
307 a == ASSOC_FULL ? 0xF : \
308 0 /* invalid value */)
309
310 /*
311 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
312 * @l3 can be NULL.
313 */
314 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
315 CPUCacheInfo *l3,
316 uint32_t *ecx, uint32_t *edx)
317 {
318 assert(l2->size % 1024 == 0);
319 assert(l2->associativity > 0);
320 assert(l2->lines_per_tag > 0);
321 assert(l2->line_size > 0);
322 *ecx = ((l2->size / 1024) << 16) |
323 (AMD_ENC_ASSOC(l2->associativity) << 12) |
324 (l2->lines_per_tag << 8) | (l2->line_size);
325
326 if (l3) {
327 assert(l3->size % (512 * 1024) == 0);
328 assert(l3->associativity > 0);
329 assert(l3->lines_per_tag > 0);
330 assert(l3->line_size > 0);
331 *edx = ((l3->size / (512 * 1024)) << 18) |
332 (AMD_ENC_ASSOC(l3->associativity) << 12) |
333 (l3->lines_per_tag << 8) | (l3->line_size);
334 } else {
335 *edx = 0;
336 }
337 }
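
/*
 * Worked example: for the hard-coded l2_cache_amd (512 KiB, 16-way) and
 * l3_cache (16 MiB, 16-way) below, both with 1 line per tag and 64-byte
 * lines, this yields
 *
 *   ecx = (512 << 16) | (0x8 << 12) | (1 << 8) | 64            == 0x02008140
 *   edx = ((16 MiB / 512 KiB) << 18) | (0x8 << 12) | (1 << 8) | 64
 *       = (32 << 18) | (0x8 << 12) | (1 << 8) | 64             == 0x00808140
 *
 * where 0x8 is AMD_ENC_ASSOC(16).  Which L2/L3 pair is actually passed in
 * depends on the caller.
 */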
338
339 /* Definitions of the hardcoded cache entries we expose: */
340
341 /* L1 data cache: */
342 static CPUCacheInfo l1d_cache = {
343 .type = DCACHE,
344 .level = 1,
345 .size = 32 * KiB,
346 .self_init = 1,
347 .line_size = 64,
348 .associativity = 8,
349 .sets = 64,
350 .partitions = 1,
351 .no_invd_sharing = true,
352 };
353
354 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
355 static CPUCacheInfo l1d_cache_amd = {
356 .type = DCACHE,
357 .level = 1,
358 .size = 64 * KiB,
359 .self_init = 1,
360 .line_size = 64,
361 .associativity = 2,
362 .sets = 512,
363 .partitions = 1,
364 .lines_per_tag = 1,
365 .no_invd_sharing = true,
366 };
367
368 /* L1 instruction cache: */
369 static CPUCacheInfo l1i_cache = {
370 .type = ICACHE,
371 .level = 1,
372 .size = 32 * KiB,
373 .self_init = 1,
374 .line_size = 64,
375 .associativity = 8,
376 .sets = 64,
377 .partitions = 1,
378 .no_invd_sharing = true,
379 };
380
381 /*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
382 static CPUCacheInfo l1i_cache_amd = {
383 .type = ICACHE,
384 .level = 1,
385 .size = 64 * KiB,
386 .self_init = 1,
387 .line_size = 64,
388 .associativity = 2,
389 .sets = 512,
390 .partitions = 1,
391 .lines_per_tag = 1,
392 .no_invd_sharing = true,
393 };
394
395 /* Level 2 unified cache: */
396 static CPUCacheInfo l2_cache = {
397 .type = UNIFIED_CACHE,
398 .level = 2,
399 .size = 4 * MiB,
400 .self_init = 1,
401 .line_size = 64,
402 .associativity = 16,
403 .sets = 4096,
404 .partitions = 1,
405 .no_invd_sharing = true,
406 };
407
408 /*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
409 static CPUCacheInfo l2_cache_cpuid2 = {
410 .type = UNIFIED_CACHE,
411 .level = 2,
412 .size = 2 * MiB,
413 .line_size = 64,
414 .associativity = 8,
415 };
416
417
418 /*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
419 static CPUCacheInfo l2_cache_amd = {
420 .type = UNIFIED_CACHE,
421 .level = 2,
422 .size = 512 * KiB,
423 .line_size = 64,
424 .lines_per_tag = 1,
425 .associativity = 16,
426 .sets = 512,
427 .partitions = 1,
428 };
429
430 /* Level 3 unified cache: */
431 static CPUCacheInfo l3_cache = {
432 .type = UNIFIED_CACHE,
433 .level = 3,
434 .size = 16 * MiB,
435 .line_size = 64,
436 .associativity = 16,
437 .sets = 16384,
438 .partitions = 1,
439 .lines_per_tag = 1,
440 .self_init = true,
441 .inclusive = true,
442 .complex_indexing = true,
443 };
444
445 /* TLB definitions: */
446
447 #define L1_DTLB_2M_ASSOC 1
448 #define L1_DTLB_2M_ENTRIES 255
449 #define L1_DTLB_4K_ASSOC 1
450 #define L1_DTLB_4K_ENTRIES 255
451
452 #define L1_ITLB_2M_ASSOC 1
453 #define L1_ITLB_2M_ENTRIES 255
454 #define L1_ITLB_4K_ASSOC 1
455 #define L1_ITLB_4K_ENTRIES 255
456
457 #define L2_DTLB_2M_ASSOC 0 /* disabled */
458 #define L2_DTLB_2M_ENTRIES 0 /* disabled */
459 #define L2_DTLB_4K_ASSOC 4
460 #define L2_DTLB_4K_ENTRIES 512
461
462 #define L2_ITLB_2M_ASSOC 0 /* disabled */
463 #define L2_ITLB_2M_ENTRIES 0 /* disabled */
464 #define L2_ITLB_4K_ASSOC 4
465 #define L2_ITLB_4K_ENTRIES 512
466
467 /* CPUID Leaf 0x14 constants: */
468 #define INTEL_PT_MAX_SUBLEAF 0x1
469 /*
470 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
471 * MSR can be accessed;
472 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
473 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
474 * of Intel PT MSRs across warm reset;
475 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
476 */
477 #define INTEL_PT_MINIMAL_EBX 0xf
478 /*
479 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
480 * IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
481 * accessed;
482 * bit[01]: ToPA tables can hold any number of output entries, up to the
483 * maximum allowed by the MaskOrTableOffset field of
484 * IA32_RTIT_OUTPUT_MASK_PTRS;
485 * bit[02]: Support Single-Range Output scheme;
486 */
487 #define INTEL_PT_MINIMAL_ECX 0x7
488 /* generated packets which contain IP payloads have LIP values */
489 #define INTEL_PT_IP_LIP (1 << 31)
490 #define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
491 #define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
492 #define INTEL_PT_MTC_BITMAP (0x0249 << 16) /* Support ART(0,3,6,9) */
493 #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */
494 #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
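
/*
 * The three bitmap constants above are simply sets of supported encodings.
 * For instance, INTEL_PT_MTC_BITMAP uses 0x0249 = binary 10 0100 1001,
 * i.e. bits 0, 3, 6 and 9, matching the "ART(0,3,6,9)" MTC periods noted in
 * its comment; INTEL_PT_PSB_BITMAP (0x003f) and INTEL_PT_CYCLE_BITMAP
 * (0x1fff) likewise set one bit per supported PSB frequency and cycle
 * threshold value.
 */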
495
496 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
497 uint32_t vendor2, uint32_t vendor3)
498 {
499 int i;
500 for (i = 0; i < 4; i++) {
501 dst[i] = vendor1 >> (8 * i);
502 dst[i + 4] = vendor2 >> (8 * i);
503 dst[i + 8] = vendor3 >> (8 * i);
504 }
505 dst[CPUID_VENDOR_SZ] = '\0';
506 }
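
/*
 * Example: for the well-known "GenuineIntel" vendor registers
 * (ebx = 0x756E6547, edx = 0x49656E69, ecx = 0x6C65746E), calling
 * x86_cpu_vendor_words2str(dst, ebx, edx, ecx) stores the bytes of each
 * word least-significant first and produces the string "GenuineIntel".
 */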
507
508 #define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
509 #define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
510 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
511 #define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
512 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
513 CPUID_PSE36 | CPUID_FXSR)
514 #define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
515 #define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
516 CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
517 CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
518 CPUID_PAE | CPUID_SEP | CPUID_APIC)
519
520 #define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
521 CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
522 CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
523 CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
524 CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
525 /* partly implemented:
526 CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
527 /* missing:
528 CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
529 #define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
530 CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
531 CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
532 CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
533 CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
534 /* missing:
535 CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
536 CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
537 CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
538 CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
539 CPUID_EXT_F16C, CPUID_EXT_RDRAND */
540
541 #ifdef TARGET_X86_64
542 #define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
543 #else
544 #define TCG_EXT2_X86_64_FEATURES 0
545 #endif
546
547 #define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
548 CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
549 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
550 TCG_EXT2_X86_64_FEATURES)
551 #define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
552 CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
553 #define TCG_EXT4_FEATURES 0
554 #define TCG_SVM_FEATURES 0
555 #define TCG_KVM_FEATURES 0
556 #define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
557 CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
558 CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
559 CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
560 CPUID_7_0_EBX_ERMS)
561 /* missing:
562 CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
563 CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
564 CPUID_7_0_EBX_RDSEED */
565 #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
566 CPUID_7_0_ECX_LA57)
567 #define TCG_7_0_EDX_FEATURES 0
568 #define TCG_APM_FEATURES 0
569 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
570 #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
571 /* missing:
572 CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
573
574 typedef struct FeatureWordInfo {
575 /* feature flag names are taken from "Intel Processor Identification and
576 * the CPUID Instruction" and AMD's "CPUID Specification".
577 * In cases of disagreement between feature naming conventions,
578 * aliases may be added.
579 */
580 const char *feat_names[32];
581 uint32_t cpuid_eax; /* Input EAX for CPUID */
582 bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
583 uint32_t cpuid_ecx; /* Input ECX value for CPUID */
584 int cpuid_reg; /* output register (R_* constant) */
585 uint32_t tcg_features; /* Feature flags supported by TCG */
586 uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
587 uint32_t migratable_flags; /* Feature flags known to be migratable */
588 /* Features that shouldn't be auto-enabled by "-cpu host" */
589 uint32_t no_autoenable_flags;
590 } FeatureWordInfo;
591
592 static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
593 [FEAT_1_EDX] = {
594 .feat_names = {
595 "fpu", "vme", "de", "pse",
596 "tsc", "msr", "pae", "mce",
597 "cx8", "apic", NULL, "sep",
598 "mtrr", "pge", "mca", "cmov",
599 "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
600 NULL, "ds" /* Intel dts */, "acpi", "mmx",
601 "fxsr", "sse", "sse2", "ss",
602 "ht" /* Intel htt */, "tm", "ia64", "pbe",
603 },
604 .cpuid_eax = 1, .cpuid_reg = R_EDX,
605 .tcg_features = TCG_FEATURES,
606 },
607 [FEAT_1_ECX] = {
608 .feat_names = {
609 "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
610 "ds-cpl", "vmx", "smx", "est",
611 "tm2", "ssse3", "cid", NULL,
612 "fma", "cx16", "xtpr", "pdcm",
613 NULL, "pcid", "dca", "sse4.1",
614 "sse4.2", "x2apic", "movbe", "popcnt",
615 "tsc-deadline", "aes", "xsave", "osxsave",
616 "avx", "f16c", "rdrand", "hypervisor",
617 },
618 .cpuid_eax = 1, .cpuid_reg = R_ECX,
619 .tcg_features = TCG_EXT_FEATURES,
620 },
621 /* Feature flags that are already named in FEAT_1_EDX's feat_names[] but
622 * are also reported in CPUID[8000_0001].EDX on AMD CPUs are left unnamed
623 * (NULL) in feat_names below. Their values are copied automatically
624 * to features[FEAT_8000_0001_EDX] if and only if the CPU vendor is AMD.
625 */
626 [FEAT_8000_0001_EDX] = {
627 .feat_names = {
628 NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
629 NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
630 NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
631 NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
632 NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
633 "nx", NULL, "mmxext", NULL /* mmx */,
634 NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
635 NULL, "lm", "3dnowext", "3dnow",
636 },
637 .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
638 .tcg_features = TCG_EXT2_FEATURES,
639 },
640 [FEAT_8000_0001_ECX] = {
641 .feat_names = {
642 "lahf-lm", "cmp-legacy", "svm", "extapic",
643 "cr8legacy", "abm", "sse4a", "misalignsse",
644 "3dnowprefetch", "osvw", "ibs", "xop",
645 "skinit", "wdt", NULL, "lwp",
646 "fma4", "tce", NULL, "nodeid-msr",
647 NULL, "tbm", "topoext", "perfctr-core",
648 "perfctr-nb", NULL, NULL, NULL,
649 NULL, NULL, NULL, NULL,
650 },
651 .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
652 .tcg_features = TCG_EXT3_FEATURES,
653 },
654 [FEAT_C000_0001_EDX] = {
655 .feat_names = {
656 NULL, NULL, "xstore", "xstore-en",
657 NULL, NULL, "xcrypt", "xcrypt-en",
658 "ace2", "ace2-en", "phe", "phe-en",
659 "pmm", "pmm-en", NULL, NULL,
660 NULL, NULL, NULL, NULL,
661 NULL, NULL, NULL, NULL,
662 NULL, NULL, NULL, NULL,
663 NULL, NULL, NULL, NULL,
664 },
665 .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
666 .tcg_features = TCG_EXT4_FEATURES,
667 },
668 [FEAT_KVM] = {
669 .feat_names = {
670 "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
671 "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
672 NULL, "kvm-pv-tlb-flush", NULL, NULL,
673 NULL, NULL, NULL, NULL,
674 NULL, NULL, NULL, NULL,
675 NULL, NULL, NULL, NULL,
676 "kvmclock-stable-bit", NULL, NULL, NULL,
677 NULL, NULL, NULL, NULL,
678 },
679 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
680 .tcg_features = TCG_KVM_FEATURES,
681 },
682 [FEAT_KVM_HINTS] = {
683 .feat_names = {
684 "kvm-hint-dedicated", NULL, NULL, NULL,
685 NULL, NULL, NULL, NULL,
686 NULL, NULL, NULL, NULL,
687 NULL, NULL, NULL, NULL,
688 NULL, NULL, NULL, NULL,
689 NULL, NULL, NULL, NULL,
690 NULL, NULL, NULL, NULL,
691 NULL, NULL, NULL, NULL,
692 },
693 .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
694 .tcg_features = TCG_KVM_FEATURES,
695 /*
696 * KVM hints aren't auto-enabled by -cpu host; they need to be
697 * explicitly enabled on the command line.
698 */
699 .no_autoenable_flags = ~0U,
700 },
701 [FEAT_HYPERV_EAX] = {
702 .feat_names = {
703 NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
704 NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
705 NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
706 NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
707 NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
708 NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
709 NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
710 NULL, NULL,
711 NULL, NULL, NULL, NULL,
712 NULL, NULL, NULL, NULL,
713 NULL, NULL, NULL, NULL,
714 NULL, NULL, NULL, NULL,
715 },
716 .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
717 },
718 [FEAT_HYPERV_EBX] = {
719 .feat_names = {
720 NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
721 NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
722 NULL /* hv_post_messages */, NULL /* hv_signal_events */,
723 NULL /* hv_create_port */, NULL /* hv_connect_port */,
724 NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
725 NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
726 NULL, NULL,
727 NULL, NULL, NULL, NULL,
728 NULL, NULL, NULL, NULL,
729 NULL, NULL, NULL, NULL,
730 NULL, NULL, NULL, NULL,
731 },
732 .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
733 },
734 [FEAT_HYPERV_EDX] = {
735 .feat_names = {
736 NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
737 NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
738 NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
739 NULL, NULL,
740 NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
741 NULL, NULL, NULL, NULL,
742 NULL, NULL, NULL, NULL,
743 NULL, NULL, NULL, NULL,
744 NULL, NULL, NULL, NULL,
745 NULL, NULL, NULL, NULL,
746 },
747 .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
748 },
749 [FEAT_SVM] = {
750 .feat_names = {
751 "npt", "lbrv", "svm-lock", "nrip-save",
752 "tsc-scale", "vmcb-clean", "flushbyasid", "decodeassists",
753 NULL, NULL, "pause-filter", NULL,
754 "pfthreshold", NULL, NULL, NULL,
755 NULL, NULL, NULL, NULL,
756 NULL, NULL, NULL, NULL,
757 NULL, NULL, NULL, NULL,
758 NULL, NULL, NULL, NULL,
759 },
760 .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
761 .tcg_features = TCG_SVM_FEATURES,
762 },
763 [FEAT_7_0_EBX] = {
764 .feat_names = {
765 "fsgsbase", "tsc-adjust", NULL, "bmi1",
766 "hle", "avx2", NULL, "smep",
767 "bmi2", "erms", "invpcid", "rtm",
768 NULL, NULL, "mpx", NULL,
769 "avx512f", "avx512dq", "rdseed", "adx",
770 "smap", "avx512ifma", "pcommit", "clflushopt",
771 "clwb", "intel-pt", "avx512pf", "avx512er",
772 "avx512cd", "sha-ni", "avx512bw", "avx512vl",
773 },
774 .cpuid_eax = 7,
775 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
776 .cpuid_reg = R_EBX,
777 .tcg_features = TCG_7_0_EBX_FEATURES,
778 },
779 [FEAT_7_0_ECX] = {
780 .feat_names = {
781 NULL, "avx512vbmi", "umip", "pku",
782 "ospke", NULL, "avx512vbmi2", NULL,
783 "gfni", "vaes", "vpclmulqdq", "avx512vnni",
784 "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
785 "la57", NULL, NULL, NULL,
786 NULL, NULL, "rdpid", NULL,
787 NULL, "cldemote", NULL, NULL,
788 NULL, NULL, NULL, NULL,
789 },
790 .cpuid_eax = 7,
791 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
792 .cpuid_reg = R_ECX,
793 .tcg_features = TCG_7_0_ECX_FEATURES,
794 },
795 [FEAT_7_0_EDX] = {
796 .feat_names = {
797 NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
798 NULL, NULL, NULL, NULL,
799 NULL, NULL, NULL, NULL,
800 NULL, NULL, NULL, NULL,
801 NULL, NULL, NULL, NULL,
802 NULL, NULL, NULL, NULL,
803 NULL, NULL, "spec-ctrl", NULL,
804 NULL, NULL, NULL, NULL,
805 },
806 .cpuid_eax = 7,
807 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
808 .cpuid_reg = R_EDX,
809 .tcg_features = TCG_7_0_EDX_FEATURES,
810 },
811 [FEAT_8000_0007_EDX] = {
812 .feat_names = {
813 NULL, NULL, NULL, NULL,
814 NULL, NULL, NULL, NULL,
815 "invtsc", NULL, NULL, NULL,
816 NULL, NULL, NULL, NULL,
817 NULL, NULL, NULL, NULL,
818 NULL, NULL, NULL, NULL,
819 NULL, NULL, NULL, NULL,
820 NULL, NULL, NULL, NULL,
821 },
822 .cpuid_eax = 0x80000007,
823 .cpuid_reg = R_EDX,
824 .tcg_features = TCG_APM_FEATURES,
825 .unmigratable_flags = CPUID_APM_INVTSC,
826 },
827 [FEAT_8000_0008_EBX] = {
828 .feat_names = {
829 NULL, NULL, NULL, NULL,
830 NULL, NULL, NULL, NULL,
831 NULL, NULL, NULL, NULL,
832 "ibpb", NULL, NULL, NULL,
833 NULL, NULL, NULL, NULL,
834 NULL, NULL, NULL, NULL,
835 NULL, NULL, NULL, NULL,
836 NULL, NULL, NULL, NULL,
837 },
838 .cpuid_eax = 0x80000008,
839 .cpuid_reg = R_EBX,
840 .tcg_features = 0,
841 .unmigratable_flags = 0,
842 },
843 [FEAT_XSAVE] = {
844 .feat_names = {
845 "xsaveopt", "xsavec", "xgetbv1", "xsaves",
846 NULL, NULL, NULL, NULL,
847 NULL, NULL, NULL, NULL,
848 NULL, NULL, NULL, NULL,
849 NULL, NULL, NULL, NULL,
850 NULL, NULL, NULL, NULL,
851 NULL, NULL, NULL, NULL,
852 NULL, NULL, NULL, NULL,
853 },
854 .cpuid_eax = 0xd,
855 .cpuid_needs_ecx = true, .cpuid_ecx = 1,
856 .cpuid_reg = R_EAX,
857 .tcg_features = TCG_XSAVE_FEATURES,
858 },
859 [FEAT_6_EAX] = {
860 .feat_names = {
861 NULL, NULL, "arat", NULL,
862 NULL, NULL, NULL, NULL,
863 NULL, NULL, NULL, NULL,
864 NULL, NULL, NULL, NULL,
865 NULL, NULL, NULL, NULL,
866 NULL, NULL, NULL, NULL,
867 NULL, NULL, NULL, NULL,
868 NULL, NULL, NULL, NULL,
869 },
870 .cpuid_eax = 6, .cpuid_reg = R_EAX,
871 .tcg_features = TCG_6_EAX_FEATURES,
872 },
873 [FEAT_XSAVE_COMP_LO] = {
874 .cpuid_eax = 0xD,
875 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
876 .cpuid_reg = R_EAX,
877 .tcg_features = ~0U,
878 .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
879 XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
880 XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
881 XSTATE_PKRU_MASK,
882 },
883 [FEAT_XSAVE_COMP_HI] = {
884 .cpuid_eax = 0xD,
885 .cpuid_needs_ecx = true, .cpuid_ecx = 0,
886 .cpuid_reg = R_EDX,
887 .tcg_features = ~0U,
888 },
889 };
890
891 typedef struct X86RegisterInfo32 {
892 /* Name of register */
893 const char *name;
894 /* QAPI enum value for the register */
895 X86CPURegister32 qapi_enum;
896 } X86RegisterInfo32;
897
898 #define REGISTER(reg) \
899 [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
900 static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
901 REGISTER(EAX),
902 REGISTER(ECX),
903 REGISTER(EDX),
904 REGISTER(EBX),
905 REGISTER(ESP),
906 REGISTER(EBP),
907 REGISTER(ESI),
908 REGISTER(EDI),
909 };
910 #undef REGISTER
911
912 typedef struct ExtSaveArea {
913 uint32_t feature, bits;
914 uint32_t offset, size;
915 } ExtSaveArea;
916
917 static const ExtSaveArea x86_ext_save_areas[] = {
918 [XSTATE_FP_BIT] = {
919 /* x87 FP state component is always enabled if XSAVE is supported */
920 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
921 /* x87 state is in the legacy region of the XSAVE area */
922 .offset = 0,
923 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
924 },
925 [XSTATE_SSE_BIT] = {
926 /* SSE state component is always enabled if XSAVE is supported */
927 .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
928 /* SSE state is in the legacy region of the XSAVE area */
929 .offset = 0,
930 .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
931 },
932 [XSTATE_YMM_BIT] =
933 { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
934 .offset = offsetof(X86XSaveArea, avx_state),
935 .size = sizeof(XSaveAVX) },
936 [XSTATE_BNDREGS_BIT] =
937 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
938 .offset = offsetof(X86XSaveArea, bndreg_state),
939 .size = sizeof(XSaveBNDREG) },
940 [XSTATE_BNDCSR_BIT] =
941 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
942 .offset = offsetof(X86XSaveArea, bndcsr_state),
943 .size = sizeof(XSaveBNDCSR) },
944 [XSTATE_OPMASK_BIT] =
945 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
946 .offset = offsetof(X86XSaveArea, opmask_state),
947 .size = sizeof(XSaveOpmask) },
948 [XSTATE_ZMM_Hi256_BIT] =
949 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
950 .offset = offsetof(X86XSaveArea, zmm_hi256_state),
951 .size = sizeof(XSaveZMM_Hi256) },
952 [XSTATE_Hi16_ZMM_BIT] =
953 { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
954 .offset = offsetof(X86XSaveArea, hi16_zmm_state),
955 .size = sizeof(XSaveHi16_ZMM) },
956 [XSTATE_PKRU_BIT] =
957 { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
958 .offset = offsetof(X86XSaveArea, pkru_state),
959 .size = sizeof(XSavePKRU) },
960 };
961
962 static uint32_t xsave_area_size(uint64_t mask)
963 {
964 int i;
965 uint64_t ret = 0;
966
967 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
968 const ExtSaveArea *esa = &x86_ext_save_areas[i];
969 if ((mask >> i) & 1) {
970 ret = MAX(ret, esa->offset + esa->size);
971 }
972 }
973 return ret;
974 }
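
/*
 * Example (assuming the standard XSAVE layout mirrored by X86XSaveArea:
 * 512-byte legacy region plus 64-byte header, AVX component at offset 576,
 * 256 bytes): xsave_area_size(XSTATE_FP_MASK | XSTATE_SSE_MASK |
 * XSTATE_YMM_MASK) returns 576 + 256 = 832, the end of the highest
 * enabled component.
 */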
975
976 static inline bool accel_uses_host_cpuid(void)
977 {
978 return kvm_enabled() || hvf_enabled();
979 }
980
981 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
982 {
983 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
984 cpu->env.features[FEAT_XSAVE_COMP_LO];
985 }
986
987 const char *get_register_name_32(unsigned int reg)
988 {
989 if (reg >= CPU_NB_REGS32) {
990 return NULL;
991 }
992 return x86_reg_info_32[reg].name;
993 }
994
995 /*
996 * Returns the set of feature flags that are supported and migratable by
997 * QEMU, for a given FeatureWord.
998 */
999 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1000 {
1001 FeatureWordInfo *wi = &feature_word_info[w];
1002 uint32_t r = 0;
1003 int i;
1004
1005 for (i = 0; i < 32; i++) {
1006 uint32_t f = 1U << i;
1007
1008 /* If the feature name is known, it is implicitly considered migratable,
1009 * unless it is explicitly set in unmigratable_flags */
1010 if ((wi->migratable_flags & f) ||
1011 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1012 r |= f;
1013 }
1014 }
1015 return r;
1016 }
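
/*
 * Example, using the tables above: "invtsc" in FEAT_8000_0007_EDX has a
 * name but is listed in that word's unmigratable_flags, so its bit is left
 * out of the result; "xsaveopt" in FEAT_XSAVE is named and not marked
 * unmigratable, so its bit is reported as migratable.
 */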
1017
1018 void host_cpuid(uint32_t function, uint32_t count,
1019 uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
1020 {
1021 uint32_t vec[4];
1022
1023 #ifdef __x86_64__
1024 asm volatile("cpuid"
1025 : "=a"(vec[0]), "=b"(vec[1]),
1026 "=c"(vec[2]), "=d"(vec[3])
1027 : "0"(function), "c"(count) : "cc");
1028 #elif defined(__i386__)
1029 asm volatile("pusha \n\t"
1030 "cpuid \n\t"
1031 "mov %%eax, 0(%2) \n\t"
1032 "mov %%ebx, 4(%2) \n\t"
1033 "mov %%ecx, 8(%2) \n\t"
1034 "mov %%edx, 12(%2) \n\t"
1035 "popa"
1036 : : "a"(function), "c"(count), "S"(vec)
1037 : "memory", "cc");
1038 #else
1039 abort();
1040 #endif
1041
1042 if (eax)
1043 *eax = vec[0];
1044 if (ebx)
1045 *ebx = vec[1];
1046 if (ecx)
1047 *ecx = vec[2];
1048 if (edx)
1049 *edx = vec[3];
1050 }
1051
1052 void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
1053 {
1054 uint32_t eax, ebx, ecx, edx;
1055
1056 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
1057 x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);
1058
1059 host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
1060 if (family) {
1061 *family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
1062 }
1063 if (model) {
1064 *model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
1065 }
1066 if (stepping) {
1067 *stepping = eax & 0x0F;
1068 }
1069 }
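
/*
 * Example: for an illustrative CPUID[1].EAX value of 0x000906EA, this
 * decodes to family 6 (0x6 plus extended family 0x00), model 0x9E
 * (low nibble 0xE | extended model 0x9 << 4) and stepping 10.
 */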
1070
1071 /* CPU class name definitions: */
1072
1073 /* Return the QOM type name for a given CPU model name.
1074 * Caller is responsible for freeing the returned string.
1075 */
1076 static char *x86_cpu_type_name(const char *model_name)
1077 {
1078 return g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);
1079 }
1080
1081 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1082 {
1083 ObjectClass *oc;
1084 char *typename = x86_cpu_type_name(cpu_model);
1085 oc = object_class_by_name(typename);
1086 g_free(typename);
1087 return oc;
1088 }
1089
1090 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1091 {
1092 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1093 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1094 return g_strndup(class_name,
1095 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1096 }
1097
1098 struct X86CPUDefinition {
1099 const char *name;
1100 uint32_t level;
1101 uint32_t xlevel;
1102 /* vendor is zero-terminated, 12 character ASCII string */
1103 char vendor[CPUID_VENDOR_SZ + 1];
1104 int family;
1105 int model;
1106 int stepping;
1107 FeatureWordArray features;
1108 const char *model_id;
1109 CPUCaches *cache_info;
1110 };
1111
1112 static CPUCaches epyc_cache_info = {
1113 .l1d_cache = {
1114 .type = DCACHE,
1115 .level = 1,
1116 .size = 32 * KiB,
1117 .line_size = 64,
1118 .associativity = 8,
1119 .partitions = 1,
1120 .sets = 64,
1121 .lines_per_tag = 1,
1122 .self_init = 1,
1123 .no_invd_sharing = true,
1124 },
1125 .l1i_cache = {
1126 .type = ICACHE,
1127 .level = 1,
1128 .size = 64 * KiB,
1129 .line_size = 64,
1130 .associativity = 4,
1131 .partitions = 1,
1132 .sets = 256,
1133 .lines_per_tag = 1,
1134 .self_init = 1,
1135 .no_invd_sharing = true,
1136 },
1137 .l2_cache = {
1138 .type = UNIFIED_CACHE,
1139 .level = 2,
1140 .size = 512 * KiB,
1141 .line_size = 64,
1142 .associativity = 8,
1143 .partitions = 1,
1144 .sets = 1024,
1145 .lines_per_tag = 1,
1146 },
1147 .l3_cache = {
1148 .type = UNIFIED_CACHE,
1149 .level = 3,
1150 .size = 8 * MiB,
1151 .line_size = 64,
1152 .associativity = 16,
1153 .partitions = 1,
1154 .sets = 8192,
1155 .lines_per_tag = 1,
1156 .self_init = true,
1157 .inclusive = true,
1158 .complex_indexing = true,
1159 },
1160 };
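
/*
 * Note that each EPYC cache level above satisfies the
 * size == line_size * associativity * partitions * sets identity that
 * encode_cache_cpuid4() asserts: 64*8*64 = 32 KiB (L1D), 64*4*256 = 64 KiB
 * (L1I), 64*8*1024 = 512 KiB (L2) and 64*16*8192 = 8 MiB (L3).
 */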
1161
1162 static X86CPUDefinition builtin_x86_defs[] = {
1163 {
1164 .name = "qemu64",
1165 .level = 0xd,
1166 .vendor = CPUID_VENDOR_AMD,
1167 .family = 6,
1168 .model = 6,
1169 .stepping = 3,
1170 .features[FEAT_1_EDX] =
1171 PPRO_FEATURES |
1172 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1173 CPUID_PSE36,
1174 .features[FEAT_1_ECX] =
1175 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1176 .features[FEAT_8000_0001_EDX] =
1177 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1178 .features[FEAT_8000_0001_ECX] =
1179 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1180 .xlevel = 0x8000000A,
1181 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1182 },
1183 {
1184 .name = "phenom",
1185 .level = 5,
1186 .vendor = CPUID_VENDOR_AMD,
1187 .family = 16,
1188 .model = 2,
1189 .stepping = 3,
1190 /* Missing: CPUID_HT */
1191 .features[FEAT_1_EDX] =
1192 PPRO_FEATURES |
1193 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1194 CPUID_PSE36 | CPUID_VME,
1195 .features[FEAT_1_ECX] =
1196 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1197 CPUID_EXT_POPCNT,
1198 .features[FEAT_8000_0001_EDX] =
1199 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1200 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1201 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1202 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1203 CPUID_EXT3_CR8LEG,
1204 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1205 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1206 .features[FEAT_8000_0001_ECX] =
1207 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1208 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1209 /* Missing: CPUID_SVM_LBRV */
1210 .features[FEAT_SVM] =
1211 CPUID_SVM_NPT,
1212 .xlevel = 0x8000001A,
1213 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1214 },
1215 {
1216 .name = "core2duo",
1217 .level = 10,
1218 .vendor = CPUID_VENDOR_INTEL,
1219 .family = 6,
1220 .model = 15,
1221 .stepping = 11,
1222 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1223 .features[FEAT_1_EDX] =
1224 PPRO_FEATURES |
1225 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1226 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1227 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1228 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1229 .features[FEAT_1_ECX] =
1230 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1231 CPUID_EXT_CX16,
1232 .features[FEAT_8000_0001_EDX] =
1233 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1234 .features[FEAT_8000_0001_ECX] =
1235 CPUID_EXT3_LAHF_LM,
1236 .xlevel = 0x80000008,
1237 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1238 },
1239 {
1240 .name = "kvm64",
1241 .level = 0xd,
1242 .vendor = CPUID_VENDOR_INTEL,
1243 .family = 15,
1244 .model = 6,
1245 .stepping = 1,
1246 /* Missing: CPUID_HT */
1247 .features[FEAT_1_EDX] =
1248 PPRO_FEATURES | CPUID_VME |
1249 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1250 CPUID_PSE36,
1251 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1252 .features[FEAT_1_ECX] =
1253 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1254 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1255 .features[FEAT_8000_0001_EDX] =
1256 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1257 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1258 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1259 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1260 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1261 .features[FEAT_8000_0001_ECX] =
1262 0,
1263 .xlevel = 0x80000008,
1264 .model_id = "Common KVM processor"
1265 },
1266 {
1267 .name = "qemu32",
1268 .level = 4,
1269 .vendor = CPUID_VENDOR_INTEL,
1270 .family = 6,
1271 .model = 6,
1272 .stepping = 3,
1273 .features[FEAT_1_EDX] =
1274 PPRO_FEATURES,
1275 .features[FEAT_1_ECX] =
1276 CPUID_EXT_SSE3,
1277 .xlevel = 0x80000004,
1278 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1279 },
1280 {
1281 .name = "kvm32",
1282 .level = 5,
1283 .vendor = CPUID_VENDOR_INTEL,
1284 .family = 15,
1285 .model = 6,
1286 .stepping = 1,
1287 .features[FEAT_1_EDX] =
1288 PPRO_FEATURES | CPUID_VME |
1289 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1290 .features[FEAT_1_ECX] =
1291 CPUID_EXT_SSE3,
1292 .features[FEAT_8000_0001_ECX] =
1293 0,
1294 .xlevel = 0x80000008,
1295 .model_id = "Common 32-bit KVM processor"
1296 },
1297 {
1298 .name = "coreduo",
1299 .level = 10,
1300 .vendor = CPUID_VENDOR_INTEL,
1301 .family = 6,
1302 .model = 14,
1303 .stepping = 8,
1304 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1305 .features[FEAT_1_EDX] =
1306 PPRO_FEATURES | CPUID_VME |
1307 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1308 CPUID_SS,
1309 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1310 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1311 .features[FEAT_1_ECX] =
1312 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1313 .features[FEAT_8000_0001_EDX] =
1314 CPUID_EXT2_NX,
1315 .xlevel = 0x80000008,
1316 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1317 },
1318 {
1319 .name = "486",
1320 .level = 1,
1321 .vendor = CPUID_VENDOR_INTEL,
1322 .family = 4,
1323 .model = 8,
1324 .stepping = 0,
1325 .features[FEAT_1_EDX] =
1326 I486_FEATURES,
1327 .xlevel = 0,
1328 .model_id = "",
1329 },
1330 {
1331 .name = "pentium",
1332 .level = 1,
1333 .vendor = CPUID_VENDOR_INTEL,
1334 .family = 5,
1335 .model = 4,
1336 .stepping = 3,
1337 .features[FEAT_1_EDX] =
1338 PENTIUM_FEATURES,
1339 .xlevel = 0,
1340 .model_id = "",
1341 },
1342 {
1343 .name = "pentium2",
1344 .level = 2,
1345 .vendor = CPUID_VENDOR_INTEL,
1346 .family = 6,
1347 .model = 5,
1348 .stepping = 2,
1349 .features[FEAT_1_EDX] =
1350 PENTIUM2_FEATURES,
1351 .xlevel = 0,
1352 .model_id = "",
1353 },
1354 {
1355 .name = "pentium3",
1356 .level = 3,
1357 .vendor = CPUID_VENDOR_INTEL,
1358 .family = 6,
1359 .model = 7,
1360 .stepping = 3,
1361 .features[FEAT_1_EDX] =
1362 PENTIUM3_FEATURES,
1363 .xlevel = 0,
1364 .model_id = "",
1365 },
1366 {
1367 .name = "athlon",
1368 .level = 2,
1369 .vendor = CPUID_VENDOR_AMD,
1370 .family = 6,
1371 .model = 2,
1372 .stepping = 3,
1373 .features[FEAT_1_EDX] =
1374 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1375 CPUID_MCA,
1376 .features[FEAT_8000_0001_EDX] =
1377 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1378 .xlevel = 0x80000008,
1379 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1380 },
1381 {
1382 .name = "n270",
1383 .level = 10,
1384 .vendor = CPUID_VENDOR_INTEL,
1385 .family = 6,
1386 .model = 28,
1387 .stepping = 2,
1388 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1389 .features[FEAT_1_EDX] =
1390 PPRO_FEATURES |
1391 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1392 CPUID_ACPI | CPUID_SS,
1393 /* Some CPUs have no CPUID_SEP */
1394 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1395 * CPUID_EXT_XTPR */
1396 .features[FEAT_1_ECX] =
1397 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1398 CPUID_EXT_MOVBE,
1399 .features[FEAT_8000_0001_EDX] =
1400 CPUID_EXT2_NX,
1401 .features[FEAT_8000_0001_ECX] =
1402 CPUID_EXT3_LAHF_LM,
1403 .xlevel = 0x80000008,
1404 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1405 },
1406 {
1407 .name = "Conroe",
1408 .level = 10,
1409 .vendor = CPUID_VENDOR_INTEL,
1410 .family = 6,
1411 .model = 15,
1412 .stepping = 3,
1413 .features[FEAT_1_EDX] =
1414 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1415 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1416 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1417 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1418 CPUID_DE | CPUID_FP87,
1419 .features[FEAT_1_ECX] =
1420 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1421 .features[FEAT_8000_0001_EDX] =
1422 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1423 .features[FEAT_8000_0001_ECX] =
1424 CPUID_EXT3_LAHF_LM,
1425 .xlevel = 0x80000008,
1426 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1427 },
1428 {
1429 .name = "Penryn",
1430 .level = 10,
1431 .vendor = CPUID_VENDOR_INTEL,
1432 .family = 6,
1433 .model = 23,
1434 .stepping = 3,
1435 .features[FEAT_1_EDX] =
1436 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1437 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1438 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1439 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1440 CPUID_DE | CPUID_FP87,
1441 .features[FEAT_1_ECX] =
1442 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1443 CPUID_EXT_SSE3,
1444 .features[FEAT_8000_0001_EDX] =
1445 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1446 .features[FEAT_8000_0001_ECX] =
1447 CPUID_EXT3_LAHF_LM,
1448 .xlevel = 0x80000008,
1449 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1450 },
1451 {
1452 .name = "Nehalem",
1453 .level = 11,
1454 .vendor = CPUID_VENDOR_INTEL,
1455 .family = 6,
1456 .model = 26,
1457 .stepping = 3,
1458 .features[FEAT_1_EDX] =
1459 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1460 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1461 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1462 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1463 CPUID_DE | CPUID_FP87,
1464 .features[FEAT_1_ECX] =
1465 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1466 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1467 .features[FEAT_8000_0001_EDX] =
1468 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1469 .features[FEAT_8000_0001_ECX] =
1470 CPUID_EXT3_LAHF_LM,
1471 .xlevel = 0x80000008,
1472 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1473 },
1474 {
1475 .name = "Nehalem-IBRS",
1476 .level = 11,
1477 .vendor = CPUID_VENDOR_INTEL,
1478 .family = 6,
1479 .model = 26,
1480 .stepping = 3,
1481 .features[FEAT_1_EDX] =
1482 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1483 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1484 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1485 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1486 CPUID_DE | CPUID_FP87,
1487 .features[FEAT_1_ECX] =
1488 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1489 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1490 .features[FEAT_7_0_EDX] =
1491 CPUID_7_0_EDX_SPEC_CTRL,
1492 .features[FEAT_8000_0001_EDX] =
1493 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1494 .features[FEAT_8000_0001_ECX] =
1495 CPUID_EXT3_LAHF_LM,
1496 .xlevel = 0x80000008,
1497 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1498 },
1499 {
1500 .name = "Westmere",
1501 .level = 11,
1502 .vendor = CPUID_VENDOR_INTEL,
1503 .family = 6,
1504 .model = 44,
1505 .stepping = 1,
1506 .features[FEAT_1_EDX] =
1507 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1508 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1509 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1510 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1511 CPUID_DE | CPUID_FP87,
1512 .features[FEAT_1_ECX] =
1513 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1514 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1515 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1516 .features[FEAT_8000_0001_EDX] =
1517 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1518 .features[FEAT_8000_0001_ECX] =
1519 CPUID_EXT3_LAHF_LM,
1520 .features[FEAT_6_EAX] =
1521 CPUID_6_EAX_ARAT,
1522 .xlevel = 0x80000008,
1523 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1524 },
1525 {
1526 .name = "Westmere-IBRS",
1527 .level = 11,
1528 .vendor = CPUID_VENDOR_INTEL,
1529 .family = 6,
1530 .model = 44,
1531 .stepping = 1,
1532 .features[FEAT_1_EDX] =
1533 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1534 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1535 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1536 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1537 CPUID_DE | CPUID_FP87,
1538 .features[FEAT_1_ECX] =
1539 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1540 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1541 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1542 .features[FEAT_8000_0001_EDX] =
1543 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1544 .features[FEAT_8000_0001_ECX] =
1545 CPUID_EXT3_LAHF_LM,
1546 .features[FEAT_7_0_EDX] =
1547 CPUID_7_0_EDX_SPEC_CTRL,
1548 .features[FEAT_6_EAX] =
1549 CPUID_6_EAX_ARAT,
1550 .xlevel = 0x80000008,
1551 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1552 },
1553 {
1554 .name = "SandyBridge",
1555 .level = 0xd,
1556 .vendor = CPUID_VENDOR_INTEL,
1557 .family = 6,
1558 .model = 42,
1559 .stepping = 1,
1560 .features[FEAT_1_EDX] =
1561 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1562 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1563 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1564 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1565 CPUID_DE | CPUID_FP87,
1566 .features[FEAT_1_ECX] =
1567 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1568 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1569 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1570 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1571 CPUID_EXT_SSE3,
1572 .features[FEAT_8000_0001_EDX] =
1573 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1574 CPUID_EXT2_SYSCALL,
1575 .features[FEAT_8000_0001_ECX] =
1576 CPUID_EXT3_LAHF_LM,
1577 .features[FEAT_XSAVE] =
1578 CPUID_XSAVE_XSAVEOPT,
1579 .features[FEAT_6_EAX] =
1580 CPUID_6_EAX_ARAT,
1581 .xlevel = 0x80000008,
1582 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1583 },
1584 {
1585 .name = "SandyBridge-IBRS",
1586 .level = 0xd,
1587 .vendor = CPUID_VENDOR_INTEL,
1588 .family = 6,
1589 .model = 42,
1590 .stepping = 1,
1591 .features[FEAT_1_EDX] =
1592 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1593 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1594 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1595 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1596 CPUID_DE | CPUID_FP87,
1597 .features[FEAT_1_ECX] =
1598 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1599 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1600 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1601 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1602 CPUID_EXT_SSE3,
1603 .features[FEAT_8000_0001_EDX] =
1604 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1605 CPUID_EXT2_SYSCALL,
1606 .features[FEAT_8000_0001_ECX] =
1607 CPUID_EXT3_LAHF_LM,
1608 .features[FEAT_7_0_EDX] =
1609 CPUID_7_0_EDX_SPEC_CTRL,
1610 .features[FEAT_XSAVE] =
1611 CPUID_XSAVE_XSAVEOPT,
1612 .features[FEAT_6_EAX] =
1613 CPUID_6_EAX_ARAT,
1614 .xlevel = 0x80000008,
1615 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1616 },
1617 {
1618 .name = "IvyBridge",
1619 .level = 0xd,
1620 .vendor = CPUID_VENDOR_INTEL,
1621 .family = 6,
1622 .model = 58,
1623 .stepping = 9,
1624 .features[FEAT_1_EDX] =
1625 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1626 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1627 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1628 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1629 CPUID_DE | CPUID_FP87,
1630 .features[FEAT_1_ECX] =
1631 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1632 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1633 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1634 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1635 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1636 .features[FEAT_7_0_EBX] =
1637 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1638 CPUID_7_0_EBX_ERMS,
1639 .features[FEAT_8000_0001_EDX] =
1640 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1641 CPUID_EXT2_SYSCALL,
1642 .features[FEAT_8000_0001_ECX] =
1643 CPUID_EXT3_LAHF_LM,
1644 .features[FEAT_XSAVE] =
1645 CPUID_XSAVE_XSAVEOPT,
1646 .features[FEAT_6_EAX] =
1647 CPUID_6_EAX_ARAT,
1648 .xlevel = 0x80000008,
1649 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1650 },
1651 {
1652 .name = "IvyBridge-IBRS",
1653 .level = 0xd,
1654 .vendor = CPUID_VENDOR_INTEL,
1655 .family = 6,
1656 .model = 58,
1657 .stepping = 9,
1658 .features[FEAT_1_EDX] =
1659 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1660 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1661 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1662 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1663 CPUID_DE | CPUID_FP87,
1664 .features[FEAT_1_ECX] =
1665 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1666 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1667 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1668 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1669 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1670 .features[FEAT_7_0_EBX] =
1671 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1672 CPUID_7_0_EBX_ERMS,
1673 .features[FEAT_8000_0001_EDX] =
1674 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1675 CPUID_EXT2_SYSCALL,
1676 .features[FEAT_8000_0001_ECX] =
1677 CPUID_EXT3_LAHF_LM,
1678 .features[FEAT_7_0_EDX] =
1679 CPUID_7_0_EDX_SPEC_CTRL,
1680 .features[FEAT_XSAVE] =
1681 CPUID_XSAVE_XSAVEOPT,
1682 .features[FEAT_6_EAX] =
1683 CPUID_6_EAX_ARAT,
1684 .xlevel = 0x80000008,
1685 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1686 },
1687 {
1688 .name = "Haswell-noTSX",
1689 .level = 0xd,
1690 .vendor = CPUID_VENDOR_INTEL,
1691 .family = 6,
1692 .model = 60,
1693 .stepping = 1,
1694 .features[FEAT_1_EDX] =
1695 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1696 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1697 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1698 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1699 CPUID_DE | CPUID_FP87,
1700 .features[FEAT_1_ECX] =
1701 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1702 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1703 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1704 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1705 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1706 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1707 .features[FEAT_8000_0001_EDX] =
1708 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1709 CPUID_EXT2_SYSCALL,
1710 .features[FEAT_8000_0001_ECX] =
1711 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1712 .features[FEAT_7_0_EBX] =
1713 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1714 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1715 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1716 .features[FEAT_XSAVE] =
1717 CPUID_XSAVE_XSAVEOPT,
1718 .features[FEAT_6_EAX] =
1719 CPUID_6_EAX_ARAT,
1720 .xlevel = 0x80000008,
1721 .model_id = "Intel Core Processor (Haswell, no TSX)",
1722 },
1723 {
1724 .name = "Haswell-noTSX-IBRS",
1725 .level = 0xd,
1726 .vendor = CPUID_VENDOR_INTEL,
1727 .family = 6,
1728 .model = 60,
1729 .stepping = 1,
1730 .features[FEAT_1_EDX] =
1731 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1732 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1733 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1734 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1735 CPUID_DE | CPUID_FP87,
1736 .features[FEAT_1_ECX] =
1737 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1738 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1739 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1740 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1741 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1742 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1743 .features[FEAT_8000_0001_EDX] =
1744 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1745 CPUID_EXT2_SYSCALL,
1746 .features[FEAT_8000_0001_ECX] =
1747 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1748 .features[FEAT_7_0_EDX] =
1749 CPUID_7_0_EDX_SPEC_CTRL,
1750 .features[FEAT_7_0_EBX] =
1751 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1752 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1753 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1754 .features[FEAT_XSAVE] =
1755 CPUID_XSAVE_XSAVEOPT,
1756 .features[FEAT_6_EAX] =
1757 CPUID_6_EAX_ARAT,
1758 .xlevel = 0x80000008,
1759 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1760 },
1761 {
1762 .name = "Haswell",
1763 .level = 0xd,
1764 .vendor = CPUID_VENDOR_INTEL,
1765 .family = 6,
1766 .model = 60,
1767 .stepping = 4,
1768 .features[FEAT_1_EDX] =
1769 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1770 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1771 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1772 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1773 CPUID_DE | CPUID_FP87,
1774 .features[FEAT_1_ECX] =
1775 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1776 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1777 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1778 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1779 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1780 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1781 .features[FEAT_8000_0001_EDX] =
1782 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1783 CPUID_EXT2_SYSCALL,
1784 .features[FEAT_8000_0001_ECX] =
1785 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1786 .features[FEAT_7_0_EBX] =
1787 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1788 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1789 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1790 CPUID_7_0_EBX_RTM,
1791 .features[FEAT_XSAVE] =
1792 CPUID_XSAVE_XSAVEOPT,
1793 .features[FEAT_6_EAX] =
1794 CPUID_6_EAX_ARAT,
1795 .xlevel = 0x80000008,
1796 .model_id = "Intel Core Processor (Haswell)",
1797 },
1798 {
1799 .name = "Haswell-IBRS",
1800 .level = 0xd,
1801 .vendor = CPUID_VENDOR_INTEL,
1802 .family = 6,
1803 .model = 60,
1804 .stepping = 4,
1805 .features[FEAT_1_EDX] =
1806 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1807 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1808 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1809 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1810 CPUID_DE | CPUID_FP87,
1811 .features[FEAT_1_ECX] =
1812 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1813 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1814 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1815 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1816 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1817 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1818 .features[FEAT_8000_0001_EDX] =
1819 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1820 CPUID_EXT2_SYSCALL,
1821 .features[FEAT_8000_0001_ECX] =
1822 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1823 .features[FEAT_7_0_EDX] =
1824 CPUID_7_0_EDX_SPEC_CTRL,
1825 .features[FEAT_7_0_EBX] =
1826 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1827 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1828 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1829 CPUID_7_0_EBX_RTM,
1830 .features[FEAT_XSAVE] =
1831 CPUID_XSAVE_XSAVEOPT,
1832 .features[FEAT_6_EAX] =
1833 CPUID_6_EAX_ARAT,
1834 .xlevel = 0x80000008,
1835 .model_id = "Intel Core Processor (Haswell, IBRS)",
1836 },
1837 {
1838 .name = "Broadwell-noTSX",
1839 .level = 0xd,
1840 .vendor = CPUID_VENDOR_INTEL,
1841 .family = 6,
1842 .model = 61,
1843 .stepping = 2,
1844 .features[FEAT_1_EDX] =
1845 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1846 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1847 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1848 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1849 CPUID_DE | CPUID_FP87,
1850 .features[FEAT_1_ECX] =
1851 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1852 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1853 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1854 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1855 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1856 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1857 .features[FEAT_8000_0001_EDX] =
1858 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1859 CPUID_EXT2_SYSCALL,
1860 .features[FEAT_8000_0001_ECX] =
1861 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1862 .features[FEAT_7_0_EBX] =
1863 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1864 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1865 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1866 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1867 CPUID_7_0_EBX_SMAP,
1868 .features[FEAT_XSAVE] =
1869 CPUID_XSAVE_XSAVEOPT,
1870 .features[FEAT_6_EAX] =
1871 CPUID_6_EAX_ARAT,
1872 .xlevel = 0x80000008,
1873 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1874 },
1875 {
1876 .name = "Broadwell-noTSX-IBRS",
1877 .level = 0xd,
1878 .vendor = CPUID_VENDOR_INTEL,
1879 .family = 6,
1880 .model = 61,
1881 .stepping = 2,
1882 .features[FEAT_1_EDX] =
1883 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1884 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1885 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1886 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1887 CPUID_DE | CPUID_FP87,
1888 .features[FEAT_1_ECX] =
1889 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1890 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1891 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1892 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1893 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1894 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1895 .features[FEAT_8000_0001_EDX] =
1896 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1897 CPUID_EXT2_SYSCALL,
1898 .features[FEAT_8000_0001_ECX] =
1899 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1900 .features[FEAT_7_0_EDX] =
1901 CPUID_7_0_EDX_SPEC_CTRL,
1902 .features[FEAT_7_0_EBX] =
1903 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1904 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1905 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1906 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1907 CPUID_7_0_EBX_SMAP,
1908 .features[FEAT_XSAVE] =
1909 CPUID_XSAVE_XSAVEOPT,
1910 .features[FEAT_6_EAX] =
1911 CPUID_6_EAX_ARAT,
1912 .xlevel = 0x80000008,
1913 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
1914 },
1915 {
1916 .name = "Broadwell",
1917 .level = 0xd,
1918 .vendor = CPUID_VENDOR_INTEL,
1919 .family = 6,
1920 .model = 61,
1921 .stepping = 2,
1922 .features[FEAT_1_EDX] =
1923 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1924 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1925 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1926 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1927 CPUID_DE | CPUID_FP87,
1928 .features[FEAT_1_ECX] =
1929 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1930 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1931 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1932 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1933 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1934 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1935 .features[FEAT_8000_0001_EDX] =
1936 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1937 CPUID_EXT2_SYSCALL,
1938 .features[FEAT_8000_0001_ECX] =
1939 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1940 .features[FEAT_7_0_EBX] =
1941 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1942 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1943 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1944 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1945 CPUID_7_0_EBX_SMAP,
1946 .features[FEAT_XSAVE] =
1947 CPUID_XSAVE_XSAVEOPT,
1948 .features[FEAT_6_EAX] =
1949 CPUID_6_EAX_ARAT,
1950 .xlevel = 0x80000008,
1951 .model_id = "Intel Core Processor (Broadwell)",
1952 },
1953 {
1954 .name = "Broadwell-IBRS",
1955 .level = 0xd,
1956 .vendor = CPUID_VENDOR_INTEL,
1957 .family = 6,
1958 .model = 61,
1959 .stepping = 2,
1960 .features[FEAT_1_EDX] =
1961 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1962 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1963 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1964 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1965 CPUID_DE | CPUID_FP87,
1966 .features[FEAT_1_ECX] =
1967 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1968 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1969 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1970 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1971 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1972 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1973 .features[FEAT_8000_0001_EDX] =
1974 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1975 CPUID_EXT2_SYSCALL,
1976 .features[FEAT_8000_0001_ECX] =
1977 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1978 .features[FEAT_7_0_EDX] =
1979 CPUID_7_0_EDX_SPEC_CTRL,
1980 .features[FEAT_7_0_EBX] =
1981 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1982 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1983 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1984 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1985 CPUID_7_0_EBX_SMAP,
1986 .features[FEAT_XSAVE] =
1987 CPUID_XSAVE_XSAVEOPT,
1988 .features[FEAT_6_EAX] =
1989 CPUID_6_EAX_ARAT,
1990 .xlevel = 0x80000008,
1991 .model_id = "Intel Core Processor (Broadwell, IBRS)",
1992 },
1993 {
1994 .name = "Skylake-Client",
1995 .level = 0xd,
1996 .vendor = CPUID_VENDOR_INTEL,
1997 .family = 6,
1998 .model = 94,
1999 .stepping = 3,
2000 .features[FEAT_1_EDX] =
2001 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2002 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2003 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2004 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2005 CPUID_DE | CPUID_FP87,
2006 .features[FEAT_1_ECX] =
2007 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2008 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2009 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2010 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2011 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2012 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2013 .features[FEAT_8000_0001_EDX] =
2014 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2015 CPUID_EXT2_SYSCALL,
2016 .features[FEAT_8000_0001_ECX] =
2017 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2018 .features[FEAT_7_0_EBX] =
2019 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2020 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2021 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2022 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2023 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2024 /* Missing: XSAVES (not supported by some Linux versions,
2025 * including v4.1 to v4.12).
2026 * KVM doesn't yet expose any XSAVES state save component,
2027 * and the only one defined in Skylake (processor tracing)
2028 * probably will block migration anyway.
2029 */
2030 .features[FEAT_XSAVE] =
2031 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2032 CPUID_XSAVE_XGETBV1,
2033 .features[FEAT_6_EAX] =
2034 CPUID_6_EAX_ARAT,
2035 .xlevel = 0x80000008,
2036 .model_id = "Intel Core Processor (Skylake)",
2037 },
2038 {
2039 .name = "Skylake-Client-IBRS",
2040 .level = 0xd,
2041 .vendor = CPUID_VENDOR_INTEL,
2042 .family = 6,
2043 .model = 94,
2044 .stepping = 3,
2045 .features[FEAT_1_EDX] =
2046 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2047 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2048 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2049 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2050 CPUID_DE | CPUID_FP87,
2051 .features[FEAT_1_ECX] =
2052 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2053 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2054 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2055 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2056 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2057 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2058 .features[FEAT_8000_0001_EDX] =
2059 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2060 CPUID_EXT2_SYSCALL,
2061 .features[FEAT_8000_0001_ECX] =
2062 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2063 .features[FEAT_7_0_EDX] =
2064 CPUID_7_0_EDX_SPEC_CTRL,
2065 .features[FEAT_7_0_EBX] =
2066 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2067 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2068 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2069 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2070 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2071 /* Missing: XSAVES (not supported by some Linux versions,
2072 * including v4.1 to v4.12).
2073 * KVM doesn't yet expose any XSAVES state save component,
2074 * and the only one defined in Skylake (processor tracing)
2075 * probably will block migration anyway.
2076 */
2077 .features[FEAT_XSAVE] =
2078 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2079 CPUID_XSAVE_XGETBV1,
2080 .features[FEAT_6_EAX] =
2081 CPUID_6_EAX_ARAT,
2082 .xlevel = 0x80000008,
2083 .model_id = "Intel Core Processor (Skylake, IBRS)",
2084 },
2085 {
2086 .name = "Skylake-Server",
2087 .level = 0xd,
2088 .vendor = CPUID_VENDOR_INTEL,
2089 .family = 6,
2090 .model = 85,
2091 .stepping = 4,
2092 .features[FEAT_1_EDX] =
2093 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2094 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2095 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2096 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2097 CPUID_DE | CPUID_FP87,
2098 .features[FEAT_1_ECX] =
2099 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2100 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2101 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2102 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2103 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2104 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2105 .features[FEAT_8000_0001_EDX] =
2106 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2107 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2108 .features[FEAT_8000_0001_ECX] =
2109 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2110 .features[FEAT_7_0_EBX] =
2111 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2112 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2113 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2114 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2115 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2116 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2117 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2118 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2119 /* Missing: XSAVES (not supported by some Linux versions,
2120 * including v4.1 to v4.12).
2121 * KVM doesn't yet expose any XSAVES state save component,
2122 * and the only one defined in Skylake (processor tracing)
2123 * probably will block migration anyway.
2124 */
2125 .features[FEAT_XSAVE] =
2126 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2127 CPUID_XSAVE_XGETBV1,
2128 .features[FEAT_6_EAX] =
2129 CPUID_6_EAX_ARAT,
2130 .xlevel = 0x80000008,
2131 .model_id = "Intel Xeon Processor (Skylake)",
2132 },
2133 {
2134 .name = "Skylake-Server-IBRS",
2135 .level = 0xd,
2136 .vendor = CPUID_VENDOR_INTEL,
2137 .family = 6,
2138 .model = 85,
2139 .stepping = 4,
2140 .features[FEAT_1_EDX] =
2141 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2142 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2143 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2144 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2145 CPUID_DE | CPUID_FP87,
2146 .features[FEAT_1_ECX] =
2147 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2148 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2149 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2150 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2151 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2152 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2153 .features[FEAT_8000_0001_EDX] =
2154 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2155 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2156 .features[FEAT_8000_0001_ECX] =
2157 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2158 .features[FEAT_7_0_EDX] =
2159 CPUID_7_0_EDX_SPEC_CTRL,
2160 .features[FEAT_7_0_EBX] =
2161 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2162 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2163 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2164 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2165 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2166 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2167 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2168 CPUID_7_0_EBX_AVX512VL,
2169 /* Missing: XSAVES (not supported by some Linux versions,
2170 * including v4.1 to v4.12).
2171 * KVM doesn't yet expose any XSAVES state save component,
2172 * and the only one defined in Skylake (processor tracing)
2173 * probably will block migration anyway.
2174 */
2175 .features[FEAT_XSAVE] =
2176 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2177 CPUID_XSAVE_XGETBV1,
2178 .features[FEAT_6_EAX] =
2179 CPUID_6_EAX_ARAT,
2180 .xlevel = 0x80000008,
2181 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2182 },
2183 {
2184 .name = "KnightsMill",
2185 .level = 0xd,
2186 .vendor = CPUID_VENDOR_INTEL,
2187 .family = 6,
2188 .model = 133,
2189 .stepping = 0,
2190 .features[FEAT_1_EDX] =
2191 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2192 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2193 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2194 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2195 CPUID_PSE | CPUID_DE | CPUID_FP87,
2196 .features[FEAT_1_ECX] =
2197 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2198 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2199 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2200 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2201 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2202 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2203 .features[FEAT_8000_0001_EDX] =
2204 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2205 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2206 .features[FEAT_8000_0001_ECX] =
2207 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2208 .features[FEAT_7_0_EBX] =
2209 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2210 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2211 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2212 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2213 CPUID_7_0_EBX_AVX512ER,
2214 .features[FEAT_7_0_ECX] =
2215 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2216 .features[FEAT_7_0_EDX] =
2217 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2218 .features[FEAT_XSAVE] =
2219 CPUID_XSAVE_XSAVEOPT,
2220 .features[FEAT_6_EAX] =
2221 CPUID_6_EAX_ARAT,
2222 .xlevel = 0x80000008,
2223 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2224 },
2225 {
2226 .name = "Opteron_G1",
2227 .level = 5,
2228 .vendor = CPUID_VENDOR_AMD,
2229 .family = 15,
2230 .model = 6,
2231 .stepping = 1,
2232 .features[FEAT_1_EDX] =
2233 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2234 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2235 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2236 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2237 CPUID_DE | CPUID_FP87,
2238 .features[FEAT_1_ECX] =
2239 CPUID_EXT_SSE3,
2240 .features[FEAT_8000_0001_EDX] =
2241 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2242 .xlevel = 0x80000008,
2243 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2244 },
2245 {
2246 .name = "Opteron_G2",
2247 .level = 5,
2248 .vendor = CPUID_VENDOR_AMD,
2249 .family = 15,
2250 .model = 6,
2251 .stepping = 1,
2252 .features[FEAT_1_EDX] =
2253 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2254 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2255 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2256 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2257 CPUID_DE | CPUID_FP87,
2258 .features[FEAT_1_ECX] =
2259 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2260 /* Missing: CPUID_EXT2_RDTSCP */
2261 .features[FEAT_8000_0001_EDX] =
2262 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2263 .features[FEAT_8000_0001_ECX] =
2264 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2265 .xlevel = 0x80000008,
2266 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2267 },
2268 {
2269 .name = "Opteron_G3",
2270 .level = 5,
2271 .vendor = CPUID_VENDOR_AMD,
2272 .family = 16,
2273 .model = 2,
2274 .stepping = 3,
2275 .features[FEAT_1_EDX] =
2276 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2277 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2278 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2279 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2280 CPUID_DE | CPUID_FP87,
2281 .features[FEAT_1_ECX] =
2282 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2283 CPUID_EXT_SSE3,
2284 /* Missing: CPUID_EXT2_RDTSCP */
2285 .features[FEAT_8000_0001_EDX] =
2286 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2287 .features[FEAT_8000_0001_ECX] =
2288 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2289 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2290 .xlevel = 0x80000008,
2291 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2292 },
2293 {
2294 .name = "Opteron_G4",
2295 .level = 0xd,
2296 .vendor = CPUID_VENDOR_AMD,
2297 .family = 21,
2298 .model = 1,
2299 .stepping = 2,
2300 .features[FEAT_1_EDX] =
2301 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2302 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2303 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2304 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2305 CPUID_DE | CPUID_FP87,
2306 .features[FEAT_1_ECX] =
2307 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2308 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2309 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2310 CPUID_EXT_SSE3,
2311 /* Missing: CPUID_EXT2_RDTSCP */
2312 .features[FEAT_8000_0001_EDX] =
2313 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2314 CPUID_EXT2_SYSCALL,
2315 .features[FEAT_8000_0001_ECX] =
2316 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2317 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2318 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2319 CPUID_EXT3_LAHF_LM,
2320 /* no xsaveopt! */
2321 .xlevel = 0x8000001A,
2322 .model_id = "AMD Opteron 62xx class CPU",
2323 },
2324 {
2325 .name = "Opteron_G5",
2326 .level = 0xd,
2327 .vendor = CPUID_VENDOR_AMD,
2328 .family = 21,
2329 .model = 2,
2330 .stepping = 0,
2331 .features[FEAT_1_EDX] =
2332 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2333 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2334 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2335 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2336 CPUID_DE | CPUID_FP87,
2337 .features[FEAT_1_ECX] =
2338 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2339 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2340 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2341 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2342 /* Missing: CPUID_EXT2_RDTSCP */
2343 .features[FEAT_8000_0001_EDX] =
2344 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2345 CPUID_EXT2_SYSCALL,
2346 .features[FEAT_8000_0001_ECX] =
2347 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2348 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2349 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2350 CPUID_EXT3_LAHF_LM,
2351 /* no xsaveopt! */
2352 .xlevel = 0x8000001A,
2353 .model_id = "AMD Opteron 63xx class CPU",
2354 },
2355 {
2356 .name = "EPYC",
2357 .level = 0xd,
2358 .vendor = CPUID_VENDOR_AMD,
2359 .family = 23,
2360 .model = 1,
2361 .stepping = 2,
2362 .features[FEAT_1_EDX] =
2363 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2364 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2365 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2366 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2367 CPUID_VME | CPUID_FP87,
2368 .features[FEAT_1_ECX] =
2369 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2370 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2371 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2372 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2373 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2374 .features[FEAT_8000_0001_EDX] =
2375 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2376 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2377 CPUID_EXT2_SYSCALL,
2378 .features[FEAT_8000_0001_ECX] =
2379 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2380 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2381 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2382 .features[FEAT_7_0_EBX] =
2383 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2384 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2385 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2386 CPUID_7_0_EBX_SHA_NI,
2387 /* Missing: XSAVES (not supported by some Linux versions,
2388 * including v4.1 to v4.12).
2389 * KVM doesn't yet expose any XSAVES state save component.
2390 */
2391 .features[FEAT_XSAVE] =
2392 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2393 CPUID_XSAVE_XGETBV1,
2394 .features[FEAT_6_EAX] =
2395 CPUID_6_EAX_ARAT,
2396 .xlevel = 0x8000000A,
2397 .model_id = "AMD EPYC Processor",
2398 .cache_info = &epyc_cache_info,
2399 },
2400 {
2401 .name = "EPYC-IBPB",
2402 .level = 0xd,
2403 .vendor = CPUID_VENDOR_AMD,
2404 .family = 23,
2405 .model = 1,
2406 .stepping = 2,
2407 .features[FEAT_1_EDX] =
2408 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2409 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2410 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2411 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2412 CPUID_VME | CPUID_FP87,
2413 .features[FEAT_1_ECX] =
2414 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2415 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2416 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2417 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2418 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2419 .features[FEAT_8000_0001_EDX] =
2420 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2421 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2422 CPUID_EXT2_SYSCALL,
2423 .features[FEAT_8000_0001_ECX] =
2424 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2425 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2426 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2427 .features[FEAT_8000_0008_EBX] =
2428 CPUID_8000_0008_EBX_IBPB,
2429 .features[FEAT_7_0_EBX] =
2430 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2431 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2432 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2433 CPUID_7_0_EBX_SHA_NI,
2434 /* Missing: XSAVES (not supported by some Linux versions,
2435 * including v4.1 to v4.12).
2436 * KVM doesn't yet expose any XSAVES state save component.
2437 */
2438 .features[FEAT_XSAVE] =
2439 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2440 CPUID_XSAVE_XGETBV1,
2441 .features[FEAT_6_EAX] =
2442 CPUID_6_EAX_ARAT,
2443 .xlevel = 0x8000000A,
2444 .model_id = "AMD EPYC Processor (with IBPB)",
2445 .cache_info = &epyc_cache_info,
2446 },
2447 };
2448
2449 typedef struct PropValue {
2450 const char *prop, *value;
2451 } PropValue;
2452
2453 /* KVM-specific features that are automatically added/removed
2454 * from all CPU models when KVM is enabled.
2455 */
2456 static PropValue kvm_default_props[] = {
2457 { "kvmclock", "on" },
2458 { "kvm-nopiodelay", "on" },
2459 { "kvm-asyncpf", "on" },
2460 { "kvm-steal-time", "on" },
2461 { "kvm-pv-eoi", "on" },
2462 { "kvmclock-stable-bit", "on" },
2463 { "x2apic", "on" },
2464 { "acpi", "off" },
2465 { "monitor", "off" },
2466 { "svm", "off" },
2467 { NULL, NULL },
2468 };
2469
2470 /* TCG-specific defaults that override all CPU models when using TCG
2471 */
2472 static PropValue tcg_default_props[] = {
2473 { "vme", "off" },
2474 { NULL, NULL },
2475 };
2476
2477
2478 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2479 {
2480 PropValue *pv;
2481 for (pv = kvm_default_props; pv->prop; pv++) {
2482 if (!strcmp(pv->prop, prop)) {
2483 pv->value = value;
2484 break;
2485 }
2486 }
2487
2488 /* It is valid to call this function only for properties that
2489 * are already present in the kvm_default_props table.
2490 */
2491 assert(pv->prop);
2492 }
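/*
 * Illustrative usage (a minimal sketch based on the call made later in
 * x86_cpu_load_def()): accel/board setup code can flip one of the KVM
 * defaults before CPUs are realized, e.g. to drop x2apic when the
 * in-kernel irqchip is not available:
 *
 *     if (!kvm_irqchip_in_kernel()) {
 *         x86_cpu_change_kvm_default("x2apic", "off");
 *     }
 *
 * Passing a property name that is not in kvm_default_props trips the
 * assert above by design.
 */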
2493
2494 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2495 bool migratable_only);
2496
2497 static bool lmce_supported(void)
2498 {
2499 uint64_t mce_cap = 0;
2500
2501 #ifdef CONFIG_KVM
2502 if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
2503 return false;
2504 }
2505 #endif
2506
2507 return !!(mce_cap & MCG_LMCE_P);
2508 }
2509
2510 #define CPUID_MODEL_ID_SZ 48
2511
2512 /**
2513 * cpu_x86_fill_model_id:
2514 * Get CPUID model ID string from host CPU.
2515 *
2516 * @str should have at least CPUID_MODEL_ID_SZ bytes
2517 *
2518 * The function does NOT add a null terminator to the string
2519 * automatically.
2520 */
2521 static int cpu_x86_fill_model_id(char *str)
2522 {
2523 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2524 int i;
2525
2526 for (i = 0; i < 3; i++) {
2527 host_cpuid(0x80000002 + i, 0, &eax, &ebx, &ecx, &edx);
2528 memcpy(str + i * 16 + 0, &eax, 4);
2529 memcpy(str + i * 16 + 4, &ebx, 4);
2530 memcpy(str + i * 16 + 8, &ecx, 4);
2531 memcpy(str + i * 16 + 12, &edx, 4);
2532 }
2533 return 0;
2534 }
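/*
 * Illustrative usage (sketch): the three host_cpuid() calls above copy the
 * 48-byte brand string returned by CPUID leaves 0x80000002..0x80000004
 * (16 bytes per leaf), so callers must provide a CPUID_MODEL_ID_SZ-byte
 * buffer and terminate it themselves, as max_x86_cpu_initfn() does below:
 *
 *     char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
 *     cpu_x86_fill_model_id(model_id);
 */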
2535
2536 static Property max_x86_cpu_properties[] = {
2537 DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
2538 DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
2539 DEFINE_PROP_END_OF_LIST()
2540 };
2541
2542 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2543 {
2544 DeviceClass *dc = DEVICE_CLASS(oc);
2545 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2546
2547 xcc->ordering = 9;
2548
2549 xcc->model_description =
2550 "Enables all features supported by the accelerator in the current host";
2551
2552 dc->props = max_x86_cpu_properties;
2553 }
2554
2555 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2556
2557 static void max_x86_cpu_initfn(Object *obj)
2558 {
2559 X86CPU *cpu = X86_CPU(obj);
2560 CPUX86State *env = &cpu->env;
2561 KVMState *s = kvm_state;
2562
2563 /* We can't fill the features array here because we don't know yet if
2564 * "migratable" is true or false.
2565 */
2566 cpu->max_features = true;
2567
2568 if (accel_uses_host_cpuid()) {
2569 char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
2570 char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
2571 int family, model, stepping;
2572 X86CPUDefinition host_cpudef = { };
2573 uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
2574
2575 host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
2576 x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);
2577
2578 host_vendor_fms(vendor, &family, &model, &stepping);
2579
2580 cpu_x86_fill_model_id(model_id);
2581
2582 object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
2583 object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
2584 object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
2585 object_property_set_int(OBJECT(cpu), stepping, "stepping",
2586 &error_abort);
2587 object_property_set_str(OBJECT(cpu), model_id, "model-id",
2588 &error_abort);
2589
2590 if (kvm_enabled()) {
2591 env->cpuid_min_level =
2592 kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
2593 env->cpuid_min_xlevel =
2594 kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
2595 env->cpuid_min_xlevel2 =
2596 kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
2597 } else {
2598 env->cpuid_min_level =
2599 hvf_get_supported_cpuid(0x0, 0, R_EAX);
2600 env->cpuid_min_xlevel =
2601 hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
2602 env->cpuid_min_xlevel2 =
2603 hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
2604 }
2605
2606 if (lmce_supported()) {
2607 object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
2608 }
2609 } else {
2610 object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
2611 "vendor", &error_abort);
2612 object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
2613 object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
2614 object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
2615 object_property_set_str(OBJECT(cpu),
2616 "QEMU TCG CPU version " QEMU_HW_VERSION,
2617 "model-id", &error_abort);
2618 }
2619
2620 object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
2621 }
2622
2623 static const TypeInfo max_x86_cpu_type_info = {
2624 .name = X86_CPU_TYPE_NAME("max"),
2625 .parent = TYPE_X86_CPU,
2626 .instance_init = max_x86_cpu_initfn,
2627 .class_init = max_x86_cpu_class_init,
2628 };
2629
2630 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2631 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2632 {
2633 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2634
2635 xcc->host_cpuid_required = true;
2636 xcc->ordering = 8;
2637
2638 if (kvm_enabled()) {
2639 xcc->model_description =
2640 "KVM processor with all supported host features ";
2641 } else if (hvf_enabled()) {
2642 xcc->model_description =
2643 "HVF processor with all supported host features ";
2644 }
2645 }
2646
2647 static const TypeInfo host_x86_cpu_type_info = {
2648 .name = X86_CPU_TYPE_NAME("host"),
2649 .parent = X86_CPU_TYPE_NAME("max"),
2650 .class_init = host_x86_cpu_class_init,
2651 };
2652
2653 #endif
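/*
 * Rough distinction between the two classes above (sketch): "max" is always
 * registered and falls back to a TCG-friendly default CPU when no host CPUID
 * is available, while "host" sets host_cpuid_required and is only built when
 * CONFIG_KVM or CONFIG_HVF is defined, so it can only run on those
 * accelerators.
 */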
2654
2655 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2656 {
2657 FeatureWordInfo *f = &feature_word_info[w];
2658 int i;
2659
2660 for (i = 0; i < 32; ++i) {
2661 if ((1UL << i) & mask) {
2662 const char *reg = get_register_name_32(f->cpuid_reg);
2663 assert(reg);
2664 warn_report("%s doesn't support requested feature: "
2665 "CPUID.%02XH:%s%s%s [bit %d]",
2666 accel_uses_host_cpuid() ? "host" : "TCG",
2667 f->cpuid_eax, reg,
2668 f->feat_names[i] ? "." : "",
2669 f->feat_names[i] ? f->feat_names[i] : "", i);
2670 }
2671 }
2672 }
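/*
 * Illustrative output (assumed wording, following the warn_report() format
 * string above): requesting HLE on an accelerator that does not offer it
 * might produce something like
 *
 *     warning: TCG doesn't support requested feature: CPUID.07H:EBX.hle [bit 4]
 */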
2673
2674 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2675 const char *name, void *opaque,
2676 Error **errp)
2677 {
2678 X86CPU *cpu = X86_CPU(obj);
2679 CPUX86State *env = &cpu->env;
2680 int64_t value;
2681
2682 value = (env->cpuid_version >> 8) & 0xf;
2683 if (value == 0xf) {
2684 value += (env->cpuid_version >> 20) & 0xff;
2685 }
2686 visit_type_int(v, name, &value, errp);
2687 }
2688
2689 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2690 const char *name, void *opaque,
2691 Error **errp)
2692 {
2693 X86CPU *cpu = X86_CPU(obj);
2694 CPUX86State *env = &cpu->env;
2695 const int64_t min = 0;
2696 const int64_t max = 0xff + 0xf;
2697 Error *local_err = NULL;
2698 int64_t value;
2699
2700 visit_type_int(v, name, &value, &local_err);
2701 if (local_err) {
2702 error_propagate(errp, local_err);
2703 return;
2704 }
2705 if (value < min || value > max) {
2706 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2707 name ? name : "null", value, min, max);
2708 return;
2709 }
2710
2711 env->cpuid_version &= ~0xff00f00;
2712 if (value > 0x0f) {
2713 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2714 } else {
2715 env->cpuid_version |= value << 8;
2716 }
2717 }
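/*
 * Worked example (illustrative): family values above 0x0f are split into the
 * base family (0xf in bits 11:8) and an extended family (bits 27:20).
 * Setting family=23 (0x17), as the EPYC definitions above do, therefore
 * stores 0xf00 | ((23 - 15) << 20) = 0x800f00 in cpuid_version.
 */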
2718
2719 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2720 const char *name, void *opaque,
2721 Error **errp)
2722 {
2723 X86CPU *cpu = X86_CPU(obj);
2724 CPUX86State *env = &cpu->env;
2725 int64_t value;
2726
2727 value = (env->cpuid_version >> 4) & 0xf;
2728 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2729 visit_type_int(v, name, &value, errp);
2730 }
2731
2732 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2733 const char *name, void *opaque,
2734 Error **errp)
2735 {
2736 X86CPU *cpu = X86_CPU(obj);
2737 CPUX86State *env = &cpu->env;
2738 const int64_t min = 0;
2739 const int64_t max = 0xff;
2740 Error *local_err = NULL;
2741 int64_t value;
2742
2743 visit_type_int(v, name, &value, &local_err);
2744 if (local_err) {
2745 error_propagate(errp, local_err);
2746 return;
2747 }
2748 if (value < min || value > max) {
2749 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2750 name ? name : "null", value, min, max);
2751 return;
2752 }
2753
2754 env->cpuid_version &= ~0xf00f0;
2755 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2756 }
2757
2758 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2759 const char *name, void *opaque,
2760 Error **errp)
2761 {
2762 X86CPU *cpu = X86_CPU(obj);
2763 CPUX86State *env = &cpu->env;
2764 int64_t value;
2765
2766 value = env->cpuid_version & 0xf;
2767 visit_type_int(v, name, &value, errp);
2768 }
2769
2770 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2771 const char *name, void *opaque,
2772 Error **errp)
2773 {
2774 X86CPU *cpu = X86_CPU(obj);
2775 CPUX86State *env = &cpu->env;
2776 const int64_t min = 0;
2777 const int64_t max = 0xf;
2778 Error *local_err = NULL;
2779 int64_t value;
2780
2781 visit_type_int(v, name, &value, &local_err);
2782 if (local_err) {
2783 error_propagate(errp, local_err);
2784 return;
2785 }
2786 if (value < min || value > max) {
2787 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2788 name ? name : "null", value, min, max);
2789 return;
2790 }
2791
2792 env->cpuid_version &= ~0xf;
2793 env->cpuid_version |= value & 0xf;
2794 }
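/*
 * Worked example (illustrative): the three setters above together build the
 * CPUID.01H:EAX signature.  Using the Skylake-Client values from the table
 * above (family=6, model=94, stepping=3): family 6 goes to bits 11:8,
 * model 0x5e splits into bits 7:4 (low nibble) and bits 19:16 (high nibble),
 * and stepping 3 goes to bits 3:0, giving cpuid_version = 0x000506e3.
 */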
2795
2796 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2797 {
2798 X86CPU *cpu = X86_CPU(obj);
2799 CPUX86State *env = &cpu->env;
2800 char *value;
2801
2802 value = g_malloc(CPUID_VENDOR_SZ + 1);
2803 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2804 env->cpuid_vendor3);
2805 return value;
2806 }
2807
2808 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2809 Error **errp)
2810 {
2811 X86CPU *cpu = X86_CPU(obj);
2812 CPUX86State *env = &cpu->env;
2813 int i;
2814
2815 if (strlen(value) != CPUID_VENDOR_SZ) {
2816 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2817 return;
2818 }
2819
2820 env->cpuid_vendor1 = 0;
2821 env->cpuid_vendor2 = 0;
2822 env->cpuid_vendor3 = 0;
2823 for (i = 0; i < 4; i++) {
2824 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
2825 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2826 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
2827 }
2828 }
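/*
 * Illustrative layout: the 12-character vendor string is packed four bytes
 * per register in CPUID leaf 0 order (EBX, EDX, ECX).  For
 * CPUID_VENDOR_INTEL ("GenuineIntel"), cpuid_vendor1 holds "Genu",
 * cpuid_vendor2 "ineI" and cpuid_vendor3 "ntel", matching the
 * x86_cpu_vendor_words2str() calls used elsewhere in this file.
 */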
2829
2830 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2831 {
2832 X86CPU *cpu = X86_CPU(obj);
2833 CPUX86State *env = &cpu->env;
2834 char *value;
2835 int i;
2836
2837 value = g_malloc(48 + 1);
2838 for (i = 0; i < 48; i++) {
2839 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
2840 }
2841 value[48] = '\0';
2842 return value;
2843 }
2844
2845 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2846 Error **errp)
2847 {
2848 X86CPU *cpu = X86_CPU(obj);
2849 CPUX86State *env = &cpu->env;
2850 int c, len, i;
2851
2852 if (model_id == NULL) {
2853 model_id = "";
2854 }
2855 len = strlen(model_id);
2856 memset(env->cpuid_model, 0, 48);
2857 for (i = 0; i < 48; i++) {
2858 if (i >= len) {
2859 c = '\0';
2860 } else {
2861 c = (uint8_t)model_id[i];
2862 }
2863 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
2864 }
2865 }
2866
2867 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2868 void *opaque, Error **errp)
2869 {
2870 X86CPU *cpu = X86_CPU(obj);
2871 int64_t value;
2872
2873 value = cpu->env.tsc_khz * 1000;
2874 visit_type_int(v, name, &value, errp);
2875 }
2876
2877 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2878 void *opaque, Error **errp)
2879 {
2880 X86CPU *cpu = X86_CPU(obj);
2881 const int64_t min = 0;
2882 const int64_t max = INT64_MAX;
2883 Error *local_err = NULL;
2884 int64_t value;
2885
2886 visit_type_int(v, name, &value, &local_err);
2887 if (local_err) {
2888 error_propagate(errp, local_err);
2889 return;
2890 }
2891 if (value < min || value > max) {
2892 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2893 name ? name : "null", value, min, max);
2894 return;
2895 }
2896
2897 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2898 }
2899
2900 /* Generic getter for "feature-words" and "filtered-features" properties */
2901 static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
2902 const char *name, void *opaque,
2903 Error **errp)
2904 {
2905 uint32_t *array = (uint32_t *)opaque;
2906 FeatureWord w;
2907 X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
2908 X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
2909 X86CPUFeatureWordInfoList *list = NULL;
2910
2911 for (w = 0; w < FEATURE_WORDS; w++) {
2912 FeatureWordInfo *wi = &feature_word_info[w];
2913 X86CPUFeatureWordInfo *qwi = &word_infos[w];
2914 qwi->cpuid_input_eax = wi->cpuid_eax;
2915 qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
2916 qwi->cpuid_input_ecx = wi->cpuid_ecx;
2917 qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
2918 qwi->features = array[w];
2919
2920 /* List will be in reverse order, but order shouldn't matter */
2921 list_entries[w].next = list;
2922 list_entries[w].value = &word_infos[w];
2923 list = &list_entries[w];
2924 }
2925
2926 visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
2927 }
2928
2929 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2930 void *opaque, Error **errp)
2931 {
2932 X86CPU *cpu = X86_CPU(obj);
2933 int64_t value = cpu->hyperv_spinlock_attempts;
2934
2935 visit_type_int(v, name, &value, errp);
2936 }
2937
2938 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2939 void *opaque, Error **errp)
2940 {
2941 const int64_t min = 0xFFF;
2942 const int64_t max = UINT_MAX;
2943 X86CPU *cpu = X86_CPU(obj);
2944 Error *err = NULL;
2945 int64_t value;
2946
2947 visit_type_int(v, name, &value, &err);
2948 if (err) {
2949 error_propagate(errp, err);
2950 return;
2951 }
2952
2953 if (value < min || value > max) {
2954 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2955 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2956 object_get_typename(obj), name ? name : "null",
2957 value, min, max);
2958 return;
2959 }
2960 cpu->hyperv_spinlock_attempts = value;
2961 }
2962
2963 static const PropertyInfo qdev_prop_spinlocks = {
2964 .name = "int",
2965 .get = x86_get_hv_spinlocks,
2966 .set = x86_set_hv_spinlocks,
2967 };
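/*
 * Illustrative usage (assumed command-line spelling): this property backs the
 * Hyper-V spinlock retry count, e.g.
 *
 *     -cpu Haswell,hv-spinlocks=0x1fff
 *
 * Values below the 0xFFF minimum enforced above are rejected.
 */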
2968
2969 /* Convert all '_' in a feature string option name to '-', so that the feature
2970  * name conforms to the QOM property naming rule, which uses '-' instead of '_'.
2971  */
2972 static inline void feat2prop(char *s)
2973 {
2974 while ((s = strchr(s, '_'))) {
2975 *s = '-';
2976 }
2977 }
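/* For example, a legacy "tsc_deadline" spelling becomes "tsc-deadline". */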
2978
2979 /* Return the feature property name for a feature flag bit */
2980 static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
2981 {
2982 /* XSAVE components are automatically enabled by other features,
2983 * so return the original feature name instead
2984 */
2985 if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
2986 int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;
2987
2988 if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
2989 x86_ext_save_areas[comp].bits) {
2990 w = x86_ext_save_areas[comp].feature;
2991 bitnr = ctz32(x86_ext_save_areas[comp].bits);
2992 }
2993 }
2994
2995 assert(bitnr < 32);
2996 assert(w < FEATURE_WORDS);
2997 return feature_word_info[w].feat_names[bitnr];
2998 }
2999
3000 /* Compatibility hack to maintain the legacy +-feat semantics,
3001  * where +-feat overwrites any feature set by
3002  * feat=on|off even if the latter is parsed after +-feat
3003  * (i.e. "-x2apic,x2apic=on" will result in x2apic being disabled)
3004  */
3005 static GList *plus_features, *minus_features;
3006
3007 static gint compare_string(gconstpointer a, gconstpointer b)
3008 {
3009 return g_strcmp0(a, b);
3010 }
3011
3012 /* Parse "+feature,-feature,feature=foo" CPU feature string
3013 */
3014 static void x86_cpu_parse_featurestr(const char *typename, char *features,
3015 Error **errp)
3016 {
3017 char *featurestr; /* Single "key=value" string being parsed */
3018 static bool cpu_globals_initialized;
3019 bool ambiguous = false;
3020
3021 if (cpu_globals_initialized) {
3022 return;
3023 }
3024 cpu_globals_initialized = true;
3025
3026 if (!features) {
3027 return;
3028 }
3029
3030 for (featurestr = strtok(features, ",");
3031 featurestr;
3032 featurestr = strtok(NULL, ",")) {
3033 const char *name;
3034 const char *val = NULL;
3035 char *eq = NULL;
3036 char num[32];
3037 GlobalProperty *prop;
3038
3039 /* Compatibility syntax: */
3040 if (featurestr[0] == '+') {
3041 plus_features = g_list_append(plus_features,
3042 g_strdup(featurestr + 1));
3043 continue;
3044 } else if (featurestr[0] == '-') {
3045 minus_features = g_list_append(minus_features,
3046 g_strdup(featurestr + 1));
3047 continue;
3048 }
3049
3050 eq = strchr(featurestr, '=');
3051 if (eq) {
3052 *eq++ = 0;
3053 val = eq;
3054 } else {
3055 val = "on";
3056 }
3057
3058 feat2prop(featurestr);
3059 name = featurestr;
3060
3061 if (g_list_find_custom(plus_features, name, compare_string)) {
3062 warn_report("Ambiguous CPU model string. "
3063 "Don't mix both \"+%s\" and \"%s=%s\"",
3064 name, name, val);
3065 ambiguous = true;
3066 }
3067 if (g_list_find_custom(minus_features, name, compare_string)) {
3068 warn_report("Ambiguous CPU model string. "
3069 "Don't mix both \"-%s\" and \"%s=%s\"",
3070 name, name, val);
3071 ambiguous = true;
3072 }
3073
3074 /* Special case: */
3075 if (!strcmp(name, "tsc-freq")) {
3076 int ret;
3077 uint64_t tsc_freq;
3078
3079 ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
3080 if (ret < 0 || tsc_freq > INT64_MAX) {
3081 error_setg(errp, "bad numerical value %s", val);
3082 return;
3083 }
3084 snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
3085 val = num;
3086 name = "tsc-frequency";
3087 }
3088
3089 prop = g_new0(typeof(*prop), 1);
3090 prop->driver = typename;
3091 prop->property = g_strdup(name);
3092 prop->value = g_strdup(val);
3093 prop->errp = &error_fatal;
3094 qdev_prop_register_global(prop);
3095 }
3096
3097 if (ambiguous) {
3098 warn_report("Compatibility of ambiguous CPU model "
3099 "strings won't be kept on future QEMU versions");
3100 }
3101 }
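/*
 * Illustrative feature string (sketch, using feature names that appear in
 * this file): "+avx2,-hle,tsc-freq=1G" registers avx2=on and hle=off through
 * the legacy +/- lists, while the tsc-freq special case above converts the
 * metric-suffixed value with qemu_strtosz_metric() and registers it as the
 * "tsc-frequency" property.
 */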
3102
3103 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3104 static int x86_cpu_filter_features(X86CPU *cpu);
3105
3106 /* Check for missing features that may prevent the CPU class from
3107 * running using the current machine and accelerator.
3108 */
3109 static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
3110 strList **missing_feats)
3111 {
3112 X86CPU *xc;
3113 FeatureWord w;
3114 Error *err = NULL;
3115 strList **next = missing_feats;
3116
3117 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
3118 strList *new = g_new0(strList, 1);
3119 new->value = g_strdup("kvm");
3120 *missing_feats = new;
3121 return;
3122 }
3123
3124 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3125
3126 x86_cpu_expand_features(xc, &err);
3127 if (err) {
3128 /* Errors at x86_cpu_expand_features should never happen,
3129  * but in case they do, just report the model as not
3130 * runnable at all using the "type" property.
3131 */
3132 strList *new = g_new0(strList, 1);
3133 new->value = g_strdup("type");
3134 *next = new;
3135 next = &new->next;
3136 }
3137
3138 x86_cpu_filter_features(xc);
3139
3140 for (w = 0; w < FEATURE_WORDS; w++) {
3141 uint32_t filtered = xc->filtered_features[w];
3142 int i;
3143 for (i = 0; i < 32; i++) {
3144 if (filtered & (1UL << i)) {
3145 strList *new = g_new0(strList, 1);
3146 new->value = g_strdup(x86_cpu_feature_name(w, i));
3147 *next = new;
3148 next = &new->next;
3149 }
3150 }
3151 }
3152
3153 object_unref(OBJECT(xc));
3154 }
3155
3156 /* Print all cpuid feature names in featureset
3157 */
3158 static void listflags(FILE *f, fprintf_function print, const char **featureset)
3159 {
3160 int bit;
3161 bool first = true;
3162
3163 for (bit = 0; bit < 32; bit++) {
3164 if (featureset[bit]) {
3165 print(f, "%s%s", first ? "" : " ", featureset[bit]);
3166 first = false;
3167 }
3168 }
3169 }
3170
3171 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3172 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3173 {
3174 ObjectClass *class_a = (ObjectClass *)a;
3175 ObjectClass *class_b = (ObjectClass *)b;
3176 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3177 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3178 const char *name_a, *name_b;
3179
3180 if (cc_a->ordering != cc_b->ordering) {
3181 return cc_a->ordering - cc_b->ordering;
3182 } else {
3183 name_a = object_class_get_name(class_a);
3184 name_b = object_class_get_name(class_b);
3185 return strcmp(name_a, name_b);
3186 }
3187 }
3188
3189 static GSList *get_sorted_cpu_model_list(void)
3190 {
3191 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3192 list = g_slist_sort(list, x86_cpu_list_compare);
3193 return list;
3194 }
3195
3196 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3197 {
3198 ObjectClass *oc = data;
3199 X86CPUClass *cc = X86_CPU_CLASS(oc);
3200 CPUListState *s = user_data;
3201 char *name = x86_cpu_class_get_model_name(cc);
3202 const char *desc = cc->model_description;
3203 if (!desc && cc->cpu_def) {
3204 desc = cc->cpu_def->model_id;
3205 }
3206
3207 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
3208 name, desc);
3209 g_free(name);
3210 }
3211
3212 /* list available CPU models and flags */
3213 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3214 {
3215 int i;
3216 CPUListState s = {
3217 .file = f,
3218 .cpu_fprintf = cpu_fprintf,
3219 };
3220 GSList *list;
3221
3222 (*cpu_fprintf)(f, "Available CPUs:\n");
3223 list = get_sorted_cpu_model_list();
3224 g_slist_foreach(list, x86_cpu_list_entry, &s);
3225 g_slist_free(list);
3226
3227 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3228 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3229 FeatureWordInfo *fw = &feature_word_info[i];
3230
3231 (*cpu_fprintf)(f, " ");
3232 listflags(f, cpu_fprintf, fw->feat_names);
3233 (*cpu_fprintf)(f, "\n");
3234 }
3235 }
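/*
 * Illustrative invocation (assumed): this listing is what backs "-cpu help",
 * printing one "x86 <name> <description>" line per entry of
 * builtin_x86_defs, followed by the recognized flag names for each feature
 * word.
 */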
3236
3237 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3238 {
3239 ObjectClass *oc = data;
3240 X86CPUClass *cc = X86_CPU_CLASS(oc);
3241 CpuDefinitionInfoList **cpu_list = user_data;
3242 CpuDefinitionInfoList *entry;
3243 CpuDefinitionInfo *info;
3244
3245 info = g_malloc0(sizeof(*info));
3246 info->name = x86_cpu_class_get_model_name(cc);
3247 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3248 info->has_unavailable_features = true;
3249 info->q_typename = g_strdup(object_class_get_name(oc));
3250 info->migration_safe = cc->migration_safe;
3251 info->has_migration_safe = true;
3252 info->q_static = cc->static_model;
3253
3254 entry = g_malloc0(sizeof(*entry));
3255 entry->value = info;
3256 entry->next = *cpu_list;
3257 *cpu_list = entry;
3258 }
3259
3260 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3261 {
3262 CpuDefinitionInfoList *cpu_list = NULL;
3263 GSList *list = get_sorted_cpu_model_list();
3264 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3265 g_slist_free(list);
3266 return cpu_list;
3267 }
3268
3269 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
3270 bool migratable_only)
3271 {
3272 FeatureWordInfo *wi = &feature_word_info[w];
3273 uint32_t r;
3274
3275 if (kvm_enabled()) {
3276 r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
3277 wi->cpuid_ecx,
3278 wi->cpuid_reg);
3279 } else if (hvf_enabled()) {
3280 r = hvf_get_supported_cpuid(wi->cpuid_eax,
3281 wi->cpuid_ecx,
3282 wi->cpuid_reg);
3283 } else if (tcg_enabled()) {
3284 r = wi->tcg_features;
3285 } else {
3286 return ~0;
3287 }
3288 if (migratable_only) {
3289 r &= x86_cpu_get_migratable_flags(w);
3290 }
3291 return r;
3292 }
3293
3294 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3295 {
3296 FeatureWord w;
3297
3298 for (w = 0; w < FEATURE_WORDS; w++) {
3299 report_unavailable_features(w, cpu->filtered_features[w]);
3300 }
3301 }
3302
3303 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3304 {
3305 PropValue *pv;
3306 for (pv = props; pv->prop; pv++) {
3307 if (!pv->value) {
3308 continue;
3309 }
3310 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3311 &error_abort);
3312 }
3313 }
3314
3315 /* Load data from X86CPUDefinition into an X86CPU object
3316 */
3317 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
3318 {
3319 CPUX86State *env = &cpu->env;
3320 const char *vendor;
3321 char host_vendor[CPUID_VENDOR_SZ + 1];
3322 FeatureWord w;
3323
3324 /* NOTE: any property set by this function should be returned by
3325 * x86_cpu_static_props(), so static expansion of
3326 * query-cpu-model-expansion is always complete.
3327 */
3328
3329 /* CPU models only set _minimum_ values for level/xlevel: */
3330 object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
3331 object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);
3332
3333 object_property_set_int(OBJECT(cpu), def->family, "family", errp);
3334 object_property_set_int(OBJECT(cpu), def->model, "model", errp);
3335 object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
3336 object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
3337 for (w = 0; w < FEATURE_WORDS; w++) {
3338 env->features[w] = def->features[w];
3339 }
3340
3341 /* Special cases not set in the X86CPUDefinition structs: */
3342 /* TODO: in-kernel irqchip for hvf */
3343 if (kvm_enabled()) {
3344 if (!kvm_irqchip_in_kernel()) {
3345 x86_cpu_change_kvm_default("x2apic", "off");
3346 }
3347
3348 x86_cpu_apply_props(cpu, kvm_default_props);
3349 } else if (tcg_enabled()) {
3350 x86_cpu_apply_props(cpu, tcg_default_props);
3351 }
3352
3353 env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;
3354
3355 /* sysenter isn't supported in compatibility mode on AMD,
3356 * syscall isn't supported in compatibility mode on Intel.
3357 * Normally we advertise the actual CPU vendor, but you can
3358 * override this using the 'vendor' property if you want to use
3359 * KVM's sysenter/syscall emulation in compatibility mode and
3360 * when doing cross-vendor migration.
3361 */
3362 vendor = def->vendor;
3363 if (accel_uses_host_cpuid()) {
3364 uint32_t ebx = 0, ecx = 0, edx = 0;
3365 host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
3366 x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
3367 vendor = host_vendor;
3368 }
3369
3370 object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);
3371
3372 }
3373
3374 /* Return a QDict containing keys for all properties that can be included
3375 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
3376 * must be included in the dictionary.
3377 */
3378 static QDict *x86_cpu_static_props(void)
3379 {
3380 FeatureWord w;
3381 int i;
3382 static const char *props[] = {
3383 "min-level",
3384 "min-xlevel",
3385 "family",
3386 "model",
3387 "stepping",
3388 "model-id",
3389 "vendor",
3390 "lmce",
3391 NULL,
3392 };
3393 static QDict *d;
3394
3395 if (d) {
3396 return d;
3397 }
3398
3399 d = qdict_new();
3400 for (i = 0; props[i]; i++) {
3401 qdict_put_null(d, props[i]);
3402 }
3403
3404 for (w = 0; w < FEATURE_WORDS; w++) {
3405 FeatureWordInfo *fi = &feature_word_info[w];
3406 int bit;
3407 for (bit = 0; bit < 32; bit++) {
3408 if (!fi->feat_names[bit]) {
3409 continue;
3410 }
3411 qdict_put_null(d, fi->feat_names[bit]);
3412 }
3413 }
3414
3415 return d;
3416 }
3417
3418 /* Add an entry to @props dict, with the value for property. */
3419 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3420 {
3421 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3422 &error_abort);
3423
3424 qdict_put_obj(props, prop, value);
3425 }
3426
3427 /* Convert CPU model data from X86CPU object to a property dictionary
3428 * that can recreate exactly the same CPU model.
3429 */
3430 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3431 {
3432 QDict *sprops = x86_cpu_static_props();
3433 const QDictEntry *e;
3434
3435 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3436 const char *prop = qdict_entry_key(e);
3437 x86_cpu_expand_prop(cpu, props, prop);
3438 }
3439 }
3440
3441 /* Convert CPU model data from X86CPU object to a property dictionary
3442 * that can recreate exactly the same CPU model, including every
3443 * writeable QOM property.
3444 */
3445 static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
3446 {
3447 ObjectPropertyIterator iter;
3448 ObjectProperty *prop;
3449
3450 object_property_iter_init(&iter, OBJECT(cpu));
3451 while ((prop = object_property_iter_next(&iter))) {
3452 /* skip read-only or write-only properties */
3453 if (!prop->get || !prop->set) {
3454 continue;
3455 }
3456
3457 /* "hotplugged" is the only property that is configurable
3458 * on the command-line but will be set differently on CPUs
3459 * created using "-cpu ... -smp ..." and by CPUs created
3460 * on the fly by x86_cpu_from_model() for querying. Skip it.
3461 */
3462 if (!strcmp(prop->name, "hotplugged")) {
3463 continue;
3464 }
3465 x86_cpu_expand_prop(cpu, props, prop->name);
3466 }
3467 }
3468
3469 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3470 {
3471 const QDictEntry *prop;
3472 Error *err = NULL;
3473
3474 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3475 object_property_set_qobject(obj, qdict_entry_value(prop),
3476 qdict_entry_key(prop), &err);
3477 if (err) {
3478 break;
3479 }
3480 }
3481
3482 error_propagate(errp, err);
3483 }
3484
3485 /* Create X86CPU object according to model+props specification */
3486 static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
3487 {
3488 X86CPU *xc = NULL;
3489 X86CPUClass *xcc;
3490 Error *err = NULL;
3491
3492 xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
3493 if (xcc == NULL) {
3494 error_setg(&err, "CPU model '%s' not found", model);
3495 goto out;
3496 }
3497
3498 xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
3499 if (props) {
3500 object_apply_props(OBJECT(xc), props, &err);
3501 if (err) {
3502 goto out;
3503 }
3504 }
3505
3506 x86_cpu_expand_features(xc, &err);
3507 if (err) {
3508 goto out;
3509 }
3510
3511 out:
3512 if (err) {
3513 error_propagate(errp, err);
3514 object_unref(OBJECT(xc));
3515 xc = NULL;
3516 }
3517 return xc;
3518 }
3519
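/*
 * arch_query_cpu_model_expansion() backs the query-cpu-model-expansion
 * QMP command.  A minimal, illustrative invocation (the model name is
 * just an example) might look like:
 *
 *   { "execute": "query-cpu-model-expansion",
 *     "arguments": { "type": "static",
 *                    "model": { "name": "Haswell" } } }
 *
 * For static expansion the reply names the "base" model and lists the
 * properties needed to recreate the expanded CPU on top of it.
 */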
3520 CpuModelExpansionInfo *
3521 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3522 CpuModelInfo *model,
3523 Error **errp)
3524 {
3525 X86CPU *xc = NULL;
3526 Error *err = NULL;
3527 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3528 QDict *props = NULL;
3529 const char *base_name;
3530
3531 xc = x86_cpu_from_model(model->name,
3532 model->has_props ?
3533 qobject_to(QDict, model->props) :
3534 NULL, &err);
3535 if (err) {
3536 goto out;
3537 }
3538
3539 props = qdict_new();
3540
3541 switch (type) {
3542 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3543 /* Static expansion will be based on "base" only */
3544 base_name = "base";
3545 x86_cpu_to_dict(xc, props);
3546 break;
3547 case CPU_MODEL_EXPANSION_TYPE_FULL:
3548 /* As we don't return every single property, full expansion needs
3549 * to keep the original model name+props, and add extra
3550 * properties on top of that.
3551 */
3552 base_name = model->name;
3553 x86_cpu_to_dict_full(xc, props);
3554 break;
3555 default:
3556 error_setg(&err, "Unsupported expansion type");
3557 goto out;
3558 }
3559
3560 if (!props) {
3561 props = qdict_new();
3562 }
3563 x86_cpu_to_dict(xc, props);
3564
3565 ret->model = g_new0(CpuModelInfo, 1);
3566 ret->model->name = g_strdup(base_name);
3567 ret->model->props = QOBJECT(props);
3568 ret->model->has_props = true;
3569
3570 out:
3571 object_unref(OBJECT(xc));
3572 if (err) {
3573 error_propagate(errp, err);
3574 qapi_free_CpuModelExpansionInfo(ret);
3575 ret = NULL;
3576 }
3577 return ret;
3578 }
3579
3580 static gchar *x86_gdb_arch_name(CPUState *cs)
3581 {
3582 #ifdef TARGET_X86_64
3583 return g_strdup("i386:x86-64");
3584 #else
3585 return g_strdup("i386");
3586 #endif
3587 }
3588
3589 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3590 {
3591 X86CPUDefinition *cpudef = data;
3592 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3593
3594 xcc->cpu_def = cpudef;
3595 xcc->migration_safe = true;
3596 }
3597
3598 static void x86_register_cpudef_type(X86CPUDefinition *def)
3599 {
3600 char *typename = x86_cpu_type_name(def->name);
3601 TypeInfo ti = {
3602 .name = typename,
3603 .parent = TYPE_X86_CPU,
3604 .class_init = x86_cpu_cpudef_class_init,
3605 .class_data = def,
3606 };
3607
3608 /* AMD aliases are handled at runtime based on CPUID vendor, so
3609 * they shouldn't be set in the CPU model table.
3610 */
3611 assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
3612 /* catch mistakes instead of silently truncating model_id when too long */
3613 assert(def->model_id && strlen(def->model_id) <= 48);
3614
3615
3616 type_register(&ti);
3617 g_free(typename);
3618 }
3619
3620 #if !defined(CONFIG_USER_ONLY)
3621
3622 void cpu_clear_apic_feature(CPUX86State *env)
3623 {
3624 env->features[FEAT_1_EDX] &= ~CPUID_APIC;
3625 }
3626
3627 #endif /* !CONFIG_USER_ONLY */
3628
3629 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3630 uint32_t *eax, uint32_t *ebx,
3631 uint32_t *ecx, uint32_t *edx)
3632 {
3633 X86CPU *cpu = x86_env_get_cpu(env);
3634 CPUState *cs = CPU(cpu);
3635 uint32_t pkg_offset;
3636 uint32_t limit;
3637 uint32_t signature[3];
3638
3639 /* Calculate & apply limits for different index ranges */
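/*
 * 0xC0000000.. : Centaur/VIA leaves, limited by cpuid_xlevel2
 * 0x80000000.. : extended leaves, limited by cpuid_xlevel
 * 0x40000000.. : hypervisor leaves, fixed limit of 0x40000001 here
 * otherwise    : standard leaves, limited by cpuid_level
 */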
3640 if (index >= 0xC0000000) {
3641 limit = env->cpuid_xlevel2;
3642 } else if (index >= 0x80000000) {
3643 limit = env->cpuid_xlevel;
3644 } else if (index >= 0x40000000) {
3645 limit = 0x40000001;
3646 } else {
3647 limit = env->cpuid_level;
3648 }
3649
3650 if (index > limit) {
3651 /* Intel documentation states that invalid EAX input will
3652 * return the same information as EAX=cpuid_level
3653 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3654 */
3655 index = env->cpuid_level;
3656 }
3657
3658 switch(index) {
3659 case 0:
3660 *eax = env->cpuid_level;
3661 *ebx = env->cpuid_vendor1;
3662 *edx = env->cpuid_vendor2;
3663 *ecx = env->cpuid_vendor3;
3664 break;
3665 case 1:
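/* EBX layout: bits 31-24 initial APIC ID, bits 23-16 maximum number of
 * addressable logical processors, bits 15-8 CLFLUSH line size in
 * 8-byte units (hence the value 8 for a 64-byte line).
 */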
3666 *eax = env->cpuid_version;
3667 *ebx = (cpu->apic_id << 24) |
3668 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3669 *ecx = env->features[FEAT_1_ECX];
3670 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3671 *ecx |= CPUID_EXT_OSXSAVE;
3672 }
3673 *edx = env->features[FEAT_1_EDX];
3674 if (cs->nr_cores * cs->nr_threads > 1) {
3675 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3676 *edx |= CPUID_HT;
3677 }
3678 break;
3679 case 2:
3680 /* cache info: needed for Pentium Pro compatibility */
3681 if (cpu->cache_info_passthrough) {
3682 host_cpuid(index, 0, eax, ebx, ecx, edx);
3683 break;
3684 }
3685 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3686 *ebx = 0;
3687 if (!cpu->enable_l3_cache) {
3688 *ecx = 0;
3689 } else {
3690 *ecx = cpuid2_cache_descriptor(&l3_cache);
3691 }
3692 *edx = (cpuid2_cache_descriptor(&l1d_cache) << 16) |
3693 (cpuid2_cache_descriptor(&l1i_cache) << 8) |
3694 (cpuid2_cache_descriptor(&l2_cache_cpuid2));
3695 break;
3696 case 4:
3697 /* cache info: needed for Core compatibility */
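/* Each subleaf (ECX) describes one cache level.  In EAX, bits 31-26
 * encode the number of addressable core IDs per package minus one and
 * bits 25-14 the number of logical processors sharing the cache minus
 * one, which is why the passthrough path below rewrites bits 31-26
 * with QEMU's own core count.
 */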
3698 if (cpu->cache_info_passthrough) {
3699 host_cpuid(index, count, eax, ebx, ecx, edx);
3700 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3701 *eax &= ~0xFC000000;
3702 if ((*eax & 31) && cs->nr_cores > 1) {
3703 *eax |= (cs->nr_cores - 1) << 26;
3704 }
3705 } else {
3706 *eax = 0;
3707 switch (count) {
3708 case 0: /* L1 dcache info */
3709 encode_cache_cpuid4(&l1d_cache,
3710 1, cs->nr_cores,
3711 eax, ebx, ecx, edx);
3712 break;
3713 case 1: /* L1 icache info */
3714 encode_cache_cpuid4(&l1i_cache,
3715 1, cs->nr_cores,
3716 eax, ebx, ecx, edx);
3717 break;
3718 case 2: /* L2 cache info */
3719 encode_cache_cpuid4(&l2_cache,
3720 cs->nr_threads, cs->nr_cores,
3721 eax, ebx, ecx, edx);
3722 break;
3723 case 3: /* L3 cache info */
3724 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3725 if (cpu->enable_l3_cache) {
3726 encode_cache_cpuid4(&l3_cache,
3727 (1 << pkg_offset), cs->nr_cores,
3728 eax, ebx, ecx, edx);
3729 break;
3730 }
3731 /* fall through */
3732 default: /* end of info */
3733 *eax = *ebx = *ecx = *edx = 0;
3734 break;
3735 }
3736 }
3737 break;
3738 case 5:
3739 /* mwait info: needed for Core compatibility */
3740 *eax = 0; /* Smallest monitor-line size in bytes */
3741 *ebx = 0; /* Largest monitor-line size in bytes */
3742 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3743 *edx = 0;
3744 break;
3745 case 6:
3746 /* Thermal and Power Leaf */
3747 *eax = env->features[FEAT_6_EAX];
3748 *ebx = 0;
3749 *ecx = 0;
3750 *edx = 0;
3751 break;
3752 case 7:
3753 /* Structured Extended Feature Flags Enumeration Leaf */
3754 if (count == 0) {
3755 *eax = 0; /* Maximum ECX value for sub-leaves */
3756 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3757 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3758 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3759 *ecx |= CPUID_7_0_ECX_OSPKE;
3760 }
3761 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3762 } else {
3763 *eax = 0;
3764 *ebx = 0;
3765 *ecx = 0;
3766 *edx = 0;
3767 }
3768 break;
3769 case 9:
3770 /* Direct Cache Access Information Leaf */
3771 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3772 *ebx = 0;
3773 *ecx = 0;
3774 *edx = 0;
3775 break;
3776 case 0xA:
3777 /* Architectural Performance Monitoring Leaf */
3778 if (kvm_enabled() && cpu->enable_pmu) {
3779 KVMState *s = cs->kvm_state;
3780
3781 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3782 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3783 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3784 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3785 } else if (hvf_enabled() && cpu->enable_pmu) {
3786 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3787 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3788 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3789 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3790 } else {
3791 *eax = 0;
3792 *ebx = 0;
3793 *ecx = 0;
3794 *edx = 0;
3795 }
3796 break;
3797 case 0xB:
3798 /* Extended Topology Enumeration Leaf */
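/* For each subleaf (ECX): EAX[4:0] is the number of APIC ID bits to
 * shift right to reach the next topology level, EBX[15:0] the number
 * of logical processors at this level, ECX[15:8] the level type
 * (SMT/Core/Invalid) and EDX the x2APIC ID of the current CPU.
 */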
3799 if (!cpu->enable_cpuid_0xb) {
3800 *eax = *ebx = *ecx = *edx = 0;
3801 break;
3802 }
3803
3804 *ecx = count & 0xff;
3805 *edx = cpu->apic_id;
3806
3807 switch (count) {
3808 case 0:
3809 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3810 *ebx = cs->nr_threads;
3811 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
3812 break;
3813 case 1:
3814 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3815 *ebx = cs->nr_cores * cs->nr_threads;
3816 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3817 break;
3818 default:
3819 *eax = 0;
3820 *ebx = 0;
3821 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3822 }
3823
3824 assert(!(*eax & ~0x1f));
3825 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
3826 break;
3827 case 0xD: {
3828 /* Processor Extended State */
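/* Subleaf 0: EAX/EDX report the supported XCR0 bits (low/high 32 bits),
 * ECX the XSAVE area size for all supported components and EBX the size
 * for the currently enabled components (QEMU reports the same value for
 * both).  Subleaf 1 reports XSAVE extensions such as XSAVEOPT, and
 * subleaf n >= 2 the size and offset of state component n.
 */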
3829 *eax = 0;
3830 *ebx = 0;
3831 *ecx = 0;
3832 *edx = 0;
3833 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3834 break;
3835 }
3836
3837 if (count == 0) {
3838 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3839 *eax = env->features[FEAT_XSAVE_COMP_LO];
3840 *edx = env->features[FEAT_XSAVE_COMP_HI];
3841 *ebx = *ecx;
3842 } else if (count == 1) {
3843 *eax = env->features[FEAT_XSAVE];
3844 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3845 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3846 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3847 *eax = esa->size;
3848 *ebx = esa->offset;
3849 }
3850 }
3851 break;
3852 }
3853 case 0x14: {
3854 /* Intel Processor Trace Enumeration */
3855 *eax = 0;
3856 *ebx = 0;
3857 *ecx = 0;
3858 *edx = 0;
3859 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
3860 !kvm_enabled()) {
3861 break;
3862 }
3863
3864 if (count == 0) {
3865 *eax = INTEL_PT_MAX_SUBLEAF;
3866 *ebx = INTEL_PT_MINIMAL_EBX;
3867 *ecx = INTEL_PT_MINIMAL_ECX;
3868 } else if (count == 1) {
3869 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
3870 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
3871 }
3872 break;
3873 }
3874 case 0x40000000:
3875 /*
3876 * CPUID code in kvm_arch_init_vcpu() ignores anything set here,
3877 * but we nevertheless restrict this leaf to TCG.
3878 */
3879 if (tcg_enabled() && cpu->expose_tcg) {
3880 memcpy(signature, "TCGTCGTCGTCG", 12);
3881 *eax = 0x40000001;
3882 *ebx = signature[0];
3883 *ecx = signature[1];
3884 *edx = signature[2];
3885 } else {
3886 *eax = 0;
3887 *ebx = 0;
3888 *ecx = 0;
3889 *edx = 0;
3890 }
3891 break;
3892 case 0x40000001:
3893 *eax = 0;
3894 *ebx = 0;
3895 *ecx = 0;
3896 *edx = 0;
3897 break;
3898 case 0x80000000:
3899 *eax = env->cpuid_xlevel;
3900 *ebx = env->cpuid_vendor1;
3901 *edx = env->cpuid_vendor2;
3902 *ecx = env->cpuid_vendor3;
3903 break;
3904 case 0x80000001:
3905 *eax = env->cpuid_version;
3906 *ebx = 0;
3907 *ecx = env->features[FEAT_8000_0001_ECX];
3908 *edx = env->features[FEAT_8000_0001_EDX];
3909
3910 /* The Linux kernel checks for the CMPLegacy bit and
3911 * discards multiple thread information if it is set.
3912 * So don't set it here for Intel to make Linux guests happy.
3913 */
3914 if (cs->nr_cores * cs->nr_threads > 1) {
3915 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3916 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3917 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3918 *ecx |= 1 << 1; /* CmpLegacy bit */
3919 }
3920 }
3921 break;
3922 case 0x80000002:
3923 case 0x80000003:
3924 case 0x80000004:
3925 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3926 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3927 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3928 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3929 break;
3930 case 0x80000005:
3931 /* cache info (L1 cache) */
3932 if (cpu->cache_info_passthrough) {
3933 host_cpuid(index, 0, eax, ebx, ecx, edx);
3934 break;
3935 }
3936 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3937 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3938 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3939 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3940 *ecx = encode_cache_cpuid80000005(&l1d_cache_amd);
3941 *edx = encode_cache_cpuid80000005(&l1i_cache_amd);
3942 break;
3943 case 0x80000006:
3944 /* cache info (L2 cache) */
3945 if (cpu->cache_info_passthrough) {
3946 host_cpuid(index, 0, eax, ebx, ecx, edx);
3947 break;
3948 }
3949 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3950 (L2_DTLB_2M_ENTRIES << 16) | \
3951 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3952 (L2_ITLB_2M_ENTRIES);
3953 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3954 (L2_DTLB_4K_ENTRIES << 16) | \
3955 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3956 (L2_ITLB_4K_ENTRIES);
3957 encode_cache_cpuid80000006(&l2_cache_amd,
3958 cpu->enable_l3_cache ? &l3_cache : NULL,
3959 ecx, edx);
3960 break;
3961 case 0x80000007:
3962 *eax = 0;
3963 *ebx = 0;
3964 *ecx = 0;
3965 *edx = env->features[FEAT_8000_0007_EDX];
3966 break;
3967 case 0x80000008:
3968 /* virtual & phys address size in low 2 bytes. */
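/* EAX[7:0] is the physical address width and EAX[15:8] the linear
 * (virtual) address width, hence 0x3000 for 48 bits and 0x3900 for
 * 57 bits below.  ECX[7:0] is set to one less than the total number
 * of logical processors (cores * threads) in the package.
 */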
3969 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3970 /* 64 bit processor */
3971 *eax = cpu->phys_bits; /* configurable physical bits */
3972 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3973 *eax |= 0x00003900; /* 57 bits virtual */
3974 } else {
3975 *eax |= 0x00003000; /* 48 bits virtual */
3976 }
3977 } else {
3978 *eax = cpu->phys_bits;
3979 }
3980 *ebx = env->features[FEAT_8000_0008_EBX];
3981 *ecx = 0;
3982 *edx = 0;
3983 if (cs->nr_cores * cs->nr_threads > 1) {
3984 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
3985 }
3986 break;
3987 case 0x8000000A:
3988 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3989 *eax = 0x00000001; /* SVM Revision */
3990 *ebx = 0x00000010; /* nr of ASIDs */
3991 *ecx = 0;
3992 *edx = env->features[FEAT_SVM]; /* optional features */
3993 } else {
3994 *eax = 0;
3995 *ebx = 0;
3996 *ecx = 0;
3997 *edx = 0;
3998 }
3999 break;
4000 case 0xC0000000:
4001 *eax = env->cpuid_xlevel2;
4002 *ebx = 0;
4003 *ecx = 0;
4004 *edx = 0;
4005 break;
4006 case 0xC0000001:
4007 /* Support for VIA CPU's CPUID instruction */
4008 *eax = env->cpuid_version;
4009 *ebx = 0;
4010 *ecx = 0;
4011 *edx = env->features[FEAT_C000_0001_EDX];
4012 break;
4013 case 0xC0000002:
4014 case 0xC0000003:
4015 case 0xC0000004:
4016 /* Reserved for future use; currently filled with zeros */
4017 *eax = 0;
4018 *ebx = 0;
4019 *ecx = 0;
4020 *edx = 0;
4021 break;
4022 case 0x8000001F:
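/* AMD Secure Encrypted Virtualization leaf: EAX bit 1 advertises SEV
 * support, EBX[5:0] the C-bit position in page table entries and
 * EBX[11:6] the number of physical address bits reduced when SEV is
 * active.
 */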
4023 *eax = sev_enabled() ? 0x2 : 0;
4024 *ebx = sev_get_cbit_position();
4025 *ebx |= sev_get_reduced_phys_bits() << 6;
4026 *ecx = 0;
4027 *edx = 0;
4028 break;
4029 default:
4030 /* reserved values: zero */
4031 *eax = 0;
4032 *ebx = 0;
4033 *ecx = 0;
4034 *edx = 0;
4035 break;
4036 }
4037 }
4038
4039 /* CPUClass::reset() */
4040 static void x86_cpu_reset(CPUState *s)
4041 {
4042 X86CPU *cpu = X86_CPU(s);
4043 X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
4044 CPUX86State *env = &cpu->env;
4045 target_ulong cr4;
4046 uint64_t xcr0;
4047 int i;
4048
4049 xcc->parent_reset(s);
4050
4051 memset(env, 0, offsetof(CPUX86State, end_reset_fields));
4052
4053 env->old_exception = -1;
4054
4055 /* init to reset state */
4056
4057 env->hflags2 |= HF2_GIF_MASK;
4058
4059 cpu_x86_update_cr0(env, 0x60000010);
4060 env->a20_mask = ~0x0;
4061 env->smbase = 0x30000;
4062 env->msr_smi_count = 0;
4063
4064 env->idt.limit = 0xffff;
4065 env->gdt.limit = 0xffff;
4066 env->ldt.limit = 0xffff;
4067 env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
4068 env->tr.limit = 0xffff;
4069 env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);
4070
4071 cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
4072 DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
4073 DESC_R_MASK | DESC_A_MASK);
4074 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
4075 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4076 DESC_A_MASK);
4077 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
4078 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4079 DESC_A_MASK);
4080 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
4081 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4082 DESC_A_MASK);
4083 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
4084 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4085 DESC_A_MASK);
4086 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
4087 DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
4088 DESC_A_MASK);
4089
4090 env->eip = 0xfff0;
4091 env->regs[R_EDX] = env->cpuid_version;
4092
4093 env->eflags = 0x2;
4094
4095 /* FPU init */
4096 for (i = 0; i < 8; i++) {
4097 env->fptags[i] = 1;
4098 }
4099 cpu_set_fpuc(env, 0x37f);
4100
4101 env->mxcsr = 0x1f80;
4102 /* All units are in INIT state. */
4103 env->xstate_bv = 0;
4104
4105 env->pat = 0x0007040600070406ULL;
4106 env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
4107
4108 memset(env->dr, 0, sizeof(env->dr));
4109 env->dr[6] = DR6_FIXED_1;
4110 env->dr[7] = DR7_FIXED_1;
4111 cpu_breakpoint_remove_all(s, BP_CPU);
4112 cpu_watchpoint_remove_all(s, BP_CPU);
4113
4114 cr4 = 0;
4115 xcr0 = XSTATE_FP_MASK;
4116
4117 #ifdef CONFIG_USER_ONLY
4118 /* Enable all the features for user-mode. */
4119 if (env->features[FEAT_1_EDX] & CPUID_SSE) {
4120 xcr0 |= XSTATE_SSE_MASK;
4121 }
4122 for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4123 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4124 if (env->features[esa->feature] & esa->bits) {
4125 xcr0 |= 1ull << i;
4126 }
4127 }
4128
4129 if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
4130 cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
4131 }
4132 if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
4133 cr4 |= CR4_FSGSBASE_MASK;
4134 }
4135 #endif
4136
4137 env->xcr0 = xcr0;
4138 cpu_x86_update_cr4(env, cr4);
4139
4140 /*
4141 * SDM 11.11.5 requires:
4142 * - IA32_MTRR_DEF_TYPE MSR.E = 0
4143 * - IA32_MTRR_PHYSMASKn.V = 0
4144 * All other bits are undefined. For simplification, zero it all.
4145 */
4146 env->mtrr_deftype = 0;
4147 memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
4148 memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
4149
4150 env->interrupt_injected = -1;
4151 env->exception_injected = -1;
4152 env->nmi_injected = false;
4153 #if !defined(CONFIG_USER_ONLY)
4154 /* We hard-wire the BSP to the first CPU. */
4155 apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);
4156
4157 s->halted = !cpu_is_bsp(cpu);
4158
4159 if (kvm_enabled()) {
4160 kvm_arch_reset_vcpu(cpu);
4161 }
4162 else if (hvf_enabled()) {
4163 hvf_reset_vcpu(s);
4164 }
4165 #endif
4166 }
4167
4168 #ifndef CONFIG_USER_ONLY
4169 bool cpu_is_bsp(X86CPU *cpu)
4170 {
4171 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4172 }
4173
4174 /* TODO: remove me, when reset over QOM tree is implemented */
4175 static void x86_cpu_machine_reset_cb(void *opaque)
4176 {
4177 X86CPU *cpu = opaque;
4178 cpu_reset(CPU(cpu));
4179 }
4180 #endif
4181
4182 static void mce_init(X86CPU *cpu)
4183 {
4184 CPUX86State *cenv = &cpu->env;
4185 unsigned int bank;
4186
4187 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4188 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4189 (CPUID_MCE | CPUID_MCA)) {
4190 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4191 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4192 cenv->mcg_ctl = ~(uint64_t)0;
4193 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4194 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4195 }
4196 }
4197 }
4198
4199 #ifndef CONFIG_USER_ONLY
4200 APICCommonClass *apic_get_class(void)
4201 {
4202 const char *apic_type = "apic";
4203
4204 /* TODO: in-kernel irqchip for hvf */
4205 if (kvm_apic_in_kernel()) {
4206 apic_type = "kvm-apic";
4207 } else if (xen_enabled()) {
4208 apic_type = "xen-apic";
4209 }
4210
4211 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4212 }
4213
4214 static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
4215 {
4216 APICCommonState *apic;
4217 ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());
4218
4219 cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));
4220
4221 object_property_add_child(OBJECT(cpu), "lapic",
4222 OBJECT(cpu->apic_state), &error_abort);
4223 object_unref(OBJECT(cpu->apic_state));
4224
4225 qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
4226 /* TODO: convert to link<> */
4227 apic = APIC_COMMON(cpu->apic_state);
4228 apic->cpu = cpu;
4229 apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
4230 }
4231
4232 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4233 {
4234 APICCommonState *apic;
4235 static bool apic_mmio_map_once;
4236
4237 if (cpu->apic_state == NULL) {
4238 return;
4239 }
4240 object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
4241 errp);
4242
4243 /* Map APIC MMIO area */
4244 apic = APIC_COMMON(cpu->apic_state);
4245 if (!apic_mmio_map_once) {
4246 memory_region_add_subregion_overlap(get_system_memory(),
4247 apic->apicbase &
4248 MSR_IA32_APICBASE_BASE,
4249 &apic->io_memory,
4250 0x1000);
4251 apic_mmio_map_once = true;
4252 }
4253 }
4254
4255 static void x86_cpu_machine_done(Notifier *n, void *unused)
4256 {
4257 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4258 MemoryRegion *smram =
4259 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4260
4261 if (smram) {
4262 cpu->smram = g_new(MemoryRegion, 1);
4263 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4264 smram, 0, 1ull << 32);
4265 memory_region_set_enabled(cpu->smram, true);
4266 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
4267 }
4268 }
4269 #else
4270 static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
4271 {
4272 }
4273 #endif
4274
4275 /* Note: Only safe for use on x86(-64) hosts */
4276 static uint32_t x86_host_phys_bits(void)
4277 {
4278 uint32_t eax;
4279 uint32_t host_phys_bits;
4280
4281 host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
4282 if (eax >= 0x80000008) {
4283 host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
4284 /* Note: According to AMD doc 25481 rev 2.34 there is a field
4285 * at bits 23:16 that can specify the maximum physical address bits
4286 * for the guest and override this value; but I've not seen
4287 * anything with that set.
4288 */
4289 host_phys_bits = eax & 0xff;
4290 } else {
4291 /* It's an odd 64 bit machine that doesn't have the leaf for
4292 * physical address bits; fall back to 36, which matches most
4293 * older Intel CPUs.
4294 */
4295 host_phys_bits = 36;
4296 }
4297
4298 return host_phys_bits;
4299 }
4300
4301 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4302 {
4303 if (*min < value) {
4304 *min = value;
4305 }
4306 }
4307
4308 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4309 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4310 {
4311 CPUX86State *env = &cpu->env;
4312 FeatureWordInfo *fi = &feature_word_info[w];
4313 uint32_t eax = fi->cpuid_eax;
4314 uint32_t region = eax & 0xF0000000;
4315
4316 if (!env->features[w]) {
4317 return;
4318 }
4319
4320 switch (region) {
4321 case 0x00000000:
4322 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4323 break;
4324 case 0x80000000:
4325 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4326 break;
4327 case 0xC0000000:
4328 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4329 break;
4330 }
4331 }
4332
4333 /* Calculate XSAVE components based on the configured CPU feature flags */
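/* For example, if the configured features include AVX, state component 2
 * (YMM state) is enabled by the loop below and bit 2 ends up set in
 * FEAT_XSAVE_COMP_LO.
 */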
4334 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4335 {
4336 CPUX86State *env = &cpu->env;
4337 int i;
4338 uint64_t mask;
4339
4340 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4341 return;
4342 }
4343
4344 mask = 0;
4345 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4346 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4347 if (env->features[esa->feature] & esa->bits) {
4348 mask |= (1ULL << i);
4349 }
4350 }
4351
4352 env->features[FEAT_XSAVE_COMP_LO] = mask;
4353 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4354 }
4355
4356 /***** Steps involved on loading and filtering CPUID data
4357 *
4358 * When initializing and realizing a CPU object, the steps
4359 * involved in setting up CPUID data are:
4360 *
4361 * 1) Loading CPU model definition (X86CPUDefinition). This is
4362 * implemented by x86_cpu_load_def() and should be completely
4363 * transparent, as it is done automatically by instance_init.
4364 * No code should need to look at X86CPUDefinition structs
4365 * outside instance_init.
4366 *
4367 * 2) CPU expansion. This is done by realize before CPUID
4368 * filtering, and will make sure host/accelerator data is
4369 * loaded for CPU models that depend on host capabilities
4370 * (e.g. "host"). Done by x86_cpu_expand_features().
4371 *
4372 * 3) CPUID filtering. This initializes extra data related to
4373 * CPUID, and checks if the host supports all capabilities
4374 * required by the CPU. Runnability of a CPU model is
4375 * determined at this step. Done by x86_cpu_filter_features().
4376 *
4377 * Some operations don't require all steps to be performed.
4378 * More precisely:
4379 *
4380 * - CPU instance creation (instance_init) will run only CPU
4381 * model loading. CPU expansion can't run at instance_init-time
4382 * because host/accelerator data may be not available yet.
4383 * - CPU realization will perform both CPU model expansion and CPUID
4384 * filtering, and return an error in case one of them fails.
4385 * - query-cpu-definitions needs to run all 3 steps. It needs
4386 * to run CPUID filtering, as the 'unavailable-features'
4387 * field is set based on the filtering results.
4388 * - The query-cpu-model-expansion QMP command only needs to run
4389 * CPU model loading and CPU expansion. It should not filter
4390 * any CPUID data based on host capabilities.
4391 */
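/*
 * A rough sketch of the ordering described above (illustrative only;
 * the authoritative flow is x86_cpu_realizefn() further below):
 *
 *   x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);  // instance_init
 *   x86_cpu_expand_features(cpu, &err);                  // realize
 *   if (x86_cpu_filter_features(cpu) && cpu->enforce_cpuid) {
 *       // realize fails: host lacks requested features
 *   }
 */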
4392
4393 /* Expand CPU configuration data, based on configured features
4394 * and host/accelerator capabilities when appropriate.
4395 */
4396 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
4397 {
4398 CPUX86State *env = &cpu->env;
4399 FeatureWord w;
4400 GList *l;
4401 Error *local_err = NULL;
4402
4403 /* TODO: Now that cpu->max_features doesn't overwrite features
4404 * set using QOM properties, we can also convert
4405 * plus_features & minus_features to global properties
4406 * inside x86_cpu_parse_featurestr().
4407 */
4408 if (cpu->max_features) {
4409 for (w = 0; w < FEATURE_WORDS; w++) {
4410 /* Override only features that weren't set explicitly
4411 * by the user.
4412 */
4413 env->features[w] |=
4414 x86_cpu_get_supported_feature_word(w, cpu->migratable) &
4415 ~env->user_features[w] & \
4416 ~feature_word_info[w].no_autoenable_flags;
4417 }
4418 }
4419
4420 for (l = plus_features; l; l = l->next) {
4421 const char *prop = l->data;
4422 object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
4423 if (local_err) {
4424 goto out;
4425 }
4426 }
4427
4428 for (l = minus_features; l; l = l->next) {
4429 const char *prop = l->data;
4430 object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
4431 if (local_err) {
4432 goto out;
4433 }
4434 }
4435
4436 if (!kvm_enabled() || !cpu->expose_kvm) {
4437 env->features[FEAT_KVM] = 0;
4438 }
4439
4440 x86_cpu_enable_xsave_components(cpu);
4441
4442 /* CPUID[EAX=7,ECX=0].EBX always increases the level automatically: */
4443 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
4444 if (cpu->full_cpuid_auto_level) {
4445 x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
4446 x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
4447 x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
4448 x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
4449 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
4450 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
4451 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
4452 x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
4453 x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
4454 x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
4455 x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
4456 /* SVM requires CPUID[0x8000000A] */
4457 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
4458 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
4459 }
4460
4461 /* SEV requires CPUID[0x8000001F] */
4462 if (sev_enabled()) {
4463 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
4464 }
4465 }
4466
4467 /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
4468 if (env->cpuid_level == UINT32_MAX) {
4469 env->cpuid_level = env->cpuid_min_level;
4470 }
4471 if (env->cpuid_xlevel == UINT32_MAX) {
4472 env->cpuid_xlevel = env->cpuid_min_xlevel;
4473 }
4474 if (env->cpuid_xlevel2 == UINT32_MAX) {
4475 env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
4476 }
4477
4478 out:
4479 if (local_err != NULL) {
4480 error_propagate(errp, local_err);
4481 }
4482 }
4483
4484 /*
4485 * Finishes initialization of CPUID data, filters CPU feature
4486 * words based on host availability of each feature.
4487 *
4488 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
4489 */
4490 static int x86_cpu_filter_features(X86CPU *cpu)
4491 {
4492 CPUX86State *env = &cpu->env;
4493 FeatureWord w;
4494 int rv = 0;
4495
4496 for (w = 0; w < FEATURE_WORDS; w++) {
4497 uint32_t host_feat =
4498 x86_cpu_get_supported_feature_word(w, false);
4499 uint32_t requested_features = env->features[w];
4500 env->features[w] &= host_feat;
4501 cpu->filtered_features[w] = requested_features & ~env->features[w];
4502 if (cpu->filtered_features[w]) {
4503 rv = 1;
4504 }
4505 }
4506
4507 if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
4508 kvm_enabled()) {
4509 KVMState *s = CPU(cpu)->kvm_state;
4510 uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
4511 uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
4512 uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
4513 uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
4514 uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
4515
4516 if (!eax_0 ||
4517 ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
4518 ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
4519 ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
4520 ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
4521 INTEL_PT_ADDR_RANGES_NUM) ||
4522 ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
4523 (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
4524 (ecx_0 & INTEL_PT_IP_LIP)) {
4525 /*
4526 * Processor Trace capabilities aren't configurable, so if the
4527 * host can't emulate the capabilities we report on
4528 * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
4529 */
4530 env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
4531 cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
4532 rv = 1;
4533 }
4534 }
4535
4536 return rv;
4537 }
4538
4539 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4540 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4541 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4542 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4543 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4544 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
4545 static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
4546 {
4547 CPUState *cs = CPU(dev);
4548 X86CPU *cpu = X86_CPU(dev);
4549 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4550 CPUX86State *env = &cpu->env;
4551 Error *local_err = NULL;
4552 static bool ht_warned;
4553
4554 if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
4555 char *name = x86_cpu_class_get_model_name(xcc);
4556 error_setg(&local_err, "CPU model '%s' requires KVM", name);
4557 g_free(name);
4558 goto out;
4559 }
4560
4561 if (cpu->apic_id == UNASSIGNED_APIC_ID) {
4562 error_setg(errp, "apic-id property was not initialized properly");
4563 return;
4564 }
4565
4566 x86_cpu_expand_features(cpu, &local_err);
4567 if (local_err) {
4568 goto out;
4569 }
4570
4571 if (x86_cpu_filter_features(cpu) &&
4572 (cpu->check_cpuid || cpu->enforce_cpuid)) {
4573 x86_cpu_report_filtered_features(cpu);
4574 if (cpu->enforce_cpuid) {
4575 error_setg(&local_err,
4576 accel_uses_host_cpuid() ?
4577 "Host doesn't support requested features" :
4578 "TCG doesn't support requested features");
4579 goto out;
4580 }
4581 }
4582
4583 /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
4584 * CPUID[1].EDX.
4585 */
4586 if (IS_AMD_CPU(env)) {
4587 env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
4588 env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
4589 & CPUID_EXT2_AMD_ALIASES);
4590 }
4591
4592 /* For 64 bit systems, think about the number of physical bits to present.
4593 * Ideally this should be the same as the host; anything other than matching
4594 * the host can cause incorrect guest behaviour.
4595 * QEMU used to pick the magic value of 40 bits, which corresponds to
4596 * consumer AMD devices but nothing else.
4597 */
4598 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
4599 if (accel_uses_host_cpuid()) {
4600 uint32_t host_phys_bits = x86_host_phys_bits();
4601 static bool warned;
4602
4603 if (cpu->host_phys_bits) {
4604 /* The user asked for us to use the host physical bits */
4605 cpu->phys_bits = host_phys_bits;
4606 }
4607
4608 /* Print a warning if the user set it to a value that's not the
4609 * host value.
4610 */
4611 if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
4612 !warned) {
4613 warn_report("Host physical bits (%u)"
4614 " does not match phys-bits property (%u)",
4615 host_phys_bits, cpu->phys_bits);
4616 warned = true;
4617 }
4618
4619 if (cpu->phys_bits &&
4620 (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
4621 cpu->phys_bits < 32)) {
4622 error_setg(errp, "phys-bits should be between 32 and %u "
4623 " (but is %u)",
4624 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
4625 return;
4626 }
4627 } else {
4628 if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
4629 error_setg(errp, "TCG only supports phys-bits=%u",
4630 TCG_PHYS_ADDR_BITS);
4631 return;
4632 }
4633 }
4634 /* 0 means it was not explicitly set by the user (or by machine
4635 * compat_props or by the host code above). In this case, the default
4636 * is the value used by TCG (40).
4637 */
4638 if (cpu->phys_bits == 0) {
4639 cpu->phys_bits = TCG_PHYS_ADDR_BITS;
4640 }
4641 } else {
4642 /* For 32 bit systems don't use the user set value, but keep
4643 * phys_bits consistent with what we tell the guest.
4644 */
4645 if (cpu->phys_bits != 0) {
4646 error_setg(errp, "phys-bits is not user-configurable in 32 bit");
4647 return;
4648 }
4649
4650 if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
4651 cpu->phys_bits = 36;
4652 } else {
4653 cpu->phys_bits = 32;
4654 }
4655 }
4656 cpu_exec_realizefn(cs, &local_err);
4657 if (local_err != NULL) {
4658 error_propagate(errp, local_err);
4659 return;
4660 }
4661
4662 #ifndef CONFIG_USER_ONLY
4663 qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
4664
4665 if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
4666 x86_cpu_apic_create(cpu, &local_err);
4667 if (local_err != NULL) {
4668 goto out;
4669 }
4670 }
4671 #endif
4672
4673 mce_init(cpu);
4674
4675 #ifndef CONFIG_USER_ONLY
4676 if (tcg_enabled()) {
4677 cpu->cpu_as_mem = g_new(MemoryRegion, 1);
4678 cpu->cpu_as_root = g_new(MemoryRegion, 1);
4679
4680 /* Outer container... */
4681 memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
4682 memory_region_set_enabled(cpu->cpu_as_root, true);
4683
4684 /* ... with two regions inside: normal system memory with low
4685 * priority, and...
4686 */
4687 memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
4688 get_system_memory(), 0, ~0ull);
4689 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
4690 memory_region_set_enabled(cpu->cpu_as_mem, true);
4691
4692 cs->num_ases = 2;
4693 cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
4694 cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
4695
4696 /* ... SMRAM with higher priority, linked from /machine/smram. */
4697 cpu->machine_done.notify = x86_cpu_machine_done;
4698 qemu_add_machine_init_done_notifier(&cpu->machine_done);
4699 }
4700 #endif
4701
4702 qemu_init_vcpu(cs);
4703
4704 /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
4705 * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
4706 * based on inputs (sockets, cores, threads), it is still better to give
4707 * users a warning.
4708 *
4709 * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
4710 * cs->nr_threads hasn't been populated yet and the check would be incorrect.
4711 */
4712 if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
4713 error_report("AMD CPU doesn't support hyperthreading. Please configure"
4714 " -smp options properly.");
4715 ht_warned = true;
4716 }
4717
4718 x86_cpu_apic_realize(cpu, &local_err);
4719 if (local_err != NULL) {
4720 goto out;
4721 }
4722 cpu_reset(cs);
4723
4724 xcc->parent_realize(dev, &local_err);
4725
4726 out:
4727 if (local_err != NULL) {
4728 error_propagate(errp, local_err);
4729 return;
4730 }
4731 }
4732
4733 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4734 {
4735 X86CPU *cpu = X86_CPU(dev);
4736 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4737 Error *local_err = NULL;
4738
4739 #ifndef CONFIG_USER_ONLY
4740 cpu_remove_sync(CPU(dev));
4741 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4742 #endif
4743
4744 if (cpu->apic_state) {
4745 object_unparent(OBJECT(cpu->apic_state));
4746 cpu->apic_state = NULL;
4747 }
4748
4749 xcc->parent_unrealize(dev, &local_err);
4750 if (local_err != NULL) {
4751 error_propagate(errp, local_err);
4752 return;
4753 }
4754 }
4755
4756 typedef struct BitProperty {
4757 FeatureWord w;
4758 uint32_t mask;
4759 } BitProperty;
4760
4761 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4762 void *opaque, Error **errp)
4763 {
4764 X86CPU *cpu = X86_CPU(obj);
4765 BitProperty *fp = opaque;
4766 uint32_t f = cpu->env.features[fp->w];
4767 bool value = (f & fp->mask) == fp->mask;
4768 visit_type_bool(v, name, &value, errp);
4769 }
4770
4771 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4772 void *opaque, Error **errp)
4773 {
4774 DeviceState *dev = DEVICE(obj);
4775 X86CPU *cpu = X86_CPU(obj);
4776 BitProperty *fp = opaque;
4777 Error *local_err = NULL;
4778 bool value;
4779
4780 if (dev->realized) {
4781 qdev_prop_set_after_realize(dev, name, errp);
4782 return;
4783 }
4784
4785 visit_type_bool(v, name, &value, &local_err);
4786 if (local_err) {
4787 error_propagate(errp, local_err);
4788 return;
4789 }
4790
4791 if (value) {
4792 cpu->env.features[fp->w] |= fp->mask;
4793 } else {
4794 cpu->env.features[fp->w] &= ~fp->mask;
4795 }
4796 cpu->env.user_features[fp->w] |= fp->mask;
4797 }
4798
4799 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4800 void *opaque)
4801 {
4802 BitProperty *prop = opaque;
4803 g_free(prop);
4804 }
4805
4806 /* Register a boolean property to get/set a single bit in a uint32_t field.
4807 *
4808 * The same property name can be registered multiple times to make it affect
4809 * multiple bits in the same FeatureWord. In that case, the getter will return
4810 * true only if all bits are set.
4811 */
4812 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4813 const char *prop_name,
4814 FeatureWord w,
4815 int bitnr)
4816 {
4817 BitProperty *fp;
4818 ObjectProperty *op;
4819 uint32_t mask = (1UL << bitnr);
4820
4821 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4822 if (op) {
4823 fp = op->opaque;
4824 assert(fp->w == w);
4825 fp->mask |= mask;
4826 } else {
4827 fp = g_new0(BitProperty, 1);
4828 fp->w = w;
4829 fp->mask = mask;
4830 object_property_add(OBJECT(cpu), prop_name, "bool",
4831 x86_cpu_get_bit_prop,
4832 x86_cpu_set_bit_prop,
4833 x86_cpu_release_bit_prop, fp, &error_abort);
4834 }
4835 }
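/* Once registered, a feature bit behaves like any other boolean QOM
 * property; a hypothetical caller could, for instance, do
 *
 *   object_property_set_bool(OBJECT(cpu), true, "avx2", &error_abort);
 *
 * which is also what "+avx2" in a -cpu option resolves to via the
 * plus_features handling in x86_cpu_expand_features().
 */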
4836
4837 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4838 FeatureWord w,
4839 int bitnr)
4840 {
4841 FeatureWordInfo *fi = &feature_word_info[w];
4842 const char *name = fi->feat_names[bitnr];
4843
4844 if (!name) {
4845 return;
4846 }
4847
4848 /* Property names should use "-" instead of "_".
4849 * Old names containing underscores are registered as aliases
4850 * using object_property_add_alias()
4851 */
4852 assert(!strchr(name, '_'));
4853 /* aliases don't use "|" delimiters anymore, they are registered
4854 * manually using object_property_add_alias() */
4855 assert(!strchr(name, '|'));
4856 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
4857 }
4858
4859 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
4860 {
4861 X86CPU *cpu = X86_CPU(cs);
4862 CPUX86State *env = &cpu->env;
4863 GuestPanicInformation *panic_info = NULL;
4864
4865 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
4866 panic_info = g_malloc0(sizeof(GuestPanicInformation));
4867
4868 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
4869
4870 assert(HV_CRASH_PARAMS >= 5);
4871 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
4872 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
4873 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
4874 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
4875 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
4876 }
4877
4878 return panic_info;
4879 }
4880 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
4881 const char *name, void *opaque,
4882 Error **errp)
4883 {
4884 CPUState *cs = CPU(obj);
4885 GuestPanicInformation *panic_info;
4886
4887 if (!cs->crash_occurred) {
4888 error_setg(errp, "No crash occurred");
4889 return;
4890 }
4891
4892 panic_info = x86_cpu_get_crash_info(cs);
4893 if (panic_info == NULL) {
4894 error_setg(errp, "No crash information");
4895 return;
4896 }
4897
4898 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
4899 errp);
4900 qapi_free_GuestPanicInformation(panic_info);
4901 }
4902
4903 static void x86_cpu_initfn(Object *obj)
4904 {
4905 CPUState *cs = CPU(obj);
4906 X86CPU *cpu = X86_CPU(obj);
4907 X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
4908 CPUX86State *env = &cpu->env;
4909 FeatureWord w;
4910
4911 cs->env_ptr = env;
4912
4913 object_property_add(obj, "family", "int",
4914 x86_cpuid_version_get_family,
4915 x86_cpuid_version_set_family, NULL, NULL, NULL);
4916 object_property_add(obj, "model", "int",
4917 x86_cpuid_version_get_model,
4918 x86_cpuid_version_set_model, NULL, NULL, NULL);
4919 object_property_add(obj, "stepping", "int",
4920 x86_cpuid_version_get_stepping,
4921 x86_cpuid_version_set_stepping, NULL, NULL, NULL);
4922 object_property_add_str(obj, "vendor",
4923 x86_cpuid_get_vendor,
4924 x86_cpuid_set_vendor, NULL);
4925 object_property_add_str(obj, "model-id",
4926 x86_cpuid_get_model_id,
4927 x86_cpuid_set_model_id, NULL);
4928 object_property_add(obj, "tsc-frequency", "int",
4929 x86_cpuid_get_tsc_freq,
4930 x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
4931 object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
4932 x86_cpu_get_feature_words,
4933 NULL, NULL, (void *)env->features, NULL);
4934 object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
4935 x86_cpu_get_feature_words,
4936 NULL, NULL, (void *)cpu->filtered_features, NULL);
4937
4938 object_property_add(obj, "crash-information", "GuestPanicInformation",
4939 x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);
4940
4941 cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;
4942
4943 for (w = 0; w < FEATURE_WORDS; w++) {
4944 int bitnr;
4945
4946 for (bitnr = 0; bitnr < 32; bitnr++) {
4947 x86_cpu_register_feature_bit_props(cpu, w, bitnr);
4948 }
4949 }
4950
4951 object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
4952 object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
4953 object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
4954 object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
4955 object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
4956 object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
4957 object_property_add_alias(obj, "i64", obj, "lm", &error_abort);
4958
4959 object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
4960 object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
4961 object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
4962 object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
4963 object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
4964 object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
4965 object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
4966 object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
4967 object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
4968 object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
4969 object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
4970 object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
4971 object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
4972 object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
4973 object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
4974 object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
4975 object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
4976 object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
4977 object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
4978 object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
4979 object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);
4980
4981 if (xcc->cpu_def) {
4982 x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
4983 }
4984 }
4985
4986 static int64_t x86_cpu_get_arch_id(CPUState *cs)
4987 {
4988 X86CPU *cpu = X86_CPU(cs);
4989
4990 return cpu->apic_id;
4991 }
4992
4993 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
4994 {
4995 X86CPU *cpu = X86_CPU(cs);
4996
4997 return cpu->env.cr[0] & CR0_PG_MASK;
4998 }
4999
5000 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5001 {
5002 X86CPU *cpu = X86_CPU(cs);
5003
5004 cpu->env.eip = value;
5005 }
5006
5007 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5008 {
5009 X86CPU *cpu = X86_CPU(cs);
5010
5011 cpu->env.eip = tb->pc - tb->cs_base;
5012 }
5013
5014 static bool x86_cpu_has_work(CPUState *cs)
5015 {
5016 X86CPU *cpu = X86_CPU(cs);
5017 CPUX86State *env = &cpu->env;
5018
5019 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
5020 CPU_INTERRUPT_POLL)) &&
5021 (env->eflags & IF_MASK)) ||
5022 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
5023 CPU_INTERRUPT_INIT |
5024 CPU_INTERRUPT_SIPI |
5025 CPU_INTERRUPT_MCE)) ||
5026 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
5027 !(env->hflags & HF_SMM_MASK));
5028 }
5029
5030 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5031 {
5032 X86CPU *cpu = X86_CPU(cs);
5033 CPUX86State *env = &cpu->env;
5034
5035 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5036 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5037 : bfd_mach_i386_i8086);
5038 info->print_insn = print_insn_i386;
5039
5040 info->cap_arch = CS_ARCH_X86;
5041 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5042 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5043 : CS_MODE_16);
5044 info->cap_insn_unit = 1;
5045 info->cap_insn_split = 8;
5046 }
5047
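/*
 * Recompute the mode-related hflags bits (CPL, PE, MP, EM, TS, TF, VM,
 * IOPL, OSFXSR, LMA, CS32, SS32, CS64, ADDSEG) from the current segment
 * descriptors, CR0, CR4, EFER and EFLAGS; every other hflags bit is
 * carried over unchanged via HFLAG_COPY_MASK.
 */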
5048 void x86_update_hflags(CPUX86State *env)
5049 {
5050 uint32_t hflags;
5051 #define HFLAG_COPY_MASK \
5052 ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
5053 HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
5054 HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
5055 HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
5056
5057 hflags = env->hflags & HFLAG_COPY_MASK;
5058 hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
5059 hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
5060 hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
5061 (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
5062 hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
5063
5064 if (env->cr[4] & CR4_OSFXSR_MASK) {
5065 hflags |= HF_OSFXSR_MASK;
5066 }
5067
5068 if (env->efer & MSR_EFER_LMA) {
5069 hflags |= HF_LMA_MASK;
5070 }
5071
5072 if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
5073 hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
5074 } else {
5075 hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
5076 (DESC_B_SHIFT - HF_CS32_SHIFT);
5077 hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
5078 (DESC_B_SHIFT - HF_SS32_SHIFT);
5079 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
5080 !(hflags & HF_CS32_MASK)) {
5081 hflags |= HF_ADDSEG_MASK;
5082 } else {
5083 hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
5084 env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
5085 }
5086 }
5087 env->hflags = hflags;
5088 }
5089
5090 static Property x86_cpu_properties[] = {
5091 #ifdef CONFIG_USER_ONLY
5092 /* apic_id = 0 by default for *-user, see commit 9886e834 */
5093 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
5094 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
5095 DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
5096 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
5097 #else
5098 DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
5099 DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
5100 DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
5101 DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
5102 #endif
5103 DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
5104 DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
5105 { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
5106 DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
5107 DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
5108 DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
5109 DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
5110 DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
5111 DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
5112 DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
5113 DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
5114 DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
5115 DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
5116 DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
5117 DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
5118 DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
5119 DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
5120 DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
5121 DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
5122 DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
5123 DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
5124 DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
5125 DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
5126 DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
5127 DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
5128 DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
5129 DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
5130 DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
5131 DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
5132 DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
5133 DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
5134 DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
5135 false),
5136 DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
5137 DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
5138
5139 /*
5140 * From "Requirements for Implementing the Microsoft
5141 * Hypervisor Interface":
5142 * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
5143 *
5144 * "Starting with Windows Server 2012 and Windows 8, if
5145 * CPUID.40000005.EAX contains a value of -1, Windows assumes that
5146 * the hypervisor imposes no specific limit to the number of VPs.
5147 * In this case, Windows Server 2012 guest VMs may use more than
5148 * 64 VPs, up to the maximum supported number of processors applicable
5149 * to the specific Windows version being used."
5150 */
5151 DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
5152 DEFINE_PROP_END_OF_LIST()
5153 };
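/*
 * Illustrative only (not taken from this file or the QEMU docs): the
 * Hyper-V enlightenment properties above are usually enabled together
 * for Windows guests, e.g.
 *   -cpu Skylake-Client,hv-relaxed=on,hv-vapic=on,hv-spinlocks=0x1fff,hv-time=on
 */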
5154
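/*
 * Class init shared by every x86 CPU type: installs the x86-specific
 * realize/unrealize and reset hooks and fills in the CPUClass callbacks
 * (interrupt handling, gdbstub access, state dump, ELF notes, etc.).
 */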
5155 static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
5156 {
5157 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5158 CPUClass *cc = CPU_CLASS(oc);
5159 DeviceClass *dc = DEVICE_CLASS(oc);
5160
5161 device_class_set_parent_realize(dc, x86_cpu_realizefn,
5162 &xcc->parent_realize);
5163 device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
5164 &xcc->parent_unrealize);
5165 dc->props = x86_cpu_properties;
5166
5167 xcc->parent_reset = cc->reset;
5168 cc->reset = x86_cpu_reset;
5169 cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;
5170
5171 cc->class_by_name = x86_cpu_class_by_name;
5172 cc->parse_features = x86_cpu_parse_featurestr;
5173 cc->has_work = x86_cpu_has_work;
5174 #ifdef CONFIG_TCG
5175 cc->do_interrupt = x86_cpu_do_interrupt;
5176 cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
5177 #endif
5178 cc->dump_state = x86_cpu_dump_state;
5179 cc->get_crash_info = x86_cpu_get_crash_info;
5180 cc->set_pc = x86_cpu_set_pc;
5181 cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
5182 cc->gdb_read_register = x86_cpu_gdb_read_register;
5183 cc->gdb_write_register = x86_cpu_gdb_write_register;
5184 cc->get_arch_id = x86_cpu_get_arch_id;
5185 cc->get_paging_enabled = x86_cpu_get_paging_enabled;
5186 #ifdef CONFIG_USER_ONLY
5187 cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
5188 #else
5189 cc->asidx_from_attrs = x86_asidx_from_attrs;
5190 cc->get_memory_mapping = x86_cpu_get_memory_mapping;
5191 cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
5192 cc->write_elf64_note = x86_cpu_write_elf64_note;
5193 cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
5194 cc->write_elf32_note = x86_cpu_write_elf32_note;
5195 cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
5196 cc->vmsd = &vmstate_x86_cpu;
5197 #endif
5198 cc->gdb_arch_name = x86_gdb_arch_name;
5199 #ifdef TARGET_X86_64
5200 cc->gdb_core_xml_file = "i386-64bit.xml";
5201 cc->gdb_num_core_regs = 57;
5202 #else
5203 cc->gdb_core_xml_file = "i386-32bit.xml";
5204 cc->gdb_num_core_regs = 41;
5205 #endif
5206 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
5207 cc->debug_excp_handler = breakpoint_handler;
5208 #endif
5209 cc->cpu_exec_enter = x86_cpu_exec_enter;
5210 cc->cpu_exec_exit = x86_cpu_exec_exit;
5211 #ifdef CONFIG_TCG
5212 cc->tcg_initialize = tcg_x86_init;
5213 #endif
5214 cc->disas_set_info = x86_disas_set_info;
5215
5216 dc->user_creatable = true;
5217 }
5218
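/* Abstract QOM base type that all concrete x86 CPU models inherit from. */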
5219 static const TypeInfo x86_cpu_type_info = {
5220 .name = TYPE_X86_CPU,
5221 .parent = TYPE_CPU,
5222 .instance_size = sizeof(X86CPU),
5223 .instance_init = x86_cpu_initfn,
5224 .abstract = true,
5225 .class_size = sizeof(X86CPUClass),
5226 .class_init = x86_cpu_common_class_init,
5227 };
5228
5229
5230 /* "base" CPU model, used by query-cpu-model-expansion */
5231 static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
5232 {
5233 X86CPUClass *xcc = X86_CPU_CLASS(oc);
5234
5235 xcc->static_model = true;
5236 xcc->migration_safe = true;
5237 xcc->model_description = "base CPU model type with no features enabled";
5238 xcc->ordering = 8;
5239 }
5240
5241 static const TypeInfo x86_base_cpu_type_info = {
5242 .name = X86_CPU_TYPE_NAME("base"),
5243 .parent = TYPE_X86_CPU,
5244 .class_init = x86_cpu_base_class_init,
5245 };
5246
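/*
 * Register the abstract base type, one concrete type per builtin_x86_defs
 * entry, plus the special "max", "base" and (with KVM or HVF) "host"
 * models.
 */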
5247 static void x86_cpu_register_types(void)
5248 {
5249 int i;
5250
5251 type_register_static(&x86_cpu_type_info);
5252 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5253 x86_register_cpudef_type(&builtin_x86_defs[i]);
5254 }
5255 type_register_static(&max_x86_cpu_type_info);
5256 type_register_static(&x86_base_cpu_type_info);
5257 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5258 type_register_static(&host_x86_cpu_type_info);
5259 #endif
5260 }
5261
5262 type_init(x86_cpu_register_types)