]> git.proxmox.com Git - mirror_qemu.git/blob - target/i386/cpu.c
i386: Clean up cache CPUID code
[mirror_qemu.git] / target / i386 / cpu.c
1 /*
2 * i386 CPUID helper functions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21 #include "qemu/cutils.h"
22
23 #include "cpu.h"
24 #include "exec/exec-all.h"
25 #include "sysemu/kvm.h"
26 #include "sysemu/hvf.h"
27 #include "sysemu/cpus.h"
28 #include "kvm_i386.h"
29 #include "sev_i386.h"
30
31 #include "qemu/error-report.h"
32 #include "qemu/option.h"
33 #include "qemu/config-file.h"
34 #include "qapi/error.h"
35 #include "qapi/qapi-visit-misc.h"
36 #include "qapi/qapi-visit-run-state.h"
37 #include "qapi/qmp/qdict.h"
38 #include "qapi/qmp/qerror.h"
39 #include "qapi/visitor.h"
40 #include "qom/qom-qobject.h"
41 #include "sysemu/arch_init.h"
42
43 #include "standard-headers/asm-x86/kvm_para.h"
44
45 #include "sysemu/sysemu.h"
46 #include "hw/qdev-properties.h"
47 #include "hw/i386/topology.h"
48 #ifndef CONFIG_USER_ONLY
49 #include "exec/address-spaces.h"
50 #include "hw/hw.h"
51 #include "hw/xen/xen.h"
52 #include "hw/i386/apic_internal.h"
53 #endif
54
55 #include "disas/capstone.h"
56
57 /* Helpers for building CPUID[2] descriptors: */
58
/* Properties identified by one CPUID[2] cache descriptor byte. */
struct CPUID2CacheDescriptorInfo {
    enum CacheType type;    /* data, instruction, or unified */
    int level;              /* cache level (1, 2, 3) */
    int size;               /* total size in bytes */
    int line_size;          /* cache line size in bytes */
    int associativity;      /* number of ways */
};
66
/* Binary byte-size multipliers used by the cache tables below. */
#define KiB 1024
#define MiB (1024 * 1024)
69
70 /*
71 * Known CPUID 2 cache descriptors.
72 * From Intel SDM Volume 2A, CPUID instruction
73 */
74 struct CPUID2CacheDescriptorInfo cpuid2_cache_descriptors[] = {
75 [0x06] = { .level = 1, .type = ICACHE, .size = 8 * KiB,
76 .associativity = 4, .line_size = 32, },
77 [0x08] = { .level = 1, .type = ICACHE, .size = 16 * KiB,
78 .associativity = 4, .line_size = 32, },
79 [0x09] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
80 .associativity = 4, .line_size = 64, },
81 [0x0A] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
82 .associativity = 2, .line_size = 32, },
83 [0x0C] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
84 .associativity = 4, .line_size = 32, },
85 [0x0D] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
86 .associativity = 4, .line_size = 64, },
87 [0x0E] = { .level = 1, .type = DCACHE, .size = 24 * KiB,
88 .associativity = 6, .line_size = 64, },
89 [0x1D] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
90 .associativity = 2, .line_size = 64, },
91 [0x21] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
92 .associativity = 8, .line_size = 64, },
93 /* lines per sector is not supported cpuid2_cache_descriptor(),
94 * so descriptors 0x22, 0x23 are not included
95 */
96 [0x24] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
97 .associativity = 16, .line_size = 64, },
98 /* lines per sector is not supported cpuid2_cache_descriptor(),
99 * so descriptors 0x25, 0x20 are not included
100 */
101 [0x2C] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
102 .associativity = 8, .line_size = 64, },
103 [0x30] = { .level = 1, .type = ICACHE, .size = 32 * KiB,
104 .associativity = 8, .line_size = 64, },
105 [0x41] = { .level = 2, .type = UNIFIED_CACHE, .size = 128 * KiB,
106 .associativity = 4, .line_size = 32, },
107 [0x42] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
108 .associativity = 4, .line_size = 32, },
109 [0x43] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
110 .associativity = 4, .line_size = 32, },
111 [0x44] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
112 .associativity = 4, .line_size = 32, },
113 [0x45] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
114 .associativity = 4, .line_size = 32, },
115 [0x46] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
116 .associativity = 4, .line_size = 64, },
117 [0x47] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
118 .associativity = 8, .line_size = 64, },
119 [0x48] = { .level = 2, .type = UNIFIED_CACHE, .size = 3 * MiB,
120 .associativity = 12, .line_size = 64, },
121 /* Descriptor 0x49 depends on CPU family/model, so it is not included */
122 [0x4A] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
123 .associativity = 12, .line_size = 64, },
124 [0x4B] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
125 .associativity = 16, .line_size = 64, },
126 [0x4C] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
127 .associativity = 12, .line_size = 64, },
128 [0x4D] = { .level = 3, .type = UNIFIED_CACHE, .size = 16 * MiB,
129 .associativity = 16, .line_size = 64, },
130 [0x4E] = { .level = 2, .type = UNIFIED_CACHE, .size = 6 * MiB,
131 .associativity = 24, .line_size = 64, },
132 [0x60] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
133 .associativity = 8, .line_size = 64, },
134 [0x66] = { .level = 1, .type = DCACHE, .size = 8 * KiB,
135 .associativity = 4, .line_size = 64, },
136 [0x67] = { .level = 1, .type = DCACHE, .size = 16 * KiB,
137 .associativity = 4, .line_size = 64, },
138 [0x68] = { .level = 1, .type = DCACHE, .size = 32 * KiB,
139 .associativity = 4, .line_size = 64, },
140 [0x78] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
141 .associativity = 4, .line_size = 64, },
142 /* lines per sector is not supported cpuid2_cache_descriptor(),
143 * so descriptors 0x79, 0x7A, 0x7B, 0x7C are not included.
144 */
145 [0x7D] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
146 .associativity = 8, .line_size = 64, },
147 [0x7F] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
148 .associativity = 2, .line_size = 64, },
149 [0x80] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
150 .associativity = 8, .line_size = 64, },
151 [0x82] = { .level = 2, .type = UNIFIED_CACHE, .size = 256 * KiB,
152 .associativity = 8, .line_size = 32, },
153 [0x83] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
154 .associativity = 8, .line_size = 32, },
155 [0x84] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
156 .associativity = 8, .line_size = 32, },
157 [0x85] = { .level = 2, .type = UNIFIED_CACHE, .size = 2 * MiB,
158 .associativity = 8, .line_size = 32, },
159 [0x86] = { .level = 2, .type = UNIFIED_CACHE, .size = 512 * KiB,
160 .associativity = 4, .line_size = 64, },
161 [0x87] = { .level = 2, .type = UNIFIED_CACHE, .size = 1 * MiB,
162 .associativity = 8, .line_size = 64, },
163 [0xD0] = { .level = 3, .type = UNIFIED_CACHE, .size = 512 * KiB,
164 .associativity = 4, .line_size = 64, },
165 [0xD1] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
166 .associativity = 4, .line_size = 64, },
167 [0xD2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
168 .associativity = 4, .line_size = 64, },
169 [0xD6] = { .level = 3, .type = UNIFIED_CACHE, .size = 1 * MiB,
170 .associativity = 8, .line_size = 64, },
171 [0xD7] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
172 .associativity = 8, .line_size = 64, },
173 [0xD8] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
174 .associativity = 8, .line_size = 64, },
175 [0xDC] = { .level = 3, .type = UNIFIED_CACHE, .size = 1.5 * MiB,
176 .associativity = 12, .line_size = 64, },
177 [0xDD] = { .level = 3, .type = UNIFIED_CACHE, .size = 3 * MiB,
178 .associativity = 12, .line_size = 64, },
179 [0xDE] = { .level = 3, .type = UNIFIED_CACHE, .size = 6 * MiB,
180 .associativity = 12, .line_size = 64, },
181 [0xE2] = { .level = 3, .type = UNIFIED_CACHE, .size = 2 * MiB,
182 .associativity = 16, .line_size = 64, },
183 [0xE3] = { .level = 3, .type = UNIFIED_CACHE, .size = 4 * MiB,
184 .associativity = 16, .line_size = 64, },
185 [0xE4] = { .level = 3, .type = UNIFIED_CACHE, .size = 8 * MiB,
186 .associativity = 16, .line_size = 64, },
187 [0xEA] = { .level = 3, .type = UNIFIED_CACHE, .size = 12 * MiB,
188 .associativity = 24, .line_size = 64, },
189 [0xEB] = { .level = 3, .type = UNIFIED_CACHE, .size = 18 * MiB,
190 .associativity = 24, .line_size = 64, },
191 [0xEC] = { .level = 3, .type = UNIFIED_CACHE, .size = 24 * MiB,
192 .associativity = 24, .line_size = 64, },
193 };
194
195 /*
196 * "CPUID leaf 2 does not report cache descriptor information,
197 * use CPUID leaf 4 to query cache parameters"
198 */
199 #define CACHE_DESCRIPTOR_UNAVAILABLE 0xFF
200
201 /*
202 * Return a CPUID 2 cache descriptor for a given cache.
203 * If no known descriptor is found, return CACHE_DESCRIPTOR_UNAVAILABLE
204 */
205 static uint8_t cpuid2_cache_descriptor(CPUCacheInfo *cache)
206 {
207 int i;
208
209 assert(cache->size > 0);
210 assert(cache->level > 0);
211 assert(cache->line_size > 0);
212 assert(cache->associativity > 0);
213 for (i = 0; i < ARRAY_SIZE(cpuid2_cache_descriptors); i++) {
214 struct CPUID2CacheDescriptorInfo *d = &cpuid2_cache_descriptors[i];
215 if (d->level == cache->level && d->type == cache->type &&
216 d->size == cache->size && d->line_size == cache->line_size &&
217 d->associativity == cache->associativity) {
218 return i;
219 }
220 }
221
222 return CACHE_DESCRIPTOR_UNAVAILABLE;
223 }
224
/* CPUID Leaf 4 constants: */

/* EAX: */
#define CACHE_TYPE_D    1
#define CACHE_TYPE_I    2
#define CACHE_TYPE_UNIFIED   3

/*
 * Cache level goes in EAX bits 7:5.  The argument is parenthesized so
 * that compound expressions (e.g. CACHE_LEVEL(l + 1)) are shifted as a
 * whole instead of being split by operator precedence.
 */
#define CACHE_LEVEL(l)        ((l) << 5)

#define CACHE_SELF_INIT_LEVEL (1 << 8)

/* EDX: */
#define CACHE_NO_INVD_SHARING (1 << 0)
#define CACHE_INCLUSIVE       (1 << 1)
#define CACHE_COMPLEX_IDX     (1 << 2)

/* Encode CacheType for CPUID[4].EAX */
#define CACHE_TYPE(t) (((t) == DCACHE) ? CACHE_TYPE_D : \
                       ((t) == ICACHE) ? CACHE_TYPE_I : \
                       ((t) == UNIFIED_CACHE) ? CACHE_TYPE_UNIFIED : \
                       0 /* Invalid value */)
246
247
248 /* Encode cache info for CPUID[4] */
249 static void encode_cache_cpuid4(CPUCacheInfo *cache,
250 int num_apic_ids, int num_cores,
251 uint32_t *eax, uint32_t *ebx,
252 uint32_t *ecx, uint32_t *edx)
253 {
254 assert(cache->size == cache->line_size * cache->associativity *
255 cache->partitions * cache->sets);
256
257 assert(num_apic_ids > 0);
258 *eax = CACHE_TYPE(cache->type) |
259 CACHE_LEVEL(cache->level) |
260 (cache->self_init ? CACHE_SELF_INIT_LEVEL : 0) |
261 ((num_cores - 1) << 26) |
262 ((num_apic_ids - 1) << 14);
263
264 assert(cache->line_size > 0);
265 assert(cache->partitions > 0);
266 assert(cache->associativity > 0);
267 /* We don't implement fully-associative caches */
268 assert(cache->associativity < cache->sets);
269 *ebx = (cache->line_size - 1) |
270 ((cache->partitions - 1) << 12) |
271 ((cache->associativity - 1) << 22);
272
273 assert(cache->sets > 0);
274 *ecx = cache->sets - 1;
275
276 *edx = (cache->no_invd_sharing ? CACHE_NO_INVD_SHARING : 0) |
277 (cache->inclusive ? CACHE_INCLUSIVE : 0) |
278 (cache->complex_indexing ? CACHE_COMPLEX_IDX : 0);
279 }
280
281 /* Encode cache info for CPUID[0x80000005].ECX or CPUID[0x80000005].EDX */
282 static uint32_t encode_cache_cpuid80000005(CPUCacheInfo *cache)
283 {
284 assert(cache->size % 1024 == 0);
285 assert(cache->lines_per_tag > 0);
286 assert(cache->associativity > 0);
287 assert(cache->line_size > 0);
288 return ((cache->size / 1024) << 24) | (cache->associativity << 16) |
289 (cache->lines_per_tag << 8) | (cache->line_size);
290 }
291
#define ASSOC_FULL 0xFF

/*
 * AMD associativity encoding used on CPUID Leaf 0x80000006.
 * Each use of the argument is parenthesized so compound expressions
 * (e.g. AMD_ENC_ASSOC(x << 1)) compare correctly; values with no
 * defined encoding map to 0 (invalid).
 */
#define AMD_ENC_ASSOC(a) ((a) <=    1 ? (a)  : \
                          (a) ==    2 ? 0x2 : \
                          (a) ==    4 ? 0x4 : \
                          (a) ==    8 ? 0x6 : \
                          (a) ==   16 ? 0x8 : \
                          (a) ==   32 ? 0xA : \
                          (a) ==   48 ? 0xB : \
                          (a) ==   64 ? 0xC : \
                          (a) ==   96 ? 0xD : \
                          (a) ==  128 ? 0xE : \
                          (a) == ASSOC_FULL ? 0xF : \
                          0 /* invalid value */)
307
308 /*
309 * Encode cache info for CPUID[0x80000006].ECX and CPUID[0x80000006].EDX
310 * @l3 can be NULL.
311 */
312 static void encode_cache_cpuid80000006(CPUCacheInfo *l2,
313 CPUCacheInfo *l3,
314 uint32_t *ecx, uint32_t *edx)
315 {
316 assert(l2->size % 1024 == 0);
317 assert(l2->associativity > 0);
318 assert(l2->lines_per_tag > 0);
319 assert(l2->line_size > 0);
320 *ecx = ((l2->size / 1024) << 16) |
321 (AMD_ENC_ASSOC(l2->associativity) << 12) |
322 (l2->lines_per_tag << 8) | (l2->line_size);
323
324 if (l3) {
325 assert(l3->size % (512 * 1024) == 0);
326 assert(l3->associativity > 0);
327 assert(l3->lines_per_tag > 0);
328 assert(l3->line_size > 0);
329 *edx = ((l3->size / (512 * 1024)) << 18) |
330 (AMD_ENC_ASSOC(l3->associativity) << 12) |
331 (l3->lines_per_tag << 8) | (l3->line_size);
332 } else {
333 *edx = 0;
334 }
335 }
336
337 /*
338 * Definitions of the hardcoded cache entries we expose:
339 * These are legacy cache values. If there is a need to change any
340 * of these values please use builtin_x86_defs
341 */
342
/* L1 data cache: */
static CPUCacheInfo legacy_l1d_cache = {
    .type = DCACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,               /* 64 sets * 64 B lines * 8 ways = 32 KiB */
    .partitions = 1,
    .no_invd_sharing = true,
};
355
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1d_cache_amd = {
    .type = DCACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,              /* 512 sets * 64 B lines * 2 ways = 64 KiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};
369
/* L1 instruction cache: */
static CPUCacheInfo legacy_l1i_cache = {
    .type = ICACHE,
    .level = 1,
    .size = 32 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 8,
    .sets = 64,               /* 64 sets * 64 B lines * 8 ways = 32 KiB */
    .partitions = 1,
    .no_invd_sharing = true,
};
382
/*FIXME: CPUID leaf 0x80000005 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l1i_cache_amd = {
    .type = ICACHE,
    .level = 1,
    .size = 64 * KiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 2,
    .sets = 512,              /* 512 sets * 64 B lines * 2 ways = 64 KiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .no_invd_sharing = true,
};
396
/* Level 2 unified cache: */
static CPUCacheInfo legacy_l2_cache = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 4 * MiB,
    .self_init = 1,
    .line_size = 64,
    .associativity = 16,
    .sets = 4096,             /* 4096 sets * 64 B lines * 16 ways = 4 MiB */
    .partitions = 1,
    .no_invd_sharing = true,
};
409
/*FIXME: CPUID leaf 2 descriptor is inconsistent with CPUID leaf 4 */
/* Only the fields needed by the leaf-2 descriptor lookup are filled in;
 * sets/partitions are deliberately left zero here.
 */
static CPUCacheInfo legacy_l2_cache_cpuid2 = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 2 * MiB,
    .line_size = 64,
    .associativity = 8,
};
418
419
/*FIXME: CPUID leaf 0x80000006 is inconsistent with leaves 2 & 4 */
static CPUCacheInfo legacy_l2_cache_amd = {
    .type = UNIFIED_CACHE,
    .level = 2,
    .size = 512 * KiB,
    .line_size = 64,
    .lines_per_tag = 1,
    .associativity = 16,
    .sets = 512,              /* 512 sets * 64 B lines * 16 ways = 512 KiB */
    .partitions = 1,
};
431
/* Level 3 unified cache: */
static CPUCacheInfo legacy_l3_cache = {
    .type = UNIFIED_CACHE,
    .level = 3,
    .size = 16 * MiB,
    .line_size = 64,
    .associativity = 16,
    .sets = 16384,            /* 16384 sets * 64 B lines * 16 ways = 16 MiB */
    .partitions = 1,
    .lines_per_tag = 1,
    .self_init = true,
    .inclusive = true,
    .complex_indexing = true,
};
446
/* TLB definitions: */
/* Fixed per-level TLB associativity and entry counts exposed to the guest.
 * An associativity/entry count of 0 marks that TLB as disabled/not present.
 */

#define L1_DTLB_2M_ASSOC       1
#define L1_DTLB_2M_ENTRIES   255
#define L1_DTLB_4K_ASSOC       1
#define L1_DTLB_4K_ENTRIES   255

#define L1_ITLB_2M_ASSOC       1
#define L1_ITLB_2M_ENTRIES   255
#define L1_ITLB_4K_ASSOC       1
#define L1_ITLB_4K_ENTRIES   255

#define L2_DTLB_2M_ASSOC       0 /* disabled */
#define L2_DTLB_2M_ENTRIES     0 /* disabled */
#define L2_DTLB_4K_ASSOC       4
#define L2_DTLB_4K_ENTRIES   512

#define L2_ITLB_2M_ASSOC       0 /* disabled */
#define L2_ITLB_2M_ENTRIES     0 /* disabled */
#define L2_ITLB_4K_ASSOC       4
#define L2_ITLB_4K_ENTRIES   512
468
/* CPUID Leaf 0x14 constants: */
#define INTEL_PT_MAX_SUBLEAF     0x1
/*
 * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
 *          MSR can be accessed;
 * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
 * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
 *          of Intel PT MSRs across warm reset;
 * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
 */
#define INTEL_PT_MINIMAL_EBX     0xf
/*
 * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
 *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
 *          accessed;
 * bit[01]: ToPA tables can hold any number of output entries, up to the
 *          maximum allowed by the MaskOrTableOffset field of
 *          IA32_RTIT_OUTPUT_MASK_PTRS;
 * bit[02]: Support Single-Range Output scheme;
 */
#define INTEL_PT_MINIMAL_ECX     0x7
/* generated packets which contain IP payloads have LIP values */
/* 1u avoids the undefined behavior of left-shifting a signed 1 into bit 31 */
#define INTEL_PT_IP_LIP          (1u << 31)
#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
497
498 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
499 uint32_t vendor2, uint32_t vendor3)
500 {
501 int i;
502 for (i = 0; i < 4; i++) {
503 dst[i] = vendor1 >> (8 * i);
504 dst[i + 4] = vendor2 >> (8 * i);
505 dst[i + 8] = vendor3 >> (8 * i);
506 }
507 dst[CPUID_VENDOR_SZ] = '\0';
508 }
509
/* Baseline CPUID[1].EDX feature sets for the named historical CPU models: */
#define I486_FEATURES (CPUID_FP87 | CPUID_VME | CPUID_PSE)
#define PENTIUM_FEATURES (I486_FEATURES | CPUID_DE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_MMX | CPUID_APIC)
#define PENTIUM2_FEATURES (PENTIUM_FEATURES | CPUID_PAE | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_FXSR)
#define PENTIUM3_FEATURES (PENTIUM2_FEATURES | CPUID_SSE)
#define PPRO_FEATURES (CPUID_FP87 | CPUID_DE | CPUID_PSE | CPUID_TSC | \
          CPUID_MSR | CPUID_MCE | CPUID_CX8 | CPUID_PGE | CPUID_CMOV | \
          CPUID_PAT | CPUID_FXSR | CPUID_MMX | CPUID_SSE | CPUID_SSE2 | \
          CPUID_PAE | CPUID_SEP | CPUID_APIC)

/* TCG_*_FEATURES: per-feature-word masks of flags TCG can emulate
 * (consumed through FeatureWordInfo.tcg_features below).
 */
#define TCG_FEATURES (CPUID_FP87 | CPUID_PSE | CPUID_TSC | CPUID_MSR | \
          CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | CPUID_SEP | \
          CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | CPUID_PAT | \
          CPUID_PSE36 | CPUID_CLFLUSH | CPUID_ACPI | CPUID_MMX | \
          CPUID_FXSR | CPUID_SSE | CPUID_SSE2 | CPUID_SS | CPUID_DE)
          /* partly implemented:
          CPUID_MTRR, CPUID_MCA, CPUID_CLFLUSH (needed for Win64) */
          /* missing:
          CPUID_VME, CPUID_DTS, CPUID_SS, CPUID_HT, CPUID_TM, CPUID_PBE */
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
          CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
          CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
          CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */   \
          CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
          /* missing:
          CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
          CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
          CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
          CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
          CPUID_EXT_F16C, CPUID_EXT_RDRAND */

#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
#else
#define TCG_EXT2_X86_64_FEATURES 0
#endif

#define TCG_EXT2_FEATURES ((TCG_FEATURES & CPUID_EXT2_AMD_ALIASES) | \
          CPUID_EXT2_NX | CPUID_EXT2_MMXEXT | CPUID_EXT2_RDTSCP | \
          CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_PDPE1GB | \
          TCG_EXT2_X86_64_FEATURES)
#define TCG_EXT3_FEATURES (CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM | \
          CPUID_EXT3_CR8LEG | CPUID_EXT3_ABM | CPUID_EXT3_SSE4A)
#define TCG_EXT4_FEATURES 0
#define TCG_SVM_FEATURES 0
#define TCG_KVM_FEATURES 0
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
          CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
          CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT |            \
          CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE | \
          CPUID_7_0_EBX_ERMS)
          /* missing:
          CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
          CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
          CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_OSPKE | \
          CPUID_7_0_ECX_LA57)
#define TCG_7_0_EDX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
          /* missing:
          CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
575
/* Per-FeatureWord metadata: flag names and the CPUID leaf they map to. */
typedef struct FeatureWordInfo {
    /* feature flags names are taken from "Intel Processor Identification and
     * the CPUID Instruction" and AMD's "CPUID Specification".
     * In cases of disagreement between feature naming conventions,
     * aliases may be added.
     */
    const char *feat_names[32];       /* one name per bit; NULL = unnamed bit */
    uint32_t cpuid_eax;   /* Input EAX for CPUID */
    bool cpuid_needs_ecx; /* CPUID instruction uses ECX as input */
    uint32_t cpuid_ecx;   /* Input ECX value for CPUID */
    int cpuid_reg;        /* output register (R_* constant) */
    uint32_t tcg_features; /* Feature flags supported by TCG */
    uint32_t unmigratable_flags; /* Feature flags known to be unmigratable */
    uint32_t migratable_flags; /* Feature flags known to be migratable */
    /* Features that shouldn't be auto-enabled by "-cpu host" */
    uint32_t no_autoenable_flags;
} FeatureWordInfo;
593
/*
 * Per-feature-word tables: flag name for each of the 32 bits, the CPUID
 * leaf/register the word is read from, and the TCG/migration masks.
 */
static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
    [FEAT_1_EDX] = {
        .feat_names = {
            "fpu", "vme", "de", "pse",
            "tsc", "msr", "pae", "mce",
            "cx8", "apic", NULL, "sep",
            "mtrr", "pge", "mca", "cmov",
            "pat", "pse36", "pn" /* Intel psn */, "clflush" /* Intel clfsh */,
            NULL, "ds" /* Intel dts */, "acpi", "mmx",
            "fxsr", "sse", "sse2", "ss",
            "ht" /* Intel htt */, "tm", "ia64", "pbe",
        },
        .cpuid_eax = 1, .cpuid_reg = R_EDX,
        .tcg_features = TCG_FEATURES,
    },
    [FEAT_1_ECX] = {
        .feat_names = {
            "pni" /* Intel,AMD sse3 */, "pclmulqdq", "dtes64", "monitor",
            "ds-cpl", "vmx", "smx", "est",
            "tm2", "ssse3", "cid", NULL,
            "fma", "cx16", "xtpr", "pdcm",
            NULL, "pcid", "dca", "sse4.1",
            "sse4.2", "x2apic", "movbe", "popcnt",
            "tsc-deadline", "aes", "xsave", "osxsave",
            "avx", "f16c", "rdrand", "hypervisor",
        },
        .cpuid_eax = 1, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT_FEATURES,
    },
    /* Feature names that are already defined on feature_name[] but
     * are set on CPUID[8000_0001].EDX on AMD CPUs don't have their
     * names on feat_names below. They are copied automatically
     * to features[FEAT_8000_0001_EDX] if and only if CPU vendor is AMD.
     */
    [FEAT_8000_0001_EDX] = {
        .feat_names = {
            NULL /* fpu */, NULL /* vme */, NULL /* de */, NULL /* pse */,
            NULL /* tsc */, NULL /* msr */, NULL /* pae */, NULL /* mce */,
            NULL /* cx8 */, NULL /* apic */, NULL, "syscall",
            NULL /* mtrr */, NULL /* pge */, NULL /* mca */, NULL /* cmov */,
            NULL /* pat */, NULL /* pse36 */, NULL, NULL /* Linux mp */,
            "nx", NULL, "mmxext", NULL /* mmx */,
            NULL /* fxsr */, "fxsr-opt", "pdpe1gb", "rdtscp",
            NULL, "lm", "3dnowext", "3dnow",
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT2_FEATURES,
    },
    [FEAT_8000_0001_ECX] = {
        .feat_names = {
            "lahf-lm", "cmp-legacy", "svm", "extapic",
            "cr8legacy", "abm", "sse4a", "misalignsse",
            "3dnowprefetch", "osvw", "ibs", "xop",
            "skinit", "wdt", NULL, "lwp",
            "fma4", "tce", NULL, "nodeid-msr",
            NULL, "tbm", "topoext", "perfctr-core",
            "perfctr-nb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000001, .cpuid_reg = R_ECX,
        .tcg_features = TCG_EXT3_FEATURES,
    },
    [FEAT_C000_0001_EDX] = {
        .feat_names = {
            NULL, NULL, "xstore", "xstore-en",
            NULL, NULL, "xcrypt", "xcrypt-en",
            "ace2", "ace2-en", "phe", "phe-en",
            "pmm", "pmm-en", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xC0000001, .cpuid_reg = R_EDX,
        .tcg_features = TCG_EXT4_FEATURES,
    },
    [FEAT_KVM] = {
        .feat_names = {
            /* NOTE(review): "kvmclock" is intentionally listed for both bit 0
             * and bit 3 — presumably the two KVM clocksource feature bits;
             * confirm against KVM_CPUID_FEATURES before changing.
             */
            "kvmclock", "kvm-nopiodelay", "kvm-mmu", "kvmclock",
            "kvm-asyncpf", "kvm-steal-time", "kvm-pv-eoi", "kvm-pv-unhalt",
            NULL, "kvm-pv-tlb-flush", NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "kvmclock-stable-bit", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
        .tcg_features = TCG_KVM_FEATURES,
    },
    [FEAT_KVM_HINTS] = {
        .feat_names = {
            "kvm-hint-dedicated", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
        .tcg_features = TCG_KVM_FEATURES,
        /*
         * KVM hints aren't auto-enabled by -cpu host, they need to be
         * explicitly enabled in the command-line.
         */
        .no_autoenable_flags = ~0U,
    },
    [FEAT_HYPERV_EAX] = {
        .feat_names = {
            NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
            NULL /* hv_msr_synic_access */, NULL /* hv_msr_stimer_access */,
            NULL /* hv_msr_apic_access */, NULL /* hv_msr_hypercall_access */,
            NULL /* hv_vpindex_access */, NULL /* hv_msr_reset_access */,
            NULL /* hv_msr_stats_access */, NULL /* hv_reftsc_access */,
            NULL /* hv_msr_idle_access */, NULL /* hv_msr_frequency_access */,
            NULL /* hv_msr_debug_access */, NULL /* hv_msr_reenlightenment_access */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EAX,
    },
    [FEAT_HYPERV_EBX] = {
        .feat_names = {
            NULL /* hv_create_partitions */, NULL /* hv_access_partition_id */,
            NULL /* hv_access_memory_pool */, NULL /* hv_adjust_message_buffers */,
            NULL /* hv_post_messages */, NULL /* hv_signal_events */,
            NULL /* hv_create_port */, NULL /* hv_connect_port */,
            NULL /* hv_access_stats */, NULL, NULL, NULL /* hv_debugging */,
            NULL /* hv_cpu_power_management */, NULL /* hv_configure_profiler */,
            NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EBX,
    },
    [FEAT_HYPERV_EDX] = {
        .feat_names = {
            NULL /* hv_mwait */, NULL /* hv_guest_debugging */,
            NULL /* hv_perf_monitor */, NULL /* hv_cpu_dynamic_part */,
            NULL /* hv_hypercall_params_xmm */, NULL /* hv_guest_idle_state */,
            NULL, NULL,
            NULL, NULL, NULL /* hv_guest_crash_msr */, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x40000003, .cpuid_reg = R_EDX,
    },
    [FEAT_SVM] = {
        .feat_names = {
            "npt", "lbrv", "svm-lock", "nrip-save",
            "tsc-scale", "vmcb-clean",  "flushbyasid", "decodeassists",
            NULL, NULL, "pause-filter", NULL,
            "pfthreshold", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x8000000A, .cpuid_reg = R_EDX,
        .tcg_features = TCG_SVM_FEATURES,
    },
    [FEAT_7_0_EBX] = {
        .feat_names = {
            "fsgsbase", "tsc-adjust", NULL, "bmi1",
            "hle", "avx2", NULL, "smep",
            "bmi2", "erms", "invpcid", "rtm",
            NULL, NULL, "mpx", NULL,
            "avx512f", "avx512dq", "rdseed", "adx",
            "smap", "avx512ifma", "pcommit", "clflushopt",
            "clwb", "intel-pt", "avx512pf", "avx512er",
            "avx512cd", "sha-ni", "avx512bw", "avx512vl",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EBX,
        .tcg_features = TCG_7_0_EBX_FEATURES,
    },
    [FEAT_7_0_ECX] = {
        .feat_names = {
            NULL, "avx512vbmi", "umip", "pku",
            "ospke", NULL, "avx512vbmi2", NULL,
            "gfni", "vaes", "vpclmulqdq", "avx512vnni",
            "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
            "la57", NULL, NULL, NULL,
            NULL, NULL, "rdpid", NULL,
            NULL, "cldemote", NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_ECX,
        .tcg_features = TCG_7_0_ECX_FEATURES,
    },
    [FEAT_7_0_EDX] = {
        .feat_names = {
            NULL, NULL, "avx512-4vnniw", "avx512-4fmaps",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, "spec-ctrl", NULL,
            NULL, NULL, NULL, "ssbd",
        },
        .cpuid_eax = 7,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_7_0_EDX_FEATURES,
    },
    [FEAT_8000_0007_EDX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "invtsc", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000007,
        .cpuid_reg = R_EDX,
        .tcg_features = TCG_APM_FEATURES,
        .unmigratable_flags = CPUID_APM_INVTSC,
    },
    [FEAT_8000_0008_EBX] = {
        .feat_names = {
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            "ibpb", NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, "virt-ssbd", NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0x80000008,
        .cpuid_reg = R_EBX,
        .tcg_features = 0,
        .unmigratable_flags = 0,
    },
    [FEAT_XSAVE] = {
        .feat_names = {
            "xsaveopt", "xsavec", "xgetbv1", "xsaves",
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 0xd,
        .cpuid_needs_ecx = true, .cpuid_ecx = 1,
        .cpuid_reg = R_EAX,
        .tcg_features = TCG_XSAVE_FEATURES,
    },
    [FEAT_6_EAX] = {
        .feat_names = {
            NULL, NULL, "arat", NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
            NULL, NULL, NULL, NULL,
        },
        .cpuid_eax = 6, .cpuid_reg = R_EAX,
        .tcg_features = TCG_6_EAX_FEATURES,
    },
    [FEAT_XSAVE_COMP_LO] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EAX,
        .tcg_features = ~0U,
        .migratable_flags = XSTATE_FP_MASK | XSTATE_SSE_MASK |
            XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | XSTATE_BNDCSR_MASK |
            XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK |
            XSTATE_PKRU_MASK,
    },
    [FEAT_XSAVE_COMP_HI] = {
        .cpuid_eax = 0xD,
        .cpuid_needs_ecx = true, .cpuid_ecx = 0,
        .cpuid_reg = R_EDX,
        .tcg_features = ~0U,
    },
};
892
/* Descriptor pairing a 32-bit register's name with its QAPI enum value. */
typedef struct X86RegisterInfo32 {
    /* Name of register */
    const char *name;
    /* QAPI enum value register */
    X86CPURegister32 qapi_enum;
} X86RegisterInfo32;
899
/* Expand to an initializer that maps index R_<reg> to its name string
 * and QAPI enum constant.  Used only for the table below. */
#define REGISTER(reg) \
    [R_##reg] = { .name = #reg, .qapi_enum = X86_CPU_REGISTER32_##reg }
/* Lookup table of register names and QAPI enum values, indexed by the
 * R_* register constants. */
static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
    REGISTER(EAX),
    REGISTER(ECX),
    REGISTER(EDX),
    REGISTER(EBX),
    REGISTER(ESP),
    REGISTER(EBP),
    REGISTER(ESI),
    REGISTER(EDI),
};
#undef REGISTER
913
/* Description of one XSAVE state component: the CPUID feature flag that
 * gates it and its location within the XSAVE area. */
typedef struct ExtSaveArea {
    /* feature: FeatureWord index; bits: feature bitmask gating the component */
    uint32_t feature, bits;
    /* Byte offset and size of the component within the XSAVE area */
    uint32_t offset, size;
} ExtSaveArea;
918
/* Table of XSAVE state components, indexed by XSTATE_*_BIT.  Each entry
 * records the CPUID feature bit that enables the component and its
 * offset/size within the X86XSaveArea layout. */
static const ExtSaveArea x86_ext_save_areas[] = {
    [XSTATE_FP_BIT] = {
        /* x87 FP state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* x87 state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_SSE_BIT] = {
        /* SSE state component is always enabled if XSAVE is supported */
        .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
        /* SSE state is in the legacy region of the XSAVE area */
        .offset = 0,
        .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
    },
    [XSTATE_YMM_BIT] =
          { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
            .offset = offsetof(X86XSaveArea, avx_state),
            .size = sizeof(XSaveAVX) },
    [XSTATE_BNDREGS_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndreg_state),
            .size = sizeof(XSaveBNDREG) },
    [XSTATE_BNDCSR_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
            .offset = offsetof(X86XSaveArea, bndcsr_state),
            .size = sizeof(XSaveBNDCSR) },
    [XSTATE_OPMASK_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, opmask_state),
            .size = sizeof(XSaveOpmask) },
    [XSTATE_ZMM_Hi256_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
            .size = sizeof(XSaveZMM_Hi256) },
    [XSTATE_Hi16_ZMM_BIT] =
          { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
            .size = sizeof(XSaveHi16_ZMM) },
    [XSTATE_PKRU_BIT] =
          { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
            .offset = offsetof(X86XSaveArea, pkru_state),
            .size = sizeof(XSavePKRU) },
};
963
964 static uint32_t xsave_area_size(uint64_t mask)
965 {
966 int i;
967 uint64_t ret = 0;
968
969 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
970 const ExtSaveArea *esa = &x86_ext_save_areas[i];
971 if ((mask >> i) & 1) {
972 ret = MAX(ret, esa->offset + esa->size);
973 }
974 }
975 return ret;
976 }
977
/* True when the active accelerator (KVM or HVF) exposes host CPUID data. */
static inline bool accel_uses_host_cpuid(void)
{
    if (kvm_enabled()) {
        return true;
    }
    return hvf_enabled();
}
982
983 static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu)
984 {
985 return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 |
986 cpu->env.features[FEAT_XSAVE_COMP_LO];
987 }
988
989 const char *get_register_name_32(unsigned int reg)
990 {
991 if (reg >= CPU_NB_REGS32) {
992 return NULL;
993 }
994 return x86_reg_info_32[reg].name;
995 }
996
997 /*
998 * Returns the set of feature flags that are supported and migratable by
999 * QEMU, for a given FeatureWord.
1000 */
1001 static uint32_t x86_cpu_get_migratable_flags(FeatureWord w)
1002 {
1003 FeatureWordInfo *wi = &feature_word_info[w];
1004 uint32_t r = 0;
1005 int i;
1006
1007 for (i = 0; i < 32; i++) {
1008 uint32_t f = 1U << i;
1009
1010 /* If the feature name is known, it is implicitly considered migratable,
1011 * unless it is explicitly set in unmigratable_flags */
1012 if ((wi->migratable_flags & f) ||
1013 (wi->feat_names[i] && !(wi->unmigratable_flags & f))) {
1014 r |= f;
1015 }
1016 }
1017 return r;
1018 }
1019
/* Execute the CPUID instruction on the host for leaf @function and
 * subleaf @count, storing the EAX/EBX/ECX/EDX results into whichever of
 * the output pointers are non-NULL.  Aborts on non-x86 hosts. */
void host_cpuid(uint32_t function, uint32_t count,
                uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx)
{
    uint32_t vec[4];

#ifdef __x86_64__
    asm volatile("cpuid"
                 : "=a"(vec[0]), "=b"(vec[1]),
                   "=c"(vec[2]), "=d"(vec[3])
                 : "0"(function), "c"(count) : "cc");
#elif defined(__i386__)
    /* Save/restore all GPRs around CPUID and store the results through
     * %esi instead of using register outputs (NOTE(review): presumably
     * because %ebx may be reserved, e.g. for PIC, on 32-bit — confirm). */
    asm volatile("pusha \n\t"
                 "cpuid \n\t"
                 "mov %%eax, 0(%2) \n\t"
                 "mov %%ebx, 4(%2) \n\t"
                 "mov %%ecx, 8(%2) \n\t"
                 "mov %%edx, 12(%2) \n\t"
                 "popa"
                 : : "a"(function), "c"(count), "S"(vec)
                 : "memory", "cc");
#else
    abort();
#endif

    /* Each output pointer is optional; copy only the requested registers. */
    if (eax)
        *eax = vec[0];
    if (ebx)
        *ebx = vec[1];
    if (ecx)
        *ecx = vec[2];
    if (edx)
        *edx = vec[3];
}
1053
/* Read the host CPU's vendor string (CPUID leaf 0) and family/model/
 * stepping (CPUID leaf 1) via host_cpuid().  @vendor receives the
 * vendor string; @family, @model and @stepping may each be NULL. */
void host_vendor_fms(char *vendor, int *family, int *model, int *stepping)
{
    uint32_t eax, ebx, ecx, edx;

    host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
    x86_cpu_vendor_words2str(vendor, ebx, edx, ecx);

    host_cpuid(0x1, 0, &eax, &ebx, &ecx, &edx);
    if (family != NULL) {
        int base_family = (eax >> 8) & 0x0F;
        int ext_family = (eax >> 20) & 0xFF;

        *family = base_family + ext_family;
    }
    if (model != NULL) {
        int base_model = (eax >> 4) & 0x0F;
        int ext_model = (eax >> 12) & 0xF0;

        *model = base_model | ext_model;
    }
    if (stepping != NULL) {
        *stepping = eax & 0x0F;
    }
}
1072
1073 /* CPU class name definitions: */
1074
/* Return the QOM type name for CPU model @model_name.
 * Caller is responsible for freeing the returned string.
 */
static char *x86_cpu_type_name(const char *model_name)
{
    char *typename = g_strdup_printf(X86_CPU_TYPE_NAME("%s"), model_name);

    return typename;
}
1082
1083 static ObjectClass *x86_cpu_class_by_name(const char *cpu_model)
1084 {
1085 ObjectClass *oc;
1086 char *typename = x86_cpu_type_name(cpu_model);
1087 oc = object_class_by_name(typename);
1088 g_free(typename);
1089 return oc;
1090 }
1091
1092 static char *x86_cpu_class_get_model_name(X86CPUClass *cc)
1093 {
1094 const char *class_name = object_class_get_name(OBJECT_CLASS(cc));
1095 assert(g_str_has_suffix(class_name, X86_CPU_TYPE_SUFFIX));
1096 return g_strndup(class_name,
1097 strlen(class_name) - strlen(X86_CPU_TYPE_SUFFIX));
1098 }
1099
/* Definition of a built-in CPU model: CPUID levels, identification
 * fields, feature words, and optional cache information. */
struct X86CPUDefinition {
    const char *name;
    /* Maximum basic CPUID leaf */
    uint32_t level;
    /* Maximum extended CPUID leaf (0x8000xxxx range) */
    uint32_t xlevel;
    /* vendor is zero-terminated, 12 character ASCII string */
    char vendor[CPUID_VENDOR_SZ + 1];
    int family;
    int model;
    int stepping;
    FeatureWordArray features;
    /* CPUID model-id string */
    const char *model_id;
    /* Cache hierarchy description; left NULL by most built-in models */
    CPUCaches *cache_info;
};
1113
/* Cache hierarchy of the AMD EPYC CPU model: 32KiB 8-way L1D,
 * 64KiB 4-way L1I, 512KiB 8-way L2, and an 8MiB 16-way L3.
 * Referenced through X86CPUDefinition.cache_info. */
static CPUCaches epyc_cache_info = {
    .l1d_cache = &(CPUCacheInfo) {
        .type = DCACHE,
        .level = 1,
        .size = 32 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 64,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l1i_cache = &(CPUCacheInfo) {
        .type = ICACHE,
        .level = 1,
        .size = 64 * KiB,
        .line_size = 64,
        .associativity = 4,
        .partitions = 1,
        .sets = 256,
        .lines_per_tag = 1,
        .self_init = 1,
        .no_invd_sharing = true,
    },
    .l2_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 2,
        .size = 512 * KiB,
        .line_size = 64,
        .associativity = 8,
        .partitions = 1,
        .sets = 1024,
        .lines_per_tag = 1,
    },
    .l3_cache = &(CPUCacheInfo) {
        .type = UNIFIED_CACHE,
        .level = 3,
        .size = 8 * MiB,
        .line_size = 64,
        .associativity = 16,
        .partitions = 1,
        .sets = 8192,
        .lines_per_tag = 1,
        .self_init = true,
        .inclusive = true,
        .complex_indexing = true,
    },
};
1163
1164 static X86CPUDefinition builtin_x86_defs[] = {
1165 {
1166 .name = "qemu64",
1167 .level = 0xd,
1168 .vendor = CPUID_VENDOR_AMD,
1169 .family = 6,
1170 .model = 6,
1171 .stepping = 3,
1172 .features[FEAT_1_EDX] =
1173 PPRO_FEATURES |
1174 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1175 CPUID_PSE36,
1176 .features[FEAT_1_ECX] =
1177 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1178 .features[FEAT_8000_0001_EDX] =
1179 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1180 .features[FEAT_8000_0001_ECX] =
1181 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM,
1182 .xlevel = 0x8000000A,
1183 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1184 },
1185 {
1186 .name = "phenom",
1187 .level = 5,
1188 .vendor = CPUID_VENDOR_AMD,
1189 .family = 16,
1190 .model = 2,
1191 .stepping = 3,
1192 /* Missing: CPUID_HT */
1193 .features[FEAT_1_EDX] =
1194 PPRO_FEATURES |
1195 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1196 CPUID_PSE36 | CPUID_VME,
1197 .features[FEAT_1_ECX] =
1198 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_CX16 |
1199 CPUID_EXT_POPCNT,
1200 .features[FEAT_8000_0001_EDX] =
1201 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX |
1202 CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT | CPUID_EXT2_MMXEXT |
1203 CPUID_EXT2_FFXSR | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP,
1204 /* Missing: CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1205 CPUID_EXT3_CR8LEG,
1206 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1207 CPUID_EXT3_OSVW, CPUID_EXT3_IBS */
1208 .features[FEAT_8000_0001_ECX] =
1209 CPUID_EXT3_LAHF_LM | CPUID_EXT3_SVM |
1210 CPUID_EXT3_ABM | CPUID_EXT3_SSE4A,
1211 /* Missing: CPUID_SVM_LBRV */
1212 .features[FEAT_SVM] =
1213 CPUID_SVM_NPT,
1214 .xlevel = 0x8000001A,
1215 .model_id = "AMD Phenom(tm) 9550 Quad-Core Processor"
1216 },
1217 {
1218 .name = "core2duo",
1219 .level = 10,
1220 .vendor = CPUID_VENDOR_INTEL,
1221 .family = 6,
1222 .model = 15,
1223 .stepping = 11,
1224 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1225 .features[FEAT_1_EDX] =
1226 PPRO_FEATURES |
1227 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1228 CPUID_PSE36 | CPUID_VME | CPUID_ACPI | CPUID_SS,
1229 /* Missing: CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_EST,
1230 * CPUID_EXT_TM2, CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_VMX */
1231 .features[FEAT_1_ECX] =
1232 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1233 CPUID_EXT_CX16,
1234 .features[FEAT_8000_0001_EDX] =
1235 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1236 .features[FEAT_8000_0001_ECX] =
1237 CPUID_EXT3_LAHF_LM,
1238 .xlevel = 0x80000008,
1239 .model_id = "Intel(R) Core(TM)2 Duo CPU T7700 @ 2.40GHz",
1240 },
1241 {
1242 .name = "kvm64",
1243 .level = 0xd,
1244 .vendor = CPUID_VENDOR_INTEL,
1245 .family = 15,
1246 .model = 6,
1247 .stepping = 1,
1248 /* Missing: CPUID_HT */
1249 .features[FEAT_1_EDX] =
1250 PPRO_FEATURES | CPUID_VME |
1251 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA |
1252 CPUID_PSE36,
1253 /* Missing: CPUID_EXT_POPCNT, CPUID_EXT_MONITOR */
1254 .features[FEAT_1_ECX] =
1255 CPUID_EXT_SSE3 | CPUID_EXT_CX16,
1256 /* Missing: CPUID_EXT2_PDPE1GB, CPUID_EXT2_RDTSCP */
1257 .features[FEAT_8000_0001_EDX] =
1258 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1259 /* Missing: CPUID_EXT3_LAHF_LM, CPUID_EXT3_CMP_LEG, CPUID_EXT3_EXTAPIC,
1260 CPUID_EXT3_CR8LEG, CPUID_EXT3_ABM, CPUID_EXT3_SSE4A,
1261 CPUID_EXT3_MISALIGNSSE, CPUID_EXT3_3DNOWPREFETCH,
1262 CPUID_EXT3_OSVW, CPUID_EXT3_IBS, CPUID_EXT3_SVM */
1263 .features[FEAT_8000_0001_ECX] =
1264 0,
1265 .xlevel = 0x80000008,
1266 .model_id = "Common KVM processor"
1267 },
1268 {
1269 .name = "qemu32",
1270 .level = 4,
1271 .vendor = CPUID_VENDOR_INTEL,
1272 .family = 6,
1273 .model = 6,
1274 .stepping = 3,
1275 .features[FEAT_1_EDX] =
1276 PPRO_FEATURES,
1277 .features[FEAT_1_ECX] =
1278 CPUID_EXT_SSE3,
1279 .xlevel = 0x80000004,
1280 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1281 },
1282 {
1283 .name = "kvm32",
1284 .level = 5,
1285 .vendor = CPUID_VENDOR_INTEL,
1286 .family = 15,
1287 .model = 6,
1288 .stepping = 1,
1289 .features[FEAT_1_EDX] =
1290 PPRO_FEATURES | CPUID_VME |
1291 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_PSE36,
1292 .features[FEAT_1_ECX] =
1293 CPUID_EXT_SSE3,
1294 .features[FEAT_8000_0001_ECX] =
1295 0,
1296 .xlevel = 0x80000008,
1297 .model_id = "Common 32-bit KVM processor"
1298 },
1299 {
1300 .name = "coreduo",
1301 .level = 10,
1302 .vendor = CPUID_VENDOR_INTEL,
1303 .family = 6,
1304 .model = 14,
1305 .stepping = 8,
1306 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1307 .features[FEAT_1_EDX] =
1308 PPRO_FEATURES | CPUID_VME |
1309 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_ACPI |
1310 CPUID_SS,
1311 /* Missing: CPUID_EXT_EST, CPUID_EXT_TM2 , CPUID_EXT_XTPR,
1312 * CPUID_EXT_PDCM, CPUID_EXT_VMX */
1313 .features[FEAT_1_ECX] =
1314 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR,
1315 .features[FEAT_8000_0001_EDX] =
1316 CPUID_EXT2_NX,
1317 .xlevel = 0x80000008,
1318 .model_id = "Genuine Intel(R) CPU T2600 @ 2.16GHz",
1319 },
1320 {
1321 .name = "486",
1322 .level = 1,
1323 .vendor = CPUID_VENDOR_INTEL,
1324 .family = 4,
1325 .model = 8,
1326 .stepping = 0,
1327 .features[FEAT_1_EDX] =
1328 I486_FEATURES,
1329 .xlevel = 0,
1330 .model_id = "",
1331 },
1332 {
1333 .name = "pentium",
1334 .level = 1,
1335 .vendor = CPUID_VENDOR_INTEL,
1336 .family = 5,
1337 .model = 4,
1338 .stepping = 3,
1339 .features[FEAT_1_EDX] =
1340 PENTIUM_FEATURES,
1341 .xlevel = 0,
1342 .model_id = "",
1343 },
1344 {
1345 .name = "pentium2",
1346 .level = 2,
1347 .vendor = CPUID_VENDOR_INTEL,
1348 .family = 6,
1349 .model = 5,
1350 .stepping = 2,
1351 .features[FEAT_1_EDX] =
1352 PENTIUM2_FEATURES,
1353 .xlevel = 0,
1354 .model_id = "",
1355 },
1356 {
1357 .name = "pentium3",
1358 .level = 3,
1359 .vendor = CPUID_VENDOR_INTEL,
1360 .family = 6,
1361 .model = 7,
1362 .stepping = 3,
1363 .features[FEAT_1_EDX] =
1364 PENTIUM3_FEATURES,
1365 .xlevel = 0,
1366 .model_id = "",
1367 },
1368 {
1369 .name = "athlon",
1370 .level = 2,
1371 .vendor = CPUID_VENDOR_AMD,
1372 .family = 6,
1373 .model = 2,
1374 .stepping = 3,
1375 .features[FEAT_1_EDX] =
1376 PPRO_FEATURES | CPUID_PSE36 | CPUID_VME | CPUID_MTRR |
1377 CPUID_MCA,
1378 .features[FEAT_8000_0001_EDX] =
1379 CPUID_EXT2_MMXEXT | CPUID_EXT2_3DNOW | CPUID_EXT2_3DNOWEXT,
1380 .xlevel = 0x80000008,
1381 .model_id = "QEMU Virtual CPU version " QEMU_HW_VERSION,
1382 },
1383 {
1384 .name = "n270",
1385 .level = 10,
1386 .vendor = CPUID_VENDOR_INTEL,
1387 .family = 6,
1388 .model = 28,
1389 .stepping = 2,
1390 /* Missing: CPUID_DTS, CPUID_HT, CPUID_TM, CPUID_PBE */
1391 .features[FEAT_1_EDX] =
1392 PPRO_FEATURES |
1393 CPUID_MTRR | CPUID_CLFLUSH | CPUID_MCA | CPUID_VME |
1394 CPUID_ACPI | CPUID_SS,
1395 /* Some CPUs got no CPUID_SEP */
1396 /* Missing: CPUID_EXT_DSCPL, CPUID_EXT_EST, CPUID_EXT_TM2,
1397 * CPUID_EXT_XTPR */
1398 .features[FEAT_1_ECX] =
1399 CPUID_EXT_SSE3 | CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 |
1400 CPUID_EXT_MOVBE,
1401 .features[FEAT_8000_0001_EDX] =
1402 CPUID_EXT2_NX,
1403 .features[FEAT_8000_0001_ECX] =
1404 CPUID_EXT3_LAHF_LM,
1405 .xlevel = 0x80000008,
1406 .model_id = "Intel(R) Atom(TM) CPU N270 @ 1.60GHz",
1407 },
1408 {
1409 .name = "Conroe",
1410 .level = 10,
1411 .vendor = CPUID_VENDOR_INTEL,
1412 .family = 6,
1413 .model = 15,
1414 .stepping = 3,
1415 .features[FEAT_1_EDX] =
1416 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1417 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1418 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1419 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1420 CPUID_DE | CPUID_FP87,
1421 .features[FEAT_1_ECX] =
1422 CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1423 .features[FEAT_8000_0001_EDX] =
1424 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1425 .features[FEAT_8000_0001_ECX] =
1426 CPUID_EXT3_LAHF_LM,
1427 .xlevel = 0x80000008,
1428 .model_id = "Intel Celeron_4x0 (Conroe/Merom Class Core 2)",
1429 },
1430 {
1431 .name = "Penryn",
1432 .level = 10,
1433 .vendor = CPUID_VENDOR_INTEL,
1434 .family = 6,
1435 .model = 23,
1436 .stepping = 3,
1437 .features[FEAT_1_EDX] =
1438 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1439 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1440 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1441 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1442 CPUID_DE | CPUID_FP87,
1443 .features[FEAT_1_ECX] =
1444 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1445 CPUID_EXT_SSE3,
1446 .features[FEAT_8000_0001_EDX] =
1447 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
1448 .features[FEAT_8000_0001_ECX] =
1449 CPUID_EXT3_LAHF_LM,
1450 .xlevel = 0x80000008,
1451 .model_id = "Intel Core 2 Duo P9xxx (Penryn Class Core 2)",
1452 },
1453 {
1454 .name = "Nehalem",
1455 .level = 11,
1456 .vendor = CPUID_VENDOR_INTEL,
1457 .family = 6,
1458 .model = 26,
1459 .stepping = 3,
1460 .features[FEAT_1_EDX] =
1461 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1462 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1463 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1464 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1465 CPUID_DE | CPUID_FP87,
1466 .features[FEAT_1_ECX] =
1467 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1468 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1469 .features[FEAT_8000_0001_EDX] =
1470 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1471 .features[FEAT_8000_0001_ECX] =
1472 CPUID_EXT3_LAHF_LM,
1473 .xlevel = 0x80000008,
1474 .model_id = "Intel Core i7 9xx (Nehalem Class Core i7)",
1475 },
1476 {
1477 .name = "Nehalem-IBRS",
1478 .level = 11,
1479 .vendor = CPUID_VENDOR_INTEL,
1480 .family = 6,
1481 .model = 26,
1482 .stepping = 3,
1483 .features[FEAT_1_EDX] =
1484 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1485 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1486 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1487 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1488 CPUID_DE | CPUID_FP87,
1489 .features[FEAT_1_ECX] =
1490 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1491 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_SSE3,
1492 .features[FEAT_7_0_EDX] =
1493 CPUID_7_0_EDX_SPEC_CTRL,
1494 .features[FEAT_8000_0001_EDX] =
1495 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1496 .features[FEAT_8000_0001_ECX] =
1497 CPUID_EXT3_LAHF_LM,
1498 .xlevel = 0x80000008,
1499 .model_id = "Intel Core i7 9xx (Nehalem Core i7, IBRS update)",
1500 },
1501 {
1502 .name = "Westmere",
1503 .level = 11,
1504 .vendor = CPUID_VENDOR_INTEL,
1505 .family = 6,
1506 .model = 44,
1507 .stepping = 1,
1508 .features[FEAT_1_EDX] =
1509 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1510 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1511 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1512 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1513 CPUID_DE | CPUID_FP87,
1514 .features[FEAT_1_ECX] =
1515 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1516 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1517 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1518 .features[FEAT_8000_0001_EDX] =
1519 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1520 .features[FEAT_8000_0001_ECX] =
1521 CPUID_EXT3_LAHF_LM,
1522 .features[FEAT_6_EAX] =
1523 CPUID_6_EAX_ARAT,
1524 .xlevel = 0x80000008,
1525 .model_id = "Westmere E56xx/L56xx/X56xx (Nehalem-C)",
1526 },
1527 {
1528 .name = "Westmere-IBRS",
1529 .level = 11,
1530 .vendor = CPUID_VENDOR_INTEL,
1531 .family = 6,
1532 .model = 44,
1533 .stepping = 1,
1534 .features[FEAT_1_EDX] =
1535 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1536 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1537 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1538 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1539 CPUID_DE | CPUID_FP87,
1540 .features[FEAT_1_ECX] =
1541 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
1542 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1543 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
1544 .features[FEAT_8000_0001_EDX] =
1545 CPUID_EXT2_LM | CPUID_EXT2_SYSCALL | CPUID_EXT2_NX,
1546 .features[FEAT_8000_0001_ECX] =
1547 CPUID_EXT3_LAHF_LM,
1548 .features[FEAT_7_0_EDX] =
1549 CPUID_7_0_EDX_SPEC_CTRL,
1550 .features[FEAT_6_EAX] =
1551 CPUID_6_EAX_ARAT,
1552 .xlevel = 0x80000008,
1553 .model_id = "Westmere E56xx/L56xx/X56xx (IBRS update)",
1554 },
1555 {
1556 .name = "SandyBridge",
1557 .level = 0xd,
1558 .vendor = CPUID_VENDOR_INTEL,
1559 .family = 6,
1560 .model = 42,
1561 .stepping = 1,
1562 .features[FEAT_1_EDX] =
1563 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1564 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1565 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1566 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1567 CPUID_DE | CPUID_FP87,
1568 .features[FEAT_1_ECX] =
1569 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1570 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1571 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1572 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1573 CPUID_EXT_SSE3,
1574 .features[FEAT_8000_0001_EDX] =
1575 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1576 CPUID_EXT2_SYSCALL,
1577 .features[FEAT_8000_0001_ECX] =
1578 CPUID_EXT3_LAHF_LM,
1579 .features[FEAT_XSAVE] =
1580 CPUID_XSAVE_XSAVEOPT,
1581 .features[FEAT_6_EAX] =
1582 CPUID_6_EAX_ARAT,
1583 .xlevel = 0x80000008,
1584 .model_id = "Intel Xeon E312xx (Sandy Bridge)",
1585 },
1586 {
1587 .name = "SandyBridge-IBRS",
1588 .level = 0xd,
1589 .vendor = CPUID_VENDOR_INTEL,
1590 .family = 6,
1591 .model = 42,
1592 .stepping = 1,
1593 .features[FEAT_1_EDX] =
1594 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1595 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1596 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1597 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1598 CPUID_DE | CPUID_FP87,
1599 .features[FEAT_1_ECX] =
1600 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1601 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1602 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1603 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1604 CPUID_EXT_SSE3,
1605 .features[FEAT_8000_0001_EDX] =
1606 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1607 CPUID_EXT2_SYSCALL,
1608 .features[FEAT_8000_0001_ECX] =
1609 CPUID_EXT3_LAHF_LM,
1610 .features[FEAT_7_0_EDX] =
1611 CPUID_7_0_EDX_SPEC_CTRL,
1612 .features[FEAT_XSAVE] =
1613 CPUID_XSAVE_XSAVEOPT,
1614 .features[FEAT_6_EAX] =
1615 CPUID_6_EAX_ARAT,
1616 .xlevel = 0x80000008,
1617 .model_id = "Intel Xeon E312xx (Sandy Bridge, IBRS update)",
1618 },
1619 {
1620 .name = "IvyBridge",
1621 .level = 0xd,
1622 .vendor = CPUID_VENDOR_INTEL,
1623 .family = 6,
1624 .model = 58,
1625 .stepping = 9,
1626 .features[FEAT_1_EDX] =
1627 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1628 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1629 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1630 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1631 CPUID_DE | CPUID_FP87,
1632 .features[FEAT_1_ECX] =
1633 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1634 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1635 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1636 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1637 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1638 .features[FEAT_7_0_EBX] =
1639 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1640 CPUID_7_0_EBX_ERMS,
1641 .features[FEAT_8000_0001_EDX] =
1642 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1643 CPUID_EXT2_SYSCALL,
1644 .features[FEAT_8000_0001_ECX] =
1645 CPUID_EXT3_LAHF_LM,
1646 .features[FEAT_XSAVE] =
1647 CPUID_XSAVE_XSAVEOPT,
1648 .features[FEAT_6_EAX] =
1649 CPUID_6_EAX_ARAT,
1650 .xlevel = 0x80000008,
1651 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge)",
1652 },
1653 {
1654 .name = "IvyBridge-IBRS",
1655 .level = 0xd,
1656 .vendor = CPUID_VENDOR_INTEL,
1657 .family = 6,
1658 .model = 58,
1659 .stepping = 9,
1660 .features[FEAT_1_EDX] =
1661 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1662 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1663 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1664 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1665 CPUID_DE | CPUID_FP87,
1666 .features[FEAT_1_ECX] =
1667 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1668 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_POPCNT |
1669 CPUID_EXT_X2APIC | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
1670 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
1671 CPUID_EXT_SSE3 | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1672 .features[FEAT_7_0_EBX] =
1673 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_SMEP |
1674 CPUID_7_0_EBX_ERMS,
1675 .features[FEAT_8000_0001_EDX] =
1676 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1677 CPUID_EXT2_SYSCALL,
1678 .features[FEAT_8000_0001_ECX] =
1679 CPUID_EXT3_LAHF_LM,
1680 .features[FEAT_7_0_EDX] =
1681 CPUID_7_0_EDX_SPEC_CTRL,
1682 .features[FEAT_XSAVE] =
1683 CPUID_XSAVE_XSAVEOPT,
1684 .features[FEAT_6_EAX] =
1685 CPUID_6_EAX_ARAT,
1686 .xlevel = 0x80000008,
1687 .model_id = "Intel Xeon E3-12xx v2 (Ivy Bridge, IBRS)",
1688 },
1689 {
1690 .name = "Haswell-noTSX",
1691 .level = 0xd,
1692 .vendor = CPUID_VENDOR_INTEL,
1693 .family = 6,
1694 .model = 60,
1695 .stepping = 1,
1696 .features[FEAT_1_EDX] =
1697 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1698 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1699 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1700 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1701 CPUID_DE | CPUID_FP87,
1702 .features[FEAT_1_ECX] =
1703 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1704 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1705 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1706 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1707 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1708 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1709 .features[FEAT_8000_0001_EDX] =
1710 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1711 CPUID_EXT2_SYSCALL,
1712 .features[FEAT_8000_0001_ECX] =
1713 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1714 .features[FEAT_7_0_EBX] =
1715 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1716 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1717 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1718 .features[FEAT_XSAVE] =
1719 CPUID_XSAVE_XSAVEOPT,
1720 .features[FEAT_6_EAX] =
1721 CPUID_6_EAX_ARAT,
1722 .xlevel = 0x80000008,
1723 .model_id = "Intel Core Processor (Haswell, no TSX)",
1724 },
1725 {
1726 .name = "Haswell-noTSX-IBRS",
1727 .level = 0xd,
1728 .vendor = CPUID_VENDOR_INTEL,
1729 .family = 6,
1730 .model = 60,
1731 .stepping = 1,
1732 .features[FEAT_1_EDX] =
1733 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1734 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1735 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1736 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1737 CPUID_DE | CPUID_FP87,
1738 .features[FEAT_1_ECX] =
1739 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1740 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1741 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1742 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1743 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1744 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1745 .features[FEAT_8000_0001_EDX] =
1746 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1747 CPUID_EXT2_SYSCALL,
1748 .features[FEAT_8000_0001_ECX] =
1749 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1750 .features[FEAT_7_0_EDX] =
1751 CPUID_7_0_EDX_SPEC_CTRL,
1752 .features[FEAT_7_0_EBX] =
1753 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1754 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1755 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID,
1756 .features[FEAT_XSAVE] =
1757 CPUID_XSAVE_XSAVEOPT,
1758 .features[FEAT_6_EAX] =
1759 CPUID_6_EAX_ARAT,
1760 .xlevel = 0x80000008,
1761 .model_id = "Intel Core Processor (Haswell, no TSX, IBRS)",
1762 },
1763 {
1764 .name = "Haswell",
1765 .level = 0xd,
1766 .vendor = CPUID_VENDOR_INTEL,
1767 .family = 6,
1768 .model = 60,
1769 .stepping = 4,
1770 .features[FEAT_1_EDX] =
1771 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1772 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1773 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1774 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1775 CPUID_DE | CPUID_FP87,
1776 .features[FEAT_1_ECX] =
1777 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1778 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1779 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1780 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1781 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1782 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1783 .features[FEAT_8000_0001_EDX] =
1784 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1785 CPUID_EXT2_SYSCALL,
1786 .features[FEAT_8000_0001_ECX] =
1787 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1788 .features[FEAT_7_0_EBX] =
1789 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1790 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1791 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1792 CPUID_7_0_EBX_RTM,
1793 .features[FEAT_XSAVE] =
1794 CPUID_XSAVE_XSAVEOPT,
1795 .features[FEAT_6_EAX] =
1796 CPUID_6_EAX_ARAT,
1797 .xlevel = 0x80000008,
1798 .model_id = "Intel Core Processor (Haswell)",
1799 },
1800 {
1801 .name = "Haswell-IBRS",
1802 .level = 0xd,
1803 .vendor = CPUID_VENDOR_INTEL,
1804 .family = 6,
1805 .model = 60,
1806 .stepping = 4,
1807 .features[FEAT_1_EDX] =
1808 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1809 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1810 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1811 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1812 CPUID_DE | CPUID_FP87,
1813 .features[FEAT_1_ECX] =
1814 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1815 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1816 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1817 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1818 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1819 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1820 .features[FEAT_8000_0001_EDX] =
1821 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1822 CPUID_EXT2_SYSCALL,
1823 .features[FEAT_8000_0001_ECX] =
1824 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM,
1825 .features[FEAT_7_0_EDX] =
1826 CPUID_7_0_EDX_SPEC_CTRL,
1827 .features[FEAT_7_0_EBX] =
1828 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1829 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1830 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1831 CPUID_7_0_EBX_RTM,
1832 .features[FEAT_XSAVE] =
1833 CPUID_XSAVE_XSAVEOPT,
1834 .features[FEAT_6_EAX] =
1835 CPUID_6_EAX_ARAT,
1836 .xlevel = 0x80000008,
1837 .model_id = "Intel Core Processor (Haswell, IBRS)",
1838 },
1839 {
1840 .name = "Broadwell-noTSX",
1841 .level = 0xd,
1842 .vendor = CPUID_VENDOR_INTEL,
1843 .family = 6,
1844 .model = 61,
1845 .stepping = 2,
1846 .features[FEAT_1_EDX] =
1847 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1848 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1849 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1850 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1851 CPUID_DE | CPUID_FP87,
1852 .features[FEAT_1_ECX] =
1853 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1854 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1855 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1856 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1857 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1858 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1859 .features[FEAT_8000_0001_EDX] =
1860 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1861 CPUID_EXT2_SYSCALL,
1862 .features[FEAT_8000_0001_ECX] =
1863 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1864 .features[FEAT_7_0_EBX] =
1865 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1866 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1867 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1868 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1869 CPUID_7_0_EBX_SMAP,
1870 .features[FEAT_XSAVE] =
1871 CPUID_XSAVE_XSAVEOPT,
1872 .features[FEAT_6_EAX] =
1873 CPUID_6_EAX_ARAT,
1874 .xlevel = 0x80000008,
1875 .model_id = "Intel Core Processor (Broadwell, no TSX)",
1876 },
1877 {
1878 .name = "Broadwell-noTSX-IBRS",
1879 .level = 0xd,
1880 .vendor = CPUID_VENDOR_INTEL,
1881 .family = 6,
1882 .model = 61,
1883 .stepping = 2,
1884 .features[FEAT_1_EDX] =
1885 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1886 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1887 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1888 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1889 CPUID_DE | CPUID_FP87,
1890 .features[FEAT_1_ECX] =
1891 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1892 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1893 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1894 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1895 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1896 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1897 .features[FEAT_8000_0001_EDX] =
1898 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1899 CPUID_EXT2_SYSCALL,
1900 .features[FEAT_8000_0001_ECX] =
1901 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1902 .features[FEAT_7_0_EDX] =
1903 CPUID_7_0_EDX_SPEC_CTRL,
1904 .features[FEAT_7_0_EBX] =
1905 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1906 CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1907 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1908 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1909 CPUID_7_0_EBX_SMAP,
1910 .features[FEAT_XSAVE] =
1911 CPUID_XSAVE_XSAVEOPT,
1912 .features[FEAT_6_EAX] =
1913 CPUID_6_EAX_ARAT,
1914 .xlevel = 0x80000008,
1915 .model_id = "Intel Core Processor (Broadwell, no TSX, IBRS)",
1916 },
1917 {
1918 .name = "Broadwell",
1919 .level = 0xd,
1920 .vendor = CPUID_VENDOR_INTEL,
1921 .family = 6,
1922 .model = 61,
1923 .stepping = 2,
1924 .features[FEAT_1_EDX] =
1925 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1926 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1927 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1928 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1929 CPUID_DE | CPUID_FP87,
1930 .features[FEAT_1_ECX] =
1931 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1932 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1933 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1934 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1935 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1936 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1937 .features[FEAT_8000_0001_EDX] =
1938 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1939 CPUID_EXT2_SYSCALL,
1940 .features[FEAT_8000_0001_ECX] =
1941 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1942 .features[FEAT_7_0_EBX] =
1943 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1944 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1945 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1946 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1947 CPUID_7_0_EBX_SMAP,
1948 .features[FEAT_XSAVE] =
1949 CPUID_XSAVE_XSAVEOPT,
1950 .features[FEAT_6_EAX] =
1951 CPUID_6_EAX_ARAT,
1952 .xlevel = 0x80000008,
1953 .model_id = "Intel Core Processor (Broadwell)",
1954 },
1955 {
1956 .name = "Broadwell-IBRS",
1957 .level = 0xd,
1958 .vendor = CPUID_VENDOR_INTEL,
1959 .family = 6,
1960 .model = 61,
1961 .stepping = 2,
1962 .features[FEAT_1_EDX] =
1963 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
1964 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
1965 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
1966 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
1967 CPUID_DE | CPUID_FP87,
1968 .features[FEAT_1_ECX] =
1969 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
1970 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
1971 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
1972 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
1973 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
1974 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
1975 .features[FEAT_8000_0001_EDX] =
1976 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
1977 CPUID_EXT2_SYSCALL,
1978 .features[FEAT_8000_0001_ECX] =
1979 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
1980 .features[FEAT_7_0_EDX] =
1981 CPUID_7_0_EDX_SPEC_CTRL,
1982 .features[FEAT_7_0_EBX] =
1983 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
1984 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
1985 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
1986 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
1987 CPUID_7_0_EBX_SMAP,
1988 .features[FEAT_XSAVE] =
1989 CPUID_XSAVE_XSAVEOPT,
1990 .features[FEAT_6_EAX] =
1991 CPUID_6_EAX_ARAT,
1992 .xlevel = 0x80000008,
1993 .model_id = "Intel Core Processor (Broadwell, IBRS)",
1994 },
1995 {
1996 .name = "Skylake-Client",
1997 .level = 0xd,
1998 .vendor = CPUID_VENDOR_INTEL,
1999 .family = 6,
2000 .model = 94,
2001 .stepping = 3,
2002 .features[FEAT_1_EDX] =
2003 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2004 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2005 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2006 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2007 CPUID_DE | CPUID_FP87,
2008 .features[FEAT_1_ECX] =
2009 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2010 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2011 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2012 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2013 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2014 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2015 .features[FEAT_8000_0001_EDX] =
2016 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2017 CPUID_EXT2_SYSCALL,
2018 .features[FEAT_8000_0001_ECX] =
2019 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2020 .features[FEAT_7_0_EBX] =
2021 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2022 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2023 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2024 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2025 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2026 /* Missing: XSAVES (not supported by some Linux versions,
2027 * including v4.1 to v4.12).
2028 * KVM doesn't yet expose any XSAVES state save component,
2029 * and the only one defined in Skylake (processor tracing)
2030 * probably will block migration anyway.
2031 */
2032 .features[FEAT_XSAVE] =
2033 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2034 CPUID_XSAVE_XGETBV1,
2035 .features[FEAT_6_EAX] =
2036 CPUID_6_EAX_ARAT,
2037 .xlevel = 0x80000008,
2038 .model_id = "Intel Core Processor (Skylake)",
2039 },
2040 {
2041 .name = "Skylake-Client-IBRS",
2042 .level = 0xd,
2043 .vendor = CPUID_VENDOR_INTEL,
2044 .family = 6,
2045 .model = 94,
2046 .stepping = 3,
2047 .features[FEAT_1_EDX] =
2048 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2049 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2050 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2051 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2052 CPUID_DE | CPUID_FP87,
2053 .features[FEAT_1_ECX] =
2054 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2055 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2056 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2057 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2058 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2059 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2060 .features[FEAT_8000_0001_EDX] =
2061 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_NX |
2062 CPUID_EXT2_SYSCALL,
2063 .features[FEAT_8000_0001_ECX] =
2064 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2065 .features[FEAT_7_0_EDX] =
2066 CPUID_7_0_EDX_SPEC_CTRL,
2067 .features[FEAT_7_0_EBX] =
2068 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2069 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2070 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2071 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2072 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX,
2073 /* Missing: XSAVES (not supported by some Linux versions,
2074 * including v4.1 to v4.12).
2075 * KVM doesn't yet expose any XSAVES state save component,
2076 * and the only one defined in Skylake (processor tracing)
2077 * probably will block migration anyway.
2078 */
2079 .features[FEAT_XSAVE] =
2080 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2081 CPUID_XSAVE_XGETBV1,
2082 .features[FEAT_6_EAX] =
2083 CPUID_6_EAX_ARAT,
2084 .xlevel = 0x80000008,
2085 .model_id = "Intel Core Processor (Skylake, IBRS)",
2086 },
2087 {
2088 .name = "Skylake-Server",
2089 .level = 0xd,
2090 .vendor = CPUID_VENDOR_INTEL,
2091 .family = 6,
2092 .model = 85,
2093 .stepping = 4,
2094 .features[FEAT_1_EDX] =
2095 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2096 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2097 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2098 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2099 CPUID_DE | CPUID_FP87,
2100 .features[FEAT_1_ECX] =
2101 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2102 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2103 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2104 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2105 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2106 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2107 .features[FEAT_8000_0001_EDX] =
2108 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2109 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2110 .features[FEAT_8000_0001_ECX] =
2111 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2112 .features[FEAT_7_0_EBX] =
2113 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2114 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2115 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2116 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2117 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2118 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2119 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2120 CPUID_7_0_EBX_AVX512VL | CPUID_7_0_EBX_CLFLUSHOPT,
2121 /* Missing: XSAVES (not supported by some Linux versions,
2122 * including v4.1 to v4.12).
2123 * KVM doesn't yet expose any XSAVES state save component,
2124 * and the only one defined in Skylake (processor tracing)
2125 * probably will block migration anyway.
2126 */
2127 .features[FEAT_XSAVE] =
2128 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2129 CPUID_XSAVE_XGETBV1,
2130 .features[FEAT_6_EAX] =
2131 CPUID_6_EAX_ARAT,
2132 .xlevel = 0x80000008,
2133 .model_id = "Intel Xeon Processor (Skylake)",
2134 },
2135 {
2136 .name = "Skylake-Server-IBRS",
2137 .level = 0xd,
2138 .vendor = CPUID_VENDOR_INTEL,
2139 .family = 6,
2140 .model = 85,
2141 .stepping = 4,
2142 .features[FEAT_1_EDX] =
2143 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2144 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2145 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2146 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2147 CPUID_DE | CPUID_FP87,
2148 .features[FEAT_1_ECX] =
2149 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2150 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2151 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2152 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2153 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2154 CPUID_EXT_PCID | CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2155 .features[FEAT_8000_0001_EDX] =
2156 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2157 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2158 .features[FEAT_8000_0001_ECX] =
2159 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2160 .features[FEAT_7_0_EDX] =
2161 CPUID_7_0_EDX_SPEC_CTRL,
2162 .features[FEAT_7_0_EBX] =
2163 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 |
2164 CPUID_7_0_EBX_HLE | CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP |
2165 CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID |
2166 CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX |
2167 CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_CLWB |
2168 CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ |
2169 CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512CD |
2170 CPUID_7_0_EBX_AVX512VL,
2171 /* Missing: XSAVES (not supported by some Linux versions,
2172 * including v4.1 to v4.12).
2173 * KVM doesn't yet expose any XSAVES state save component,
2174 * and the only one defined in Skylake (processor tracing)
2175 * probably will block migration anyway.
2176 */
2177 .features[FEAT_XSAVE] =
2178 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2179 CPUID_XSAVE_XGETBV1,
2180 .features[FEAT_6_EAX] =
2181 CPUID_6_EAX_ARAT,
2182 .xlevel = 0x80000008,
2183 .model_id = "Intel Xeon Processor (Skylake, IBRS)",
2184 },
2185 {
2186 .name = "KnightsMill",
2187 .level = 0xd,
2188 .vendor = CPUID_VENDOR_INTEL,
2189 .family = 6,
2190 .model = 133,
2191 .stepping = 0,
2192 .features[FEAT_1_EDX] =
2193 CPUID_VME | CPUID_SS | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR |
2194 CPUID_MMX | CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV |
2195 CPUID_MCA | CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC |
2196 CPUID_CX8 | CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC |
2197 CPUID_PSE | CPUID_DE | CPUID_FP87,
2198 .features[FEAT_1_ECX] =
2199 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2200 CPUID_EXT_POPCNT | CPUID_EXT_X2APIC | CPUID_EXT_SSE42 |
2201 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_SSSE3 |
2202 CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3 |
2203 CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_FMA | CPUID_EXT_MOVBE |
2204 CPUID_EXT_F16C | CPUID_EXT_RDRAND,
2205 .features[FEAT_8000_0001_EDX] =
2206 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_RDTSCP |
2207 CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2208 .features[FEAT_8000_0001_ECX] =
2209 CPUID_EXT3_ABM | CPUID_EXT3_LAHF_LM | CPUID_EXT3_3DNOWPREFETCH,
2210 .features[FEAT_7_0_EBX] =
2211 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2212 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS |
2213 CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_AVX512F |
2214 CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_AVX512PF |
2215 CPUID_7_0_EBX_AVX512ER,
2216 .features[FEAT_7_0_ECX] =
2217 CPUID_7_0_ECX_AVX512_VPOPCNTDQ,
2218 .features[FEAT_7_0_EDX] =
2219 CPUID_7_0_EDX_AVX512_4VNNIW | CPUID_7_0_EDX_AVX512_4FMAPS,
2220 .features[FEAT_XSAVE] =
2221 CPUID_XSAVE_XSAVEOPT,
2222 .features[FEAT_6_EAX] =
2223 CPUID_6_EAX_ARAT,
2224 .xlevel = 0x80000008,
2225 .model_id = "Intel Xeon Phi Processor (Knights Mill)",
2226 },
2227 {
2228 .name = "Opteron_G1",
2229 .level = 5,
2230 .vendor = CPUID_VENDOR_AMD,
2231 .family = 15,
2232 .model = 6,
2233 .stepping = 1,
2234 .features[FEAT_1_EDX] =
2235 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2236 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2237 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2238 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2239 CPUID_DE | CPUID_FP87,
2240 .features[FEAT_1_ECX] =
2241 CPUID_EXT_SSE3,
2242 .features[FEAT_8000_0001_EDX] =
2243 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2244 .xlevel = 0x80000008,
2245 .model_id = "AMD Opteron 240 (Gen 1 Class Opteron)",
2246 },
2247 {
2248 .name = "Opteron_G2",
2249 .level = 5,
2250 .vendor = CPUID_VENDOR_AMD,
2251 .family = 15,
2252 .model = 6,
2253 .stepping = 1,
2254 .features[FEAT_1_EDX] =
2255 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2256 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2257 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2258 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2259 CPUID_DE | CPUID_FP87,
2260 .features[FEAT_1_ECX] =
2261 CPUID_EXT_CX16 | CPUID_EXT_SSE3,
2262 /* Missing: CPUID_EXT2_RDTSCP */
2263 .features[FEAT_8000_0001_EDX] =
2264 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2265 .features[FEAT_8000_0001_ECX] =
2266 CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2267 .xlevel = 0x80000008,
2268 .model_id = "AMD Opteron 22xx (Gen 2 Class Opteron)",
2269 },
2270 {
2271 .name = "Opteron_G3",
2272 .level = 5,
2273 .vendor = CPUID_VENDOR_AMD,
2274 .family = 16,
2275 .model = 2,
2276 .stepping = 3,
2277 .features[FEAT_1_EDX] =
2278 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2279 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2280 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2281 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2282 CPUID_DE | CPUID_FP87,
2283 .features[FEAT_1_ECX] =
2284 CPUID_EXT_POPCNT | CPUID_EXT_CX16 | CPUID_EXT_MONITOR |
2285 CPUID_EXT_SSE3,
2286 /* Missing: CPUID_EXT2_RDTSCP */
2287 .features[FEAT_8000_0001_EDX] =
2288 CPUID_EXT2_LM | CPUID_EXT2_NX | CPUID_EXT2_SYSCALL,
2289 .features[FEAT_8000_0001_ECX] =
2290 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A |
2291 CPUID_EXT3_ABM | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2292 .xlevel = 0x80000008,
2293 .model_id = "AMD Opteron 23xx (Gen 3 Class Opteron)",
2294 },
2295 {
2296 .name = "Opteron_G4",
2297 .level = 0xd,
2298 .vendor = CPUID_VENDOR_AMD,
2299 .family = 21,
2300 .model = 1,
2301 .stepping = 2,
2302 .features[FEAT_1_EDX] =
2303 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2304 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2305 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2306 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2307 CPUID_DE | CPUID_FP87,
2308 .features[FEAT_1_ECX] =
2309 CPUID_EXT_AVX | CPUID_EXT_XSAVE | CPUID_EXT_AES |
2310 CPUID_EXT_POPCNT | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2311 CPUID_EXT_CX16 | CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ |
2312 CPUID_EXT_SSE3,
2313 /* Missing: CPUID_EXT2_RDTSCP */
2314 .features[FEAT_8000_0001_EDX] =
2315 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2316 CPUID_EXT2_SYSCALL,
2317 .features[FEAT_8000_0001_ECX] =
2318 CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2319 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2320 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2321 CPUID_EXT3_LAHF_LM,
2322 /* no xsaveopt! */
2323 .xlevel = 0x8000001A,
2324 .model_id = "AMD Opteron 62xx class CPU",
2325 },
2326 {
2327 .name = "Opteron_G5",
2328 .level = 0xd,
2329 .vendor = CPUID_VENDOR_AMD,
2330 .family = 21,
2331 .model = 2,
2332 .stepping = 0,
2333 .features[FEAT_1_EDX] =
2334 CPUID_VME | CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX |
2335 CPUID_CLFLUSH | CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA |
2336 CPUID_PGE | CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 |
2337 CPUID_MCE | CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE |
2338 CPUID_DE | CPUID_FP87,
2339 .features[FEAT_1_ECX] =
2340 CPUID_EXT_F16C | CPUID_EXT_AVX | CPUID_EXT_XSAVE |
2341 CPUID_EXT_AES | CPUID_EXT_POPCNT | CPUID_EXT_SSE42 |
2342 CPUID_EXT_SSE41 | CPUID_EXT_CX16 | CPUID_EXT_FMA |
2343 CPUID_EXT_SSSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2344 /* Missing: CPUID_EXT2_RDTSCP */
2345 .features[FEAT_8000_0001_EDX] =
2346 CPUID_EXT2_LM | CPUID_EXT2_PDPE1GB | CPUID_EXT2_NX |
2347 CPUID_EXT2_SYSCALL,
2348 .features[FEAT_8000_0001_ECX] =
2349 CPUID_EXT3_TBM | CPUID_EXT3_FMA4 | CPUID_EXT3_XOP |
2350 CPUID_EXT3_3DNOWPREFETCH | CPUID_EXT3_MISALIGNSSE |
2351 CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | CPUID_EXT3_SVM |
2352 CPUID_EXT3_LAHF_LM,
2353 /* no xsaveopt! */
2354 .xlevel = 0x8000001A,
2355 .model_id = "AMD Opteron 63xx class CPU",
2356 },
2357 {
2358 .name = "EPYC",
2359 .level = 0xd,
2360 .vendor = CPUID_VENDOR_AMD,
2361 .family = 23,
2362 .model = 1,
2363 .stepping = 2,
2364 .features[FEAT_1_EDX] =
2365 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2366 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2367 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2368 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2369 CPUID_VME | CPUID_FP87,
2370 .features[FEAT_1_ECX] =
2371 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2372 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2373 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2374 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2375 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2376 .features[FEAT_8000_0001_EDX] =
2377 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2378 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2379 CPUID_EXT2_SYSCALL,
2380 .features[FEAT_8000_0001_ECX] =
2381 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2382 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2383 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2384 .features[FEAT_7_0_EBX] =
2385 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2386 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2387 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2388 CPUID_7_0_EBX_SHA_NI,
2389 /* Missing: XSAVES (not supported by some Linux versions,
2390 * including v4.1 to v4.12).
2391 * KVM doesn't yet expose any XSAVES state save component.
2392 */
2393 .features[FEAT_XSAVE] =
2394 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2395 CPUID_XSAVE_XGETBV1,
2396 .features[FEAT_6_EAX] =
2397 CPUID_6_EAX_ARAT,
2398 .xlevel = 0x8000000A,
2399 .model_id = "AMD EPYC Processor",
2400 .cache_info = &epyc_cache_info,
2401 },
2402 {
2403 .name = "EPYC-IBPB",
2404 .level = 0xd,
2405 .vendor = CPUID_VENDOR_AMD,
2406 .family = 23,
2407 .model = 1,
2408 .stepping = 2,
2409 .features[FEAT_1_EDX] =
2410 CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH |
2411 CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE |
2412 CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE |
2413 CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE |
2414 CPUID_VME | CPUID_FP87,
2415 .features[FEAT_1_ECX] =
2416 CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX |
2417 CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT |
2418 CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 |
2419 CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 |
2420 CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3,
2421 .features[FEAT_8000_0001_EDX] =
2422 CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB |
2423 CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX |
2424 CPUID_EXT2_SYSCALL,
2425 .features[FEAT_8000_0001_ECX] =
2426 CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH |
2427 CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM |
2428 CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM,
2429 .features[FEAT_8000_0008_EBX] =
2430 CPUID_8000_0008_EBX_IBPB,
2431 .features[FEAT_7_0_EBX] =
2432 CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 |
2433 CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED |
2434 CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT |
2435 CPUID_7_0_EBX_SHA_NI,
2436 /* Missing: XSAVES (not supported by some Linux versions,
2437 * including v4.1 to v4.12).
2438 * KVM doesn't yet expose any XSAVES state save component.
2439 */
2440 .features[FEAT_XSAVE] =
2441 CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC |
2442 CPUID_XSAVE_XGETBV1,
2443 .features[FEAT_6_EAX] =
2444 CPUID_6_EAX_ARAT,
2445 .xlevel = 0x8000000A,
2446 .model_id = "AMD EPYC Processor (with IBPB)",
2447 .cache_info = &epyc_cache_info,
2448 },
2449 };
2450
/* A (property name, property value) pair; used for the tables of
 * accelerator-specific CPU property defaults below. */
typedef struct PropValue {
    const char *prop, *value;
} PropValue;
2454
/* KVM-specific features that are automatically added/removed
 * from all CPU models when KVM is enabled.
 */
static PropValue kvm_default_props[] = {
    { "kvmclock", "on" },
    { "kvm-nopiodelay", "on" },
    { "kvm-asyncpf", "on" },
    { "kvm-steal-time", "on" },
    { "kvm-pv-eoi", "on" },
    { "kvmclock-stable-bit", "on" },
    { "x2apic", "on" },
    { "acpi", "off" },
    { "monitor", "off" },
    { "svm", "off" },
    { NULL, NULL },    /* end-of-table sentinel: iteration stops on NULL prop */
};
2471
/* TCG-specific defaults that override all CPU models when using TCG
 */
static PropValue tcg_default_props[] = {
    /* vme forced off under TCG — presumably not emulated; confirm against
     * the TCG feature support tables elsewhere in this file. */
    { "vme", "off" },
    { NULL, NULL },    /* end-of-table sentinel */
};
2478
2479
2480 void x86_cpu_change_kvm_default(const char *prop, const char *value)
2481 {
2482 PropValue *pv;
2483 for (pv = kvm_default_props; pv->prop; pv++) {
2484 if (!strcmp(pv->prop, prop)) {
2485 pv->value = value;
2486 break;
2487 }
2488 }
2489
2490 /* It is valid to call this function only for properties that
2491 * are already present in the kvm_default_props table.
2492 */
2493 assert(pv->prop);
2494 }
2495
2496 static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
2497 bool migratable_only);
2498
/*
 * Report whether the host supports local machine-check exceptions (LMCE),
 * i.e. whether MCG_LMCE_P is set in the MCE capability mask returned by
 * KVM.  Without CONFIG_KVM, mce_cap stays 0 and this always returns false.
 */
static bool lmce_supported(void)
{
    uint64_t mce_cap = 0;

#ifdef CONFIG_KVM
    if (kvm_ioctl(kvm_state, KVM_X86_GET_MCE_CAP_SUPPORTED, &mce_cap) < 0) {
        /* Query failed: conservatively report no LMCE support */
        return false;
    }
#endif

    return !!(mce_cap & MCG_LMCE_P);
}
2511
2512 #define CPUID_MODEL_ID_SZ 48
2513
/**
 * cpu_x86_fill_model_id:
 * Read the 48-byte CPUID model ID string from the host CPU.
 *
 * @str must provide at least CPUID_MODEL_ID_SZ bytes of storage.
 *
 * The function does NOT add a null terminator to the string
 * automatically; callers needing a C string must terminate it
 * themselves.
 */
static int cpu_x86_fill_model_id(char *str)
{
    uint32_t regs[4] = { 0, 0, 0, 0 };
    int leaf;

    /* The model string spans CPUID leaves 0x80000002..0x80000004,
     * 16 bytes (EAX, EBX, ECX, EDX in that order) per leaf. */
    for (leaf = 0; leaf < 3; leaf++) {
        host_cpuid(0x80000002 + leaf, 0,
                   &regs[0], &regs[1], &regs[2], &regs[3]);
        memcpy(str + leaf * 16, regs, sizeof(regs));
    }
    return 0;
}
2537
/* QOM properties of the "max" CPU model */
static Property max_x86_cpu_properties[] = {
    /* Restrict enabled features to migration-safe ones when true */
    DEFINE_PROP_BOOL("migratable", X86CPU, migratable, true),
    /* Pass the host's cache information through to the guest when true */
    DEFINE_PROP_BOOL("host-cache-info", X86CPU, cache_info_passthrough, false),
    DEFINE_PROP_END_OF_LIST()
};
2543
2544 static void max_x86_cpu_class_init(ObjectClass *oc, void *data)
2545 {
2546 DeviceClass *dc = DEVICE_CLASS(oc);
2547 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2548
2549 xcc->ordering = 9;
2550
2551 xcc->model_description =
2552 "Enables all features supported by the accelerator in the current host";
2553
2554 dc->props = max_x86_cpu_properties;
2555 }
2556
2557 static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp);
2558
/*
 * Instance init for the "max" CPU model.
 *
 * With a host-CPUID-based accelerator (KVM or HVF), the vendor, family,
 * model, stepping and model-id properties are copied from the host CPU,
 * and the minimum CPUID levels are taken from the accelerator.  Otherwise
 * (TCG) a fixed synthetic AMD-branded identity is used.
 */
static void max_x86_cpu_initfn(Object *obj)
{
    X86CPU *cpu = X86_CPU(obj);
    CPUX86State *env = &cpu->env;
    KVMState *s = kvm_state;    /* only dereferenced on the kvm_enabled() path */

    /* We can't fill the features array here because we don't know yet if
     * "migratable" is true or false.
     */
    cpu->max_features = true;

    if (accel_uses_host_cpuid()) {
        char vendor[CPUID_VENDOR_SZ + 1] = { 0 };
        char model_id[CPUID_MODEL_ID_SZ + 1] = { 0 };
        int family, model, stepping;
        /* NOTE(review): host_cpudef is only written below (vendor string),
         * never read afterwards — looks like dead state; confirm. */
        X86CPUDefinition host_cpudef = { };
        uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;

        host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_cpudef.vendor, ebx, edx, ecx);

        host_vendor_fms(vendor, &family, &model, &stepping);

        /* model_id buffer is one byte larger than CPUID_MODEL_ID_SZ and
         * zero-initialized, so it stays NUL-terminated even though
         * cpu_x86_fill_model_id() does not terminate it. */
        cpu_x86_fill_model_id(model_id);

        object_property_set_str(OBJECT(cpu), vendor, "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), family, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), model, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), stepping, "stepping",
                                &error_abort);
        object_property_set_str(OBJECT(cpu), model_id, "model-id",
                                &error_abort);

        if (kvm_enabled()) {
            env->cpuid_min_level =
                kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
        } else {
            /* not KVM: presumably HVF, since accel_uses_host_cpuid() was
             * true — confirm if more host-CPUID accelerators are added */
            env->cpuid_min_level =
                hvf_get_supported_cpuid(0x0, 0, R_EAX);
            env->cpuid_min_xlevel =
                hvf_get_supported_cpuid(0x80000000, 0, R_EAX);
            env->cpuid_min_xlevel2 =
                hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
        }

        if (lmce_supported()) {
            object_property_set_bool(OBJECT(cpu), true, "lmce", &error_abort);
        }
    } else {
        /* TCG: fixed synthetic identity */
        object_property_set_str(OBJECT(cpu), CPUID_VENDOR_AMD,
                                "vendor", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "family", &error_abort);
        object_property_set_int(OBJECT(cpu), 6, "model", &error_abort);
        object_property_set_int(OBJECT(cpu), 3, "stepping", &error_abort);
        object_property_set_str(OBJECT(cpu),
                                "QEMU TCG CPU version " QEMU_HW_VERSION,
                                "model-id", &error_abort);
    }

    object_property_set_bool(OBJECT(cpu), true, "pmu", &error_abort);
}
2624
/* QOM type registration for the "max" CPU model */
static const TypeInfo max_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("max"),
    .parent = TYPE_X86_CPU,
    .instance_init = max_x86_cpu_initfn,
    .class_init = max_x86_cpu_class_init,
};
2631
2632 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
2633 static void host_x86_cpu_class_init(ObjectClass *oc, void *data)
2634 {
2635 X86CPUClass *xcc = X86_CPU_CLASS(oc);
2636
2637 xcc->host_cpuid_required = true;
2638 xcc->ordering = 8;
2639
2640 if (kvm_enabled()) {
2641 xcc->model_description =
2642 "KVM processor with all supported host features ";
2643 } else if (hvf_enabled()) {
2644 xcc->model_description =
2645 "HVF processor with all supported host features ";
2646 }
2647 }
2648
/* QOM type registration for the "host" CPU model; inherits from "max" */
static const TypeInfo host_x86_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("host"),
    .parent = X86_CPU_TYPE_NAME("max"),
    .class_init = host_x86_cpu_class_init,
};
2654
2655 #endif
2656
2657 static void report_unavailable_features(FeatureWord w, uint32_t mask)
2658 {
2659 FeatureWordInfo *f = &feature_word_info[w];
2660 int i;
2661
2662 for (i = 0; i < 32; ++i) {
2663 if ((1UL << i) & mask) {
2664 const char *reg = get_register_name_32(f->cpuid_reg);
2665 assert(reg);
2666 warn_report("%s doesn't support requested feature: "
2667 "CPUID.%02XH:%s%s%s [bit %d]",
2668 accel_uses_host_cpuid() ? "host" : "TCG",
2669 f->cpuid_eax, reg,
2670 f->feat_names[i] ? "." : "",
2671 f->feat_names[i] ? f->feat_names[i] : "", i);
2672 }
2673 }
2674 }
2675
2676 static void x86_cpuid_version_get_family(Object *obj, Visitor *v,
2677 const char *name, void *opaque,
2678 Error **errp)
2679 {
2680 X86CPU *cpu = X86_CPU(obj);
2681 CPUX86State *env = &cpu->env;
2682 int64_t value;
2683
2684 value = (env->cpuid_version >> 8) & 0xf;
2685 if (value == 0xf) {
2686 value += (env->cpuid_version >> 20) & 0xff;
2687 }
2688 visit_type_int(v, name, &value, errp);
2689 }
2690
2691 static void x86_cpuid_version_set_family(Object *obj, Visitor *v,
2692 const char *name, void *opaque,
2693 Error **errp)
2694 {
2695 X86CPU *cpu = X86_CPU(obj);
2696 CPUX86State *env = &cpu->env;
2697 const int64_t min = 0;
2698 const int64_t max = 0xff + 0xf;
2699 Error *local_err = NULL;
2700 int64_t value;
2701
2702 visit_type_int(v, name, &value, &local_err);
2703 if (local_err) {
2704 error_propagate(errp, local_err);
2705 return;
2706 }
2707 if (value < min || value > max) {
2708 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2709 name ? name : "null", value, min, max);
2710 return;
2711 }
2712
2713 env->cpuid_version &= ~0xff00f00;
2714 if (value > 0x0f) {
2715 env->cpuid_version |= 0xf00 | ((value - 0x0f) << 20);
2716 } else {
2717 env->cpuid_version |= value << 8;
2718 }
2719 }
2720
2721 static void x86_cpuid_version_get_model(Object *obj, Visitor *v,
2722 const char *name, void *opaque,
2723 Error **errp)
2724 {
2725 X86CPU *cpu = X86_CPU(obj);
2726 CPUX86State *env = &cpu->env;
2727 int64_t value;
2728
2729 value = (env->cpuid_version >> 4) & 0xf;
2730 value |= ((env->cpuid_version >> 16) & 0xf) << 4;
2731 visit_type_int(v, name, &value, errp);
2732 }
2733
2734 static void x86_cpuid_version_set_model(Object *obj, Visitor *v,
2735 const char *name, void *opaque,
2736 Error **errp)
2737 {
2738 X86CPU *cpu = X86_CPU(obj);
2739 CPUX86State *env = &cpu->env;
2740 const int64_t min = 0;
2741 const int64_t max = 0xff;
2742 Error *local_err = NULL;
2743 int64_t value;
2744
2745 visit_type_int(v, name, &value, &local_err);
2746 if (local_err) {
2747 error_propagate(errp, local_err);
2748 return;
2749 }
2750 if (value < min || value > max) {
2751 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2752 name ? name : "null", value, min, max);
2753 return;
2754 }
2755
2756 env->cpuid_version &= ~0xf00f0;
2757 env->cpuid_version |= ((value & 0xf) << 4) | ((value >> 4) << 16);
2758 }
2759
2760 static void x86_cpuid_version_get_stepping(Object *obj, Visitor *v,
2761 const char *name, void *opaque,
2762 Error **errp)
2763 {
2764 X86CPU *cpu = X86_CPU(obj);
2765 CPUX86State *env = &cpu->env;
2766 int64_t value;
2767
2768 value = env->cpuid_version & 0xf;
2769 visit_type_int(v, name, &value, errp);
2770 }
2771
2772 static void x86_cpuid_version_set_stepping(Object *obj, Visitor *v,
2773 const char *name, void *opaque,
2774 Error **errp)
2775 {
2776 X86CPU *cpu = X86_CPU(obj);
2777 CPUX86State *env = &cpu->env;
2778 const int64_t min = 0;
2779 const int64_t max = 0xf;
2780 Error *local_err = NULL;
2781 int64_t value;
2782
2783 visit_type_int(v, name, &value, &local_err);
2784 if (local_err) {
2785 error_propagate(errp, local_err);
2786 return;
2787 }
2788 if (value < min || value > max) {
2789 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2790 name ? name : "null", value, min, max);
2791 return;
2792 }
2793
2794 env->cpuid_version &= ~0xf;
2795 env->cpuid_version |= value & 0xf;
2796 }
2797
2798 static char *x86_cpuid_get_vendor(Object *obj, Error **errp)
2799 {
2800 X86CPU *cpu = X86_CPU(obj);
2801 CPUX86State *env = &cpu->env;
2802 char *value;
2803
2804 value = g_malloc(CPUID_VENDOR_SZ + 1);
2805 x86_cpu_vendor_words2str(value, env->cpuid_vendor1, env->cpuid_vendor2,
2806 env->cpuid_vendor3);
2807 return value;
2808 }
2809
2810 static void x86_cpuid_set_vendor(Object *obj, const char *value,
2811 Error **errp)
2812 {
2813 X86CPU *cpu = X86_CPU(obj);
2814 CPUX86State *env = &cpu->env;
2815 int i;
2816
2817 if (strlen(value) != CPUID_VENDOR_SZ) {
2818 error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value);
2819 return;
2820 }
2821
2822 env->cpuid_vendor1 = 0;
2823 env->cpuid_vendor2 = 0;
2824 env->cpuid_vendor3 = 0;
2825 for (i = 0; i < 4; i++) {
2826 env->cpuid_vendor1 |= ((uint8_t)value[i ]) << (8 * i);
2827 env->cpuid_vendor2 |= ((uint8_t)value[i + 4]) << (8 * i);
2828 env->cpuid_vendor3 |= ((uint8_t)value[i + 8]) << (8 * i);
2829 }
2830 }
2831
2832 static char *x86_cpuid_get_model_id(Object *obj, Error **errp)
2833 {
2834 X86CPU *cpu = X86_CPU(obj);
2835 CPUX86State *env = &cpu->env;
2836 char *value;
2837 int i;
2838
2839 value = g_malloc(48 + 1);
2840 for (i = 0; i < 48; i++) {
2841 value[i] = env->cpuid_model[i >> 2] >> (8 * (i & 3));
2842 }
2843 value[48] = '\0';
2844 return value;
2845 }
2846
2847 static void x86_cpuid_set_model_id(Object *obj, const char *model_id,
2848 Error **errp)
2849 {
2850 X86CPU *cpu = X86_CPU(obj);
2851 CPUX86State *env = &cpu->env;
2852 int c, len, i;
2853
2854 if (model_id == NULL) {
2855 model_id = "";
2856 }
2857 len = strlen(model_id);
2858 memset(env->cpuid_model, 0, 48);
2859 for (i = 0; i < 48; i++) {
2860 if (i >= len) {
2861 c = '\0';
2862 } else {
2863 c = (uint8_t)model_id[i];
2864 }
2865 env->cpuid_model[i >> 2] |= c << (8 * (i & 3));
2866 }
2867 }
2868
2869 static void x86_cpuid_get_tsc_freq(Object *obj, Visitor *v, const char *name,
2870 void *opaque, Error **errp)
2871 {
2872 X86CPU *cpu = X86_CPU(obj);
2873 int64_t value;
2874
2875 value = cpu->env.tsc_khz * 1000;
2876 visit_type_int(v, name, &value, errp);
2877 }
2878
2879 static void x86_cpuid_set_tsc_freq(Object *obj, Visitor *v, const char *name,
2880 void *opaque, Error **errp)
2881 {
2882 X86CPU *cpu = X86_CPU(obj);
2883 const int64_t min = 0;
2884 const int64_t max = INT64_MAX;
2885 Error *local_err = NULL;
2886 int64_t value;
2887
2888 visit_type_int(v, name, &value, &local_err);
2889 if (local_err) {
2890 error_propagate(errp, local_err);
2891 return;
2892 }
2893 if (value < min || value > max) {
2894 error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, "",
2895 name ? name : "null", value, min, max);
2896 return;
2897 }
2898
2899 cpu->env.tsc_khz = cpu->env.user_tsc_khz = value / 1000;
2900 }
2901
/* Generic getter for "feature-words" and "filtered-features" properties */
static void x86_cpu_get_feature_words(Object *obj, Visitor *v,
                                      const char *name, void *opaque,
                                      Error **errp)
{
    /* @opaque is an array of FEATURE_WORDS uint32_t feature words
     * (which array depends on the property this getter is attached to). */
    uint32_t *array = (uint32_t *)opaque;
    FeatureWord w;
    X86CPUFeatureWordInfo word_infos[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList list_entries[FEATURE_WORDS] = { };
    X86CPUFeatureWordInfoList *list = NULL;

    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *wi = &feature_word_info[w];
        X86CPUFeatureWordInfo *qwi = &word_infos[w];
        qwi->cpuid_input_eax = wi->cpuid_eax;
        qwi->has_cpuid_input_ecx = wi->cpuid_needs_ecx;
        qwi->cpuid_input_ecx = wi->cpuid_ecx;
        qwi->cpuid_register = x86_reg_info_32[wi->cpuid_reg].qapi_enum;
        qwi->features = array[w];

        /* List will be in reverse order, but order shouldn't matter */
        list_entries[w].next = list;
        list_entries[w].value = &word_infos[w];
        list = &list_entries[w];
    }

    /* List nodes live on this stack frame; the visitor consumes them
     * before we return, so no heap allocation is needed. */
    visit_type_X86CPUFeatureWordInfoList(v, "feature-words", &list, errp);
}
2930
2931 static void x86_get_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2932 void *opaque, Error **errp)
2933 {
2934 X86CPU *cpu = X86_CPU(obj);
2935 int64_t value = cpu->hyperv_spinlock_attempts;
2936
2937 visit_type_int(v, name, &value, errp);
2938 }
2939
2940 static void x86_set_hv_spinlocks(Object *obj, Visitor *v, const char *name,
2941 void *opaque, Error **errp)
2942 {
2943 const int64_t min = 0xFFF;
2944 const int64_t max = UINT_MAX;
2945 X86CPU *cpu = X86_CPU(obj);
2946 Error *err = NULL;
2947 int64_t value;
2948
2949 visit_type_int(v, name, &value, &err);
2950 if (err) {
2951 error_propagate(errp, err);
2952 return;
2953 }
2954
2955 if (value < min || value > max) {
2956 error_setg(errp, "Property %s.%s doesn't take value %" PRId64
2957 " (minimum: %" PRId64 ", maximum: %" PRId64 ")",
2958 object_get_typename(obj), name ? name : "null",
2959 value, min, max);
2960 return;
2961 }
2962 cpu->hyperv_spinlock_attempts = value;
2963 }
2964
/* Property type used for "hv-spinlocks": a plain integer with a
 * range-checking setter (see x86_set_hv_spinlocks()). */
static const PropertyInfo qdev_prop_spinlocks = {
    .name = "int",
    .get = x86_get_hv_spinlocks,
    .set = x86_set_hv_spinlocks,
};
2970
/* Convert all '_' in a feature string option name to '-', in place, to
 * make the feature name conform to the QOM property naming rule, which
 * uses '-' instead of '_'.
 */
static inline void feat2prop(char *s)
{
    for (char *p = s; *p != '\0'; p++) {
        if (*p == '_') {
            *p = '-';
        }
    }
}
2980
/* Return the feature property name for a feature flag bit */
static const char *x86_cpu_feature_name(FeatureWord w, int bitnr)
{
    /* XSAVE components are automatically enabled by other features,
     * so return the original feature name instead
     */
    if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) {
        /* Map the bit back to its XSAVE component index (HI word holds
         * components 32..63). */
        int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr;

        if (comp < ARRAY_SIZE(x86_ext_save_areas) &&
            x86_ext_save_areas[comp].bits) {
            /* Redirect to the feature word/bit that enables this component. */
            w = x86_ext_save_areas[comp].feature;
            bitnr = ctz32(x86_ext_save_areas[comp].bits);
        }
    }

    assert(bitnr < 32);
    assert(w < FEATURE_WORDS);
    return feature_word_info[w].feat_names[bitnr];
}
3001
/* Compatibility hack to maintain legacy +-feat semantic,
 * where +-feat overwrites any feature set by
 * feat=on|feat even if the latter is parsed after +-feat
 * (i.e. "-x2apic,x2apic=on" will result in x2apic disabled)
 */
static GList *plus_features, *minus_features;
3008
/* GCompareFunc wrapper around g_strcmp0() (NULL-safe strcmp), used with
 * g_list_find_custom() below. */
static gint compare_string(gconstpointer a, gconstpointer b)
{
    return g_strcmp0(a, b);
}
3013
/* Parse "+feature,-feature,feature=foo" CPU feature string
 *
 * Each recognized entry is turned into a global property on @typename
 * rather than being applied directly; bad numeric values are reported
 * through @errp.
 */
static void x86_cpu_parse_featurestr(const char *typename, char *features,
                                     Error **errp)
{
    char *featurestr; /* Single 'key=value" string being parsed */
    static bool cpu_globals_initialized;
    bool ambiguous = false;

    /* Globals are registered once, for the first CPU type parsed;
     * later calls are deliberate no-ops. */
    if (cpu_globals_initialized) {
        return;
    }
    cpu_globals_initialized = true;

    if (!features) {
        return;
    }

    /* NOTE(review): strtok() keeps hidden static state and mutates
     * @features in place — assumes single-threaded parsing; confirm. */
    for (featurestr = strtok(features, ",");
         featurestr;
         featurestr = strtok(NULL, ",")) {
        const char *name;
        const char *val = NULL;
        char *eq = NULL;
        char num[32];
        GlobalProperty *prop;

        /* Compatibility syntax: */
        if (featurestr[0] == '+') {
            plus_features = g_list_append(plus_features,
                                          g_strdup(featurestr + 1));
            continue;
        } else if (featurestr[0] == '-') {
            minus_features = g_list_append(minus_features,
                                           g_strdup(featurestr + 1));
            continue;
        }

        /* "feat=val" vs bare "feat" (implicit "on"). */
        eq = strchr(featurestr, '=');
        if (eq) {
            *eq++ = 0;
            val = eq;
        } else {
            val = "on";
        }

        feat2prop(featurestr);
        name = featurestr;

        /* Warn when the same feature is given both as +feat/-feat and as
         * feat=..., since the +-feat form always wins. */
        if (g_list_find_custom(plus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"+%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }
        if (g_list_find_custom(minus_features, name, compare_string)) {
            warn_report("Ambiguous CPU model string. "
                        "Don't mix both \"-%s\" and \"%s=%s\"",
                        name, name, val);
            ambiguous = true;
        }

        /* Special case: */
        if (!strcmp(name, "tsc-freq")) {
            int ret;
            uint64_t tsc_freq;

            /* Accept metric suffixes (e.g. "2.5G") and convert to a plain
             * decimal string for the "tsc-frequency" property. */
            ret = qemu_strtosz_metric(val, NULL, &tsc_freq);
            if (ret < 0 || tsc_freq > INT64_MAX) {
                error_setg(errp, "bad numerical value %s", val);
                return;
            }
            snprintf(num, sizeof(num), "%" PRId64, tsc_freq);
            val = num;
            name = "tsc-frequency";
        }

        prop = g_new0(typeof(*prop), 1);
        prop->driver = typename;
        prop->property = g_strdup(name);
        prop->value = g_strdup(val);
        prop->errp = &error_fatal;
        qdev_prop_register_global(prop);
    }

    if (ambiguous) {
        warn_report("Compatibility of ambiguous CPU model "
                    "strings won't be kept on future QEMU versions");
    }
}
3104
3105 static void x86_cpu_expand_features(X86CPU *cpu, Error **errp);
3106 static int x86_cpu_filter_features(X86CPU *cpu);
3107
/* Check for missing features that may prevent the CPU class from
 * running using the current machine and accelerator.
 *
 * Appends one strList entry per missing feature to @missing_feats.
 */
static void x86_cpu_class_check_missing_features(X86CPUClass *xcc,
                                                 strList **missing_feats)
{
    X86CPU *xc;
    FeatureWord w;
    Error *err = NULL;
    strList **next = missing_feats;

    /* Host-passthrough models cannot run without a host-CPUID accelerator;
     * report the pseudo-feature "kvm" as the missing requirement. */
    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("kvm");
        *missing_feats = new;
        return;
    }

    /* Instantiate a throwaway CPU object so features can be expanded
     * and filtered without touching any live CPU. */
    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));

    x86_cpu_expand_features(xc, &err);
    if (err) {
        /* Errors at x86_cpu_expand_features should never happen,
         * but in case it does, just report the model as not
         * runnable at all using the "type" property.
         */
        strList *new = g_new0(strList, 1);
        new->value = g_strdup("type");
        *next = new;
        next = &new->next;
    }

    x86_cpu_filter_features(xc);

    /* Every bit that filtering removed becomes one list entry. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t filtered = xc->filtered_features[w];
        int i;
        for (i = 0; i < 32; i++) {
            if (filtered & (1UL << i)) {
                strList *new = g_new0(strList, 1);
                new->value = g_strdup(x86_cpu_feature_name(w, i));
                *next = new;
                next = &new->next;
            }
        }
    }

    object_unref(OBJECT(xc));
}
3157
3158 /* Print all cpuid feature names in featureset
3159 */
3160 static void listflags(FILE *f, fprintf_function print, const char **featureset)
3161 {
3162 int bit;
3163 bool first = true;
3164
3165 for (bit = 0; bit < 32; bit++) {
3166 if (featureset[bit]) {
3167 print(f, "%s%s", first ? "" : " ", featureset[bit]);
3168 first = false;
3169 }
3170 }
3171 }
3172
3173 /* Sort alphabetically by type name, respecting X86CPUClass::ordering. */
3174 static gint x86_cpu_list_compare(gconstpointer a, gconstpointer b)
3175 {
3176 ObjectClass *class_a = (ObjectClass *)a;
3177 ObjectClass *class_b = (ObjectClass *)b;
3178 X86CPUClass *cc_a = X86_CPU_CLASS(class_a);
3179 X86CPUClass *cc_b = X86_CPU_CLASS(class_b);
3180 const char *name_a, *name_b;
3181
3182 if (cc_a->ordering != cc_b->ordering) {
3183 return cc_a->ordering - cc_b->ordering;
3184 } else {
3185 name_a = object_class_get_name(class_a);
3186 name_b = object_class_get_name(class_b);
3187 return strcmp(name_a, name_b);
3188 }
3189 }
3190
3191 static GSList *get_sorted_cpu_model_list(void)
3192 {
3193 GSList *list = object_class_get_list(TYPE_X86_CPU, false);
3194 list = g_slist_sort(list, x86_cpu_list_compare);
3195 return list;
3196 }
3197
3198 static void x86_cpu_list_entry(gpointer data, gpointer user_data)
3199 {
3200 ObjectClass *oc = data;
3201 X86CPUClass *cc = X86_CPU_CLASS(oc);
3202 CPUListState *s = user_data;
3203 char *name = x86_cpu_class_get_model_name(cc);
3204 const char *desc = cc->model_description;
3205 if (!desc && cc->cpu_def) {
3206 desc = cc->cpu_def->model_id;
3207 }
3208
3209 (*s->cpu_fprintf)(s->file, "x86 %16s %-48s\n",
3210 name, desc);
3211 g_free(name);
3212 }
3213
3214 /* list available CPU models and flags */
3215 void x86_cpu_list(FILE *f, fprintf_function cpu_fprintf)
3216 {
3217 int i;
3218 CPUListState s = {
3219 .file = f,
3220 .cpu_fprintf = cpu_fprintf,
3221 };
3222 GSList *list;
3223
3224 (*cpu_fprintf)(f, "Available CPUs:\n");
3225 list = get_sorted_cpu_model_list();
3226 g_slist_foreach(list, x86_cpu_list_entry, &s);
3227 g_slist_free(list);
3228
3229 (*cpu_fprintf)(f, "\nRecognized CPUID flags:\n");
3230 for (i = 0; i < ARRAY_SIZE(feature_word_info); i++) {
3231 FeatureWordInfo *fw = &feature_word_info[i];
3232
3233 (*cpu_fprintf)(f, " ");
3234 listflags(f, cpu_fprintf, fw->feat_names);
3235 (*cpu_fprintf)(f, "\n");
3236 }
3237 }
3238
3239 static void x86_cpu_definition_entry(gpointer data, gpointer user_data)
3240 {
3241 ObjectClass *oc = data;
3242 X86CPUClass *cc = X86_CPU_CLASS(oc);
3243 CpuDefinitionInfoList **cpu_list = user_data;
3244 CpuDefinitionInfoList *entry;
3245 CpuDefinitionInfo *info;
3246
3247 info = g_malloc0(sizeof(*info));
3248 info->name = x86_cpu_class_get_model_name(cc);
3249 x86_cpu_class_check_missing_features(cc, &info->unavailable_features);
3250 info->has_unavailable_features = true;
3251 info->q_typename = g_strdup(object_class_get_name(oc));
3252 info->migration_safe = cc->migration_safe;
3253 info->has_migration_safe = true;
3254 info->q_static = cc->static_model;
3255
3256 entry = g_malloc0(sizeof(*entry));
3257 entry->value = info;
3258 entry->next = *cpu_list;
3259 *cpu_list = entry;
3260 }
3261
3262 CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
3263 {
3264 CpuDefinitionInfoList *cpu_list = NULL;
3265 GSList *list = get_sorted_cpu_model_list();
3266 g_slist_foreach(list, x86_cpu_definition_entry, &cpu_list);
3267 g_slist_free(list);
3268 return cpu_list;
3269 }
3270
/* Return the feature bits in word @w supported by the current
 * accelerator; with @migratable_only, restrict further to flags that are
 * safe to migrate. Returns ~0 when no accelerator is recognized. */
static uint32_t x86_cpu_get_supported_feature_word(FeatureWord w,
                                                   bool migratable_only)
{
    FeatureWordInfo *wi = &feature_word_info[w];
    uint32_t r;

    if (kvm_enabled()) {
        r = kvm_arch_get_supported_cpuid(kvm_state, wi->cpuid_eax,
                                         wi->cpuid_ecx,
                                         wi->cpuid_reg);
    } else if (hvf_enabled()) {
        r = hvf_get_supported_cpuid(wi->cpuid_eax,
                                    wi->cpuid_ecx,
                                    wi->cpuid_reg);
    } else if (tcg_enabled()) {
        r = wi->tcg_features;
    } else {
        return ~0;
    }
    if (migratable_only) {
        r &= x86_cpu_get_migratable_flags(w);
    }
    return r;
}
3295
3296 static void x86_cpu_report_filtered_features(X86CPU *cpu)
3297 {
3298 FeatureWord w;
3299
3300 for (w = 0; w < FEATURE_WORDS; w++) {
3301 report_unavailable_features(w, cpu->filtered_features[w]);
3302 }
3303 }
3304
3305 static void x86_cpu_apply_props(X86CPU *cpu, PropValue *props)
3306 {
3307 PropValue *pv;
3308 for (pv = props; pv->prop; pv++) {
3309 if (!pv->value) {
3310 continue;
3311 }
3312 object_property_parse(OBJECT(cpu), pv->value, pv->prop,
3313 &error_abort);
3314 }
3315 }
3316
/* Load data from X86CPUDefinition into a X86CPU object
 */
static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
{
    CPUX86State *env = &cpu->env;
    const char *vendor;
    char host_vendor[CPUID_VENDOR_SZ + 1];
    FeatureWord w;

    /*NOTE: any property set by this function should be returned by
     * x86_cpu_static_props(), so static expansion of
     * query-cpu-model-expansion is always complete.
     */

    /* CPU models only set _minimum_ values for level/xlevel: */
    object_property_set_uint(OBJECT(cpu), def->level, "min-level", errp);
    object_property_set_uint(OBJECT(cpu), def->xlevel, "min-xlevel", errp);

    object_property_set_int(OBJECT(cpu), def->family, "family", errp);
    object_property_set_int(OBJECT(cpu), def->model, "model", errp);
    object_property_set_int(OBJECT(cpu), def->stepping, "stepping", errp);
    object_property_set_str(OBJECT(cpu), def->model_id, "model-id", errp);
    /* Feature words are copied directly, bypassing the QOM properties. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        env->features[w] = def->features[w];
    }

    /* legacy-cache defaults to 'off' if CPU model provides cache info */
    cpu->legacy_cache = !def->cache_info;

    /* Special cases not set in the X86CPUDefinition structs: */
    /* TODO: in-kernel irqchip for hvf */
    if (kvm_enabled()) {
        if (!kvm_irqchip_in_kernel()) {
            x86_cpu_change_kvm_default("x2apic", "off");
        }

        x86_cpu_apply_props(cpu, kvm_default_props);
    } else if (tcg_enabled()) {
        x86_cpu_apply_props(cpu, tcg_default_props);
    }

    /* Always advertise that we are running under a hypervisor. */
    env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR;

    /* sysenter isn't supported in compatibility mode on AMD,
     * syscall isn't supported in compatibility mode on Intel.
     * Normally we advertise the actual CPU vendor, but you can
     * override this using the 'vendor' property if you want to use
     * KVM's sysenter/syscall emulation in compatibility mode and
     * when doing cross vendor migration
     */
    vendor = def->vendor;
    if (accel_uses_host_cpuid()) {
        uint32_t ebx = 0, ecx = 0, edx = 0;
        host_cpuid(0, 0, NULL, &ebx, &ecx, &edx);
        x86_cpu_vendor_words2str(host_vendor, ebx, edx, ecx);
        vendor = host_vendor;
    }

    object_property_set_str(OBJECT(cpu), vendor, "vendor", errp);

}
3378
/* Return a QDict containing keys for all properties that can be included
 * in static expansion of CPU models. All properties set by x86_cpu_load_def()
 * must be included in the dictionary.
 */
static QDict *x86_cpu_static_props(void)
{
    FeatureWord w;
    int i;
    static const char *props[] = {
        "min-level",
        "min-xlevel",
        "family",
        "model",
        "stepping",
        "model-id",
        "vendor",
        "lmce",
        NULL,
    };
    static QDict *d;

    /* Built once and cached for the lifetime of the process. */
    if (d) {
        return d;
    }

    d = qdict_new();
    for (i = 0; props[i]; i++) {
        qdict_put_null(d, props[i]);
    }

    /* Every named feature flag is also a static property. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        FeatureWordInfo *fi = &feature_word_info[w];
        int bit;
        for (bit = 0; bit < 32; bit++) {
            if (!fi->feat_names[bit]) {
                continue;
            }
            qdict_put_null(d, fi->feat_names[bit]);
        }
    }

    return d;
}
3422
3423 /* Add an entry to @props dict, with the value for property. */
3424 static void x86_cpu_expand_prop(X86CPU *cpu, QDict *props, const char *prop)
3425 {
3426 QObject *value = object_property_get_qobject(OBJECT(cpu), prop,
3427 &error_abort);
3428
3429 qdict_put_obj(props, prop, value);
3430 }
3431
3432 /* Convert CPU model data from X86CPU object to a property dictionary
3433 * that can recreate exactly the same CPU model.
3434 */
3435 static void x86_cpu_to_dict(X86CPU *cpu, QDict *props)
3436 {
3437 QDict *sprops = x86_cpu_static_props();
3438 const QDictEntry *e;
3439
3440 for (e = qdict_first(sprops); e; e = qdict_next(sprops, e)) {
3441 const char *prop = qdict_entry_key(e);
3442 x86_cpu_expand_prop(cpu, props, prop);
3443 }
3444 }
3445
/* Convert CPU model data from X86CPU object to a property dictionary
 * that can recreate exactly the same CPU model, including every
 * writeable QOM property.
 */
static void x86_cpu_to_dict_full(X86CPU *cpu, QDict *props)
{
    ObjectPropertyIterator iter;
    ObjectProperty *prop;

    object_property_iter_init(&iter, OBJECT(cpu));
    while ((prop = object_property_iter_next(&iter))) {
        /* skip read-only or write-only properties */
        if (!prop->get || !prop->set) {
            continue;
        }

        /* "hotplugged" is the only property that is configurable
         * on the command-line but will be set differently on CPUs
         * created using "-cpu ... -smp ..." and by CPUs created
         * on the fly by x86_cpu_from_model() for querying. Skip it.
         */
        if (!strcmp(prop->name, "hotplugged")) {
            continue;
        }
        x86_cpu_expand_prop(cpu, props, prop->name);
    }
}
3473
3474 static void object_apply_props(Object *obj, QDict *props, Error **errp)
3475 {
3476 const QDictEntry *prop;
3477 Error *err = NULL;
3478
3479 for (prop = qdict_first(props); prop; prop = qdict_next(props, prop)) {
3480 object_property_set_qobject(obj, qdict_entry_value(prop),
3481 qdict_entry_key(prop), &err);
3482 if (err) {
3483 break;
3484 }
3485 }
3486
3487 error_propagate(errp, err);
3488 }
3489
/* Create X86CPU object according to model+props specification */
static X86CPU *x86_cpu_from_model(const char *model, QDict *props, Error **errp)
{
    X86CPU *xc = NULL;
    X86CPUClass *xcc;
    Error *err = NULL;

    xcc = X86_CPU_CLASS(cpu_class_by_name(TYPE_X86_CPU, model));
    if (xcc == NULL) {
        error_setg(&err, "CPU model '%s' not found", model);
        goto out;
    }

    xc = X86_CPU(object_new(object_class_get_name(OBJECT_CLASS(xcc))));
    /* Apply the caller-supplied property overrides, if any. */
    if (props) {
        object_apply_props(OBJECT(xc), props, &err);
        if (err) {
            goto out;
        }
    }

    x86_cpu_expand_features(xc, &err);
    if (err) {
        goto out;
    }

out:
    /* On any failure, release the partially-built CPU and return NULL. */
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(xc));
        xc = NULL;
    }
    return xc;
}
3524
3525 CpuModelExpansionInfo *
3526 arch_query_cpu_model_expansion(CpuModelExpansionType type,
3527 CpuModelInfo *model,
3528 Error **errp)
3529 {
3530 X86CPU *xc = NULL;
3531 Error *err = NULL;
3532 CpuModelExpansionInfo *ret = g_new0(CpuModelExpansionInfo, 1);
3533 QDict *props = NULL;
3534 const char *base_name;
3535
3536 xc = x86_cpu_from_model(model->name,
3537 model->has_props ?
3538 qobject_to(QDict, model->props) :
3539 NULL, &err);
3540 if (err) {
3541 goto out;
3542 }
3543
3544 props = qdict_new();
3545
3546 switch (type) {
3547 case CPU_MODEL_EXPANSION_TYPE_STATIC:
3548 /* Static expansion will be based on "base" only */
3549 base_name = "base";
3550 x86_cpu_to_dict(xc, props);
3551 break;
3552 case CPU_MODEL_EXPANSION_TYPE_FULL:
3553 /* As we don't return every single property, full expansion needs
3554 * to keep the original model name+props, and add extra
3555 * properties on top of that.
3556 */
3557 base_name = model->name;
3558 x86_cpu_to_dict_full(xc, props);
3559 break;
3560 default:
3561 error_setg(&err, "Unsupportted expansion type");
3562 goto out;
3563 }
3564
3565 if (!props) {
3566 props = qdict_new();
3567 }
3568 x86_cpu_to_dict(xc, props);
3569
3570 ret->model = g_new0(CpuModelInfo, 1);
3571 ret->model->name = g_strdup(base_name);
3572 ret->model->props = QOBJECT(props);
3573 ret->model->has_props = true;
3574
3575 out:
3576 object_unref(OBJECT(xc));
3577 if (err) {
3578 error_propagate(errp, err);
3579 qapi_free_CpuModelExpansionInfo(ret);
3580 ret = NULL;
3581 }
3582 return ret;
3583 }
3584
/* Return the gdb architecture name for this build; the returned string
 * is heap-allocated (g_strdup). */
static gchar *x86_gdb_arch_name(CPUState *cs)
{
#ifdef TARGET_X86_64
    return g_strdup("i386:x86-64");
#else
    return g_strdup("i386");
#endif
}
3593
3594 static void x86_cpu_cpudef_class_init(ObjectClass *oc, void *data)
3595 {
3596 X86CPUDefinition *cpudef = data;
3597 X86CPUClass *xcc = X86_CPU_CLASS(oc);
3598
3599 xcc->cpu_def = cpudef;
3600 xcc->migration_safe = true;
3601 }
3602
/* Register a QOM type for one entry of the CPU definition table. */
static void x86_register_cpudef_type(X86CPUDefinition *def)
{
    char *typename = x86_cpu_type_name(def->name);
    TypeInfo ti = {
        .name = typename,
        .parent = TYPE_X86_CPU,
        .class_init = x86_cpu_cpudef_class_init,
        .class_data = def,
    };

    /* AMD aliases are handled at runtime based on CPUID vendor, so
     * they shouldn't be set on the CPU model table.
     */
    assert(!(def->features[FEAT_8000_0001_EDX] & CPUID_EXT2_AMD_ALIASES));
    /* catch mistakes instead of silently truncating model_id when too long */
    assert(def->model_id && strlen(def->model_id) <= 48);


    type_register(&ti);
    g_free(typename);
}
3624
3625 #if !defined(CONFIG_USER_ONLY)
3626
/* Mask the APIC bit out of CPUID[1].EDX for @env. */
void cpu_clear_apic_feature(CPUX86State *env)
{
    env->features[FEAT_1_EDX] &= ~CPUID_APIC;
}
3631
3632 #endif /* !CONFIG_USER_ONLY */
3633
3634 void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
3635 uint32_t *eax, uint32_t *ebx,
3636 uint32_t *ecx, uint32_t *edx)
3637 {
3638 X86CPU *cpu = x86_env_get_cpu(env);
3639 CPUState *cs = CPU(cpu);
3640 uint32_t pkg_offset;
3641 uint32_t limit;
3642 uint32_t signature[3];
3643
3644 /* Calculate & apply limits for different index ranges */
3645 if (index >= 0xC0000000) {
3646 limit = env->cpuid_xlevel2;
3647 } else if (index >= 0x80000000) {
3648 limit = env->cpuid_xlevel;
3649 } else if (index >= 0x40000000) {
3650 limit = 0x40000001;
3651 } else {
3652 limit = env->cpuid_level;
3653 }
3654
3655 if (index > limit) {
3656 /* Intel documentation states that invalid EAX input will
3657 * return the same information as EAX=cpuid_level
3658 * (Intel SDM Vol. 2A - Instruction Set Reference - CPUID)
3659 */
3660 index = env->cpuid_level;
3661 }
3662
3663 switch(index) {
3664 case 0:
3665 *eax = env->cpuid_level;
3666 *ebx = env->cpuid_vendor1;
3667 *edx = env->cpuid_vendor2;
3668 *ecx = env->cpuid_vendor3;
3669 break;
3670 case 1:
3671 *eax = env->cpuid_version;
3672 *ebx = (cpu->apic_id << 24) |
3673 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
3674 *ecx = env->features[FEAT_1_ECX];
3675 if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
3676 *ecx |= CPUID_EXT_OSXSAVE;
3677 }
3678 *edx = env->features[FEAT_1_EDX];
3679 if (cs->nr_cores * cs->nr_threads > 1) {
3680 *ebx |= (cs->nr_cores * cs->nr_threads) << 16;
3681 *edx |= CPUID_HT;
3682 }
3683 break;
3684 case 2:
3685 /* cache info: needed for Pentium Pro compatibility */
3686 if (cpu->cache_info_passthrough) {
3687 host_cpuid(index, 0, eax, ebx, ecx, edx);
3688 break;
3689 }
3690 *eax = 1; /* Number of CPUID[EAX=2] calls required */
3691 *ebx = 0;
3692 if (!cpu->enable_l3_cache) {
3693 *ecx = 0;
3694 } else {
3695 *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
3696 }
3697 *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
3698 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
3699 (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
3700 break;
3701 case 4:
3702 /* cache info: needed for Core compatibility */
3703 if (cpu->cache_info_passthrough) {
3704 host_cpuid(index, count, eax, ebx, ecx, edx);
3705 /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */
3706 *eax &= ~0xFC000000;
3707 if ((*eax & 31) && cs->nr_cores > 1) {
3708 *eax |= (cs->nr_cores - 1) << 26;
3709 }
3710 } else {
3711 *eax = 0;
3712 switch (count) {
3713 case 0: /* L1 dcache info */
3714 encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
3715 1, cs->nr_cores,
3716 eax, ebx, ecx, edx);
3717 break;
3718 case 1: /* L1 icache info */
3719 encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
3720 1, cs->nr_cores,
3721 eax, ebx, ecx, edx);
3722 break;
3723 case 2: /* L2 cache info */
3724 encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
3725 cs->nr_threads, cs->nr_cores,
3726 eax, ebx, ecx, edx);
3727 break;
3728 case 3: /* L3 cache info */
3729 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3730 if (cpu->enable_l3_cache) {
3731 encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
3732 (1 << pkg_offset), cs->nr_cores,
3733 eax, ebx, ecx, edx);
3734 break;
3735 }
3736 /* fall through */
3737 default: /* end of info */
3738 *eax = *ebx = *ecx = *edx = 0;
3739 break;
3740 }
3741 }
3742 break;
3743 case 5:
3744 /* mwait info: needed for Core compatibility */
3745 *eax = 0; /* Smallest monitor-line size in bytes */
3746 *ebx = 0; /* Largest monitor-line size in bytes */
3747 *ecx = CPUID_MWAIT_EMX | CPUID_MWAIT_IBE;
3748 *edx = 0;
3749 break;
3750 case 6:
3751 /* Thermal and Power Leaf */
3752 *eax = env->features[FEAT_6_EAX];
3753 *ebx = 0;
3754 *ecx = 0;
3755 *edx = 0;
3756 break;
3757 case 7:
3758 /* Structured Extended Feature Flags Enumeration Leaf */
3759 if (count == 0) {
3760 *eax = 0; /* Maximum ECX value for sub-leaves */
3761 *ebx = env->features[FEAT_7_0_EBX]; /* Feature flags */
3762 *ecx = env->features[FEAT_7_0_ECX]; /* Feature flags */
3763 if ((*ecx & CPUID_7_0_ECX_PKU) && env->cr[4] & CR4_PKE_MASK) {
3764 *ecx |= CPUID_7_0_ECX_OSPKE;
3765 }
3766 *edx = env->features[FEAT_7_0_EDX]; /* Feature flags */
3767 } else {
3768 *eax = 0;
3769 *ebx = 0;
3770 *ecx = 0;
3771 *edx = 0;
3772 }
3773 break;
3774 case 9:
3775 /* Direct Cache Access Information Leaf */
3776 *eax = 0; /* Bits 0-31 in DCA_CAP MSR */
3777 *ebx = 0;
3778 *ecx = 0;
3779 *edx = 0;
3780 break;
3781 case 0xA:
3782 /* Architectural Performance Monitoring Leaf */
3783 if (kvm_enabled() && cpu->enable_pmu) {
3784 KVMState *s = cs->kvm_state;
3785
3786 *eax = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EAX);
3787 *ebx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EBX);
3788 *ecx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_ECX);
3789 *edx = kvm_arch_get_supported_cpuid(s, 0xA, count, R_EDX);
3790 } else if (hvf_enabled() && cpu->enable_pmu) {
3791 *eax = hvf_get_supported_cpuid(0xA, count, R_EAX);
3792 *ebx = hvf_get_supported_cpuid(0xA, count, R_EBX);
3793 *ecx = hvf_get_supported_cpuid(0xA, count, R_ECX);
3794 *edx = hvf_get_supported_cpuid(0xA, count, R_EDX);
3795 } else {
3796 *eax = 0;
3797 *ebx = 0;
3798 *ecx = 0;
3799 *edx = 0;
3800 }
3801 break;
3802 case 0xB:
3803 /* Extended Topology Enumeration Leaf */
3804 if (!cpu->enable_cpuid_0xb) {
3805 *eax = *ebx = *ecx = *edx = 0;
3806 break;
3807 }
3808
3809 *ecx = count & 0xff;
3810 *edx = cpu->apic_id;
3811
3812 switch (count) {
3813 case 0:
3814 *eax = apicid_core_offset(cs->nr_cores, cs->nr_threads);
3815 *ebx = cs->nr_threads;
3816 *ecx |= CPUID_TOPOLOGY_LEVEL_SMT;
3817 break;
3818 case 1:
3819 *eax = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
3820 *ebx = cs->nr_cores * cs->nr_threads;
3821 *ecx |= CPUID_TOPOLOGY_LEVEL_CORE;
3822 break;
3823 default:
3824 *eax = 0;
3825 *ebx = 0;
3826 *ecx |= CPUID_TOPOLOGY_LEVEL_INVALID;
3827 }
3828
3829 assert(!(*eax & ~0x1f));
3830 *ebx &= 0xffff; /* The count doesn't need to be reliable. */
3831 break;
3832 case 0xD: {
3833 /* Processor Extended State */
3834 *eax = 0;
3835 *ebx = 0;
3836 *ecx = 0;
3837 *edx = 0;
3838 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
3839 break;
3840 }
3841
3842 if (count == 0) {
3843 *ecx = xsave_area_size(x86_cpu_xsave_components(cpu));
3844 *eax = env->features[FEAT_XSAVE_COMP_LO];
3845 *edx = env->features[FEAT_XSAVE_COMP_HI];
3846 *ebx = *ecx;
3847 } else if (count == 1) {
3848 *eax = env->features[FEAT_XSAVE];
3849 } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
3850 if ((x86_cpu_xsave_components(cpu) >> count) & 1) {
3851 const ExtSaveArea *esa = &x86_ext_save_areas[count];
3852 *eax = esa->size;
3853 *ebx = esa->offset;
3854 }
3855 }
3856 break;
3857 }
3858 case 0x14: {
3859 /* Intel Processor Trace Enumeration */
3860 *eax = 0;
3861 *ebx = 0;
3862 *ecx = 0;
3863 *edx = 0;
3864 if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
3865 !kvm_enabled()) {
3866 break;
3867 }
3868
3869 if (count == 0) {
3870 *eax = INTEL_PT_MAX_SUBLEAF;
3871 *ebx = INTEL_PT_MINIMAL_EBX;
3872 *ecx = INTEL_PT_MINIMAL_ECX;
3873 } else if (count == 1) {
3874 *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
3875 *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
3876 }
3877 break;
3878 }
3879 case 0x40000000:
3880 /*
3881 * CPUID code in kvm_arch_init_vcpu() ignores stuff
3882 * set here, but we restrict to TCG none the less.
3883 */
3884 if (tcg_enabled() && cpu->expose_tcg) {
3885 memcpy(signature, "TCGTCGTCGTCG", 12);
3886 *eax = 0x40000001;
3887 *ebx = signature[0];
3888 *ecx = signature[1];
3889 *edx = signature[2];
3890 } else {
3891 *eax = 0;
3892 *ebx = 0;
3893 *ecx = 0;
3894 *edx = 0;
3895 }
3896 break;
3897 case 0x40000001:
3898 *eax = 0;
3899 *ebx = 0;
3900 *ecx = 0;
3901 *edx = 0;
3902 break;
3903 case 0x80000000:
3904 *eax = env->cpuid_xlevel;
3905 *ebx = env->cpuid_vendor1;
3906 *edx = env->cpuid_vendor2;
3907 *ecx = env->cpuid_vendor3;
3908 break;
3909 case 0x80000001:
3910 *eax = env->cpuid_version;
3911 *ebx = 0;
3912 *ecx = env->features[FEAT_8000_0001_ECX];
3913 *edx = env->features[FEAT_8000_0001_EDX];
3914
3915 /* The Linux kernel checks for the CMPLegacy bit and
3916 * discards multiple thread information if it is set.
3917 * So don't set it here for Intel to make Linux guests happy.
3918 */
3919 if (cs->nr_cores * cs->nr_threads > 1) {
3920 if (env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1 ||
3921 env->cpuid_vendor2 != CPUID_VENDOR_INTEL_2 ||
3922 env->cpuid_vendor3 != CPUID_VENDOR_INTEL_3) {
3923 *ecx |= 1 << 1; /* CmpLegacy bit */
3924 }
3925 }
3926 break;
3927 case 0x80000002:
3928 case 0x80000003:
3929 case 0x80000004:
3930 *eax = env->cpuid_model[(index - 0x80000002) * 4 + 0];
3931 *ebx = env->cpuid_model[(index - 0x80000002) * 4 + 1];
3932 *ecx = env->cpuid_model[(index - 0x80000002) * 4 + 2];
3933 *edx = env->cpuid_model[(index - 0x80000002) * 4 + 3];
3934 break;
3935 case 0x80000005:
3936 /* cache info (L1 cache) */
3937 if (cpu->cache_info_passthrough) {
3938 host_cpuid(index, 0, eax, ebx, ecx, edx);
3939 break;
3940 }
3941 *eax = (L1_DTLB_2M_ASSOC << 24) | (L1_DTLB_2M_ENTRIES << 16) | \
3942 (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
3943 *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
3944 (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
3945 *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
3946 *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
3947 break;
3948 case 0x80000006:
3949 /* cache info (L2 cache) */
3950 if (cpu->cache_info_passthrough) {
3951 host_cpuid(index, 0, eax, ebx, ecx, edx);
3952 break;
3953 }
3954 *eax = (AMD_ENC_ASSOC(L2_DTLB_2M_ASSOC) << 28) | \
3955 (L2_DTLB_2M_ENTRIES << 16) | \
3956 (AMD_ENC_ASSOC(L2_ITLB_2M_ASSOC) << 12) | \
3957 (L2_ITLB_2M_ENTRIES);
3958 *ebx = (AMD_ENC_ASSOC(L2_DTLB_4K_ASSOC) << 28) | \
3959 (L2_DTLB_4K_ENTRIES << 16) | \
3960 (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
3961 (L2_ITLB_4K_ENTRIES);
3962 encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
3963 cpu->enable_l3_cache ?
3964 env->cache_info_amd.l3_cache : NULL,
3965 ecx, edx);
3966 break;
3967 case 0x80000007:
3968 *eax = 0;
3969 *ebx = 0;
3970 *ecx = 0;
3971 *edx = env->features[FEAT_8000_0007_EDX];
3972 break;
3973 case 0x80000008:
3974 /* virtual & phys address size in low 2 bytes. */
3975 if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
3976 /* 64 bit processor */
3977 *eax = cpu->phys_bits; /* configurable physical bits */
3978 if (env->features[FEAT_7_0_ECX] & CPUID_7_0_ECX_LA57) {
3979 *eax |= 0x00003900; /* 57 bits virtual */
3980 } else {
3981 *eax |= 0x00003000; /* 48 bits virtual */
3982 }
3983 } else {
3984 *eax = cpu->phys_bits;
3985 }
3986 *ebx = env->features[FEAT_8000_0008_EBX];
3987 *ecx = 0;
3988 *edx = 0;
3989 if (cs->nr_cores * cs->nr_threads > 1) {
3990 *ecx |= (cs->nr_cores * cs->nr_threads) - 1;
3991 }
3992 break;
3993 case 0x8000000A:
3994 if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
3995 *eax = 0x00000001; /* SVM Revision */
3996 *ebx = 0x00000010; /* nr of ASIDs */
3997 *ecx = 0;
3998 *edx = env->features[FEAT_SVM]; /* optional features */
3999 } else {
4000 *eax = 0;
4001 *ebx = 0;
4002 *ecx = 0;
4003 *edx = 0;
4004 }
4005 break;
4006 case 0xC0000000:
4007 *eax = env->cpuid_xlevel2;
4008 *ebx = 0;
4009 *ecx = 0;
4010 *edx = 0;
4011 break;
4012 case 0xC0000001:
4013 /* Support for VIA CPU's CPUID instruction */
4014 *eax = env->cpuid_version;
4015 *ebx = 0;
4016 *ecx = 0;
4017 *edx = env->features[FEAT_C000_0001_EDX];
4018 break;
4019 case 0xC0000002:
4020 case 0xC0000003:
4021 case 0xC0000004:
4022 /* Reserved for the future, and now filled with zero */
4023 *eax = 0;
4024 *ebx = 0;
4025 *ecx = 0;
4026 *edx = 0;
4027 break;
4028 case 0x8000001F:
4029 *eax = sev_enabled() ? 0x2 : 0;
4030 *ebx = sev_get_cbit_position();
4031 *ebx |= sev_get_reduced_phys_bits() << 6;
4032 *ecx = 0;
4033 *edx = 0;
4034 break;
4035 default:
4036 /* reserved values: zero */
4037 *eax = 0;
4038 *ebx = 0;
4039 *ecx = 0;
4040 *edx = 0;
4041 break;
4042 }
4043 }
4044
/* CPUClass::reset()
 *
 * Bring the vCPU back to the architectural power-on / RESET state:
 * zero the volatile part of CPUX86State, then rebuild the documented
 * reset values for segments, control registers, FPU/SSE state, MSRs,
 * debug registers and MTRRs.  Configuration fields located after
 * end_reset_fields survive the reset.
 */
static void x86_cpu_reset(CPUState *s)
{
    X86CPU *cpu = X86_CPU(s);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
    CPUX86State *env = &cpu->env;
    target_ulong cr4;
    uint64_t xcr0;
    int i;

    xcc->parent_reset(s);

    /* Only the fields up to the end_reset_fields marker are cleared;
     * everything after it is configuration that must persist. */
    memset(env, 0, offsetof(CPUX86State, end_reset_fields));

    env->old_exception = -1;

    /* init to reset state */

    env->hflags2 |= HF2_GIF_MASK;

    /* CR0 power-on value: ET/CD/NW set, paging and protection disabled. */
    cpu_x86_update_cr0(env, 0x60000010);
    env->a20_mask = ~0x0;
    env->smbase = 0x30000;
    env->msr_smi_count = 0;

    env->idt.limit = 0xffff;
    env->gdt.limit = 0xffff;
    env->ldt.limit = 0xffff;
    env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT);
    env->tr.limit = 0xffff;
    env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT);

    /* Real-mode segment setup.  CS base 0xffff0000 with EIP 0xfff0 (set
     * below) makes the first fetch hit the reset vector at 0xfffffff0. */
    cpu_x86_load_seg_cache(env, R_CS, 0xf000, 0xffff0000, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_CS_MASK |
                           DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK);

    env->eip = 0xfff0;
    /* EDX holds the CPU signature (family/model/stepping) after reset. */
    env->regs[R_EDX] = env->cpuid_version;

    env->eflags = 0x2;

    /* FPU init: all tag bits set (registers empty), default control word. */
    for (i = 0; i < 8; i++) {
        env->fptags[i] = 1;
    }
    cpu_set_fpuc(env, 0x37f);

    env->mxcsr = 0x1f80;
    /* All units are in INIT state. */
    env->xstate_bv = 0;

    env->pat = 0x0007040600070406ULL;
    env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;

    memset(env->dr, 0, sizeof(env->dr));
    env->dr[6] = DR6_FIXED_1;
    env->dr[7] = DR7_FIXED_1;
    cpu_breakpoint_remove_all(s, BP_CPU);
    cpu_watchpoint_remove_all(s, BP_CPU);

    cr4 = 0;
    xcr0 = XSTATE_FP_MASK;

#ifdef CONFIG_USER_ONLY
    /* Enable all the features for user-mode. */
    if (env->features[FEAT_1_EDX] & CPUID_SSE) {
        xcr0 |= XSTATE_SSE_MASK;
    }
    for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
        const ExtSaveArea *esa = &x86_ext_save_areas[i];
        if (env->features[esa->feature] & esa->bits) {
            xcr0 |= 1ull << i;
        }
    }

    if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
        cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
    }
    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
        cr4 |= CR4_FSGSBASE_MASK;
    }
#endif

    env->xcr0 = xcr0;
    cpu_x86_update_cr4(env, cr4);

    /*
     * SDM 11.11.5 requires:
     *  - IA32_MTRR_DEF_TYPE MSR.E = 0
     *  - IA32_MTRR_PHYSMASKn.V = 0
     * All other bits are undefined.  For simplification, zero it all.
     */
    env->mtrr_deftype = 0;
    memset(env->mtrr_var, 0, sizeof(env->mtrr_var));
    memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));

    env->interrupt_injected = -1;
    env->exception_injected = -1;
    env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
    /* We hard-wire the BSP to the first CPU. */
    apic_designate_bsp(cpu->apic_state, s->cpu_index == 0);

    /* Application processors start halted, waiting for INIT/SIPI. */
    s->halted = !cpu_is_bsp(cpu);

    if (kvm_enabled()) {
        kvm_arch_reset_vcpu(cpu);
    }
    else if (hvf_enabled()) {
        hvf_reset_vcpu(s);
    }
#endif
}
4173
4174 #ifndef CONFIG_USER_ONLY
4175 bool cpu_is_bsp(X86CPU *cpu)
4176 {
4177 return cpu_get_apic_base(cpu->apic_state) & MSR_IA32_APICBASE_BSP;
4178 }
4179
4180 /* TODO: remove me, when reset over QOM tree is implemented */
4181 static void x86_cpu_machine_reset_cb(void *opaque)
4182 {
4183 X86CPU *cpu = opaque;
4184 cpu_reset(CPU(cpu));
4185 }
4186 #endif
4187
4188 static void mce_init(X86CPU *cpu)
4189 {
4190 CPUX86State *cenv = &cpu->env;
4191 unsigned int bank;
4192
4193 if (((cenv->cpuid_version >> 8) & 0xf) >= 6
4194 && (cenv->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
4195 (CPUID_MCE | CPUID_MCA)) {
4196 cenv->mcg_cap = MCE_CAP_DEF | MCE_BANKS_DEF |
4197 (cpu->enable_lmce ? MCG_LMCE_P : 0);
4198 cenv->mcg_ctl = ~(uint64_t)0;
4199 for (bank = 0; bank < MCE_BANKS_DEF; bank++) {
4200 cenv->mce_banks[bank * 4] = ~(uint64_t)0;
4201 }
4202 }
4203 }
4204
4205 #ifndef CONFIG_USER_ONLY
4206 APICCommonClass *apic_get_class(void)
4207 {
4208 const char *apic_type = "apic";
4209
4210 /* TODO: in-kernel irqchip for hvf */
4211 if (kvm_apic_in_kernel()) {
4212 apic_type = "kvm-apic";
4213 } else if (xen_enabled()) {
4214 apic_type = "xen-apic";
4215 }
4216
4217 return APIC_COMMON_CLASS(object_class_by_name(apic_type));
4218 }
4219
/* Create the local APIC device for @cpu and attach it as the "lapic"
 * QOM child.  The concrete APIC type (TCG/KVM/Xen) comes from
 * apic_get_class().
 */
static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    ObjectClass *apic_class = OBJECT_CLASS(apic_get_class());

    cpu->apic_state = DEVICE(object_new(object_class_get_name(apic_class)));

    object_property_add_child(OBJECT(cpu), "lapic",
                              OBJECT(cpu->apic_state), &error_abort);
    /* Drop the reference taken by object_new(); the child property above
     * now holds the only long-lived reference. */
    object_unref(OBJECT(cpu->apic_state));

    qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id);
    /* TODO: convert to link<> */
    apic = APIC_COMMON(cpu->apic_state);
    apic->cpu = cpu;
    apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE;
}
4237
/* Realize the local APIC (if one was created) and map its MMIO window.
 * The MMIO region is system-wide, so only the first CPU to get here
 * performs the mapping (guarded by the function-local static flag).
 */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
    APICCommonState *apic;
    static bool apic_mmio_map_once;

    /* No APIC was created (e.g. CPUID_APIC off and a single CPU). */
    if (cpu->apic_state == NULL) {
        return;
    }
    object_property_set_bool(OBJECT(cpu->apic_state), true, "realized",
                             errp);

    /* Map APIC MMIO area */
    apic = APIC_COMMON(cpu->apic_state);
    if (!apic_mmio_map_once) {
        memory_region_add_subregion_overlap(get_system_memory(),
                                            apic->apicbase &
                                            MSR_IA32_APICBASE_BASE,
                                            &apic->io_memory,
                                            0x1000);
        apic_mmio_map_once = true;
    }
}
4260
4261 static void x86_cpu_machine_done(Notifier *n, void *unused)
4262 {
4263 X86CPU *cpu = container_of(n, X86CPU, machine_done);
4264 MemoryRegion *smram =
4265 (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
4266
4267 if (smram) {
4268 cpu->smram = g_new(MemoryRegion, 1);
4269 memory_region_init_alias(cpu->smram, OBJECT(cpu), "smram",
4270 smram, 0, 1ull << 32);
4271 memory_region_set_enabled(cpu->smram, true);
4272 memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->smram, 1);
4273 }
4274 }
4275 #else
/* User-mode emulation has no local APIC; nothing to realize. */
static void x86_cpu_apic_realize(X86CPU *cpu, Error **errp)
{
}
4279 #endif
4280
/* Note: Only safe for use on x86(-64) hosts */
static uint32_t x86_host_phys_bits(void)
{
    uint32_t eax;

    host_cpuid(0x80000000, 0, &eax, NULL, NULL, NULL);
    if (eax < 0x80000008) {
        /* It's an odd 64 bit machine that doesn't have the leaf for
         * physical address bits; fall back to 36 that's most older
         * Intel.
         */
        return 36;
    }

    host_cpuid(0x80000008, 0, &eax, NULL, NULL, NULL);
    /* Note: According to AMD doc 25481 rev 2.34 they have a field
     * at 23:16 that can specify a maximum physical address bits for
     * the guest that can override this value; but I've not seen
     * anything with that set.
     */
    return eax & 0xff;
}
4306
4307 static void x86_cpu_adjust_level(X86CPU *cpu, uint32_t *min, uint32_t value)
4308 {
4309 if (*min < value) {
4310 *min = value;
4311 }
4312 }
4313
4314 /* Increase cpuid_min_{level,xlevel,xlevel2} automatically, if appropriate */
4315 static void x86_cpu_adjust_feat_level(X86CPU *cpu, FeatureWord w)
4316 {
4317 CPUX86State *env = &cpu->env;
4318 FeatureWordInfo *fi = &feature_word_info[w];
4319 uint32_t eax = fi->cpuid_eax;
4320 uint32_t region = eax & 0xF0000000;
4321
4322 if (!env->features[w]) {
4323 return;
4324 }
4325
4326 switch (region) {
4327 case 0x00000000:
4328 x86_cpu_adjust_level(cpu, &env->cpuid_min_level, eax);
4329 break;
4330 case 0x80000000:
4331 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, eax);
4332 break;
4333 case 0xC0000000:
4334 x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel2, eax);
4335 break;
4336 }
4337 }
4338
4339 /* Calculate XSAVE components based on the configured CPU feature flags */
4340 static void x86_cpu_enable_xsave_components(X86CPU *cpu)
4341 {
4342 CPUX86State *env = &cpu->env;
4343 int i;
4344 uint64_t mask;
4345
4346 if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
4347 return;
4348 }
4349
4350 mask = 0;
4351 for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
4352 const ExtSaveArea *esa = &x86_ext_save_areas[i];
4353 if (env->features[esa->feature] & esa->bits) {
4354 mask |= (1ULL << i);
4355 }
4356 }
4357
4358 env->features[FEAT_XSAVE_COMP_LO] = mask;
4359 env->features[FEAT_XSAVE_COMP_HI] = mask >> 32;
4360 }
4361
4362 /***** Steps involved on loading and filtering CPUID data
4363 *
4364 * When initializing and realizing a CPU object, the steps
4365 * involved in setting up CPUID data are:
4366 *
4367 * 1) Loading CPU model definition (X86CPUDefinition). This is
4368 * implemented by x86_cpu_load_def() and should be completely
4369 * transparent, as it is done automatically by instance_init.
4370 * No code should need to look at X86CPUDefinition structs
4371 * outside instance_init.
4372 *
4373 * 2) CPU expansion. This is done by realize before CPUID
4374 * filtering, and will make sure host/accelerator data is
4375 * loaded for CPU models that depend on host capabilities
4376 * (e.g. "host"). Done by x86_cpu_expand_features().
4377 *
4378 * 3) CPUID filtering. This initializes extra data related to
4379 * CPUID, and checks if the host supports all capabilities
4380 * required by the CPU. Runnability of a CPU model is
4381 * determined at this step. Done by x86_cpu_filter_features().
4382 *
4383 * Some operations don't require all steps to be performed.
4384 * More precisely:
4385 *
4386 * - CPU instance creation (instance_init) will run only CPU
4387 * model loading. CPU expansion can't run at instance_init-time
4388 * because host/accelerator data may be not available yet.
4389 * - CPU realization will perform both CPU model expansion and CPUID
4390 * filtering, and return an error in case one of them fails.
4391 * - query-cpu-definitions needs to run all 3 steps. It needs
4392 * to run CPUID filtering, as the 'unavailable-features'
4393 * field is set based on the filtering results.
4394 * - The query-cpu-model-expansion QMP command only needs to run
4395 * CPU model loading and CPU expansion. It should not filter
4396 * any CPUID data based on host capabilities.
4397 */
4398
/* Expand CPU configuration data, based on configured features
 * and host/accelerator capabilities when appropriate.
 *
 * Runs at realize time, before CPUID filtering.  On failure, sets *errp
 * and returns with the CPU in an unspecified (but unrealized) state.
 */
static void x86_cpu_expand_features(X86CPU *cpu, Error **errp)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    GList *l;
    Error *local_err = NULL;

    /*TODO: Now cpu->max_features doesn't overwrite features
     * set using QOM properties, and we can convert
     * plus_features & minus_features to global properties
     * inside x86_cpu_parse_featurestr() too.
     */
    if (cpu->max_features) {
        for (w = 0; w < FEATURE_WORDS; w++) {
            /* Override only features that weren't set explicitly
             * by the user.
             */
            env->features[w] |=
                x86_cpu_get_supported_feature_word(w, cpu->migratable) &
                ~env->user_features[w] & \
                ~feature_word_info[w].no_autoenable_flags;
        }
    }

    /* Legacy +feature/-feature command-line flags, applied as QOM
     * property writes so they go through the normal setters. */
    for (l = plus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), true, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    for (l = minus_features; l; l = l->next) {
        const char *prop = l->data;
        object_property_set_bool(OBJECT(cpu), false, prop, &local_err);
        if (local_err) {
            goto out;
        }
    }

    /* KVM paravirt features are only meaningful under KVM with
     * expose_kvm enabled; clear the word otherwise. */
    if (!kvm_enabled() || !cpu->expose_kvm) {
        env->features[FEAT_KVM] = 0;
    }

    /* Must run after all feature words are final, as the component
     * bitmap is derived from them. */
    x86_cpu_enable_xsave_components(cpu);

    /* CPUID[EAX=7,ECX=0].EBX always increased level automatically: */
    x86_cpu_adjust_feat_level(cpu, FEAT_7_0_EBX);
    if (cpu->full_cpuid_auto_level) {
        x86_cpu_adjust_feat_level(cpu, FEAT_1_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_1_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX);
        x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_8000_0008_EBX);
        x86_cpu_adjust_feat_level(cpu, FEAT_C000_0001_EDX);
        x86_cpu_adjust_feat_level(cpu, FEAT_SVM);
        x86_cpu_adjust_feat_level(cpu, FEAT_XSAVE);
        /* SVM requires CPUID[0x8000000A] */
        if (env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000000A);
        }

        /* SEV requires CPUID[0x8000001F] */
        if (sev_enabled()) {
            x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F);
        }
    }

    /* Set cpuid_*level* based on cpuid_min_*level, if not explicitly set */
    if (env->cpuid_level == UINT32_MAX) {
        env->cpuid_level = env->cpuid_min_level;
    }
    if (env->cpuid_xlevel == UINT32_MAX) {
        env->cpuid_xlevel = env->cpuid_min_xlevel;
    }
    if (env->cpuid_xlevel2 == UINT32_MAX) {
        env->cpuid_xlevel2 = env->cpuid_min_xlevel2;
    }

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
    }
}
4489
/*
 * Finishes initialization of CPUID data, filters CPU feature
 * words based on host availability of each feature.
 *
 * Returns: 0 if all flags are supported by the host, non-zero otherwise.
 */
static int x86_cpu_filter_features(X86CPU *cpu)
{
    CPUX86State *env = &cpu->env;
    FeatureWord w;
    int rv = 0;

    /* Drop every requested bit the accelerator can't provide, recording
     * the dropped bits in cpu->filtered_features for later reporting. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        uint32_t host_feat =
            x86_cpu_get_supported_feature_word(w, false);
        uint32_t requested_features = env->features[w];
        env->features[w] &= host_feat;
        cpu->filtered_features[w] = requested_features & ~env->features[w];
        if (cpu->filtered_features[w]) {
            rv = 1;
        }
    }

    /* Intel PT needs an extra check: its CPUID leaf 0x14 capabilities are
     * not configurable, so the host must provide at least the minimal set
     * we advertise (and must not use LIP-format trace IPs). */
    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
        kvm_enabled()) {
        KVMState *s = CPU(cpu)->kvm_state;
        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);

        if (!eax_0 ||
            ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
            ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
            ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
            ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
             INTEL_PT_ADDR_RANGES_NUM) ||
            ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
             (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) ||
            (ecx_0 & INTEL_PT_IP_LIP)) {
            /*
             * Processor Trace capabilities aren't configurable, so if the
             * host can't emulate the capabilities we report on
             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
             */
            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
            rv = 1;
        }
    }

    return rv;
}
4544
4545 #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
4546 (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
4547 (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
4548 #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
4549 (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
4550 (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
/* DeviceClass::realize for x86 CPUs.
 *
 * Ties the whole CPU bring-up sequence together: feature expansion,
 * host-capability filtering, physical-address-width selection, cache
 * topology setup, APIC creation/realization, MCE init and the TCG SMM
 * address space.  Order is significant throughout — see the inline
 * comments.  Errors are reported via @errp.
 */
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    X86CPU *cpu = X86_CPU(dev);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
    CPUX86State *env = &cpu->env;
    Error *local_err = NULL;
    static bool ht_warned;

    /* Models like "host" only make sense with a hardware accelerator. */
    if (xcc->host_cpuid_required && !accel_uses_host_cpuid()) {
        char *name = x86_cpu_class_get_model_name(xcc);
        error_setg(&local_err, "CPU model '%s' requires KVM", name);
        g_free(name);
        goto out;
    }

    /* NOTE(review): this branch reports through errp directly while the
     * rest of the function funnels through local_err/goto out — the
     * behavior is the same, only the style differs. */
    if (cpu->apic_id == UNASSIGNED_APIC_ID) {
        error_setg(errp, "apic-id property was not initialized properly");
        return;
    }

    x86_cpu_expand_features(cpu, &local_err);
    if (local_err) {
        goto out;
    }

    /* Filtering must follow expansion; enforce_cpuid turns any filtered
     * feature into a hard error, check_cpuid only warns. */
    if (x86_cpu_filter_features(cpu) &&
        (cpu->check_cpuid || cpu->enforce_cpuid)) {
        x86_cpu_report_filtered_features(cpu);
        if (cpu->enforce_cpuid) {
            error_setg(&local_err,
                       accel_uses_host_cpuid() ?
                       "Host doesn't support requested features" :
                       "TCG doesn't support requested features");
            goto out;
        }
    }

    /* On AMD CPUs, some CPUID[8000_0001].EDX bits must match the bits on
     * CPUID[1].EDX.
     */
    if (IS_AMD_CPU(env)) {
        env->features[FEAT_8000_0001_EDX] &= ~CPUID_EXT2_AMD_ALIASES;
        env->features[FEAT_8000_0001_EDX] |= (env->features[FEAT_1_EDX]
           & CPUID_EXT2_AMD_ALIASES);
    }

    /* For 64bit systems think about the number of physical bits to present.
     * ideally this should be the same as the host; anything other than matching
     * the host can cause incorrect guest behaviour.
     * QEMU used to pick the magic value of 40 bits that corresponds to
     * consumer AMD devices but nothing else.
     */
    if (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) {
        if (accel_uses_host_cpuid()) {
            uint32_t host_phys_bits = x86_host_phys_bits();
            static bool warned;

            if (cpu->host_phys_bits) {
                /* The user asked for us to use the host physical bits */
                cpu->phys_bits = host_phys_bits;
            }

            /* Print a warning if the user set it to a value that's not the
             * host value.
             */
            if (cpu->phys_bits != host_phys_bits && cpu->phys_bits != 0 &&
                !warned) {
                warn_report("Host physical bits (%u)"
                            " does not match phys-bits property (%u)",
                            host_phys_bits, cpu->phys_bits);
                warned = true;
            }

            if (cpu->phys_bits &&
                (cpu->phys_bits > TARGET_PHYS_ADDR_SPACE_BITS ||
                cpu->phys_bits < 32)) {
                error_setg(errp, "phys-bits should be between 32 and %u "
                                 " (but is %u)",
                                 TARGET_PHYS_ADDR_SPACE_BITS, cpu->phys_bits);
                return;
            }
        } else {
            /* TCG emulates a fixed physical address width. */
            if (cpu->phys_bits && cpu->phys_bits != TCG_PHYS_ADDR_BITS) {
                error_setg(errp, "TCG only supports phys-bits=%u",
                                  TCG_PHYS_ADDR_BITS);
                return;
            }
        }
        /* 0 means it was not explicitly set by the user (or by machine
         * compat_props or by the host code above). In this case, the default
         * is the value used by TCG (40).
         */
        if (cpu->phys_bits == 0) {
            cpu->phys_bits = TCG_PHYS_ADDR_BITS;
        }
    } else {
        /* For 32 bit systems don't use the user set value, but keep
         * phys_bits consistent with what we tell the guest.
         */
        if (cpu->phys_bits != 0) {
            error_setg(errp, "phys-bits is not user-configurable in 32 bit");
            return;
        }

        /* PSE36 extends physical addressing to 36 bits even without LM. */
        if (env->features[FEAT_1_EDX] & CPUID_PSE36) {
            cpu->phys_bits = 36;
        } else {
            cpu->phys_bits = 32;
        }
    }

    /* Cache information initialization */
    if (!cpu->legacy_cache) {
        /* legacy-cache=off needs a CPU model that carries real cache info. */
        if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
            char *name = x86_cpu_class_get_model_name(xcc);
            error_setg(errp,
                       "CPU model '%s' doesn't support legacy-cache=off", name);
            g_free(name);
            return;
        }
        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
            *xcc->cpu_def->cache_info;
    } else {
        /* Build legacy cache information */
        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;

        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;

        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
        env->cache_info_amd.l3_cache = &legacy_l3_cache;
    }


    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

#ifndef CONFIG_USER_ONLY
    qemu_register_reset(x86_cpu_machine_reset_cb, cpu);

    /* An APIC is needed if the CPU advertises one, or on SMP regardless. */
    if (cpu->env.features[FEAT_1_EDX] & CPUID_APIC || smp_cpus > 1) {
        x86_cpu_apic_create(cpu, &local_err);
        if (local_err != NULL) {
            goto out;
        }
    }
#endif

    mce_init(cpu);

#ifndef CONFIG_USER_ONLY
    if (tcg_enabled()) {
        cpu->cpu_as_mem = g_new(MemoryRegion, 1);
        cpu->cpu_as_root = g_new(MemoryRegion, 1);

        /* Outer container... */
        memory_region_init(cpu->cpu_as_root, OBJECT(cpu), "memory", ~0ull);
        memory_region_set_enabled(cpu->cpu_as_root, true);

        /* ... with two regions inside: normal system memory with low
         * priority, and...
         */
        memory_region_init_alias(cpu->cpu_as_mem, OBJECT(cpu), "memory",
                                 get_system_memory(), 0, ~0ull);
        memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
        memory_region_set_enabled(cpu->cpu_as_mem, true);

        cs->num_ases = 2;
        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);

        /* ... SMRAM with higher priority, linked from /machine/smram. */
        cpu->machine_done.notify = x86_cpu_machine_done;
        qemu_add_machine_init_done_notifier(&cpu->machine_done);
    }
#endif

    qemu_init_vcpu(cs);

    /* Only Intel CPUs support hyperthreading. Even though QEMU fixes this
     * issue by adjusting CPUID_0000_0001_EBX and CPUID_8000_0008_ECX
     * based on inputs (sockets,cores,threads), it is still better to gives
     * users a warning.
     *
     * NOTE: the following code has to follow qemu_init_vcpu(). Otherwise
     * cs->nr_threads hasn't be populated yet and the checking is incorrect.
     */
    if (!IS_INTEL_CPU(env) && cs->nr_threads > 1 && !ht_warned) {
        error_report("AMD CPU doesn't support hyperthreading. Please configure"
                     " -smp options properly.");
        ht_warned = true;
    }

    x86_cpu_apic_realize(cpu, &local_err);
    if (local_err != NULL) {
        goto out;
    }
    cpu_reset(cs);

    xcc->parent_realize(dev, &local_err);

out:
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }
}
4769
4770 static void x86_cpu_unrealizefn(DeviceState *dev, Error **errp)
4771 {
4772 X86CPU *cpu = X86_CPU(dev);
4773 X86CPUClass *xcc = X86_CPU_GET_CLASS(dev);
4774 Error *local_err = NULL;
4775
4776 #ifndef CONFIG_USER_ONLY
4777 cpu_remove_sync(CPU(dev));
4778 qemu_unregister_reset(x86_cpu_machine_reset_cb, dev);
4779 #endif
4780
4781 if (cpu->apic_state) {
4782 object_unparent(OBJECT(cpu->apic_state));
4783 cpu->apic_state = NULL;
4784 }
4785
4786 xcc->parent_unrealize(dev, &local_err);
4787 if (local_err != NULL) {
4788 error_propagate(errp, local_err);
4789 return;
4790 }
4791 }
4792
/* Opaque state shared by the feature-bit QOM property accessors below. */
typedef struct BitProperty {
    FeatureWord w;    /* feature word the property's bit(s) live in */
    uint32_t mask;    /* bit mask within that word; may cover several bits */
} BitProperty;
4797
4798 static void x86_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name,
4799 void *opaque, Error **errp)
4800 {
4801 X86CPU *cpu = X86_CPU(obj);
4802 BitProperty *fp = opaque;
4803 uint32_t f = cpu->env.features[fp->w];
4804 bool value = (f & fp->mask) == fp->mask;
4805 visit_type_bool(v, name, &value, errp);
4806 }
4807
4808 static void x86_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name,
4809 void *opaque, Error **errp)
4810 {
4811 DeviceState *dev = DEVICE(obj);
4812 X86CPU *cpu = X86_CPU(obj);
4813 BitProperty *fp = opaque;
4814 Error *local_err = NULL;
4815 bool value;
4816
4817 if (dev->realized) {
4818 qdev_prop_set_after_realize(dev, name, errp);
4819 return;
4820 }
4821
4822 visit_type_bool(v, name, &value, &local_err);
4823 if (local_err) {
4824 error_propagate(errp, local_err);
4825 return;
4826 }
4827
4828 if (value) {
4829 cpu->env.features[fp->w] |= fp->mask;
4830 } else {
4831 cpu->env.features[fp->w] &= ~fp->mask;
4832 }
4833 cpu->env.user_features[fp->w] |= fp->mask;
4834 }
4835
4836 static void x86_cpu_release_bit_prop(Object *obj, const char *name,
4837 void *opaque)
4838 {
4839 BitProperty *prop = opaque;
4840 g_free(prop);
4841 }
4842
4843 /* Register a boolean property to get/set a single bit in a uint32_t field.
4844 *
4845 * The same property name can be registered multiple times to make it affect
4846 * multiple bits in the same FeatureWord. In that case, the getter will return
4847 * true only if all bits are set.
4848 */
4849 static void x86_cpu_register_bit_prop(X86CPU *cpu,
4850 const char *prop_name,
4851 FeatureWord w,
4852 int bitnr)
4853 {
4854 BitProperty *fp;
4855 ObjectProperty *op;
4856 uint32_t mask = (1UL << bitnr);
4857
4858 op = object_property_find(OBJECT(cpu), prop_name, NULL);
4859 if (op) {
4860 fp = op->opaque;
4861 assert(fp->w == w);
4862 fp->mask |= mask;
4863 } else {
4864 fp = g_new0(BitProperty, 1);
4865 fp->w = w;
4866 fp->mask = mask;
4867 object_property_add(OBJECT(cpu), prop_name, "bool",
4868 x86_cpu_get_bit_prop,
4869 x86_cpu_set_bit_prop,
4870 x86_cpu_release_bit_prop, fp, &error_abort);
4871 }
4872 }
4873
4874 static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
4875 FeatureWord w,
4876 int bitnr)
4877 {
4878 FeatureWordInfo *fi = &feature_word_info[w];
4879 const char *name = fi->feat_names[bitnr];
4880
4881 if (!name) {
4882 return;
4883 }
4884
4885 /* Property names should use "-" instead of "_".
4886 * Old names containing underscores are registered as aliases
4887 * using object_property_add_alias()
4888 */
4889 assert(!strchr(name, '_'));
4890 /* aliases don't use "|" delimiters anymore, they are registered
4891 * manually using object_property_add_alias() */
4892 assert(!strchr(name, '|'));
4893 x86_cpu_register_bit_prop(cpu, name, w, bitnr);
4894 }
4895
4896 static GuestPanicInformation *x86_cpu_get_crash_info(CPUState *cs)
4897 {
4898 X86CPU *cpu = X86_CPU(cs);
4899 CPUX86State *env = &cpu->env;
4900 GuestPanicInformation *panic_info = NULL;
4901
4902 if (env->features[FEAT_HYPERV_EDX] & HV_GUEST_CRASH_MSR_AVAILABLE) {
4903 panic_info = g_malloc0(sizeof(GuestPanicInformation));
4904
4905 panic_info->type = GUEST_PANIC_INFORMATION_TYPE_HYPER_V;
4906
4907 assert(HV_CRASH_PARAMS >= 5);
4908 panic_info->u.hyper_v.arg1 = env->msr_hv_crash_params[0];
4909 panic_info->u.hyper_v.arg2 = env->msr_hv_crash_params[1];
4910 panic_info->u.hyper_v.arg3 = env->msr_hv_crash_params[2];
4911 panic_info->u.hyper_v.arg4 = env->msr_hv_crash_params[3];
4912 panic_info->u.hyper_v.arg5 = env->msr_hv_crash_params[4];
4913 }
4914
4915 return panic_info;
4916 }
4917 static void x86_cpu_get_crash_info_qom(Object *obj, Visitor *v,
4918 const char *name, void *opaque,
4919 Error **errp)
4920 {
4921 CPUState *cs = CPU(obj);
4922 GuestPanicInformation *panic_info;
4923
4924 if (!cs->crash_occurred) {
4925 error_setg(errp, "No crash occured");
4926 return;
4927 }
4928
4929 panic_info = x86_cpu_get_crash_info(cs);
4930 if (panic_info == NULL) {
4931 error_setg(errp, "No crash information");
4932 return;
4933 }
4934
4935 visit_type_GuestPanicInformation(v, "crash-information", &panic_info,
4936 errp);
4937 qapi_free_GuestPanicInformation(panic_info);
4938 }
4939
/* QOM instance_init for TYPE_X86_CPU: wires cs->env_ptr, registers all
 * per-CPU QOM properties (CPUID versioning, vendor/model strings, TSC
 * frequency, feature-word introspection, crash info, one boolean per
 * named feature bit plus legacy-name aliases), and finally loads the
 * class's CPU model definition if one is attached. */
static void x86_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    X86CPU *cpu = X86_CPU(obj);
    X86CPUClass *xcc = X86_CPU_GET_CLASS(obj);
    CPUX86State *env = &cpu->env;
    FeatureWord w;

    cs->env_ptr = env;

    object_property_add(obj, "family", "int",
                        x86_cpuid_version_get_family,
                        x86_cpuid_version_set_family, NULL, NULL, NULL);
    object_property_add(obj, "model", "int",
                        x86_cpuid_version_get_model,
                        x86_cpuid_version_set_model, NULL, NULL, NULL);
    object_property_add(obj, "stepping", "int",
                        x86_cpuid_version_get_stepping,
                        x86_cpuid_version_set_stepping, NULL, NULL, NULL);
    object_property_add_str(obj, "vendor",
                            x86_cpuid_get_vendor,
                            x86_cpuid_set_vendor, NULL);
    object_property_add_str(obj, "model-id",
                            x86_cpuid_get_model_id,
                            x86_cpuid_set_model_id, NULL);
    object_property_add(obj, "tsc-frequency", "int",
                        x86_cpuid_get_tsc_freq,
                        x86_cpuid_set_tsc_freq, NULL, NULL, NULL);
    /* Read-only introspection of the enabled and filtered feature words. */
    object_property_add(obj, "feature-words", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)env->features, NULL);
    object_property_add(obj, "filtered-features", "X86CPUFeatureWordInfo",
                        x86_cpu_get_feature_words,
                        NULL, NULL, (void *)cpu->filtered_features, NULL);

    object_property_add(obj, "crash-information", "GuestPanicInformation",
                        x86_cpu_get_crash_info_qom, NULL, NULL, NULL, NULL);

    cpu->hyperv_spinlock_attempts = HYPERV_SPINLOCK_NEVER_RETRY;

    /* One boolean property per named bit of every feature word. */
    for (w = 0; w < FEATURE_WORDS; w++) {
        int bitnr;

        for (bitnr = 0; bitnr < 32; bitnr++) {
            x86_cpu_register_feature_bit_props(cpu, w, bitnr);
        }
    }

    /* Alternate spellings kept for command-line compatibility. */
    object_property_add_alias(obj, "sse3", obj, "pni", &error_abort);
    object_property_add_alias(obj, "pclmuldq", obj, "pclmulqdq", &error_abort);
    object_property_add_alias(obj, "sse4-1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4-2", obj, "sse4.2", &error_abort);
    object_property_add_alias(obj, "xd", obj, "nx", &error_abort);
    object_property_add_alias(obj, "ffxsr", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "i64", obj, "lm", &error_abort);

    /* Legacy underscore-separated names, aliased to the dashed names. */
    object_property_add_alias(obj, "ds_cpl", obj, "ds-cpl", &error_abort);
    object_property_add_alias(obj, "tsc_adjust", obj, "tsc-adjust", &error_abort);
    object_property_add_alias(obj, "fxsr_opt", obj, "fxsr-opt", &error_abort);
    object_property_add_alias(obj, "lahf_lm", obj, "lahf-lm", &error_abort);
    object_property_add_alias(obj, "cmp_legacy", obj, "cmp-legacy", &error_abort);
    object_property_add_alias(obj, "nodeid_msr", obj, "nodeid-msr", &error_abort);
    object_property_add_alias(obj, "perfctr_core", obj, "perfctr-core", &error_abort);
    object_property_add_alias(obj, "perfctr_nb", obj, "perfctr-nb", &error_abort);
    object_property_add_alias(obj, "kvm_nopiodelay", obj, "kvm-nopiodelay", &error_abort);
    object_property_add_alias(obj, "kvm_mmu", obj, "kvm-mmu", &error_abort);
    object_property_add_alias(obj, "kvm_asyncpf", obj, "kvm-asyncpf", &error_abort);
    object_property_add_alias(obj, "kvm_steal_time", obj, "kvm-steal-time", &error_abort);
    object_property_add_alias(obj, "kvm_pv_eoi", obj, "kvm-pv-eoi", &error_abort);
    object_property_add_alias(obj, "kvm_pv_unhalt", obj, "kvm-pv-unhalt", &error_abort);
    object_property_add_alias(obj, "svm_lock", obj, "svm-lock", &error_abort);
    object_property_add_alias(obj, "nrip_save", obj, "nrip-save", &error_abort);
    object_property_add_alias(obj, "tsc_scale", obj, "tsc-scale", &error_abort);
    object_property_add_alias(obj, "vmcb_clean", obj, "vmcb-clean", &error_abort);
    object_property_add_alias(obj, "pause_filter", obj, "pause-filter", &error_abort);
    object_property_add_alias(obj, "sse4_1", obj, "sse4.1", &error_abort);
    object_property_add_alias(obj, "sse4_2", obj, "sse4.2", &error_abort);

    if (xcc->cpu_def) {
        x86_cpu_load_def(cpu, xcc->cpu_def, &error_abort);
    }
}
5022
5023 static int64_t x86_cpu_get_arch_id(CPUState *cs)
5024 {
5025 X86CPU *cpu = X86_CPU(cs);
5026
5027 return cpu->apic_id;
5028 }
5029
5030 static bool x86_cpu_get_paging_enabled(const CPUState *cs)
5031 {
5032 X86CPU *cpu = X86_CPU(cs);
5033
5034 return cpu->env.cr[0] & CR0_PG_MASK;
5035 }
5036
5037 static void x86_cpu_set_pc(CPUState *cs, vaddr value)
5038 {
5039 X86CPU *cpu = X86_CPU(cs);
5040
5041 cpu->env.eip = value;
5042 }
5043
5044 static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
5045 {
5046 X86CPU *cpu = X86_CPU(cs);
5047
5048 cpu->env.eip = tb->pc - tb->cs_base;
5049 }
5050
5051 static bool x86_cpu_has_work(CPUState *cs)
5052 {
5053 X86CPU *cpu = X86_CPU(cs);
5054 CPUX86State *env = &cpu->env;
5055
5056 return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
5057 CPU_INTERRUPT_POLL)) &&
5058 (env->eflags & IF_MASK)) ||
5059 (cs->interrupt_request & (CPU_INTERRUPT_NMI |
5060 CPU_INTERRUPT_INIT |
5061 CPU_INTERRUPT_SIPI |
5062 CPU_INTERRUPT_MCE)) ||
5063 ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
5064 !(env->hflags & HF_SMM_MASK));
5065 }
5066
5067 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
5068 {
5069 X86CPU *cpu = X86_CPU(cs);
5070 CPUX86State *env = &cpu->env;
5071
5072 info->mach = (env->hflags & HF_CS64_MASK ? bfd_mach_x86_64
5073 : env->hflags & HF_CS32_MASK ? bfd_mach_i386_i386
5074 : bfd_mach_i386_i8086);
5075 info->print_insn = print_insn_i386;
5076
5077 info->cap_arch = CS_ARCH_X86;
5078 info->cap_mode = (env->hflags & HF_CS64_MASK ? CS_MODE_64
5079 : env->hflags & HF_CS32_MASK ? CS_MODE_32
5080 : CS_MODE_16);
5081 info->cap_insn_unit = 1;
5082 info->cap_insn_split = 8;
5083 }
5084
/* Recompute the derived bits of env->hflags from the authoritative CPU
 * state (CR0/CR4, EFER, EFLAGS and the cached segment descriptors).
 * Bits outside HFLAG_COPY_MASK are rebuilt; the rest are preserved. */
void x86_update_hflags(CPUX86State *env)
{
    uint32_t hflags;
#define HFLAG_COPY_MASK \
    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)

    /* Keep only the flags that are not derived below. */
    hflags = env->hflags & HFLAG_COPY_MASK;
    /* CPL comes from the DPL field of the stack segment descriptor. */
    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
    /* CR0.MP/EM/TS map onto contiguous hflags bits via a single shift. */
    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
                  (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));

    if (env->cr[4] & CR4_OSFXSR_MASK) {
        hflags |= HF_OSFXSR_MASK;
    }

    if (env->efer & MSR_EFER_LMA) {
        hflags |= HF_LMA_MASK;
    }

    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
        /* 64-bit code segment in long mode: flat 64-bit CS/SS. */
        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
    } else {
        /* CS/SS operand size follows the descriptors' B bits. */
        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
                      (DESC_B_SHIFT - HF_CS32_SHIFT);
        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
                      (DESC_B_SHIFT - HF_SS32_SHIFT);
        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
            !(hflags & HF_CS32_MASK)) {
            /* Real mode, vm86 mode, or a 16-bit code segment. */
            hflags |= HF_ADDSEG_MASK;
        } else {
            /* Segment bases only matter if any of DS/ES/SS is non-zero. */
            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
        }
    }
    env->hflags = hflags;
}
5126
/* qdev properties common to every X86CPU model. */
static Property x86_cpu_properties[] = {
#ifdef CONFIG_USER_ONLY
    /* apic_id = 0 by default for *-user, see commit 9886e834 */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, 0),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, 0),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, 0),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, 0),
#else
    /* System emulation: topology IDs start unassigned (-1). */
    DEFINE_PROP_UINT32("apic-id", X86CPU, apic_id, UNASSIGNED_APIC_ID),
    DEFINE_PROP_INT32("thread-id", X86CPU, thread_id, -1),
    DEFINE_PROP_INT32("core-id", X86CPU, core_id, -1),
    DEFINE_PROP_INT32("socket-id", X86CPU, socket_id, -1),
#endif
    DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
    { .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
    /* Hyper-V enlightenments, all off by default. */
    DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
    DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
    DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
    DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
    DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
    DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
    DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
    DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
    DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
    DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
    DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
    DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
    DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0),
    DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false),
    DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true),
    /* UINT32_MAX means "not set by the user"; see realize for resolution. */
    DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel", X86CPU, env.cpuid_xlevel, UINT32_MAX),
    DEFINE_PROP_UINT32("xlevel2", X86CPU, env.cpuid_xlevel2, UINT32_MAX),
    DEFINE_PROP_UINT32("min-level", X86CPU, env.cpuid_min_level, 0),
    DEFINE_PROP_UINT32("min-xlevel", X86CPU, env.cpuid_min_xlevel, 0),
    DEFINE_PROP_UINT32("min-xlevel2", X86CPU, env.cpuid_min_xlevel2, 0),
    DEFINE_PROP_BOOL("full-cpuid-auto-level", X86CPU, full_cpuid_auto_level, true),
    DEFINE_PROP_STRING("hv-vendor-id", X86CPU, hyperv_vendor_id),
    DEFINE_PROP_BOOL("cpuid-0xb", X86CPU, enable_cpuid_0xb, true),
    DEFINE_PROP_BOOL("lmce", X86CPU, enable_lmce, false),
    DEFINE_PROP_BOOL("l3-cache", X86CPU, enable_l3_cache, true),
    DEFINE_PROP_BOOL("kvm-no-smi-migration", X86CPU, kvm_no_smi_migration,
                     false),
    DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
    DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
    /*
     * legacy_cache defaults to true unless the CPU model provides its
     * own cache information (see x86_cpu_load_def()).
     */
    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),

    /*
     * From "Requirements for Implementing the Microsoft
     * Hypervisor Interface":
     * https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/tlfs
     *
     * "Starting with Windows Server 2012 and Windows 8, if
     * CPUID.40000005.EAX contains a value of -1, Windows assumes that
     * the hypervisor imposes no specific limit to the number of VPs.
     * In this case, Windows Server 2012 guest VMs may use more than
     * 64 VPs, up to the maximum supported number of processors applicable
     * to the specific Windows version being used."
     */
    DEFINE_PROP_INT32("x-hv-max-vps", X86CPU, hv_max_vps, -1),
    DEFINE_PROP_END_OF_LIST()
};
5196
/* Class init shared by every X86CPU subclass: installs realize/unrealize
 * and reset handlers, the qdev property list, and all CPUClass hooks. */
static void x86_cpu_common_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    /* Chain our realize/unrealize in front of the parent's. */
    device_class_set_parent_realize(dc, x86_cpu_realizefn,
                                    &xcc->parent_realize);
    device_class_set_parent_unrealize(dc, x86_cpu_unrealizefn,
                                      &xcc->parent_unrealize);
    dc->props = x86_cpu_properties;

    xcc->parent_reset = cc->reset;
    cc->reset = x86_cpu_reset;
    cc->reset_dump_flags = CPU_DUMP_FPU | CPU_DUMP_CCOP;

    cc->class_by_name = x86_cpu_class_by_name;
    cc->parse_features = x86_cpu_parse_featurestr;
    cc->has_work = x86_cpu_has_work;
#ifdef CONFIG_TCG
    cc->do_interrupt = x86_cpu_do_interrupt;
    cc->cpu_exec_interrupt = x86_cpu_exec_interrupt;
#endif
    cc->dump_state = x86_cpu_dump_state;
    cc->get_crash_info = x86_cpu_get_crash_info;
    cc->set_pc = x86_cpu_set_pc;
    cc->synchronize_from_tb = x86_cpu_synchronize_from_tb;
    cc->gdb_read_register = x86_cpu_gdb_read_register;
    cc->gdb_write_register = x86_cpu_gdb_write_register;
    cc->get_arch_id = x86_cpu_get_arch_id;
    cc->get_paging_enabled = x86_cpu_get_paging_enabled;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = x86_cpu_handle_mmu_fault;
#else
    /* System emulation only: memory mapping, ELF notes, migration state. */
    cc->asidx_from_attrs = x86_asidx_from_attrs;
    cc->get_memory_mapping = x86_cpu_get_memory_mapping;
    cc->get_phys_page_debug = x86_cpu_get_phys_page_debug;
    cc->write_elf64_note = x86_cpu_write_elf64_note;
    cc->write_elf64_qemunote = x86_cpu_write_elf64_qemunote;
    cc->write_elf32_note = x86_cpu_write_elf32_note;
    cc->write_elf32_qemunote = x86_cpu_write_elf32_qemunote;
    cc->vmsd = &vmstate_x86_cpu;
#endif
    cc->gdb_arch_name = x86_gdb_arch_name;
#ifdef TARGET_X86_64
    cc->gdb_core_xml_file = "i386-64bit.xml";
    cc->gdb_num_core_regs = 57;
#else
    cc->gdb_core_xml_file = "i386-32bit.xml";
    cc->gdb_num_core_regs = 41;
#endif
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
    cc->debug_excp_handler = breakpoint_handler;
#endif
    cc->cpu_exec_enter = x86_cpu_exec_enter;
    cc->cpu_exec_exit = x86_cpu_exec_exit;
#ifdef CONFIG_TCG
    cc->tcg_initialize = tcg_x86_init;
#endif
    cc->disas_set_info = x86_disas_set_info;

    dc->user_creatable = true;
}
5260
/* Abstract base type; concrete CPU models are registered as subclasses. */
static const TypeInfo x86_cpu_type_info = {
    .name = TYPE_X86_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(X86CPU),
    .instance_init = x86_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(X86CPUClass),
    .class_init = x86_cpu_common_class_init,
};
5270
5271
/* "base" CPU model, used by query-cpu-model-expansion */
static void x86_cpu_base_class_init(ObjectClass *oc, void *data)
{
    X86CPUClass *xcc = X86_CPU_CLASS(oc);

    /* Static and migration-safe: no features enabled by default. */
    xcc->static_model = true;
    xcc->migration_safe = true;
    xcc->model_description = "base CPU model type with no features enabled";
    /* Sort near the end of CPU model listings. */
    xcc->ordering = 8;
}
5282
/* QOM registration for the "base" CPU model. */
static const TypeInfo x86_base_cpu_type_info = {
    .name = X86_CPU_TYPE_NAME("base"),
    .parent = TYPE_X86_CPU,
    .class_init = x86_cpu_base_class_init,
};
5288
5289 static void x86_cpu_register_types(void)
5290 {
5291 int i;
5292
5293 type_register_static(&x86_cpu_type_info);
5294 for (i = 0; i < ARRAY_SIZE(builtin_x86_defs); i++) {
5295 x86_register_cpudef_type(&builtin_x86_defs[i]);
5296 }
5297 type_register_static(&max_x86_cpu_type_info);
5298 type_register_static(&x86_base_cpu_type_info);
5299 #if defined(CONFIG_KVM) || defined(CONFIG_HVF)
5300 type_register_static(&host_x86_cpu_type_info);
5301 #endif
5302 }
5303
5304 type_init(x86_cpu_register_types)